Diffstat (limited to 'arch/arm/kernel')

 arch/arm/kernel/entry-common.S | 16
 arch/arm/kernel/head-nommu.S   |  2
 arch/arm/kernel/process.c      |  4
 arch/arm/kernel/ptrace.c       | 43
 arch/arm/kernel/sched_clock.c  | 18
 arch/arm/kernel/smp.c          |  7
 arch/arm/kernel/smp_twd.c      | 54
 arch/arm/kernel/vmlinux.lds.S  | 19

 8 files changed, 95 insertions(+), 68 deletions(-)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 34711757ba59..804153c0a9cf 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -417,16 +417,6 @@ local_restart:
 	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
 
-#ifdef CONFIG_SECCOMP
-	tst	r10, #_TIF_SECCOMP
-	beq	1f
-	mov	r0, scno
-	bl	__secure_computing
-	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
-	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
-1:
-#endif
-
 	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
 	bne	__sys_trace
 
@@ -458,11 +448,13 @@ __sys_trace:
 	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
 	stmccia	sp, {r4, r5}			@ and update the stack args
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
-	b	2b
+	cmp	scno, #-1			@ skip the syscall?
+	bne	2b
+	add	sp, sp, #S_OFF			@ restore stack
+	b	ret_slow_syscall
 
 __sys_trace_return:
 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
-	mov	r1, scno
 	mov	r0, sp
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
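
Note: the seccomp check moves out of assembly and into syscall_trace_enter()
(see the ptrace.c hunk below), which can now return -1 to skip a syscall
entirely; the new `cmp scno, #-1` in __sys_trace honours that. A minimal
userspace sketch of the skip mechanism via the ARM-specific PTRACE_SET_SYSCALL
request — the tracer loop around it is assumed, not shown:

    #include <sys/types.h>
    #include <sys/ptrace.h>

    #ifndef PTRACE_SET_SYSCALL
    #define PTRACE_SET_SYSCALL 23   /* ARM-specific ptrace request */
    #endif

    /* Assumes the child is already stopped at a syscall-entry trap
     * (PTRACE_SYSCALL + waitpid done by the caller). */
    static void deny_current_syscall(pid_t child)
    {
        /* Rewrite the syscall number to -1: with the hunk above, the
         * kernel skips the dispatch and returns via ret_slow_syscall. */
        ptrace(PTRACE_SET_SYSCALL, child, 0, (void *)-1L);
        ptrace(PTRACE_SYSCALL, child, 0, 0);    /* resume the child */
    }
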
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 278cfc144f44..2c228a07e58c 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ __after_proc_init:
 	 * CP15 system control register value returned in r0 from
 	 * the CPU init function.
 	 */
-#ifdef CONFIG_ALIGNMENT_TRAP
+#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
 	orr	r0, r0, #CR_A
 #else
 	bic	r0, r0, #CR_A
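
ARMv6 and later cores handle unaligned LDR/STR in hardware, so the alignment
trap bit is now only set on older architectures; setting it on v6+ would turn
otherwise-legal unaligned accesses into faults. An illustrative C fragment of
the kind of access affected (the function is hypothetical, and the cast is for
illustration only):

    /* A load the compiler may emit as a single LDR from an unaligned
     * address. With SCTLR.A (CR_A) set this faults on pre-v6 cores;
     * on ARMv6+ the hardware handles it, so the bit stays clear there. */
    unsigned int load_unaligned(const unsigned char *buf)
    {
        return *(const unsigned int *)(buf + 1);    /* buf+1: unaligned */
    }
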
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 90084a6de35a..44bc0b327e2b 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -34,6 +34,7 @@
 #include <linux/leds.h>
 
 #include <asm/cacheflush.h>
+#include <asm/idmap.h>
 #include <asm/processor.h>
 #include <asm/thread_notify.h>
 #include <asm/stacktrace.h>
@@ -56,8 +57,6 @@ static const char *isa_modes[] = {
 	"ARM" , "Thumb" , "Jazelle", "ThumbEE"
 };
 
-extern void setup_mm_for_reboot(void);
-
 static volatile int hlt_counter;
 
 void disable_hlt(void)
@@ -70,6 +69,7 @@ EXPORT_SYMBOL(disable_hlt);
 void enable_hlt(void)
 {
 	hlt_counter--;
+	BUG_ON(hlt_counter < 0);
 }
 
 EXPORT_SYMBOL(enable_hlt);
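
The new BUG_ON() catches unbalanced hlt refcounting: every disable_hlt() must
be paired with exactly one enable_hlt(), and a surplus enable_hlt() now traps
instead of silently driving the counter negative. A sketch of the intended
pairing, with a hypothetical polling driver as the caller:

    /* Hypothetical driver: keep the CPU out of WFI while it polls. */
    static void poll_device(void)
    {
        disable_hlt();          /* bump hlt_counter */
        /* ... busy-poll the hardware ... */
        enable_hlt();           /* matching decrement; dropping below the
                                 * number of disables now hits BUG_ON() */
    }
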
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 739db3a1b2d2..03deeffd9f6d 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -916,16 +916,11 @@ enum ptrace_syscall_dir {
 	PTRACE_SYSCALL_EXIT,
 };
 
-static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
-				enum ptrace_syscall_dir dir)
+static int tracehook_report_syscall(struct pt_regs *regs,
+				    enum ptrace_syscall_dir dir)
 {
 	unsigned long ip;
 
-	current_thread_info()->syscall = scno;
-
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return scno;
-
 	/*
 	 * IP is used to denote syscall entry/exit:
 	 * IP = 0 -> entry, =1 -> exit
@@ -944,19 +939,41 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
 
 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
 {
-	scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER);
+	current_thread_info()->syscall = scno;
+
+	/* Do the secure computing check first; failures should be fast. */
+	if (secure_computing(scno) == -1)
+		return -1;
+
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, scno);
+
 	audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1,
 			    regs->ARM_r2, regs->ARM_r3);
+
 	return scno;
 }
 
-asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno)
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
 {
-	scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT);
-	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
-		trace_sys_exit(regs, scno);
+	/*
+	 * Audit the syscall before anything else, as a debugger may
+	 * come in and change the current registers.
+	 */
 	audit_syscall_exit(regs);
-	return scno;
+
+	/*
+	 * Note that we haven't updated the ->syscall field for the
+	 * current thread. This isn't a problem because it will have
+	 * been set on syscall entry and there hasn't been an opportunity
+	 * for a PTRACE_SET_SYSCALL since then.
+	 */
+	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+		trace_sys_exit(regs, regs_return_value(regs));
+
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
 }
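
With this ordering, secure_computing() runs before any tracing, so a seccomp
verdict cannot be bypassed by a tracer, and a -1 return tells the assembly in
entry-common.S above to skip the syscall. A hedged userspace sketch of a
filter this path enforces, using the standard prctl()/BPF seccomp API (the
allow-list policy here is illustrative):

    #include <stddef.h>
    #include <sys/prctl.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <linux/unistd.h>

    static int install_filter(void)
    {
        struct sock_filter insns[] = {
            /* Load the syscall number from struct seccomp_data. */
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                     offsetof(struct seccomp_data, nr)),
            /* Allow exit_group/exit, kill the task on anything else. */
            BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_exit_group, 2, 0),
            BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_exit, 1, 0),
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
            .len = sizeof(insns) / sizeof(insns[0]),
            .filter = insns,
        };

        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
            return -1;
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
    }
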
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index e21bac20d90d..fc6692e2b603 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -107,13 +107,6 @@ static void sched_clock_poll(unsigned long wrap_ticks)
 	update_sched_clock();
 }
 
-void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
-		unsigned long rate)
-{
-	setup_sched_clock(read, bits, rate);
-	cd.needs_suspend = true;
-}
-
 void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
 	unsigned long r, w;
@@ -189,18 +182,15 @@ void __init sched_clock_postinit(void)
 static int sched_clock_suspend(void)
 {
 	sched_clock_poll(sched_clock_timer.data);
-	if (cd.needs_suspend)
-		cd.suspended = true;
+	cd.suspended = true;
 	return 0;
 }
 
 static void sched_clock_resume(void)
 {
-	if (cd.needs_suspend) {
-		cd.epoch_cyc = read_sched_clock();
-		cd.epoch_cyc_copy = cd.epoch_cyc;
-		cd.suspended = false;
-	}
+	cd.epoch_cyc = read_sched_clock();
+	cd.epoch_cyc_copy = cd.epoch_cyc;
+	cd.suspended = false;
 }
 
 static struct syscore_ops sched_clock_ops = {
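
sched_clock_resume() can reload the epoch without locking because readers use
the epoch_cyc/epoch_cyc_copy pair to detect an in-progress update. A hedged
sketch of the reader side, mirroring the pattern this file uses elsewhere
(the function name is illustrative; cd.epoch_ns and the barriers follow the
surrounding code):

    static u64 notrace read_epoch(void)
    {
        u32 cyc;
        u64 ns;

        /* Retry until both copies agree: a writer updates epoch_cyc,
         * then epoch_ns, then epoch_cyc_copy, with barriers between. */
        do {
            cyc = cd.epoch_cyc;
            smp_rmb();
            ns = cd.epoch_ns;
            smp_rmb();
        } while (cyc != cd.epoch_cyc_copy);

        return ns;      /* consistent snapshot: epoch_ns as of epoch_cyc */
    }
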
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index fbc8b2623d82..57f537731979 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -421,6 +421,11 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 	smp_cross_call(mask, IPI_CALL_FUNC);
 }
 
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_WAKEUP);
+}
+
 void arch_send_call_function_single_ipi(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
@@ -443,7 +448,7 @@ void show_ipi_list(struct seq_file *p, int prec)
 	for (i = 0; i < NR_IPI; i++) {
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
-		for_each_present_cpu(cpu)
+		for_each_online_cpu(cpu)
 			seq_printf(p, "%10u ",
 				   __get_irq_stat(cpu, ipi_irqs[i]));
 
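
arch_send_wakeup_ipi_mask() gives platform code a way to raise IPI_WAKEUP,
for instance to kick a secondary core out of a low-power wait during bring-up.
A hedged sketch of a caller — the .smp_boot_secondary hook is the real ARM SMP
ops slot, but this particular implementation is illustrative:

    /* Illustrative smp_boot_secondary: the secondary parks in wfi()
     * until IPI_WAKEUP arrives. */
    static int my_boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
        arch_send_wakeup_ipi_mask(cpumask_of(cpu));
        return 0;
    }
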
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index b22d700fea27..ff07879ad95d 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -31,6 +31,8 @@ static void __iomem *twd_base;
 
 static struct clk *twd_clk;
 static unsigned long twd_timer_rate;
+static bool common_setup_called;
+static DEFINE_PER_CPU(bool, percpu_setup_called);
 
 static struct clock_event_device __percpu **twd_evt;
 static int twd_ppi;
@@ -248,17 +250,9 @@ static struct clk *twd_get_clock(void)
 		return clk;
 	}
 
-	err = clk_prepare(clk);
+	err = clk_prepare_enable(clk);
 	if (err) {
-		pr_err("smp_twd: clock failed to prepare: %d\n", err);
-		clk_put(clk);
-		return ERR_PTR(err);
-	}
-
-	err = clk_enable(clk);
-	if (err) {
-		pr_err("smp_twd: clock failed to enable: %d\n", err);
-		clk_unprepare(clk);
+		pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
 		clk_put(clk);
 		return ERR_PTR(err);
 	}
@@ -272,15 +266,45 @@ static struct clk *twd_get_clock(void)
 static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
 {
 	struct clock_event_device **this_cpu_clk;
+	int cpu = smp_processor_id();
+
+	/*
+	 * If the basic setup for this CPU has been done before don't
+	 * bother with the below.
+	 */
+	if (per_cpu(percpu_setup_called, cpu)) {
+		__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+		clockevents_register_device(*__this_cpu_ptr(twd_evt));
+		enable_percpu_irq(clk->irq, 0);
+		return 0;
+	}
+	per_cpu(percpu_setup_called, cpu) = true;
 
-	if (!twd_clk)
-		twd_clk = twd_get_clock();
-
-	if (!IS_ERR_OR_NULL(twd_clk))
-		twd_timer_rate = clk_get_rate(twd_clk);
-	else
-		twd_calibrate_rate();
+	/*
+	 * This stuff only need to be done once for the entire TWD cluster
+	 * during the runtime of the system.
+	 */
+	if (!common_setup_called) {
+		twd_clk = twd_get_clock();
+
+		/*
+		 * We use IS_ERR_OR_NULL() here, because if the clock stubs
+		 * are active we will get a valid clk reference which is
+		 * however NULL and will return the rate 0. In that case we
+		 * need to calibrate the rate instead.
+		 */
+		if (!IS_ERR_OR_NULL(twd_clk))
+			twd_timer_rate = clk_get_rate(twd_clk);
+		else
+			twd_calibrate_rate();
+
+		common_setup_called = true;
+	}
 
+	/*
+	 * The following is done once per CPU the first time .setup() is
+	 * called.
+	 */
 	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
 
 	clk->name = "local_timer";
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 36ff15bbfdd4..b9f38e388b43 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -114,6 +114,15 @@ SECTIONS
 
 	RO_DATA(PAGE_SIZE)
 
+	. = ALIGN(4);
+	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+		__start___ex_table = .;
+#ifdef CONFIG_MMU
+		*(__ex_table)
+#endif
+		__stop___ex_table = .;
+	}
+
 #ifdef CONFIG_ARM_UNWIND
 	/*
 	 * Stack unwinding tables
@@ -220,16 +229,6 @@ SECTIONS
 	READ_MOSTLY_DATA(L1_CACHE_BYTES)
 
 	/*
-	 * The exception fixup table (might need resorting at runtime)
-	 */
-	. = ALIGN(4);
-	__start___ex_table = .;
-#ifdef CONFIG_MMU
-	*(__ex_table)
-#endif
-	__stop___ex_table = .;
-
-	/*
 	 * and the usual data section
 	 */
 	DATA_DATA
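
Moving __ex_table next to RO_DATA works because the table no longer has to be
modified at runtime (the old comment's "might need resorting" caveat goes
away once it is sorted before the kernel runs), so it can live in read-only
memory. A hedged sketch of how the fault path consumes the section — the
entry layout matches arch/arm, while the search function itself is
illustrative:

    /* Each entry pairs a faulting instruction address with its fixup. */
    struct exception_table_entry {
        unsigned long insn, fixup;
    };

    extern struct exception_table_entry __start___ex_table[];
    extern struct exception_table_entry __stop___ex_table[];

    /* Binary-search the sorted table for a faulting address; the fault
     * handler branches to ->fixup when an entry matches. */
    static const struct exception_table_entry *
    find_fixup(unsigned long addr)
    {
        const struct exception_table_entry *first = __start___ex_table;
        const struct exception_table_entry *last = __stop___ex_table - 1;

        while (first <= last) {
            const struct exception_table_entry *mid =
                first + (last - first) / 2;

            if (mid->insn < addr)
                first = mid + 1;
            else if (mid->insn > addr)
                last = mid - 1;
            else
                return mid;
        }
        return NULL;    /* no fixup: a genuine fault */
    }
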