Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/armksyms.c   |  4
-rw-r--r--  arch/arm/kernel/entry-armv.S | 22
-rw-r--r--  arch/arm/kernel/irq.c        |  3
-rw-r--r--  arch/arm/kernel/process.c    | 20
-rw-r--r--  arch/arm/kernel/ptrace.c     | 49
-rw-r--r--  arch/arm/kernel/setup.c      | 10
-rw-r--r--  arch/arm/kernel/smp.c        | 47
7 files changed, 83 insertions, 72 deletions
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 7b17a87a3311..7a3261f0bf79 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -9,6 +9,7 @@
  */
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/cryptohash.h>
 #include <linux/delay.h>
 #include <linux/in6.h>
 #include <linux/syscalls.h>
@@ -126,6 +127,9 @@ EXPORT_SYMBOL(__put_user_2);
 EXPORT_SYMBOL(__put_user_4);
 EXPORT_SYMBOL(__put_user_8);
 
+	/* crypto hash */
+EXPORT_SYMBOL(sha_transform);
+
 	/* gcc lib functions */
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index be439cab92c6..d9fb819bf7cc 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -47,6 +47,13 @@
 	movne	r0, sp
 	adrne	lr, 1b
 	bne	do_IPI
+
+#ifdef CONFIG_LOCAL_TIMERS
+	test_for_ltirq	r0, r6, r5, lr
+	movne	r0, sp
+	adrne	lr, 1b
+	bne	do_local_timer
+#endif
 #endif
 
 	.endm
@@ -785,7 +792,7 @@ __kuser_helper_end:
  * SP points to a minimal amount of processor-private memory, the address
  * of which is copied into r0 for the mode specific abort handler.
  */
-	.macro	vector_stub, name, correction=0
+	.macro	vector_stub, name, mode, correction=0
 	.align	5
 
 vector_\name:
@@ -805,15 +812,14 @@ vector_\name:
 	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
 	mrs	r0, cpsr
-	bic	r0, r0, #MODE_MASK
-	orr	r0, r0, #SVC_MODE
+	eor	r0, r0, #(\mode ^ SVC_MODE)
 	msr	spsr_cxsf, r0
 
 	@
 	@ the branch table must immediately follow this code
 	@
-	mov	r0, sp
 	and	lr, lr, #0x0f
+	mov	r0, sp
 	ldr	lr, [pc, lr, lsl #2]
 	movs	pc, lr			@ branch to handler in SVC mode
 	.endm
@@ -823,7 +829,7 @@ __stubs_start:
 /*
  * Interrupt dispatcher
  */
-	vector_stub	irq, 4
+	vector_stub	irq, IRQ_MODE, 4
 
 	.long	__irq_usr			@  0  (USR_26 / USR_32)
 	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -846,7 +852,7 @@ __stubs_start:
  * Data abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	dabt, 8
+	vector_stub	dabt, ABT_MODE, 8
 
 	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -869,7 +875,7 @@ __stubs_start:
  * Prefetch abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	pabt, 4
+	vector_stub	pabt, ABT_MODE, 4
 
 	.long	__pabt_usr			@  0  (USR_26 / USR_32)
 	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -892,7 +898,7 @@ __stubs_start:
  * Undef instr entry dispatcher
  * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-	vector_stub	und
+	vector_stub	und, UND_MODE
 
 	.long	__und_usr			@  0  (USR_26 / USR_32)
 	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 9def4404e1f2..d7099dbbb879 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -264,6 +264,7 @@ unlock:
 #endif
 #ifdef CONFIG_SMP
 		show_ipi_list(p);
+		show_local_irqs(p);
 #endif
 		seq_printf(p, "Err: %10lu\n", irq_err_count);
 	}
@@ -995,7 +996,7 @@ void __init init_irq_proc(void)
 	struct proc_dir_entry *dir;
 	int irq;
 
-	dir = proc_mkdir("irq", 0);
+	dir = proc_mkdir("irq", NULL);
 	if (!dir)
 		return;
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index ba298277becd..30494aab829a 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -86,12 +86,16 @@ EXPORT_SYMBOL(pm_power_off);
  */
 void default_idle(void)
 {
-	local_irq_disable();
-	if (!need_resched() && !hlt_counter) {
-		timer_dyn_reprogram();
-		arch_idle();
+	if (hlt_counter)
+		cpu_relax();
+	else {
+		local_irq_disable();
+		if (!need_resched()) {
+			timer_dyn_reprogram();
+			arch_idle();
+		}
+		local_irq_enable();
 	}
-	local_irq_enable();
 }
 
 /*
@@ -116,13 +120,13 @@ void cpu_idle(void)
 
 		if (!idle)
 			idle = default_idle;
-		preempt_disable();
 		leds_event(led_idle_start);
 		while (!need_resched())
 			idle();
 		leds_event(led_idle_end);
-		preempt_enable();
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -355,7 +359,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
 	struct thread_info *thread = p->thread_info;
 	struct pt_regs *childregs;
 
-	childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_START_SP)) - 1;
+	childregs = (void *)thread + THREAD_START_SP - sizeof(*regs);
 	*childregs = *regs;
 	childregs->ARM_r0 = 0;
 	childregs->ARM_sp = stack_start;
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 9bd8609a2926..9a340e790da5 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -648,7 +648,7 @@ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
 
 #endif
 
-static int do_ptrace(int request, struct task_struct *child, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	unsigned long tmp;
 	int ret;
@@ -782,53 +782,6 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long data)
 	return ret;
 }
 
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
-{
-	struct task_struct *child;
-	int ret;
-
-	lock_kernel();
-	ret = -EPERM;
-	if (request == PTRACE_TRACEME) {
-		/* are we already being traced? */
-		if (current->ptrace & PT_PTRACED)
-			goto out;
-		ret = security_ptrace(current->parent, current);
-		if (ret)
-			goto out;
-		/* set the ptrace bit in the process flags. */
-		current->ptrace |= PT_PTRACED;
-		ret = 0;
-		goto out;
-	}
-	ret = -ESRCH;
-	read_lock(&tasklist_lock);
-	child = find_task_by_pid(pid);
-	if (child)
-		get_task_struct(child);
-	read_unlock(&tasklist_lock);
-	if (!child)
-		goto out;
-
-	ret = -EPERM;
-	if (pid == 1)		/* you may not mess with init */
-		goto out_tsk;
-
-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
-		goto out_tsk;
-	}
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret == 0)
-		ret = do_ptrace(request, child, addr, data);
-
-out_tsk:
-	put_task_struct(child);
-out:
-	unlock_kernel();
-	return ret;
-}
-
 asmlinkage void syscall_trace(int why, struct pt_regs *regs)
 {
 	unsigned long ip;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c9b69771f92e..85774165e9fd 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -338,7 +338,8 @@ void cpu_init(void)
 		BUG();
 	}
 
-	dump_cpu_info(cpu);
+	if (system_state == SYSTEM_BOOTING)
+		dump_cpu_info(cpu);
 
 	/*
 	 * setup stacks for re-entrant exception handlers
@@ -838,7 +839,12 @@ static int c_show(struct seq_file *m, void *v)
 
 #if defined(CONFIG_SMP)
 	for_each_online_cpu(i) {
-		seq_printf(m, "Processor\t: %d\n", i);
+		/*
+		 * glibc reads /proc/cpuinfo to determine the number of
+		 * online processors, looking for lines beginning with
+		 * "processor". Give glibc what it expects.
+		 */
+		seq_printf(m, "processor\t: %d\n", i);
 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
 			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
 			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index edb5a406922f..e55ea952f7aa 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -142,7 +142,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 		ret = -EIO;
 	}
 
-	secondary_data.stack = 0;
+	secondary_data.stack = NULL;
 	secondary_data.pgdir = 0;
 
 	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
@@ -185,6 +185,11 @@ int __cpuexit __cpu_disable(void)
 	migrate_irqs();
 
 	/*
+	 * Stop the local timer for this CPU.
+	 */
+	local_timer_stop(cpu);
+
+	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
 	 * from the vm mask set of all processes.
 	 */
@@ -251,7 +256,9 @@ void __cpuexit cpu_die(void)
 asmlinkage void __cpuinit secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
 
 	printk("CPU%u: Booted secondary processor\n", cpu);
 
@@ -268,6 +275,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	local_flush_tlb_all();
 
 	cpu_init();
+	preempt_disable();
 
 	/*
 	 * Give the platform a chance to do its own initialisation.
@@ -290,6 +298,11 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	cpu_set(cpu, cpu_online_map);
 
 	/*
+	 * Setup local timer for this CPU.
+	 */
+	local_timer_setup(cpu);
+
+	/*
 	 * OK, it's off to the idle thread for us
 	 */
 	cpu_idle();
@@ -359,8 +372,8 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
  * You must not call this function with disabled interrupts, from a
  * hardware interrupt handler, nor from a bottom half handler.
  */
-int smp_call_function_on_cpu(void (*func)(void *info), void *info, int retry,
-			     int wait, cpumask_t callmap)
+static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
+				    int retry, int wait, cpumask_t callmap)
 {
 	struct smp_call_struct data;
 	unsigned long timeout;
@@ -454,6 +467,18 @@ void show_ipi_list(struct seq_file *p)
 	seq_putc(p, '\n');
 }
 
+void show_local_irqs(struct seq_file *p)
+{
+	unsigned int cpu;
+
+	seq_printf(p, "LOC: ");
+
+	for_each_present_cpu(cpu)
+		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);
+
+	seq_putc(p, '\n');
+}
+
 static void ipi_timer(struct pt_regs *regs)
 {
 	int user = user_mode(regs);
@@ -464,6 +489,18 @@ static void ipi_timer(struct pt_regs *regs)
 	irq_exit();
 }
 
+#ifdef CONFIG_LOCAL_TIMERS
+asmlinkage void do_local_timer(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	if (local_timer_ack()) {
+		irq_stat[cpu].local_timer_irqs++;
+		ipi_timer(regs);
+	}
+}
+#endif
+
 /*
  * ipi_call_function - handle IPI from smp_call_function()
  *
@@ -515,7 +552,7 @@ static void ipi_cpu_stop(unsigned int cpu)
  *
  *  Bit 0 - Inter-processor function call
  */
-void do_IPI(struct pt_regs *regs)
+asmlinkage void do_IPI(struct pt_regs *regs)
 {
 	unsigned int cpu = smp_processor_id();
 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);