author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 17:32:52 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 17:32:52 -0500
commit		4073723acb9cdcdbe4df9c0e0c376c65d1697e43 (patch)
tree		f41c17eac157b1223ce104845cf9b1e5a9e6a83d /arch/arm/kernel
parent		58daf18cdcab550262a5f4681e1f1e073e21965a (diff)
parent		4ec3eb13634529c0bc7466658d84d0bbe3244aea (diff)

Merge branch 'misc' into devel

Conflicts:
	arch/arm/Kconfig
	arch/arm/common/Makefile
	arch/arm/kernel/Makefile
	arch/arm/kernel/smp.c
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/Makefile	|    4
-rw-r--r--	arch/arm/kernel/entry-armv.S	|   52
-rw-r--r--	arch/arm/kernel/fiq.c		|   10
-rw-r--r--	arch/arm/kernel/head.S		|   50
-rw-r--r--	arch/arm/kernel/irq.c		|   30
-rw-r--r--	arch/arm/kernel/setup.c		|   37
-rw-r--r--	arch/arm/kernel/smp.c		|  409
-rw-r--r--	arch/arm/kernel/smp_tlb.c	|  139
-rw-r--r--	arch/arm/kernel/smp_twd.c	|   10
-rw-r--r--	arch/arm/kernel/swp_emulate.c	|  267
-rw-r--r--	arch/arm/kernel/time.c		|    4
-rw-r--r--	arch/arm/kernel/traps.c		|   14
-rw-r--r--	arch/arm/kernel/vmlinux.lds.S	|    1
13 files changed, 676 insertions(+), 351 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index fd3ec49bfba6..7c33e6f29bcc 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK)	+= sched_clock.o
-obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
@@ -44,6 +44,8 @@ obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_ARM_UNWIND)	+= unwind.o
 obj-$(CONFIG_HAVE_TCM)		+= tcm.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+obj-$(CONFIG_SWP_EMULATE)	+= swp_emulate.o
+CFLAGS_swp_emulate.o		:= -Wa,-march=armv7-a
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o

 obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bb96a7d4bbf5..27f64489c1cb 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -25,42 +25,22 @@
 #include <asm/tls.h>

 #include "entry-header.S"
+#include <asm/entry-macro-multi.S>

 /*
  * Interrupt handling. Preserves r7, r8, r9
  */
 	.macro	irq_handler
-	get_irqnr_preamble r5, lr
-1:	get_irqnr_and_base r0, r6, r5, lr
-	movne	r1, sp
-	@
-	@ routine called with r0 = irq number, r1 = struct pt_regs *
-	@
-	adrne	lr, BSYM(1b)
-	bne	asm_do_IRQ
-
-#ifdef CONFIG_SMP
-	/*
-	 * XXX
-	 *
-	 * this macro assumes that irqstat (r6) and base (r5) are
-	 * preserved from get_irqnr_and_base above
-	 */
-	ALT_SMP(test_for_ipi r0, r6, r5, lr)
-	ALT_UP_B(9997f)
-	movne	r0, sp
-	adrne	lr, BSYM(1b)
-	bne	do_IPI
-
-#ifdef CONFIG_LOCAL_TIMERS
-	test_for_ltirq r0, r6, r5, lr
-	movne	r0, sp
-	adrne	lr, BSYM(1b)
-	bne	do_local_timer
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	ldr	r5, =handle_arch_irq
+	mov	r0, sp
+	ldr	r5, [r5]
+	adr	lr, BSYM(9997f)
+	teq	r5, #0
+	movne	pc, r5
 #endif
+	arch_irq_handler_default
 9997:
-#endif
-
 	.endm

 #ifdef CONFIG_KPROBES
@@ -735,7 +715,7 @@ ENTRY(__switch_to)
 THUMB(	stmia	ip!, {r4 - sl, fp}	)	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		)
 THUMB(	str	lr, [ip], #4		)
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	set_tls	r3, r4, r5
@@ -744,7 +724,7 @@ ENTRY(__switch_to)
 	ldr	r8, =__stack_chk_guard
 	ldr	r7, [r7, #TSK_STACK_CANARY]
 #endif
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 #endif
 	mov	r5, r0
@@ -842,7 +822,7 @@ __kuser_helper_start:
  */

 __kuser_memory_barrier:				@ 0xffff0fa0
-	smp_dmb
+	smp_dmb	arm
 	usr_ret	lr

 	.align	5
@@ -959,7 +939,7 @@ kuser_cmpxchg_fixup:

 #else

-	smp_dmb
+	smp_dmb	arm
 1:	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
@@ -1245,3 +1225,9 @@ cr_alignment:
 	.space	4
 cr_no_alignment:
 	.space	4
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	.globl	handle_arch_irq
+handle_arch_irq:
+	.space	4
+#endif
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 6ff7919613d7..e72dc34eea1c 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -45,6 +45,7 @@
 #include <asm/fiq.h>
 #include <asm/irq.h>
 #include <asm/system.h>
+#include <asm/traps.h>

 static unsigned long no_fiq_insn;

@@ -67,17 +68,22 @@ static struct fiq_handler default_owner = {

 static struct fiq_handler *current_fiq = &default_owner;

-int show_fiq_list(struct seq_file *p, void *v)
+int show_fiq_list(struct seq_file *p, int prec)
 {
 	if (current_fiq != &default_owner)
-		seq_printf(p, "FIQ: %s\n", current_fiq->name);
+		seq_printf(p, "%*s: %s\n", prec, "FIQ",
+			current_fiq->name);

 	return 0;
 }

 void set_fiq_handler(void *start, unsigned int length)
 {
+#if defined(CONFIG_CPU_USE_DOMAINS)
 	memcpy((void *)0xffff001c, start, length);
+#else
+	memcpy(vectors_page + 0x1c, start, length);
+#endif
 	flush_icache_range(0xffff001c, 0xffff001c + length);
 	if (!vectors_high())
 		flush_icache_range(0x1c, 0x1c + length);
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 6bd82d25683c..f17d9a09e8fb 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -91,6 +91,11 @@ ENTRY(stext)
 	movs	r8, r5				@ invalid machine (r5=0)?
 THUMB(	it	eq )		@ force fixup-able long branch encoding
 	beq	__error_a			@ yes, error 'a'
+
+	/*
+	 * r1 = machine no, r2 = atags,
+	 * r8 = machinfo, r9 = cpuid, r10 = procinfo
+	 */
 	bl	__vet_atags
 #ifdef CONFIG_SMP_ON_UP
 	bl	__fixup_smp
@@ -387,19 +392,19 @@ ENDPROC(__turn_mmu_on)

 #ifdef CONFIG_SMP_ON_UP
 __fixup_smp:
-	mov	r7, #0x00070000
-	orr	r6, r7, #0xff000000	@ mask 0xff070000
-	orr	r7, r7, #0x41000000	@ val 0x41070000
-	and	r0, r9, r6
-	teq	r0, r7			@ ARM CPU and ARMv6/v7?
+	mov	r4, #0x00070000
+	orr	r3, r4, #0xff000000	@ mask 0xff070000
+	orr	r4, r4, #0x41000000	@ val 0x41070000
+	and	r0, r9, r3
+	teq	r0, r4			@ ARM CPU and ARMv6/v7?
 	bne	__fixup_smp_on_up	@ no, assume UP

-	orr	r6, r6, #0x0000ff00
-	orr	r6, r6, #0x000000f0	@ mask 0xff07fff0
-	orr	r7, r7, #0x0000b000
-	orr	r7, r7, #0x00000020	@ val 0x4107b020
-	and	r0, r9, r6
-	teq	r0, r7			@ ARM 11MPCore?
+	orr	r3, r3, #0x0000ff00
+	orr	r3, r3, #0x000000f0	@ mask 0xff07fff0
+	orr	r4, r4, #0x0000b000
+	orr	r4, r4, #0x00000020	@ val 0x4107b020
+	and	r0, r9, r3
+	teq	r0, r4			@ ARM 11MPCore?
 	moveq	pc, lr			@ yes, assume SMP

 	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
@@ -408,15 +413,22 @@ __fixup_smp:

 __fixup_smp_on_up:
 	adr	r0, 1f
-	ldmia	r0, {r3, r6, r7}
+	ldmia	r0, {r3 - r5}
 	sub	r3, r0, r3
-	add	r6, r6, r3
-	add	r7, r7, r3
-2:	cmp	r6, r7
-	ldmia	r6!, {r0, r4}
-	strlo	r4, [r0, r3]
-	blo	2b
-	mov	pc, lr
+	add	r4, r4, r3
+	add	r5, r5, r3
+2:	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
+#endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	2b
 ENDPROC(__fixup_smp)

 	.align
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6d616333340f..8135438b8818 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -38,6 +38,7 @@
 #include <linux/ftrace.h>

 #include <asm/system.h>
+#include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>

@@ -48,8 +49,6 @@
 #define irq_finish(irq) do { } while (0)
 #endif

-unsigned int arch_nr_irqs;
-void (*init_arch_irq)(void) __initdata = NULL;
 unsigned long irq_err_count;

 int show_interrupts(struct seq_file *p, void *v)
@@ -58,11 +57,20 @@ int show_interrupts(struct seq_file *p, void *v)
 	struct irq_desc *desc;
 	struct irqaction * action;
 	unsigned long flags;
+	int prec, n;
+
+	for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
+		n *= 10;
+
+#ifdef CONFIG_SMP
+	if (prec < 4)
+		prec = 4;
+#endif

 	if (i == 0) {
 		char cpuname[12];

-		seq_printf(p, " ");
+		seq_printf(p, "%*s ", prec, "");
 		for_each_present_cpu(cpu) {
 			sprintf(cpuname, "CPU%d", cpu);
 			seq_printf(p, " %10s", cpuname);
@@ -77,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		if (!action)
 			goto unlock;

-		seq_printf(p, "%3d: ", i);
+		seq_printf(p, "%*d: ", prec, i);
 		for_each_present_cpu(cpu)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
 		seq_printf(p, " %10s", desc->chip->name ? : "-");
@@ -90,13 +98,15 @@ unlock:
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	} else if (i == nr_irqs) {
 #ifdef CONFIG_FIQ
-		show_fiq_list(p, v);
+		show_fiq_list(p, prec);
 #endif
 #ifdef CONFIG_SMP
-		show_ipi_list(p);
-		show_local_irqs(p);
+		show_ipi_list(p, prec);
+#endif
+#ifdef CONFIG_LOCAL_TIMERS
+		show_local_irqs(p, prec);
 #endif
-		seq_printf(p, "Err: %10lu\n", irq_err_count);
+		seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	}
 	return 0;
 }
@@ -156,13 +166,13 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)

 void __init init_IRQ(void)
 {
-	init_arch_irq();
+	machine_desc->init_irq();
 }

 #ifdef CONFIG_SPARSE_IRQ
 int __init arch_probe_nr_irqs(void)
 {
-	nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
+	nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
 	return nr_irqs;
 }
 #endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 336f14e0e5c2..3455ad33de4c 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -75,9 +75,9 @@ extern void reboot_setup(char *str);
75 75
76unsigned int processor_id; 76unsigned int processor_id;
77EXPORT_SYMBOL(processor_id); 77EXPORT_SYMBOL(processor_id);
78unsigned int __machine_arch_type; 78unsigned int __machine_arch_type __read_mostly;
79EXPORT_SYMBOL(__machine_arch_type); 79EXPORT_SYMBOL(__machine_arch_type);
80unsigned int cacheid; 80unsigned int cacheid __read_mostly;
81EXPORT_SYMBOL(cacheid); 81EXPORT_SYMBOL(cacheid);
82 82
83unsigned int __atags_pointer __initdata; 83unsigned int __atags_pointer __initdata;
@@ -91,24 +91,24 @@ EXPORT_SYMBOL(system_serial_low);
 unsigned int system_serial_high;
 EXPORT_SYMBOL(system_serial_high);

-unsigned int elf_hwcap;
+unsigned int elf_hwcap __read_mostly;
 EXPORT_SYMBOL(elf_hwcap);


 #ifdef MULTI_CPU
-struct processor processor;
+struct processor processor __read_mostly;
 #endif
 #ifdef MULTI_TLB
-struct cpu_tlb_fns cpu_tlb;
+struct cpu_tlb_fns cpu_tlb __read_mostly;
 #endif
 #ifdef MULTI_USER
-struct cpu_user_fns cpu_user;
+struct cpu_user_fns cpu_user __read_mostly;
 #endif
 #ifdef MULTI_CACHE
-struct cpu_cache_fns cpu_cache;
+struct cpu_cache_fns cpu_cache __read_mostly;
 #endif
 #ifdef CONFIG_OUTER_CACHE
-struct outer_cache_fns outer_cache;
+struct outer_cache_fns outer_cache __read_mostly;
 EXPORT_SYMBOL(outer_cache);
 #endif

@@ -126,6 +126,7 @@ EXPORT_SYMBOL(elf_platform);
 static const char *cpu_name;
 static const char *machine_name;
 static char __initdata cmd_line[COMMAND_LINE_SIZE];
+struct machine_desc *machine_desc __initdata;

 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
@@ -708,13 +709,11 @@ static struct init_tags {
 	{ 0, ATAG_NONE }
 };

-static void (*init_machine)(void) __initdata;
-
 static int __init customize_machine(void)
 {
 	/* customizes platform devices, or adds new ones */
-	if (init_machine)
-		init_machine();
+	if (machine_desc->init_machine)
+		machine_desc->init_machine();
 	return 0;
 }
 arch_initcall(customize_machine);
@@ -809,6 +808,7 @@ void __init setup_arch(char **cmdline_p)

 	setup_processor();
 	mdesc = setup_machine(machine_arch_type);
+	machine_desc = mdesc;
 	machine_name = mdesc->name;

 	if (mdesc->soft_reboot)
@@ -868,13 +868,9 @@ void __init setup_arch(char **cmdline_p)
 	cpu_init();
 	tcm_init();

-	/*
-	 * Set up various architecture-specific pointers
-	 */
-	arch_nr_irqs = mdesc->nr_irqs;
-	init_arch_irq = mdesc->init_irq;
-	system_timer = mdesc->timer;
-	init_machine = mdesc->init_machine;
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	handle_arch_irq = mdesc->handle_irq;
+#endif

 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
@@ -884,6 +880,9 @@ void __init setup_arch(char **cmdline_p)
 #endif
 #endif
 	early_trap_init();
+
+	if (mdesc->init_early)
+		mdesc->init_early();
 }


diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b6b78b22031b..5ec79b4ff950 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
 #include <linux/irq.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
+#include <linux/completion.h>

 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
@@ -38,7 +39,6 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
-#include <asm/smp_plat.h>

 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -47,22 +47,8 @@
  */
 struct secondary_data secondary_data;

-/*
- * structures for inter-processor calls
- * - A collection of single bit ipi messages.
- */
-struct ipi_data {
-	spinlock_t lock;
-	unsigned long ipi_count;
-	unsigned long bits;
-};
-
-static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
-	.lock	= SPIN_LOCK_UNLOCKED,
-};
-
 enum ipi_msg_type {
-	IPI_TIMER,
+	IPI_TIMER = 2,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
@@ -178,8 +164,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
 			barrier();
 		}

-		if (!cpu_online(cpu))
+		if (!cpu_online(cpu)) {
+			pr_crit("CPU%u: failed to come online\n", cpu);
 			ret = -EIO;
+		}
+	} else {
+		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 	}

 	secondary_data.stack = NULL;
@@ -195,18 +185,12 @@ int __cpuinit __cpu_up(unsigned int cpu)

 	pgd_free(&init_mm, pgd);

-	if (ret) {
-		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
-
-		/*
-		 * FIXME: We need to clean up the new idle thread. --rmk
-		 */
-	}
-
 	return ret;
 }

 #ifdef CONFIG_HOTPLUG_CPU
+static void percpu_timer_stop(void);
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -234,7 +218,7 @@ int __cpu_disable(void)
 	/*
 	 * Stop the local timer for this CPU.
 	 */
-	local_timer_stop();
+	percpu_timer_stop();

 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
@@ -253,12 +237,20 @@ int __cpu_disable(void)
 	return 0;
 }

+static DECLARE_COMPLETION(cpu_died);
+
 /*
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
 void __cpu_die(unsigned int cpu)
 {
+	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+		pr_err("CPU%u: cpu didn't die\n", cpu);
+		return;
+	}
+	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+
 	if (!platform_cpu_kill(cpu))
 		printk("CPU%u: unable to kill\n", cpu);
 }
@@ -275,12 +267,17 @@ void __ref cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();

-	local_irq_disable();
 	idle_task_exit();

+	local_irq_disable();
+	mb();
+
+	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	complete(&cpu_died);
+
 	/*
 	 * actual CPU shutdown procedure is at least platform (if not
-	 * CPU) specific
+	 * CPU) specific.
 	 */
 	platform_cpu_die(cpu);

@@ -290,6 +287,7 @@ void __ref cpu_die(void)
 	 * to be repeated to undo the effects of taking the CPU offline.
 	 */
 	__asm__("mov	sp, %0\n"
+	"	mov	fp, #0\n"
 	"	b	secondary_start_kernel"
 		:
 		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
@@ -297,6 +295,17 @@ void __ref cpu_die(void)
 #endif /* CONFIG_HOTPLUG_CPU */

 /*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
+
+	cpu_info->loops_per_jiffy = loops_per_jiffy;
+}
+
+/*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
@@ -320,6 +329,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)

 	cpu_init();
 	preempt_disable();
+	trace_hardirqs_off();

 	/*
 	 * Give the platform a chance to do its own initialisation.
@@ -353,17 +363,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	cpu_idle();
 }

-/*
- * Called by both boot and secondaries to move global data into
- * per-processor storage.
- */
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
-{
-	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
-
-	cpu_info->loops_per_jiffy = loops_per_jiffy;
-}
-
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	int cpu;
@@ -386,61 +385,80 @@ void __init smp_prepare_boot_cpu(void)
 	per_cpu(cpu_data, cpu).idle = current;
 }

-static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
+void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned long flags;
-	unsigned int cpu;
-
-	local_irq_save(flags);
+	unsigned int ncores = num_possible_cpus();

-	for_each_cpu(cpu, mask) {
-		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
-
-		spin_lock(&ipi->lock);
-		ipi->bits |= 1 << msg;
-		spin_unlock(&ipi->lock);
-	}
+	smp_store_cpu_info(smp_processor_id());

 	/*
-	 * Call the platform specific cross-CPU call function.
+	 * are we trying to boot more cores than exist?
 	 */
-	smp_cross_call(mask);
+	if (max_cpus > ncores)
+		max_cpus = ncores;
+
+	if (max_cpus > 1) {
+		/*
+		 * Enable the local timer or broadcast device for the
+		 * boot CPU, but only if we have more than one CPU.
+		 */
+		percpu_timer_setup();

-	local_irq_restore(flags);
+		/*
+		 * Initialise the SCU if there are more than one CPU
+		 * and let them know where to start.
+		 */
+		platform_smp_prepare_cpus(max_cpus);
+	}
 }

 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	send_ipi_message(mask, IPI_CALL_FUNC);
+	smp_cross_call(mask, IPI_CALL_FUNC);
 }

 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }

-void show_ipi_list(struct seq_file *p)
+static const char *ipi_types[NR_IPI] = {
+#define S(x,s)	[x - IPI_TIMER] = s
+	S(IPI_TIMER, "Timer broadcast interrupts"),
+	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
+	S(IPI_CALL_FUNC, "Function call interrupts"),
+	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
+	S(IPI_CPU_STOP, "CPU stop interrupts"),
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
 {
-	unsigned int cpu;
+	unsigned int cpu, i;

-	seq_puts(p, "IPI:");
+	for (i = 0; i < NR_IPI; i++) {
+		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

 	for_each_present_cpu(cpu)
-		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
+			seq_printf(p, "%10u ",
+				   __get_irq_stat(cpu, ipi_irqs[i]));

-	seq_putc(p, '\n');
+		seq_printf(p, " %s\n", ipi_types[i]);
+	}
 }

-void show_local_irqs(struct seq_file *p)
+u64 smp_irq_stat_cpu(unsigned int cpu)
 {
-	unsigned int cpu;
+	u64 sum = 0;
+	int i;

-	seq_printf(p, "LOC: ");
+	for (i = 0; i < NR_IPI; i++)
+		sum += __get_irq_stat(cpu, ipi_irqs[i]);

-	for_each_present_cpu(cpu)
-		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);
+#ifdef CONFIG_LOCAL_TIMERS
+	sum += __get_irq_stat(cpu, local_timer_irqs);
+#endif

-	seq_putc(p, '\n');
+	return sum;
 }

 /*
@@ -463,18 +481,30 @@ asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
 	int cpu = smp_processor_id();

 	if (local_timer_ack()) {
-		irq_stat[cpu].local_timer_irqs++;
+		__inc_irq_stat(cpu, local_timer_irqs);
 		ipi_timer();
 	}

 	set_irq_regs(old_regs);
 }
+
+void show_local_irqs(struct seq_file *p, int prec)
+{
+	unsigned int cpu;
+
+	seq_printf(p, "%*s: ", prec, "LOC");
+
+	for_each_present_cpu(cpu)
+		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
+
+	seq_printf(p, " Local timer interrupts\n");
+}
 #endif

 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 static void smp_timer_broadcast(const struct cpumask *mask)
 {
-	send_ipi_message(mask, IPI_TIMER);
+	smp_cross_call(mask, IPI_TIMER);
 }
 #else
 #define smp_timer_broadcast	NULL
@@ -511,6 +541,21 @@ void __cpuinit percpu_timer_setup(void)
 	local_timer_setup(evt);
 }

+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The generic clock events code purposely does not stop the local timer
+ * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
+ * manually here.
+ */
+static void percpu_timer_stop(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+}
+#endif
+
 static DEFINE_SPINLOCK(stop_lock);

 /*
@@ -537,216 +582,76 @@ static void ipi_cpu_stop(unsigned int cpu)

 /*
  * Main handler for inter-processor interrupts
- *
- * For ARM, the ipimask now only identifies a single
- * category of IPI (Bit 1 IPIs have been replaced by a
- * different mechanism):
- *
- *  Bit 0 - Inter-processor function call
  */
-asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 {
 	unsigned int cpu = smp_processor_id();
-	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
 	struct pt_regs *old_regs = set_irq_regs(regs);

-	ipi->ipi_count++;
-
-	for (;;) {
-		unsigned long msgs;
+	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
+		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

-		spin_lock(&ipi->lock);
-		msgs = ipi->bits;
-		ipi->bits = 0;
-		spin_unlock(&ipi->lock);
-
-		if (!msgs)
-			break;
-
-		do {
-			unsigned nextmsg;
-
-			nextmsg = msgs & -msgs;
-			msgs &= ~nextmsg;
-			nextmsg = ffz(~nextmsg);
-
-			switch (nextmsg) {
-			case IPI_TIMER:
-				ipi_timer();
-				break;
+	switch (ipinr) {
+	case IPI_TIMER:
+		ipi_timer();
+		break;

-			case IPI_RESCHEDULE:
-				/*
-				 * nothing more to do - eveything is
-				 * done on the interrupt return path
-				 */
-				break;
+	case IPI_RESCHEDULE:
+		/*
+		 * nothing more to do - eveything is
+		 * done on the interrupt return path
+		 */
+		break;

-			case IPI_CALL_FUNC:
-				generic_smp_call_function_interrupt();
-				break;
+	case IPI_CALL_FUNC:
+		generic_smp_call_function_interrupt();
+		break;

-			case IPI_CALL_FUNC_SINGLE:
-				generic_smp_call_function_single_interrupt();
-				break;
+	case IPI_CALL_FUNC_SINGLE:
+		generic_smp_call_function_single_interrupt();
+		break;

-			case IPI_CPU_STOP:
-				ipi_cpu_stop(cpu);
-				break;
+	case IPI_CPU_STOP:
+		ipi_cpu_stop(cpu);
+		break;

-			default:
-				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
-				       cpu, nextmsg);
-				break;
-			}
-		} while (msgs);
+	default:
+		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
+		       cpu, ipinr);
+		break;
 	}
-
 	set_irq_regs(old_regs);
 }

 void smp_send_reschedule(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }

 void smp_send_stop(void)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_ipi_message(&mask, IPI_CPU_STOP);
-}
+	unsigned long timeout;

-/*
- * not supported here
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
-	return -EINVAL;
-}
+	if (num_online_cpus() > 1) {
+		cpumask_t mask = cpu_online_map;
+		cpu_clear(smp_processor_id(), mask);

-static void
-on_each_cpu_mask(void (*func)(void *), void *info, int wait,
-	const struct cpumask *mask)
-{
-	preempt_disable();
+		smp_cross_call(&mask, IPI_CPU_STOP);
+	}

-	smp_call_function_many(mask, func, info, wait);
-	if (cpumask_test_cpu(smp_processor_id(), mask))
-		func(info);
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while (num_online_cpus() > 1 && timeout--)
+		udelay(1);

-	preempt_enable();
+	if (num_online_cpus() > 1)
+		pr_warning("SMP: failed to stop secondary CPUs\n");
 }

-/**********************************************************************/
-
 /*
- * TLB operations
+ * not supported here
  */
-struct tlb_args {
-	struct vm_area_struct *ta_vma;
-	unsigned long ta_start;
-	unsigned long ta_end;
-};
-
-static inline void ipi_flush_tlb_all(void *ignored)
-{
-	local_flush_tlb_all();
-}
-
-static inline void ipi_flush_tlb_mm(void *arg)
-{
-	struct mm_struct *mm = (struct mm_struct *)arg;
-
-	local_flush_tlb_mm(mm);
-}
-
-static inline void ipi_flush_tlb_page(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_kernel_page(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_kernel_page(ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_range(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
-}
-
-static inline void ipi_flush_tlb_kernel_range(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
-}
-
-void flush_tlb_all(void)
-{
-	if (tlb_ops_need_broadcast())
-		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
-	else
-		local_flush_tlb_all();
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (tlb_ops_need_broadcast())
-		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
-	else
-		local_flush_tlb_mm(mm);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_vma = vma;
-		ta.ta_start = uaddr;
-		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
-	} else
-		local_flush_tlb_page(vma, uaddr);
-}
-
-void flush_tlb_kernel_page(unsigned long kaddr)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_start = kaddr;
-		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
-	} else
-		local_flush_tlb_kernel_page(kaddr);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_vma = vma;
-		ta.ta_start = start;
-		ta.ta_end = end;
-		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
-	} else
-		local_flush_tlb_range(vma, start, end);
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+int setup_profiling_timer(unsigned int multiplier)
 {
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_start = start;
-		ta.ta_end = end;
-		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
-	} else
-		local_flush_tlb_kernel_range(start, end);
+	return -EINVAL;
 }
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
new file mode 100644
index 000000000000..7dcb35285be7
--- /dev/null
+++ b/arch/arm/kernel/smp_tlb.c
@@ -0,0 +1,139 @@
+/*
+ *  linux/arch/arm/kernel/smp_tlb.c
+ *
+ *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/preempt.h>
+#include <linux/smp.h>
+
+#include <asm/smp_plat.h>
+#include <asm/tlbflush.h>
+
+static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,
+	const struct cpumask *mask)
+{
+	preempt_disable();
+
+	smp_call_function_many(mask, func, info, wait);
+	if (cpumask_test_cpu(smp_processor_id(), mask))
+		func(info);
+
+	preempt_enable();
+}
+
+/**********************************************************************/
+
+/*
+ * TLB operations
+ */
+struct tlb_args {
+	struct vm_area_struct *ta_vma;
+	unsigned long ta_start;
+	unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_all(void *ignored)
+{
+	local_flush_tlb_all();
+}
+
+static inline void ipi_flush_tlb_mm(void *arg)
+{
+	struct mm_struct *mm = (struct mm_struct *)arg;
+
+	local_flush_tlb_mm(mm);
+}
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_kernel_page(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_kernel_page(ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	else
+		local_flush_tlb_all();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	if (tlb_ops_need_broadcast())
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
+	else
+		local_flush_tlb_mm(mm);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = uaddr;
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
+	} else
+		local_flush_tlb_page(vma, uaddr);
+}
+
+void flush_tlb_kernel_page(unsigned long kaddr)
+{
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = kaddr;
+		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	} else
+		local_flush_tlb_kernel_page(kaddr);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+		     unsigned long start, unsigned long end)
+{
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
+	} else
+		local_flush_tlb_range(vma, start, end);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	} else
+		local_flush_tlb_kernel_range(start, end);
+}
+
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 67f933ec4177..dd790745b3ef 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -145,13 +145,3 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)

 	clockevents_register_device(clk);
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * take a local timer down
- */
-void twd_timer_stop(void)
-{
-	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
-}
-#endif
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
new file mode 100644
index 000000000000..7a5760922914
--- /dev/null
+++ b/arch/arm/kernel/swp_emulate.c
@@ -0,0 +1,267 @@
+/*
+ *  linux/arch/arm/kernel/swp_emulate.c
+ *
+ *  Copyright (C) 2009 ARM Limited
+ *  __user_* functions adapted from include/asm/uaccess.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Implements emulation of the SWP/SWPB instructions using load-exclusive and
+ * store-exclusive for processors that have them disabled (or future ones that
+ * might not implement them).
+ *
+ *  Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
+ *  Where: Rt  = destination
+ *	   Rt2 = source
+ *	   Rn  = address
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/perf_event.h>
+
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+
+/*
+ * Error-checking SWP macros implemented using ldrex{b}/strex{b}
+ */
+#define __user_swpX_asm(data, addr, res, temp, B)	\
+	__asm__ __volatile__(				\
+	"	mov		%2, %1\n"		\
+	"0:	ldrex"B"	%1, [%3]\n"		\
+	"1:	strex"B"	%0, %2, [%3]\n"		\
+	"	cmp		%0, #0\n"		\
+	"	movne		%0, %4\n"		\
+	"2:\n"						\
+	"	.section	 .fixup,\"ax\"\n"	\
+	"	.align		2\n"			\
+	"3:	mov		%0, %5\n"		\
+	"	b		2b\n"			\
+	"	.previous\n"				\
+	"	.section	 __ex_table,\"a\"\n"	\
+	"	.align		3\n"			\
+	"	.long		0b, 3b\n"		\
+	"	.long		1b, 3b\n"		\
+	"	.previous"				\
+	: "=&r" (res), "+r" (data), "=&r" (temp)	\
+	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)	\
+	: "cc", "memory")
+
+#define __user_swp_asm(data, addr, res, temp) \
+	__user_swpX_asm(data, addr, res, temp, "")
+#define __user_swpb_asm(data, addr, res, temp) \
+	__user_swpX_asm(data, addr, res, temp, "b")
+
+/*
+ * Macros/defines for extracting register numbers from instruction.
+ */
+#define EXTRACT_REG_NUM(instruction, offset) \
+	(((instruction) & (0xf << (offset))) >> (offset))
+#define RN_OFFSET  16
+#define RT_OFFSET  12
+#define RT2_OFFSET  0
+/*
+ * Bit 22 of the instruction encoding distinguishes between
+ * the SWP and SWPB variants (bit set means SWPB).
+ */
+#define TYPE_SWPB (1 << 22)
+
+static unsigned long swpcounter;
+static unsigned long swpbcounter;
+static unsigned long abtcounter;
+static pid_t	     previous_pid;
+
+#ifdef CONFIG_PROC_FS
+static int proc_read_status(char *page, char **start, off_t off, int count,
+			    int *eof, void *data)
+{
+	char *p = page;
+	int len;
+
+	p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter);
+	p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter);
+	p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter);
+	if (previous_pid != 0)
+		p += sprintf(p, "Last process:\t\t%d\n", previous_pid);
+
+	len = (p - page) - off;
+	if (len < 0)
+		len = 0;
+
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+
+	return len;
+}
+#endif
+
+/*
+ * Set up process info to signal segmentation fault - called on access error.
+ */
+static void set_segfault(struct pt_regs *regs, unsigned long addr)
+{
+	siginfo_t info;
+
+	if (find_vma(current->mm, addr) == NULL)
+		info.si_code = SEGV_MAPERR;
+	else
+		info.si_code = SEGV_ACCERR;
+
+	info.si_signo = SIGSEGV;
+	info.si_errno = 0;
+	info.si_addr  = (void *) instruction_pointer(regs);
+
+	pr_debug("SWP{B} emulation: access caused memory abort!\n");
+	arm_notify_die("Illegal memory access", regs, &info, 0, 0);
+
+	abtcounter++;
+}
+
+static int emulate_swpX(unsigned int address, unsigned int *data,
+			unsigned int type)
+{
+	unsigned int res = 0;
+
+	if ((type != TYPE_SWPB) && (address & 0x3)) {
+		/* SWP to unaligned address not permitted */
+		pr_debug("SWP instruction on unaligned pointer!\n");
+		return -EFAULT;
+	}
+
+	while (1) {
+		unsigned long temp;
+
+		/*
+		 * Barrier required between accessing protected resource and
+		 * releasing a lock for it. Legacy code might not have done
+		 * this, and we cannot determine that this is not the case
+		 * being emulated, so insert always.
+		 */
+		smp_mb();
+
+		if (type == TYPE_SWPB)
+			__user_swpb_asm(*data, address, res, temp);
+		else
+			__user_swp_asm(*data, address, res, temp);
+
+		if (likely(res != -EAGAIN) || signal_pending(current))
+			break;
+
+		cond_resched();
+	}
+
+	if (res == 0) {
+		/*
+		 * Barrier also required between aquiring a lock for a
+		 * protected resource and accessing the resource. Inserted for
+		 * same reason as above.
+		 */
+		smp_mb();
+
+		if (type == TYPE_SWPB)
+			swpbcounter++;
+		else
+			swpcounter++;
+	}
+
+	return res;
+}
+
+/*
+ * swp_handler logs the id of calling process, dissects the instruction, sanity
+ * checks the memory location, calls emulate_swpX for the actual operation and
+ * deals with fixup/error handling before returning
+ */
+static int swp_handler(struct pt_regs *regs, unsigned int instr)
+{
+	unsigned int address, destreg, data, type;
+	unsigned int res = 0;
+
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
+
+	if (current->pid != previous_pid) {
+		pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
+			 current->comm, (unsigned long)current->pid);
+		previous_pid = current->pid;
+	}
+
+	address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
+	data	= regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
+	destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
+
+	type	= instr & TYPE_SWPB;
+
+	pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
+		 EXTRACT_REG_NUM(instr, RN_OFFSET), address,
+		 destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
+
+	/* Check access in reasonable access range for both SWP and SWPB */
+	if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+		pr_debug("SWP{B} emulation: access to %p not allowed!\n",
+			 (void *)address);
+		res = -EFAULT;
+	} else {
+		res = emulate_swpX(address, &data, type);
+	}
+
+	if (res == 0) {
+		/*
+		 * On successful emulation, revert the adjustment to the PC
+		 * made in kernel/traps.c in order to resume execution at the
+		 * instruction following the SWP{B}.
+		 */
+		regs->ARM_pc += 4;
+		regs->uregs[destreg] = data;
+	} else if (res == -EFAULT) {
+		/*
+		 * Memory errors do not mean emulation failed.
+		 * Set up signal info to return SEGV, then return OK
+		 */
+		set_segfault(regs, address);
+	}
+
+	return 0;
+}
+
+/*
+ * Only emulate SWP/SWPB executed in ARM state/User mode.
+ * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
+ */
+static struct undef_hook swp_hook = {
+	.instr_mask = 0x0fb00ff0,
+	.instr_val  = 0x01000090,
+	.cpsr_mask  = MODE_MASK | PSR_T_BIT | PSR_J_BIT,
+	.cpsr_val   = USR_MODE,
+	.fn	    = swp_handler
+};
+
+/*
+ * Register handler and create status file in /proc/cpu
+ * Invoked as late_initcall, since not needed before init spawned.
+ */
+static int __init swp_emulation_init(void)
+{
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *res;
+
+	res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL);
+
+	if (!res)
+		return -ENOMEM;
+
+	res->read_proc = proc_read_status;
+#endif /* CONFIG_PROC_FS */
+
+	printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n");
+	register_undef_hook(&swp_hook);
+
+	return 0;
+}
+
+late_initcall(swp_emulation_init);
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 38c261f9951c..f1e2eb19a67d 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -30,12 +30,13 @@
 #include <asm/leds.h>
 #include <asm/thread_info.h>
 #include <asm/stacktrace.h>
+#include <asm/mach/arch.h>
 #include <asm/mach/time.h>

 /*
  * Our system timer.
  */
-struct sys_timer *system_timer;
+static struct sys_timer *system_timer;

 #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
 /* this needs a better home */
@@ -160,6 +161,7 @@ device_initcall(timer_init_sysfs);

 void __init time_init(void)
 {
+	system_timer = machine_desc->timer;
 	system_timer->init();
 }

diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 446aee97436f..e02f4f7537c5 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -37,6 +37,8 @@

 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };

+void *vectors_page;
+
 #ifdef CONFIG_DEBUG_USER
 unsigned int user_debug;

@@ -756,7 +758,11 @@ static void __init kuser_get_tls_init(unsigned long vectors)

 void __init early_trap_init(void)
 {
+#if defined(CONFIG_CPU_USE_DOMAINS)
 	unsigned long vectors = CONFIG_VECTORS_BASE;
+#else
+	unsigned long vectors = (unsigned long)vectors_page;
+#endif
 	extern char __stubs_start[], __stubs_end[];
 	extern char __vectors_start[], __vectors_end[];
 	extern char __kuser_helper_start[], __kuser_helper_end[];
@@ -780,10 +786,10 @@ void __init early_trap_init(void)
 	 * Copy signal return handlers into the vector page, and
 	 * set sigreturn to be a pointer to these.
 	 */
-	memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
-	       sizeof(sigreturn_codes));
-	memcpy((void *)KERN_RESTART_CODE, syscall_restart_code,
-	       sizeof(syscall_restart_code));
+	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
+	       sigreturn_codes, sizeof(sigreturn_codes));
+	memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+	       syscall_restart_code, sizeof(syscall_restart_code));

 	flush_icache_range(vectors, vectors + PAGE_SIZE);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 897c1a8f1694..86b66f3f2031 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -168,6 +168,7 @@ SECTIONS

 		NOSAVE_DATA
 		CACHELINE_ALIGNED_DATA(32)
+		READ_MOSTLY_DATA(32)

 		/*
 		 * The exception fixup table (might need resorting at runtime)