Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile          |    1
-rw-r--r--  arch/arm/kernel/asm-offsets.c     |   11
-rw-r--r--  arch/arm/kernel/bios32.c          |    5
-rw-r--r--  arch/arm/kernel/entry-armv.S      |    3
-rw-r--r--  arch/arm/kernel/head-common.S     |   90
-rw-r--r--  arch/arm/kernel/head-nommu.S      |    3
-rw-r--r--  arch/arm/kernel/head.S            |   69
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c   |   68
-rw-r--r--  arch/arm/kernel/irq.c             |   50
-rw-r--r--  arch/arm/kernel/kprobes-decode.c  |    2
-rw-r--r--  arch/arm/kernel/module.c          |   49
-rw-r--r--  arch/arm/kernel/perf_event.c      |    2
-rw-r--r--  arch/arm/kernel/pmu.c             |   22
-rw-r--r--  arch/arm/kernel/ptrace.c          |  389
-rw-r--r--  arch/arm/kernel/ptrace.h          |   37
-rw-r--r--  arch/arm/kernel/return_address.c  |    1
-rw-r--r--  arch/arm/kernel/setup.c           |   43
-rw-r--r--  arch/arm/kernel/signal.c          |   13
-rw-r--r--  arch/arm/kernel/sleep.S           |  134
-rw-r--r--  arch/arm/kernel/smp_scu.c         |   23
-rw-r--r--  arch/arm/kernel/smp_twd.c         |    7
-rw-r--r--  arch/arm/kernel/traps.c           |    4
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S     |   11
23 files changed, 407 insertions(+), 630 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 185ee822c935..74554f1742d7 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
 obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
+obj-$(CONFIG_PM)		+= sleep.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK)	+= sched_clock.o
 obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 82da66172132..927522cfc12e 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,9 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
 #include <asm/mach/arch.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
@@ -114,6 +117,14 @@ int main(void)
 #ifdef MULTI_PABORT
   DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
 #endif
+#ifdef MULTI_CPU
+  DEFINE(CPU_SLEEP_SIZE,	offsetof(struct processor, suspend_size));
+  DEFINE(CPU_DO_SUSPEND,	offsetof(struct processor, do_suspend));
+  DEFINE(CPU_DO_RESUME,		offsetof(struct processor, do_resume));
+#endif
+#ifdef MULTI_CACHE
+  DEFINE(CACHE_FLUSH_KERN_ALL,	offsetof(struct cpu_cache_fns, flush_kern_all));
+#endif
   BLANK();
   DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
   DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index c6273a3bfc25..d86fcd44b220 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -583,6 +583,11 @@ void __init pci_common_init(struct hw_pci *hw)
 	 * Assign resources.
 	 */
 	pci_bus_assign_resources(bus);
+
+	/*
+	 * Enable bridges
+	 */
+	pci_enable_bridges(bus);
 }
 
 /*
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2b46fea36c9f..e8d885676807 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -16,7 +16,8 @@
  */
 
 #include <asm/memory.h>
-#include <asm/glue.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
 #include <asm/vfpmacros.h>
 #include <mach/entry-macro.S>
 #include <asm/thread_notify.h>
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 8f57515bbdb0..c84b57d27d07 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -25,83 +25,6 @@
  * machine ID for example).
  */
 	__HEAD
-__error_a:
-#ifdef CONFIG_DEBUG_LL
-	mov	r4, r1				@ preserve machine ID
-	adr	r0, str_a1
-	bl	printascii
-	mov	r0, r4
-	bl	printhex8
-	adr	r0, str_a2
-	bl	printascii
-	adr	r3, __lookup_machine_type_data
-	ldmia	r3, {r4, r5, r6}		@ get machine desc list
-	sub	r4, r3, r4			@ get offset between virt&phys
-	add	r5, r5, r4			@ convert virt addresses to
-	add	r6, r6, r4			@ physical address space
-1:	ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type
-	bl	printhex8
-	mov	r0, #'\t'
-	bl	printch
-	ldr	r0, [r5, #MACHINFO_NAME]	@ get machine name
-	add	r0, r0, r4
-	bl	printascii
-	mov	r0, #'\n'
-	bl	printch
-	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-	cmp	r5, r6
-	blo	1b
-	adr	r0, str_a3
-	bl	printascii
-	b	__error
-ENDPROC(__error_a)
-
-str_a1:	.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
-str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
-str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
-	.align
-#else
-	b	__error
-#endif
-
-/*
- * Lookup machine architecture in the linker-build list of architectures.
- * Note that we can't use the absolute addresses for the __arch_info
- * lists since we aren't running with the MMU on (and therefore, we are
- * not in the correct address space).  We have to calculate the offset.
- *
- *	r1 = machine architecture number
- * Returns:
- *	r3, r4, r6 corrupted
- *	r5 = mach_info pointer in physical address space
- */
-__lookup_machine_type:
-	adr	r3, __lookup_machine_type_data
-	ldmia	r3, {r4, r5, r6}
-	sub	r3, r3, r4			@ get offset between virt&phys
-	add	r5, r5, r3			@ convert virt addresses to
-	add	r6, r6, r3			@ physical address space
-1:	ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
-	teq	r3, r1				@ matches loader number?
-	beq	2f				@ found
-	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-	cmp	r5, r6
-	blo	1b
-	mov	r5, #0				@ unknown machine
-2:	mov	pc, lr
-ENDPROC(__lookup_machine_type)
-
-/*
- * Look in arch/arm/kernel/arch.[ch] for information about the
- * __arch_info structures.
- */
-	.align	2
-	.type	__lookup_machine_type_data, %object
-__lookup_machine_type_data:
-	.long	.
-	.long	__arch_info_begin
-	.long	__arch_info_end
-	.size	__lookup_machine_type_data, . - __lookup_machine_type_data
-
 
@@ -109,8 +32,6 @@ __lookup_machine_type_data:
  * of this function may be more lenient with the physical address and
  * may also be able to move the ATAGS block if necessary.
  *
- * r8  = machinfo
- *
  * Returns:
  *  r2 either valid atags pointer, or zero
  *  r5, r6 corrupted
@@ -185,17 +106,6 @@ __mmap_switched_data:
 	.size	__mmap_switched_data, . - __mmap_switched_data
 
 /*
- * This provides a C-API version of __lookup_machine_type
- */
-ENTRY(lookup_machine_type)
-	stmfd	sp!, {r4 - r6, lr}
-	mov	r1, r0
-	bl	__lookup_machine_type
-	mov	r0, r5
-	ldmfd	sp!, {r4 - r6, pc}
-ENDPROC(lookup_machine_type)
-
-/*
  * This provides a C-API version of __lookup_processor_type
  */
 ENTRY(lookup_processor_type)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 814ce1a73270..6b1e0ad9ec3b 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -44,9 +44,6 @@ ENTRY(stext)
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
 	beq	__error_p			@ yes, error 'p'
-	bl	__lookup_machine_type		@ r5=machinfo
-	movs	r8, r5				@ invalid machine (r5=0)?
-	beq	__error_a			@ yes, error 'a'
 
 	adr	lr, BSYM(__after_proc_init)	@ return (PIC) address
  ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f17d9a09e8fb..60fe2795f4a3 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -87,14 +87,10 @@ ENTRY(stext)
 	movs	r10, r5				@ invalid processor (r5=0)?
  THUMB(	it	eq )		@ force fixup-able long branch encoding
 	beq	__error_p			@ yes, error 'p'
-	bl	__lookup_machine_type		@ r5=machinfo
-	movs	r8, r5				@ invalid machine (r5=0)?
- THUMB(	it	eq )		@ force fixup-able long branch encoding
-	beq	__error_a			@ yes, error 'a'
 
 	/*
 	 * r1 = machine no, r2 = atags,
-	 * r8 = machinfo, r9 = cpuid, r10 = procinfo
+	 * r9 = cpuid, r10 = procinfo
 	 */
 	bl	__vet_atags
 #ifdef CONFIG_SMP_ON_UP
@@ -105,7 +101,7 @@ ENTRY(stext)
 	/*
 	 * The following calls CPU specific code in a position independent
 	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
-	 * xxx_proc_info structure selected by __lookup_machine_type
+	 * xxx_proc_info structure selected by __lookup_processor_type
 	 * above.  On return, the CPU will be ready for the MMU to be
 	 * turned on, and r0 will hold the CPU control register value.
 	 */
@@ -124,7 +120,6 @@ ENDPROC(stext)
  * amount which are required to get the kernel running, which
  * generally means mapping in the kernel code.
  *
- * r8  = machinfo
  * r9  = cpuid
  * r10 = procinfo
  *
@@ -391,25 +386,24 @@ ENDPROC(__turn_mmu_on)
 
 
 #ifdef CONFIG_SMP_ON_UP
+	__INIT
 __fixup_smp:
-	mov	r4, #0x00070000
-	orr	r3, r4, #0xff000000	@ mask 0xff070000
-	orr	r4, r4, #0x41000000	@ val 0x41070000
-	and	r0, r9, r3
-	teq	r0, r4			@ ARM CPU and ARMv6/v7?
+	and	r3, r9, #0x000f0000	@ architecture version
+	teq	r3, #0x000f0000		@ CPU ID supported?
 	bne	__fixup_smp_on_up	@ no, assume UP
 
-	orr	r3, r3, #0x0000ff00
-	orr	r3, r3, #0x000000f0	@ mask 0xff07fff0
+	bic	r3, r9, #0x00ff0000
+	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
+	mov	r4, #0x41000000
 	orr	r4, r4, #0x0000b000
-	orr	r4, r4, #0x00000020	@ val 0x4107b020
-	and	r0, r9, r3
-	teq	r0, r4			@ ARM 11MPCore?
+	orr	r4, r4, #0x00000020	@ val 0x4100b020
+	teq	r3, r4			@ ARM 11MPCore?
 	moveq	pc, lr			@ yes, assume SMP
 
 	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
-	tst	r0, #1 << 31
-	movne	pc, lr			@ bit 31 => SMP
+	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
+	teq	r0, #0x80000000		@ not part of a uniprocessor system?
+	moveq	pc, lr			@ yes, assume SMP
 
 __fixup_smp_on_up:
 	adr	r0, 1f
@@ -417,18 +411,7 @@ __fixup_smp_on_up:
 	sub	r3, r0, r3
 	add	r4, r4, r3
 	add	r5, r5, r3
-2:	cmp	r4, r5
-	movhs	pc, lr
-	ldmia	r4!, {r0, r6}
- ARM(	str	r6, [r0, r3]	)
- THUMB(	add	r0, r0, r3	)
-#ifdef __ARMEB__
- THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
-#endif
- THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
- THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
- THUMB(	strh	r6, [r0]	)
-	b	2b
+	b	__do_fixup_smp_on_up
 ENDPROC(__fixup_smp)
 
 	.align
@@ -442,7 +425,31 @@ smp_on_up:
 	ALT_SMP(.long 1)
 	ALT_UP(.long 0)
 	.popsection
+#endif
 
+	.text
+__do_fixup_smp_on_up:
+	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
 #endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	__do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r4, r0
+	add	r5, r0, r1
+	mov	r3, #0
+	bl	__do_fixup_smp_on_up
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
 
 #include "head-common.S"
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index c9f3f0467570..44b84fe6e1b0 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -137,11 +137,10 @@ static u8 get_debug_arch(void)
 	u32 didr;
 
 	/* Do we implement the extended CPUID interface? */
-	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-		pr_warning("CPUID feature registers not supported. "
-			   "Assuming v6 debug is present.\n");
+	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
+	    "CPUID feature registers not supported. "
+	    "Assuming v6 debug is present.\n"))
 		return ARM_DEBUG_ARCH_V6;
-	}
 
 	ARM_DBG_READ(c0, 0, didr);
 	return (didr >> 16) & 0xf;
@@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void)
 	return debug_arch;
 }
 
+static int debug_arch_supported(void)
+{
+	u8 arch = get_debug_arch();
+	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+}
+
 /* Determine number of BRP register available. */
 static int get_num_brp_resources(void)
 {
@@ -268,6 +273,9 @@ out:
 
 int hw_breakpoint_slots(int type)
 {
+	if (!debug_arch_supported())
+		return 0;
+
 	/*
 	 * We can be called early, so don't rely on
 	 * our static variables being initialised.
@@ -828,20 +836,33 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
 {
-	int i;
+	int i, cpu = smp_processor_id();
+	u32 dbg_power;
+	cpumask_t *cpumask = info;
 
 	/*
 	 * v7 debug contains save and restore registers so that debug state
-	 * can be maintained across low-power modes without leaving
-	 * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
-	 * we can write to the debug registers out of reset, so we must
-	 * unlock the OS Lock Access Register to avoid taking undefined
-	 * instruction exceptions later on.
+	 * can be maintained across low-power modes without leaving the debug
+	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+	 * the debug registers out of reset, so we must unlock the OS Lock
+	 * Access Register to avoid taking undefined instruction exceptions
+	 * later on.
 	 */
 	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
 		/*
+		 * Ensure sticky power-down is clear (i.e. debug logic is
+		 * powered up).
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 0) {
+			pr_warning("CPU %d debug is powered down!\n", cpu);
+			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+			return;
+		}
+
+		/*
 		 * Unconditionally clear the lock by writing a value
 		 * other than 0xC5ACCE55 to the access register.
 		 */
@@ -879,10 +900,11 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
 	u32 dscr;
+	cpumask_t cpumask = { CPU_BITS_NONE };
 
 	debug_arch = get_debug_arch();
 
-	if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+	if (!debug_arch_supported()) {
 		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
 		return 0;
 	}
@@ -899,18 +921,24 @@ static int __init arch_hw_breakpoint_init(void)
 	pr_info("%d breakpoint(s) reserved for watchpoint "
 		"single-step.\n", core_num_reserved_brps);
 
+	/*
+	 * Reset the breakpoint resources. We assume that a halting
+	 * debugger will leave the world in a nice state for us.
+	 */
+	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+	if (!cpumask_empty(&cpumask)) {
+		core_num_brps = 0;
+		core_num_reserved_brps = 0;
+		core_num_wrps = 0;
+		return 0;
+	}
+
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
+		max_watchpoint_len = 4;
 		pr_warning("halting debug mode enabled. Assuming maximum "
-			   "watchpoint size of 4 bytes.");
+			   "watchpoint size of %u bytes.", max_watchpoint_len);
 	} else {
-		/*
-		 * Reset the breakpoint resources. We assume that a halting
-		 * debugger will leave the world in a nice state for us.
-		 */
-		smp_call_function(reset_ctrl_regs, NULL, 1);
-		reset_ctrl_regs(NULL);
-
 		/* Work out the maximum supported watchpoint length. */
 		max_watchpoint_len = get_max_wp_len();
 		pr_info("maximum watchpoint size is %u bytes.\n",
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 28536e352deb..3535d3793e65 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -179,14 +179,21 @@ int __init arch_probe_nr_irqs(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static bool migrate_one_irq(struct irq_data *d)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu);
+	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+	bool ret = false;
 
-	raw_spin_lock_irq(&desc->lock);
-	desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
-					      cpumask_of(cpu), false);
-	raw_spin_unlock_irq(&desc->lock);
+	if (cpu >= nr_cpu_ids) {
+		cpu = cpumask_any(cpu_online_mask);
+		ret = true;
+	}
+
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
+
+	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+
+	return ret;
 }
 
 /*
@@ -198,25 +205,30 @@ void migrate_irqs(void)
 {
 	unsigned int i, cpu = smp_processor_id();
 	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	for_each_irq_desc(i, desc) {
 		struct irq_data *d = &desc->irq_data;
+		bool affinity_broken = false;
 
-		if (d->node == cpu) {
-			unsigned int newcpu = cpumask_any_and(d->affinity,
-							      cpu_online_mask);
-			if (newcpu >= nr_cpu_ids) {
-				if (printk_ratelimit())
-					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
-					       i, cpu);
+		raw_spin_lock(&desc->lock);
+		do {
+			if (desc->action == NULL)
+				break;
 
-				cpumask_setall(d->affinity);
-				newcpu = cpumask_any_and(d->affinity,
-							 cpu_online_mask);
-			}
+			if (d->node != cpu)
+				break;
 
-			route_irq(desc, i, newcpu);
-		}
+			affinity_broken = migrate_one_irq(d);
+		} while (0);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken && printk_ratelimit())
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
 	}
+
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 2c1f0050c9c4..8f6ed43861f1 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
 		return space_cccc_1100_010x(insn, asi);
 
-	} else if ((insn & 0x0e000000) == 0x0c400000) {
+	} else if ((insn & 0x0e000000) == 0x0c000000) {
 
 		return space_cccc_110x(insn, asi);
 
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2cfe8161b478..6fcf22cf385c 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -22,6 +22,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/smp_plat.h>
 #include <asm/unwind.h>
 
 #ifdef CONFIG_XIP_KERNEL
@@ -75,6 +76,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 	for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
 		unsigned long loc;
 		Elf32_Sym *sym;
+		const char *symname;
 		s32 offset;
 #ifdef CONFIG_THUMB2_KERNEL
 		u32 upper, lower, sign, j1, j2;
@@ -82,18 +84,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 
 		offset = ELF32_R_SYM(rel->r_info);
 		if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
-			printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n",
+			pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
 			       module->name, relindex, i);
 			return -ENOEXEC;
 		}
 
 		sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
+		symname = strtab + sym->st_name;
 
 		if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
-			printk(KERN_ERR "%s: out of bounds relocation, "
-				"section %d reloc %d offset %d size %d\n",
-				module->name, relindex, i, rel->r_offset,
-				dstsec->sh_size);
+			pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
+			       module->name, relindex, i, symname,
+			       rel->r_offset, dstsec->sh_size);
 			return -ENOEXEC;
 		}
 
@@ -119,10 +121,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (offset & 3 ||
 			    offset <= (s32)0xfe000000 ||
 			    offset >= (s32)0x02000000) {
-				printk(KERN_ERR
-				       "%s: relocation out of range, section "
-				       "%d reloc %d sym '%s'\n", module->name,
-				       relindex, i, strtab + sym->st_name);
+				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
 				return -ENOEXEC;
 			}
 
@@ -195,10 +197,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (!(offset & 1) ||
 			    offset <= (s32)0xff000000 ||
 			    offset >= (s32)0x01000000) {
-				printk(KERN_ERR
-				       "%s: relocation out of range, section "
-				       "%d reloc %d sym '%s'\n", module->name,
-				       relindex, i, strtab + sym->st_name);
+				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
 				return -ENOEXEC;
 			}
 
@@ -268,12 +270,28 @@ struct mod_unwind_map {
 	const Elf_Shdr *txt_sec;
 };
 
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+	const Elf_Shdr *sechdrs, const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+
+	return NULL;
+}
+
+extern void fixup_smp(const void *, unsigned long);
+
 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 		    struct module *mod)
 {
+	const Elf_Shdr * __maybe_unused s = NULL;
 #ifdef CONFIG_ARM_UNWIND
 	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
 	struct mod_unwind_map maps[ARM_SEC_MAX];
 	int i;
 
@@ -315,6 +333,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 					       maps[i].txt_sec->sh_addr,
 					       maps[i].txt_sec->sh_size);
 #endif
+	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+	if (s && !is_smp())
+		fixup_smp((void *)s->sh_addr, s->sh_size);
 	return 0;
 }
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5efa2647a2fb..d150ad1ccb5d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail,
 	 * Frame pointers should strictly progress back up the stack
 	 * (towards higher addresses).
 	 */
-	if (tail >= buftail.fp)
+	if (tail + 1 >= buftail.fp)
 		return NULL;
 
 	return buftail.fp - 1;
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index b8af96ea62e6..2c79eec19262 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
 			   irq, cpu);
 		return err;
 #else
-	return 0;
+	return -EINVAL;
 #endif
 }
 
 static int
 init_cpu_pmu(void)
 {
-	int i, err = 0;
+	int i, irqs, err = 0;
 	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
 
-	if (!pdev) {
-		err = -ENODEV;
-		goto out;
-	}
+	if (!pdev)
+		return -ENODEV;
+
+	irqs = pdev->num_resources;
+
+	/*
+	 * If we have a single PMU interrupt that we can't shift, assume that
+	 * we're running on a uniprocessor machine and continue.
+	 */
+	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
+		return 0;
 
-	for (i = 0; i < pdev->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
 		err = set_irq_affinity(platform_get_irq(pdev, i), i);
 		if (err)
 			break;
 	}
 
-out:
 	return err;
 }
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 19c6816db61e..2bf27f364d09 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -26,8 +26,6 @@
 #include <asm/system.h>
 #include <asm/traps.h>
 
-#include "ptrace.h"
-
 #define REG_PC	15
 #define REG_PSR	16
 /*
@@ -184,389 +182,12 @@ put_user_reg(struct task_struct *task, int offset, long data)
 	return ret;
 }
 
-static inline int
-read_u32(struct task_struct *task, unsigned long addr, u32 *res)
-{
-	int ret;
-
-	ret = access_process_vm(task, addr, res, sizeof(*res), 0);
-
-	return ret == sizeof(*res) ? 0 : -EIO;
-}
-
-static inline int
-read_instr(struct task_struct *task, unsigned long addr, u32 *res)
-{
-	int ret;
-
-	if (addr & 1) {
-		u16 val;
-		ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0);
-		ret = ret == sizeof(val) ? 0 : -EIO;
-		*res = val;
-	} else {
-		u32 val;
-		ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
-		ret = ret == sizeof(val) ? 0 : -EIO;
-		*res = val;
-	}
-	return ret;
-}
-
-/*
- * Get value of register `rn' (in the instruction)
- */
-static unsigned long
-ptrace_getrn(struct task_struct *child, unsigned long insn)
-{
-	unsigned int reg = (insn >> 16) & 15;
-	unsigned long val;
-
-	val = get_user_reg(child, reg);
-	if (reg == 15)
-		val += 8;
-
-	return val;
-}
-
-/*
- * Get value of operand 2 (in an ALU instruction)
- */
-static unsigned long
-ptrace_getaluop2(struct task_struct *child, unsigned long insn)
-{
-	unsigned long val;
-	int shift;
-	int type;
-
-	if (insn & 1 << 25) {
-		val = insn & 255;
-		shift = (insn >> 8) & 15;
-		type = 3;
-	} else {
-		val = get_user_reg (child, insn & 15);
-
-		if (insn & (1 << 4))
-			shift = (int)get_user_reg (child, (insn >> 8) & 15);
-		else
-			shift = (insn >> 7) & 31;
-
-		type = (insn >> 5) & 3;
-	}
-
-	switch (type) {
-	case 0:	val <<= shift;	break;
-	case 1:	val >>= shift;	break;
-	case 2:
-		val = (((signed long)val) >> shift);
-		break;
-	case 3:
-		val = (val >> shift) | (val << (32 - shift));
-		break;
-	}
-	return val;
-}
-
-/*
- * Get value of operand 2 (in a LDR instruction)
- */
-static unsigned long
-ptrace_getldrop2(struct task_struct *child, unsigned long insn)
-{
-	unsigned long val;
-	int shift;
-	int type;
-
-	val = get_user_reg(child, insn & 15);
-	shift = (insn >> 7) & 31;
-	type = (insn >> 5) & 3;
-
-	switch (type) {
-	case 0:	val <<= shift;	break;
-	case 1:	val >>= shift;	break;
-	case 2:
-		val = (((signed long)val) >> shift);
-		break;
-	case 3:
-		val = (val >> shift) | (val << (32 - shift));
-		break;
-	}
-	return val;
-}
-
-#define OP_MASK	0x01e00000
-#define OP_AND	0x00000000
-#define OP_EOR	0x00200000
-#define OP_SUB	0x00400000
-#define OP_RSB	0x00600000
-#define OP_ADD	0x00800000
-#define OP_ADC	0x00a00000
-#define OP_SBC	0x00c00000
-#define OP_RSC	0x00e00000
-#define OP_ORR	0x01800000
-#define OP_MOV	0x01a00000
-#define OP_BIC	0x01c00000
-#define OP_MVN	0x01e00000
-
-static unsigned long
-get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
-{
-	u32 alt = 0;
-
-	switch (insn & 0x0e000000) {
-	case 0x00000000:
-	case 0x02000000: {
-		/*
-		 * data processing
-		 */
-		long aluop1, aluop2, ccbit;
-
-		if ((insn & 0x0fffffd0) == 0x012fff10) {
-			/*
-			 * bx or blx
-			 */
-			alt = get_user_reg(child, insn & 15);
-			break;
-		}
-
-
-		if ((insn & 0xf000) != 0xf000)
-			break;
-
-		aluop1 = ptrace_getrn(child, insn);
-		aluop2 = ptrace_getaluop2(child, insn);
-		ccbit  = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;
-
-		switch (insn & OP_MASK) {
-		case OP_AND: alt = aluop1 & aluop2;		break;
-		case OP_EOR: alt = aluop1 ^ aluop2;		break;
-		case OP_SUB: alt = aluop1 - aluop2;		break;
-		case OP_RSB: alt = aluop2 - aluop1;		break;
-		case OP_ADD: alt = aluop1 + aluop2;		break;
-		case OP_ADC: alt = aluop1 + aluop2 + ccbit;	break;
-		case OP_SBC: alt = aluop1 - aluop2 + ccbit;	break;
-		case OP_RSC: alt = aluop2 - aluop1 + ccbit;	break;
-		case OP_ORR: alt = aluop1 | aluop2;		break;
-		case OP_MOV: alt = aluop2;			break;
-		case OP_BIC: alt = aluop1 & ~aluop2;		break;
-		case OP_MVN: alt = ~aluop2;			break;
-		}
-		break;
-	}
-
-	case 0x04000000:
-	case 0x06000000:
-		/*
-		 * ldr
-		 */
-		if ((insn & 0x0010f000) == 0x0010f000) {
-			unsigned long base;
-
-			base = ptrace_getrn(child, insn);
-			if (insn & 1 << 24) {
-				long aluop2;
-
-				if (insn & 0x02000000)
-					aluop2 = ptrace_getldrop2(child, insn);
-				else
-					aluop2 = insn & 0xfff;
-
-				if (insn & 1 << 23)
-					base += aluop2;
-				else
-					base -= aluop2;
-			}
-			read_u32(child, base, &alt);
-		}
-		break;
-
-	case 0x08000000:
-		/*
-		 * ldm
-		 */
-		if ((insn & 0x00108000) == 0x00108000) {
-			unsigned long base;
-			unsigned int nr_regs;
-
-			if (insn & (1 << 23)) {
-				nr_regs = hweight16(insn & 65535) << 2;
-
-				if (!(insn & (1 << 24)))
-					nr_regs -= 4;
-			} else {
-				if (insn & (1 << 24))
-					nr_regs = -4;
-				else
-					nr_regs = 0;
-			}
-
-			base = ptrace_getrn(child, insn);
-
-			read_u32(child, base + nr_regs, &alt);
-			break;
-		}
-		break;
-
-	case 0x0a000000: {
-		/*
-		 * bl or b
-		 */
-		signed long displ;
-		/* It's a branch/branch link: instead of trying to
-		 * figure out whether the branch will be taken or not,
-		 * we'll put a breakpoint at both locations.  This is
-		 * simpler, more reliable, and probably not a whole lot
-		 * slower than the alternative approach of emulating the
-		 * branch.
-		 */
-		displ = (insn & 0x00ffffff) << 8;
-		displ = (displ >> 6) + 8;
-		if (displ != 0 && displ != 4)
-			alt = pc + displ;
-	}
-	break;
-	}
-
-	return alt;
-}
-
-static int
-swap_insn(struct task_struct *task, unsigned long addr,
-	  void *old_insn, void *new_insn, int size)
-{
-	int ret;
-
-	ret = access_process_vm(task, addr, old_insn, size, 0);
-	if (ret == size)
-		ret = access_process_vm(task, addr, new_insn, size, 1);
-	return ret;
-}
-
-static void
-add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
-{
-	int nr = dbg->nsaved;
-
-	if (nr < 2) {
-		u32 new_insn = BREAKINST_ARM;
-		int res;
-
-		res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);
-
-		if (res == 4) {
-			dbg->bp[nr].address = addr;
-			dbg->nsaved += 1;
-		}
-	} else
-		printk(KERN_ERR "ptrace: too many breakpoints\n");
-}
-
-/*
- * Clear one breakpoint in the user program.  We copy what the hardware
- * does and use bit 0 of the address to indicate whether this is a Thumb
- * breakpoint or an ARM breakpoint.
- */
-static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
-{
-	unsigned long addr = bp->address;
-	union debug_insn old_insn;
-	int ret;
-
-	if (addr & 1) {
-		ret = swap_insn(task, addr & ~1, &old_insn.thumb,
-				&bp->insn.thumb, 2);
-
-		if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
-			printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
-				"0x%08lx (0x%04x)\n", task->comm,
-				task_pid_nr(task), addr, old_insn.thumb);
-	} else {
-		ret = swap_insn(task, addr & ~3, &old_insn.arm,
-				&bp->insn.arm, 4);
-
-		if (ret != 4 || old_insn.arm != BREAKINST_ARM)
-			printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
-				"0x%08lx (0x%08x)\n", task->comm,
-				task_pid_nr(task), addr, old_insn.arm);
-	}
-}
-
-void ptrace_set_bpt(struct task_struct *child)
-{
-	struct pt_regs *regs;
-	unsigned long pc;
-	u32 insn;
-	int res;
-
-	regs = task_pt_regs(child);
-	pc = instruction_pointer(regs);
-
-	if (thumb_mode(regs)) {
-		printk(KERN_WARNING "ptrace: can't handle thumb mode\n");
-		return;
-	}
-
-	res = read_instr(child, pc, &insn);
-	if (!res) {
-		struct debug_info *dbg = &child->thread.debug;
-		unsigned long alt;
-
-		dbg->nsaved = 0;
-
-		alt = get_branch_address(child, pc, insn);
-		if (alt)
-			add_breakpoint(child, dbg, alt);
-
-		/*
-		 * Note that we ignore the result of setting the above
-		 * breakpoint since it may fail.  When it does, this is
-		 * not so much an error, but a forewarning that we may
-		 * be receiving a prefetch abort shortly.
-		 *
-		 * If we don't set this breakpoint here, then we can
-		 * lose control of the thread during single stepping.
-		 */
-		if (!alt || predicate(insn) != PREDICATE_ALWAYS)
-			add_breakpoint(child, dbg, pc + 4);
-	}
-}
-
-/*
- * Ensure no single-step breakpoint is pending.  Returns non-zero
- * value if child was being single-stepped.
- */
-void ptrace_cancel_bpt(struct task_struct *child)
-{
-	int i, nsaved = child->thread.debug.nsaved;
-
-	child->thread.debug.nsaved = 0;
-
-	if (nsaved > 2) {
-		printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
-		nsaved = 2;
-	}
-
-	for (i = 0; i < nsaved; i++)
-		clear_breakpoint(child, &child->thread.debug.bp[i]);
-}
-
-void user_disable_single_step(struct task_struct *task)
-{
-	task->ptrace &= ~PT_SINGLESTEP;
-	ptrace_cancel_bpt(task);
-}
-
-void user_enable_single_step(struct task_struct *task)
-{
-	task->ptrace |= PT_SINGLESTEP;
-}
-
 /*
  * Called by kernel/ptrace.c when detaching..
  */
 void ptrace_disable(struct task_struct *child)
 {
-	user_disable_single_step(child);
+	/* Nothing to do. */
 }
 
 /*
@@ -576,8 +197,6 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
 {
 	siginfo_t info;
 
-	ptrace_cancel_bpt(tsk);
-
 	info.si_signo = SIGTRAP;
 	info.si_errno = 0;
 	info.si_code  = TRAP_BRKPT;
@@ -996,10 +615,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
 		while (!(arch_ctrl.len & 0x1))
 			arch_ctrl.len >>= 1;
 
-		if (idx & 0x1)
-			reg = encode_ctrl_reg(arch_ctrl);
-		else
+		if (num & 0x1)
 			reg = bp->attr.bp_addr;
+		else
+			reg = encode_ctrl_reg(arch_ctrl);
 	}
 
 put:
diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h
deleted file mode 100644
index 3926605b82ea..000000000000
--- a/arch/arm/kernel/ptrace.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * linux/arch/arm/kernel/ptrace.h
- *
- * Copyright (C) 2000-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/ptrace.h>
-
-extern void ptrace_cancel_bpt(struct task_struct *);
-extern void ptrace_set_bpt(struct task_struct *);
-extern void ptrace_break(struct task_struct *, struct pt_regs *);
-
-/*
- * Send SIGTRAP if we're single-stepping
- */
-static inline void single_step_trap(struct task_struct *task)
-{
-	if (task->ptrace & PT_SINGLESTEP) {
-		ptrace_cancel_bpt(task);
-		send_sig(SIGTRAP, task, 1);
-	}
-}
-
-static inline void single_step_clear(struct task_struct *task)
-{
-	if (task->ptrace & PT_SINGLESTEP)
-		ptrace_cancel_bpt(task);
-}
-
-static inline void single_step_set(struct task_struct *task)
-{
-	if (task->ptrace & PT_SINGLESTEP)
-		ptrace_set_bpt(task);
-}
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index df246da4ceca..0b13a72f855d 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  */
 #include <linux/module.h>
+#include <linux/ftrace.h>
 
 #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
 #include <linux/sched.h>
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 420b8d6485d6..db2382853450 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@ int cpu_architecture(void)
 	 * Register 0 and check for VMSAv7 or PMSAv7 */
 	asm("mrc	p15, 0, %0, c0, c1, 4"
 	    : "=r" (mmfr0));
-	if ((mmfr0 & 0x0000000f) == 0x00000003 ||
-	    (mmfr0 & 0x000000f0) == 0x00000030)
+	if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+	    (mmfr0 & 0x000000f0) >= 0x00000030)
 		cpu_arch = CPU_ARCH_ARMv7;
 	else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 		 (mmfr0 & 0x000000f0) == 0x00000020)
@@ -308,7 +308,44 @@ static void __init cacheid_init(void)
  * already provide the required functionality.
  */
 extern struct proc_info_list *lookup_processor_type(unsigned int);
-extern struct machine_desc *lookup_machine_type(unsigned int);
+
+static void __init early_print(const char *str, ...)
+{
+	extern void printascii(const char *);
+	char buf[256];
+	va_list ap;
+
+	va_start(ap, str);
+	vsnprintf(buf, sizeof(buf), str, ap);
+	va_end(ap);
+
+#ifdef CONFIG_DEBUG_LL
+	printascii(buf);
+#endif
+	printk("%s", buf);
+}
+
+static struct machine_desc * __init lookup_machine_type(unsigned int type)
+{
+	extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+	struct machine_desc *p;
+
+	for (p = __arch_info_begin; p < __arch_info_end; p++)
+		if (type == p->nr)
+			return p;
+
+	early_print("\n"
+		"Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
+		"Available machine support:\n\nID (hex)\tNAME\n", type);
+
+	for (p = __arch_info_begin; p < __arch_info_end; p++)
+		early_print("%08x\t%s\n", p->nr, p->name);
+
+	early_print("\nPlease check your kernel config and/or bootloader.\n");
+
+	while (true)
+		/* can't use cpu_relax() here as it may require MMU setup */;
+}
 
 static void __init feat_v6_fixup(void)
 {
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 907d5a620bca..cb8398317644 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -20,7 +20,6 @@
 #include <asm/unistd.h>
 #include <asm/vfp.h>
 
-#include "ptrace.h"
 #include "signal.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -348,8 +347,6 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
 	if (restore_sigframe(regs, frame))
 		goto badframe;
 
-	single_step_trap(current);
-
 	return regs->ARM_r0;
 
 badframe:
@@ -383,8 +380,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
 	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
 		goto badframe;
 
-	single_step_trap(current);
-
 	return regs->ARM_r0;
 
 badframe:
@@ -474,7 +469,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	unsigned long handler = (unsigned long)ka->sa.sa_handler;
 	unsigned long retcode;
 	int thumb = 0;
-	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+	cpsr |= PSR_ENDSTATE;
 
 	/*
 	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
@@ -704,8 +701,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 	if (try_to_freeze())
 		goto no_signal;
 
-	single_step_clear(current);
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		sigset_t *oldset;
@@ -724,7 +719,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 			if (test_thread_flag(TIF_RESTORE_SIGMASK))
 				clear_thread_flag(TIF_RESTORE_SIGMASK);
 		}
-		single_step_set(current);
 		return;
 	}
 
@@ -770,7 +764,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 		}
 	}
-	single_step_set(current);
 }
 
 asmlinkage void
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
new file mode 100644
index 000000000000..bfad698a02e7
--- /dev/null
+++ b/arch/arm/kernel/sleep.S
@@ -0,0 +1,134 @@
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+#include <asm/system.h>
+	.text
+
+/*
+ * Save CPU state for a suspend
+ *	r1 = v:p offset
+ *	r3 = virtual return function
+ * Note: sp is decremented to allocate space for CPU state on stack
+ * r0-r3,r9,r10,lr corrupted
+ */
+ENTRY(cpu_suspend)
+	mov	r9, lr
+#ifdef MULTI_CPU
+	ldr	r10, =processor
+	mov	r2, sp			@ current virtual SP
+	ldr	r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
+	sub	sp, sp, r0		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer
+	add	ip, ip, r1		@ convert resume fn to phys
+	stmfd	sp!, {r1, r2, r3, ip}	@ save v:p, virt SP, retfn, phys resume fn
+	ldr	r3, =sleep_save_sp
+	add	r2, sp, r1		@ convert SP to phys
+#ifdef CONFIG_SMP
+	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+	ALT_UP(mov lr, #0)
+	and	lr, lr, #15
+	str	r2, [r3, lr, lsl #2]	@ save phys SP
+#else
+	str	r2, [r3]		@ save phys SP
+#endif
+	mov	lr, pc
+	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
+#else
+	mov	r2, sp			@ current virtual SP
+	ldr	r0, =cpu_suspend_size
+	sub	sp, sp, r0		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer
+	stmfd	sp!, {r1, r2, r3}	@ save v:p, virt SP, return fn
+	ldr	r3, =sleep_save_sp
+	add	r2, sp, r1		@ convert SP to phys
+#ifdef CONFIG_SMP
+	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+	ALT_UP(mov lr, #0)
+	and	lr, lr, #15
+	str	r2, [r3, lr, lsl #2]	@ save phys SP
+#else
+	str	r2, [r3]		@ save phys SP
+#endif
+	bl	cpu_do_suspend
+#endif
+
+	@ flush data cache
+#ifdef MULTI_CACHE
+	ldr	r10, =cpu_cache
+	mov	lr, r9
+	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
+#else
+	mov	lr, r9
+	b	__cpuc_flush_kern_all
+#endif
+ENDPROC(cpu_suspend)
+	.ltorg
+
+/*
+ * r0 = control register value
+ * r1 = v:p offset (preserved by cpu_do_resume)
+ * r2 = phys page table base
+ * r3 = L1 section flags
+ */
+ENTRY(cpu_resume_mmu)
+	adr	r4, cpu_resume_turn_mmu_on
+	mov	r4, r4, lsr #20
+	orr	r3, r3, r4, lsl #20
+	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
+	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
+	sub	r2, r2, r1
+	ldr	r3, =cpu_resume_after_mmu
+	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
+	b	cpu_resume_turn_mmu_on
+ENDPROC(cpu_resume_mmu)
+	.ltorg
+	.align	5
+cpu_resume_turn_mmu_on:
+	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
+	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
+	mov	r1, r1
+	mov	r1, r1
+	mov	pc, r3			@ jump to virtual address
+ENDPROC(cpu_resume_turn_mmu_on)
+cpu_resume_after_mmu:
+	str	r5, [r2, r4, lsl #2]	@ restore old mapping
+	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
+	mov	pc, lr
+ENDPROC(cpu_resume_after_mmu)
+
+/*
+ * Note: Yes, part of the following code is located into the .data section.
+ *       This is to allow sleep_save_sp to be accessed with a relative load
+ *       while we can't rely on any MMU translation.  We could have put
+ *       sleep_save_sp in the .text section as well, but some setups might
+ *       insist on it to be truly read-only.
+ */
+	.data
+	.align
+ENTRY(cpu_resume)
+#ifdef CONFIG_SMP
+	adr	r0, sleep_save_sp
+	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
+	ALT_UP(mov r1, #0)
+	and	r1, r1, #15
+	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
+#else
+	ldr	r0, sleep_save_sp	@ stack phys addr
+#endif
+	msr	cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
+#ifdef MULTI_CPU
+	ldmia	r0!, {r1, sp, lr, pc}	@ load v:p, stack, return fn, resume fn
+#else
+	ldmia	r0!, {r1, sp, lr}	@ load v:p, stack, return fn
+	b	cpu_do_resume
+#endif
+ENDPROC(cpu_resume)
+
+sleep_save_sp:
+	.rept	CONFIG_NR_CPUS
+	.long	0			@ preserve stack phys ptr here
+	.endr
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 9ab4149bd983..a1e757c3439b 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base)
  */
 	flush_cache_all();
 }
+
+/*
+ * Set the executing CPUs power mode as defined.  This will be in
+ * preparation for it executing a WFI instruction.
+ *
+ * This function must be called with preemption disabled, and as it
+ * has the side effect of disabling coherency, caches must have been
+ * flushed.  Interrupts must also have been disabled.
+ */
+int scu_power_mode(void __iomem *scu_base, unsigned int mode)
+{
+	unsigned int val;
+	int cpu = smp_processor_id();
+
+	if (mode > 3 || mode == 1 || cpu > 3)
+		return -EINVAL;
+
+	val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+	val |= mode;
+	__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+
+	return 0;
+}
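For illustration, a minimal sketch of how a platform hotplug path might call the new scu_power_mode() helper before entering WFI. The cpu_enter_lowpower() wrapper, the mapped scu_base, and the mode value 3 (read here as the SCU's powered-off state, consistent with mode 1 being rejected above) are assumptions, not part of this patch:

/* Illustrative caller only -- not from this patch.  Per the comment
 * above, this must run with preemption and interrupts disabled. */
static void cpu_enter_lowpower(void __iomem *scu_base)
{
	flush_cache_all();			/* coherency is about to be lost */
	if (scu_power_mode(scu_base, 3) == 0)	/* assumed: 3 = powered off */
		asm volatile("wfi" : : : "memory");	/* wait to be powered down */
}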
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index fd9156698ab9..60636f499cb3 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -36,6 +36,7 @@ static void twd_set_mode(enum clock_event_mode mode,
 		/* timer load already set up */
 		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
 			| TWD_TIMER_CONTROL_PERIODIC;
+		__raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
 		/* period set, and timer enabled in 'next_event' hook */
@@ -81,7 +82,7 @@ int twd_timer_ack(void)
 
 static void __cpuinit twd_calibrate_rate(void)
 {
-	unsigned long load, count;
+	unsigned long count;
 	u64 waitjiffies;
 
 	/*
@@ -116,10 +117,6 @@ static void __cpuinit twd_calibrate_rate(void)
 		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
 			(twd_timer_rate / 1000000) % 100);
 	}
-
-	load = twd_timer_rate / HZ;
-
-	__raw_writel(load, twd_base + TWD_TIMER_LOAD);
 }
 
 /*
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index ee57640ba2bb..21ac43f1c2d0 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -23,6 +23,7 @@
23#include <linux/kexec.h> 23#include <linux/kexec.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/sched.h>
26 27
27#include <asm/atomic.h> 28#include <asm/atomic.h>
28#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
@@ -32,7 +33,6 @@
 #include <asm/unwind.h>
 #include <asm/tls.h>
 
-#include "ptrace.h"
 #include "signal.h"
 
 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
@@ -256,7 +256,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
 	return ret;
 }
 
-DEFINE_SPINLOCK(die_lock);
+static DEFINE_SPINLOCK(die_lock);
 
 /*
  * This function is protected against re-entrancy.
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3f2031..61462790757f 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,6 +21,12 @@
 #define ARM_CPU_KEEP(x)
 #endif
 
+#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#define ARM_EXIT_KEEP(x)	x
+#else
+#define ARM_EXIT_KEEP(x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(stext)
 
@@ -43,6 +49,7 @@ SECTIONS
 			_sinittext = .;
 			HEAD_TEXT
 			INIT_TEXT
+			ARM_EXIT_KEEP(EXIT_TEXT)
 			_einittext = .;
 		ARM_CPU_DISCARD(PROC_INFO)
 		__arch_info_begin = .;
@@ -67,6 +74,7 @@ SECTIONS
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
 		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
 #endif
 	}
 
@@ -162,6 +170,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
 	INIT_DATA
+	ARM_EXIT_KEEP(EXIT_DATA)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 #endif
@@ -247,6 +256,8 @@ SECTIONS
 	}
 #endif
 
+	NOTES
+
 	BSS_SECTION(0, 0, 0)
 	_end = .;
 