Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/head.S           | 38
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c  | 68
-rw-r--r--  arch/arm/kernel/kprobes-decode.c |  2
-rw-r--r--  arch/arm/kernel/module.c         | 22
-rw-r--r--  arch/arm/kernel/perf_event.c     |  2
-rw-r--r--  arch/arm/kernel/pmu.c            | 22
-rw-r--r--  arch/arm/kernel/ptrace.c         |  6
-rw-r--r--  arch/arm/kernel/setup.c          |  4
-rw-r--r--  arch/arm/kernel/signal.c         |  4
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S    | 11
10 files changed, 130 insertions, 49 deletions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c0225da3fb21..f06ff9feb0db 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -391,6 +391,7 @@ ENDPROC(__turn_mmu_on)
 
 
 #ifdef CONFIG_SMP_ON_UP
+	__INIT
 __fixup_smp:
 	and	r3, r9, #0x000f0000	@ architecture version
 	teq	r3, #0x000f0000		@ CPU ID supported?
@@ -415,18 +416,7 @@ __fixup_smp_on_up:
 	sub	r3, r0, r3
 	add	r4, r4, r3
 	add	r5, r5, r3
-2:	cmp	r4, r5
-	movhs	pc, lr
-	ldmia	r4!, {r0, r6}
- ARM(	str	r6, [r0, r3]	)
- THUMB(	add	r0, r0, r3	)
-#ifdef __ARMEB__
- THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
-#endif
- THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
- THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
- THUMB(	strh	r6, [r0]	)
-	b	2b
+	b	__do_fixup_smp_on_up
 ENDPROC(__fixup_smp)
 
 	.align
@@ -440,7 +430,31 @@ smp_on_up:
 	ALT_SMP(.long	1)
 	ALT_UP(.long	0)
 	.popsection
+#endif
 
+	.text
+__do_fixup_smp_on_up:
+	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
 #endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	__do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r4, r0
+	add	r5, r0, r1
+	mov	r3, #0
+	bl	__do_fixup_smp_on_up
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
 
 #include "head-common.S"
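Note: the new fixup_smp(start, size) entry point lets the module loader reuse the SMP-on-UP patching. __do_fixup_smp_on_up walks the table between r4 and r5 and rewrites each recorded location with its uniprocessor replacement, applying the offset in r3 (zero for modules, which already sit at their runtime addresses). As a rough guide, the loop behaves like the C sketch below; the two-word entry layout is assumed from the ALT_SMP/ALT_UP usage, and all names here are illustrative rather than kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed layout of one .alt.smp.init entry: the address of the word
 * emitted by ALT_SMP() and the ALT_UP() replacement for it. */
struct smp_alt {
	uint32_t *insn;
	uint32_t up;
};

/* Hypothetical C rendering of __do_fixup_smp_on_up (r4=cur, r5=end, r3=offset). */
static void do_fixup_smp_on_up(struct smp_alt *cur, struct smp_alt *end, long offset)
{
	for (; cur < end; cur++) {
		uint32_t *dst = (uint32_t *)((char *)cur->insn + offset);
		/* memcpy stands in for the Thumb-2 halfword stores, which keep
		 * the write safe even for a misaligned target. */
		memcpy(dst, &cur->up, sizeof(cur->up));
	}
}

int main(void)
{
	uint32_t text[1] = { 0xe1a00000 };			/* SMP flavour of an instruction */
	struct smp_alt table[1] = { { &text[0], 0xe3a00000 } };	/* recorded UP replacement */

	/* fixup_smp() clears r3, so the offset is zero here as well. */
	do_fixup_smp_on_up(table, table + 1, 0);
	printf("patched word: %#010x\n", text[0]);
	return 0;
}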
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index c9f3f0467570..44b84fe6e1b0 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -137,11 +137,10 @@ static u8 get_debug_arch(void)
 	u32 didr;
 
 	/* Do we implement the extended CPUID interface? */
-	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-		pr_warning("CPUID feature registers not supported. "
-			   "Assuming v6 debug is present.\n");
+	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
+	    "CPUID feature registers not supported. "
+	    "Assuming v6 debug is present.\n"))
 		return ARM_DEBUG_ARCH_V6;
-	}
 
 	ARM_DBG_READ(c0, 0, didr);
 	return (didr >> 16) & 0xf;
@@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void)
 	return debug_arch;
 }
 
+static int debug_arch_supported(void)
+{
+	u8 arch = get_debug_arch();
+	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+}
+
 /* Determine number of BRP register available. */
 static int get_num_brp_resources(void)
 {
@@ -268,6 +273,9 @@ out:
 
 int hw_breakpoint_slots(int type)
 {
+	if (!debug_arch_supported())
+		return 0;
+
 	/*
 	 * We can be called early, so don't rely on
 	 * our static variables being initialised.
@@ -828,20 +836,33 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
 {
-	int i;
+	int i, cpu = smp_processor_id();
+	u32 dbg_power;
+	cpumask_t *cpumask = info;
 
 	/*
 	 * v7 debug contains save and restore registers so that debug state
-	 * can be maintained across low-power modes without leaving
-	 * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
-	 * we can write to the debug registers out of reset, so we must
-	 * unlock the OS Lock Access Register to avoid taking undefined
-	 * instruction exceptions later on.
+	 * can be maintained across low-power modes without leaving the debug
+	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+	 * the debug registers out of reset, so we must unlock the OS Lock
+	 * Access Register to avoid taking undefined instruction exceptions
+	 * later on.
 	 */
 	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
 		/*
+		 * Ensure sticky power-down is clear (i.e. debug logic is
+		 * powered up).
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 0) {
+			pr_warning("CPU %d debug is powered down!\n", cpu);
+			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+			return;
+		}
+
+		/*
 		 * Unconditionally clear the lock by writing a value
 		 * other than 0xC5ACCE55 to the access register.
 		 */
@@ -879,10 +900,11 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
 	u32 dscr;
+	cpumask_t cpumask = { CPU_BITS_NONE };
 
 	debug_arch = get_debug_arch();
 
-	if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+	if (!debug_arch_supported()) {
 		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
 		return 0;
 	}
@@ -899,18 +921,24 @@ static int __init arch_hw_breakpoint_init(void)
 	pr_info("%d breakpoint(s) reserved for watchpoint "
 		"single-step.\n", core_num_reserved_brps);
 
+	/*
+	 * Reset the breakpoint resources. We assume that a halting
+	 * debugger will leave the world in a nice state for us.
+	 */
+	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+	if (!cpumask_empty(&cpumask)) {
+		core_num_brps = 0;
+		core_num_reserved_brps = 0;
+		core_num_wrps = 0;
+		return 0;
+	}
+
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
+		max_watchpoint_len = 4;
 		pr_warning("halting debug mode enabled. Assuming maximum "
-			   "watchpoint size of 4 bytes.");
+			   "watchpoint size of %u bytes.", max_watchpoint_len);
 	} else {
-		/*
-		 * Reset the breakpoint resources. We assume that a halting
-		 * debugger will leave the world in a nice state for us.
-		 */
-		smp_call_function(reset_ctrl_regs, NULL, 1);
-		reset_ctrl_regs(NULL);
-
 		/* Work out the maximum supported watchpoint length. */
 		max_watchpoint_len = get_max_wp_len();
 		pr_info("maximum watchpoint size is %u bytes.\n",
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 2c1f0050c9c4..8f6ed43861f1 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
 		return space_cccc_1100_010x(insn, asi);
 
-	} else if ((insn & 0x0e000000) == 0x0c400000) {
+	} else if ((insn & 0x0e000000) == 0x0c000000) {
 
 		return space_cccc_110x(insn, asi);
 
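Note: the one-character change above matters because the old constant could never match. 0x0c400000 has bit 22 set, which lies outside the mask 0x0e000000, so (insn & 0x0e000000) == 0x0c400000 is false for every instruction and the cccc 110x coprocessor load/store space never reached space_cccc_110x(). A quick check; the LDC-class encoding below is just an illustrative value.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t mask = 0x0e000000;
	const uint32_t ldc  = 0xec102000;	/* an instruction in the cccc 110x space */

	printf("mask & 0x0c400000 = %#010x\n", mask & 0x0c400000u);	/* 0x0c000000: bit 22 cannot survive the mask */
	printf("old test matches: %d\n", (ldc & mask) == 0x0c400000u);	/* 0, never true */
	printf("new test matches: %d\n", (ldc & mask) == 0x0c000000u);	/* 1 */
	return 0;
}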
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2cfe8161b478..6d4105e6872f 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -22,6 +22,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/smp_plat.h>
 #include <asm/unwind.h>
 
 #ifdef CONFIG_XIP_KERNEL
@@ -268,12 +269,28 @@ struct mod_unwind_map {
 	const Elf_Shdr *txt_sec;
 };
 
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+	const Elf_Shdr *sechdrs, const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+
+	return NULL;
+}
+
+extern void fixup_smp(const void *, unsigned long);
+
 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 		    struct module *mod)
 {
+	const Elf_Shdr * __maybe_unused s = NULL;
 #ifdef CONFIG_ARM_UNWIND
 	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
 	struct mod_unwind_map maps[ARM_SEC_MAX];
 	int i;
 
@@ -315,6 +332,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 			       maps[i].txt_sec->sh_addr,
 			       maps[i].txt_sec->sh_size);
 #endif
+	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+	if (s && !is_smp())
+		fixup_smp((void *)s->sh_addr, s->sh_size);
 	return 0;
 }
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5efa2647a2fb..d150ad1ccb5d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail,
 	 * Frame pointers should strictly progress back up the stack
 	 * (towards higher addresses).
 	 */
-	if (tail >= buftail.fp)
+	if (tail + 1 >= buftail.fp)
 		return NULL;
 
 	return buftail.fp - 1;
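Note: in user_backtrace(), tail points at the frame record just copied into buftail. The old tail >= buftail.fp test only required the saved frame pointer to be above the start of that record, so a corrupt value could point into the record itself and the walk would chase overlapping frames; the new test rejects any saved frame pointer that does not lie beyond the end of the current record (tail + 1). A small sketch, with the struct layout assumed from the APCS frame record this code walks:

#include <stdio.h>

struct frame_tail {
	struct frame_tail *fp;	/* saved frame pointer of the caller */
	unsigned long sp;
	unsigned long lr;
};

int main(void)
{
	struct frame_tail rec;
	struct frame_tail *tail = &rec;

	/* A corrupt frame whose saved fp points into the record being read: */
	rec.fp = (struct frame_tail *)((char *)tail + sizeof(void *));

	printf("old check stops walk: %d\n", tail >= rec.fp);		/* 0, keeps going */
	printf("new check stops walk: %d\n", tail + 1 >= rec.fp);	/* 1, overlapping frame rejected */
	return 0;
}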
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index b8af96ea62e6..2c79eec19262 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
 		   irq, cpu);
 	return err;
 #else
-	return 0;
+	return -EINVAL;
 #endif
 }
 
 static int
 init_cpu_pmu(void)
 {
-	int i, err = 0;
+	int i, irqs, err = 0;
 	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
 
-	if (!pdev) {
-		err = -ENODEV;
-		goto out;
-	}
+	if (!pdev)
+		return -ENODEV;
+
+	irqs = pdev->num_resources;
+
+	/*
+	 * If we have a single PMU interrupt that we can't shift, assume that
+	 * we're running on a uniprocessor machine and continue.
+	 */
+	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
+		return 0;
 
-	for (i = 0; i < pdev->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
 		err = set_irq_affinity(platform_get_irq(pdev, i), i);
 		if (err)
 			break;
 	}
 
-out:
 	return err;
 }
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 19c6816db61e..b13e70f63d71 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
 		while (!(arch_ctrl.len & 0x1))
 			arch_ctrl.len >>= 1;
 
-		if (idx & 0x1)
-			reg = encode_ctrl_reg(arch_ctrl);
-		else
+		if (num & 0x1)
 			reg = bp->attr.bp_addr;
+		else
+			reg = encode_ctrl_reg(arch_ctrl);
 	}
 
 put:
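Note: the swap above makes PTRACE_GETHBPREGS honour the register numbering the ABI advertises. The hardware-breakpoint registers are exposed to the tracer as (address, control) pairs, and it is the parity of the user-visible register number num, not of the internal slot index, that selects which half to return: odd numbers return the address, even numbers the control word. The num-to-slot formula in the sketch below is an assumption for illustration and is not taken from this hunk.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Positive num reads breakpoint registers, negative num watchpoints;
	 * num 0 is the info register (not shown here). */
	for (long num = 1; num <= 6; num++) {
		long slot = (labs(num) - 1) >> 1;	/* assumed pairing of num to slot */
		printf("num %ld -> slot %ld, returns %s\n",
		       num, slot, (num & 0x1) ? "bp_addr" : "encode_ctrl_reg()");
	}
	return 0;
}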
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 420b8d6485d6..5ea4fb718b97 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@ int cpu_architecture(void)
 		 * Register 0 and check for VMSAv7 or PMSAv7 */
 		asm("mrc	p15, 0, %0, c0, c1, 4"
 		    : "=r" (mmfr0));
-		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
-		    (mmfr0 & 0x000000f0) == 0x00000030)
+		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+		    (mmfr0 & 0x000000f0) >= 0x00000030)
 			cpu_arch = CPU_ARCH_ARMv7;
 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 			 (mmfr0 & 0x000000f0) == 0x00000020)
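Note: cpu_architecture() is reading ID_MMFR0 here; bits [3:0] give the VMSA support level and bits [7:4] the PMSA level, and newer v7 cores report values above 3 in those fields, so the exact == 0x3 / == 0x30 comparisons misclassified them as pre-v7. Switching to >= keeps them in the ARMv7 branch. A minimal illustration with a hypothetical MMFR0 value:

#include <stdio.h>
#include <stdint.h>

static int classified_as_v7(uint32_t mmfr0, int use_ge)
{
	if (use_ge)
		return (mmfr0 & 0x0000000f) >= 0x00000003 ||
		       (mmfr0 & 0x000000f0) >= 0x00000030;
	return (mmfr0 & 0x0000000f) == 0x00000003 ||
	       (mmfr0 & 0x000000f0) == 0x00000030;
}

int main(void)
{
	uint32_t mmfr0 = 0x00000004;	/* hypothetical core reporting VMSA level 4 */

	printf("old (==): v7? %d\n", classified_as_v7(mmfr0, 0));	/* 0, falls through */
	printf("new (>=): v7? %d\n", classified_as_v7(mmfr0, 1));	/* 1 */
	return 0;
}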
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 907d5a620bca..abaf8445ce25 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	unsigned long handler = (unsigned long)ka->sa.sa_handler;
 	unsigned long retcode;
 	int thumb = 0;
-	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+	cpsr |= PSR_ENDSTATE;
 
 	/*
 	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
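Note: setup_return() now strips the caller's CPSR.E bit along with the condition flags and then forces PSR_ENDSTATE, so a signal handler always starts in the data endianness the kernel was built for, even if the interrupted code had flipped endianness with SETEND. A sketch of the bit manipulation; the constants are stand-ins for the kernel's definitions, and PSR_ENDSTATE is assumed to be PSR_E_BIT on BE8 builds and 0 otherwise.

#include <stdio.h>

#define PSR_f		0xff000000UL	/* condition flags field */
#define PSR_E_BIT	0x00000200UL	/* CPSR.E: data endianness */
/* Assumption: PSR_ENDSTATE selects the configured endianness
 * (PSR_E_BIT for a BE8 kernel, 0 for little-endian). */
#define PSR_ENDSTATE	0UL

int main(void)
{
	/* Interrupted user code that had executed SETEND BE: */
	unsigned long regs_cpsr = 0x00000210UL;	/* E bit set, user mode */

	unsigned long cpsr = regs_cpsr & ~(PSR_f | PSR_E_BIT);
	cpsr |= PSR_ENDSTATE;

	printf("handler starts with cpsr = %#lx (E bit %s)\n",
	       cpsr, (cpsr & PSR_E_BIT) ? "set" : "clear");
	return 0;
}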
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3f2031..61462790757f 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,6 +21,12 @@
 #define ARM_CPU_KEEP(x)
 #endif
 
+#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#define ARM_EXIT_KEEP(x)	x
+#else
+#define ARM_EXIT_KEEP(x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(stext)
 
@@ -43,6 +49,7 @@ SECTIONS
 		_sinittext = .;
 			HEAD_TEXT
 			INIT_TEXT
+			ARM_EXIT_KEEP(EXIT_TEXT)
 		_einittext = .;
 		ARM_CPU_DISCARD(PROC_INFO)
 		__arch_info_begin = .;
@@ -67,6 +74,7 @@ SECTIONS
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
 		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
 #endif
 	}
 
@@ -162,6 +170,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
 	INIT_DATA
+	ARM_EXIT_KEEP(EXIT_DATA)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 #endif
@@ -247,6 +256,8 @@ SECTIONS
 	}
 #endif
 
+	NOTES
+
 	BSS_SECTION(0, 0, 0)
 	_end = .;
 