author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-12-06 15:27:54 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-12-06 15:27:54 -0500
commit		3ee0fc5ca129cbae81c073756febcb1c552af446 (patch)
tree		08f061c1ec9e948df760a0746441bec9f290d774
parent		deee6d5359969a0ce4e2760cfd7b9f379bd5698a (diff)
parent		4e8ee7de227e3ab9a72040b448ad728c5428a042 (diff)
Merge branch 'kexec/idmap' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
-rw-r--r--	arch/arm/include/asm/idmap.h	| 14
-rw-r--r--	arch/arm/include/asm/pgtable.h	|  3
-rw-r--r--	arch/arm/kernel/head.S		| 18
-rw-r--r--	arch/arm/kernel/sleep.S		|  2
-rw-r--r--	arch/arm/kernel/smp.c		| 32
-rw-r--r--	arch/arm/kernel/suspend.c	| 18
-rw-r--r--	arch/arm/kernel/vmlinux.lds.S	|  7
-rw-r--r--	arch/arm/mm/idmap.c		| 63
-rw-r--r--	arch/arm/mm/proc-arm1020.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm1020e.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm1022.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm1026.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm6_7.S	|  4
-rw-r--r--	arch/arm/mm/proc-arm720.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm740.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm7tdmi.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm920.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm922.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm925.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm926.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm940.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm946.S	|  3
-rw-r--r--	arch/arm/mm/proc-arm9tdmi.S	|  3
-rw-r--r--	arch/arm/mm/proc-fa526.S	|  3
-rw-r--r--	arch/arm/mm/proc-feroceon.S	|  3
-rw-r--r--	arch/arm/mm/proc-mohawk.S	|  3
-rw-r--r--	arch/arm/mm/proc-sa110.S	|  3
-rw-r--r--	arch/arm/mm/proc-sa1100.S	|  3
-rw-r--r--	arch/arm/mm/proc-v6.S		|  3
-rw-r--r--	arch/arm/mm/proc-v7.S		|  2
-rw-r--r--	arch/arm/mm/proc-xsc3.S		|  3
-rw-r--r--	arch/arm/mm/proc-xscale.S	|  3
32 files changed, 140 insertions, 89 deletions
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
new file mode 100644
index 000000000000..bf863edb517d
--- /dev/null
+++ b/arch/arm/include/asm/idmap.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_IDMAP_H
+#define __ASM_IDMAP_H
+
+#include <linux/compiler.h>
+#include <asm/pgtable.h>
+
+/* Tag a function as requiring to be executed via an identity mapping. */
+#define __idmap __section(.idmap.text) noinline notrace
+
+extern pgd_t *idmap_pgd;
+
+void setup_mm_for_reboot(void);
+
+#endif /* __ASM_IDMAP_H */
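
Note: the new __idmap annotation above places a function in the .idmap.text section, which init_static_idmap() (added in arch/arm/mm/idmap.c below) covers with a 1:1 mapping of its physical address, so the code stays reachable while the MMU is being turned on or off. A minimal, hypothetical C usage sketch (the function name and body are illustrative only, not part of this merge):

	#include <asm/idmap.h>

	/* Hypothetical example: code that must keep running across an MMU
	 * transition is tagged __idmap so the linker collects it into
	 * .idmap.text, which the static identity map covers 1:1. */
	static void __idmap example_mmu_transition_helper(void)
	{
		/* ... work that must survive the MMU being switched off ... */
	}
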
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index bcae9b81a6d0..a784859cc7a9 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -347,9 +347,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #define pgtable_cache_init() do { } while (0)
 
-void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
-void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 08c82fd844a8..fb2945be5a51 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -170,11 +170,11 @@ __create_page_tables:
 	 * Create identity mapping to cater for __enable_mmu.
 	 * This identity mapping will be removed by paging_init().
 	 */
-	adr	r0, __enable_mmu_loc
+	adr	r0, __turn_mmu_on_loc
 	ldmia	r0, {r3, r5, r6}
 	sub	r0, r0, r3			@ virt->phys offset
-	add	r5, r5, r0			@ phys __enable_mmu
-	add	r6, r6, r0			@ phys __enable_mmu_end
+	add	r5, r5, r0			@ phys __turn_mmu_on
+	add	r6, r6, r0			@ phys __turn_mmu_on_end
 	mov	r5, r5, lsr #SECTION_SHIFT
 	mov	r6, r6, lsr #SECTION_SHIFT
 
@@ -287,10 +287,10 @@ __create_page_tables:
 ENDPROC(__create_page_tables)
 	.ltorg
 	.align
-__enable_mmu_loc:
+__turn_mmu_on_loc:
 	.long	.
-	.long	__enable_mmu
-	.long	__enable_mmu_end
+	.long	__turn_mmu_on
+	.long	__turn_mmu_on_end
 
 #if defined(CONFIG_SMP)
 	__CPUINIT
@@ -398,15 +398,17 @@ ENDPROC(__enable_mmu)
  * other registers depend on the function called upon completion
  */
 	.align	5
-__turn_mmu_on:
+	.pushsection	.idmap.text, "ax"
+ENTRY(__turn_mmu_on)
 	mov	r0, r0
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
 	mov	r3, r3
 	mov	r3, r13
 	mov	pc, r3
-__enable_mmu_end:
+__turn_mmu_on_end:
 ENDPROC(__turn_mmu_on)
+	.popsection
 
 
 #ifdef CONFIG_SMP_ON_UP
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 020e99c845e7..9e64231c8cfe 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -54,6 +54,7 @@ ENDPROC(cpu_suspend_abort)
  * r0 = control register value
  */
 	.align	5
+	.pushsection	.idmap.text,"ax"
 ENTRY(cpu_resume_mmu)
 	ldr	r3, =cpu_resume_after_mmu
 	mcr	p15, 0, r0, c1, c0, 0	@ turn on MMU, I-cache, etc
@@ -62,6 +63,7 @@ ENTRY(cpu_resume_mmu)
 	mov	r0, r0
 	mov	pc, r3			@ jump to virtual address
ENDPROC(cpu_resume_mmu)
+	.popsection
 cpu_resume_after_mmu:
 	bl	cpu_init		@ restore the und/abt/irq banked regs
 	mov	r0, #0			@ return zero on success
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ef5640b9e218..76ff28d87bf3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/exception.h>
+#include <asm/idmap.h>
 #include <asm/topology.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
@@ -61,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
 	struct task_struct *idle = ci->idle;
-	pgd_t *pgd;
 	int ret;
 
 	/*
@@ -84,29 +84,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	}
 
 	/*
-	 * Allocate initial page tables to allow the new CPU to
-	 * enable the MMU safely. This essentially means a set
-	 * of our "standard" page tables, with the addition of
-	 * a 1:1 mapping for the physical address of the kernel.
-	 */
-	pgd = pgd_alloc(&init_mm);
-	if (!pgd)
-		return -ENOMEM;
-
-	if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
-		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
-		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
-		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
-	}
-
-	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
 	 */
 	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
-	secondary_data.pgdir = virt_to_phys(pgd);
+	secondary_data.pgdir = virt_to_phys(idmap_pgd);
 	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
 	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
 	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
@@ -142,16 +124,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	secondary_data.stack = NULL;
 	secondary_data.pgdir = 0;
 
-	if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
-		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
-		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
-		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
-	}
-
-	pgd_free(&init_mm, pgd);
-
 	return ret;
 }
 
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 93a22d282c16..1794cc3b0f18 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -1,13 +1,12 @@
 #include <linux/init.h>
 
+#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-static pgd_t *suspend_pgd;
-
 extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
 extern void cpu_resume_mmu(void);
 
@@ -21,7 +20,7 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
 	*save_ptr = virt_to_phys(ptr);
 
 	/* This must correspond to the LDM in cpu_resume() assembly */
-	*ptr++ = virt_to_phys(suspend_pgd);
+	*ptr++ = virt_to_phys(idmap_pgd);
 	*ptr++ = sp;
 	*ptr++ = virt_to_phys(cpu_do_resume);
 
@@ -42,7 +41,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	struct mm_struct *mm = current->active_mm;
 	int ret;
 
-	if (!suspend_pgd)
+	if (!idmap_pgd)
 		return -EINVAL;
 
 	/*
@@ -59,14 +58,3 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 
 	return ret;
 }
-
-static int __init cpu_suspend_init(void)
-{
-	suspend_pgd = pgd_alloc(&init_mm);
-	if (suspend_pgd) {
-		unsigned long addr = virt_to_phys(cpu_resume_mmu);
-		identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
-	}
-	return suspend_pgd ? 0 : -ENOMEM;
-}
-core_initcall(cpu_suspend_init);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 20b3041e0860..f76e75548670 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -13,6 +13,12 @@
 	*(.proc.info.init)			\
 	VMLINUX_SYMBOL(__proc_info_end) = .;
 
+#define IDMAP_TEXT							\
+	ALIGN_FUNCTION();						\
+	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
+	*(.idmap.text)							\
+	VMLINUX_SYMBOL(__idmap_text_end) = .;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #define ARM_CPU_DISCARD(x)
 #define ARM_CPU_KEEP(x)		x
@@ -92,6 +98,7 @@ SECTIONS
 			SCHED_TEXT
 			LOCK_TEXT
 			KPROBES_TEXT
+			IDMAP_TEXT
 #ifdef CONFIG_MMU
 			*(.fixup)
 #endif
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 296ad2eaddb0..660f1bc68f99 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,8 +1,12 @@
 #include <linux/kernel.h>
 
 #include <asm/cputype.h>
+#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
+
+pgd_t *idmap_pgd;
 
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	unsigned long prot)
@@ -28,7 +32,7 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	} while (pud++, addr = next, addr != end);
 }
 
-void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
 {
 	unsigned long prot, next;
 
@@ -43,48 +47,41 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
 	} while (pgd++, addr = next, addr != end);
 }
 
-#ifdef CONFIG_SMP
-static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
-{
-	pmd_t *pmd = pmd_offset(pud, addr);
-	pmd_clear(pmd);
-}
+extern char __idmap_text_start[], __idmap_text_end[];
 
-static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
+static int __init init_static_idmap(void)
 {
-	pud_t *pud = pud_offset(pgd, addr);
-	unsigned long next;
+	phys_addr_t idmap_start, idmap_end;
 
-	do {
-		next = pud_addr_end(addr, end);
-		idmap_del_pmd(pud, addr, next);
-	} while (pud++, addr = next, addr != end);
-}
+	idmap_pgd = pgd_alloc(&init_mm);
+	if (!idmap_pgd)
+		return -ENOMEM;
 
-void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
-	unsigned long next;
+	/* Add an identity mapping for the physical address of the section. */
+	idmap_start = virt_to_phys((void *)__idmap_text_start);
+	idmap_end = virt_to_phys((void *)__idmap_text_end);
 
-	pgd += pgd_index(addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		idmap_del_pud(pgd, addr, next);
-	} while (pgd++, addr = next, addr != end);
+	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
+		(long long)idmap_start, (long long)idmap_end);
+	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+
+	return 0;
 }
-#endif
+early_initcall(init_static_idmap);
 
 /*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages. This will then ensure that we have predictable
- * results when turning the mmu off
+ * In order to soft-boot, we need to switch to a 1:1 mapping for the
+ * cpu_reset functions. This will then ensure that we have predictable
+ * results when turning off the mmu.
  */
 void setup_mm_for_reboot(void)
 {
-	/*
-	 * We need to access to user-mode page tables here. For kernel threads
-	 * we don't have any user-mode mappings so we use the context that we
-	 * "borrowed".
-	 */
-	identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
+	/* Clean and invalidate L1. */
+	flush_cache_all();
+
+	/* Switch to the identity mapping. */
+	cpu_switch_mm(idmap_pgd, &init_mm);
+
+	/* Flush the TLB. */
 	local_flush_tlb_all();
 }
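
Note: setup_mm_for_reboot() above is intended to be called just before jumping into the processor reset code, which this series moves into .idmap.text. A hedged, hypothetical caller might look like the sketch below (example_soft_restart() is illustrative only; setup_mm_for_reboot() and cpu_reset() are the existing interfaces):

	#include <asm/idmap.h>
	#include <asm/proc-fns.h>

	/* Hypothetical soft-reboot path: switch to the static identity map so
	 * that cpu_reset(), now located in .idmap.text, stays mapped at its
	 * physical address once the MMU is disabled. */
	static void example_soft_restart(unsigned long jump_addr)
	{
		setup_mm_for_reboot();	/* clean caches, switch to idmap_pgd, flush TLB */
		cpu_reset(jump_addr);	/* processor-specific reset; may turn the MMU off */
	}
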
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 67469665d47a..234951345eb3 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm1020_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm1020_reset)
+	.popsection
 
 /*
  * cpu_arm1020_do_idle()
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 4251421c0ed5..c244b06caac9 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020e_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm1020e_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020e_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm1020e_reset)
+	.popsection
 
 /*
  * cpu_arm1020e_do_idle()
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index d283cf3d06e3..38fe22efd18f 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -84,6 +84,7 @@ ENTRY(cpu_arm1022_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm1022_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1022_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm1022_reset)
+	.popsection
 
 /*
  * cpu_arm1022_do_idle()
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 678a1ceafed2..3eb9c3c26c75 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -84,6 +84,7 @@ ENTRY(cpu_arm1026_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm1026_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1026_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm1026_reset)
+	.popsection
 
 /*
  * cpu_arm1026_do_idle()
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index e5b974cddac3..4fbeb5b8e6c2 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -225,6 +225,7 @@ ENTRY(cpu_arm7_set_pte_ext)
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm6_reset)
 ENTRY(cpu_arm7_reset)
 	mov	r1, #0
@@ -235,6 +236,9 @@ ENTRY(cpu_arm7_reset)
 	mov	r1, #0x30
 	mcr	p15, 0, r1, c1, c0, 0		@ turn off MMU etc
 	mov	pc, r0
+ENDPROC(cpu_arm6_reset)
+ENDPROC(cpu_arm7_reset)
+	.popsection
 
 	__CPUINIT
 
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 55f4e290665a..0ac908c7ade1 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -101,6 +101,7 @@ ENTRY(cpu_arm720_set_pte_ext)
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm720_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate cache
@@ -112,6 +113,8 @@ ENTRY(cpu_arm720_reset)
 	bic	ip, ip, #0x2100			@ ..v....s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm720_reset)
+	.popsection
 
 	__CPUINIT
 
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 4506be3adda6..dc5de5d53f20 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -49,6 +49,7 @@ ENTRY(cpu_arm740_proc_fin)
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm740_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c0, 0		@ invalidate cache
@@ -56,6 +57,8 @@ ENTRY(cpu_arm740_reset)
 	bic	ip, ip, #0x0000000c		@ ............wc..
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm740_reset)
+	.popsection
 
 	__CPUINIT
 
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 7e0e1fe4ed4d..6ddea3e464bd 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -45,8 +45,11 @@ ENTRY(cpu_arm7tdmi_proc_fin)
  * Params  : loc(r0)	address to jump to
  * Purpose : Sets up everything for a reset and jump to the location for soft reset.
  */
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm7tdmi_reset)
 	mov	pc, r0
+ENDPROC(cpu_arm7tdmi_reset)
+	.popsection
 
 	__CPUINIT
 
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 88fb3d9e0640..cb941ae95f66 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -85,6 +85,7 @@ ENTRY(cpu_arm920_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm920_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -97,6 +98,8 @@ ENTRY(cpu_arm920_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm920_reset)
+	.popsection
 
 /*
  * cpu_arm920_do_idle()
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 490e18833857..4ec0e074dd55 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -87,6 +87,7 @@ ENTRY(cpu_arm922_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm922_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -99,6 +100,8 @@ ENTRY(cpu_arm922_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm922_reset)
+	.popsection
 
 /*
  * cpu_arm922_do_idle()
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 51d494be057e..9dccd9a365b3 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -108,6 +108,7 @@ ENTRY(cpu_arm925_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm925_reset)
 	/* Send software reset to MPU and DSP */
 	mov	ip, #0xff000000
@@ -115,6 +116,8 @@ ENTRY(cpu_arm925_reset)
 	orr	ip, ip, #0x0000ce00
 	mov	r4, #1
 	strh	r4, [ip, #0x10]
+ENDPROC(cpu_arm925_reset)
+	.popsection
 
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 9f8fd91f918a..820259b81a1f 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -77,6 +77,7 @@ ENTRY(cpu_arm926_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm926_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -89,6 +90,8 @@ ENTRY(cpu_arm926_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm926_reset)
+	.popsection
 
 /*
  * cpu_arm926_do_idle()
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index ac750d506153..9fdc0a170974 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -48,6 +48,7 @@ ENTRY(cpu_arm940_proc_fin)
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm940_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
@@ -58,6 +59,8 @@ ENTRY(cpu_arm940_reset)
 	bic	ip, ip, #0x00001000		@ i-cache
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm940_reset)
+	.popsection
 
 /*
  * cpu_arm940_do_idle()
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 683af3a182b7..f684cfedcca9 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -55,6 +55,7 @@ ENTRY(cpu_arm946_proc_fin)
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm946_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
@@ -65,6 +66,8 @@ ENTRY(cpu_arm946_reset)
 	bic	ip, ip, #0x00001000		@ i-cache
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm946_reset)
+	.popsection
 
 /*
  * cpu_arm946_do_idle()
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 2120f9e2af7f..8881391dfb9e 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -45,8 +45,11 @@ ENTRY(cpu_arm9tdmi_proc_fin)
  * Params  : loc(r0)	address to jump to
  * Purpose : Sets up everything for a reset and jump to the location for soft reset.
  */
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm9tdmi_reset)
 	mov	pc, r0
+ENDPROC(cpu_arm9tdmi_reset)
+	.popsection
 
 	__CPUINIT
 
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 4c7a5710472b..272558a133a3 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -57,6 +57,7 @@ ENTRY(cpu_fa526_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	4
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_fa526_reset)
 /* TODO: Use CP8 if possible... */
 	mov	ip, #0
@@ -73,6 +74,8 @@ ENTRY(cpu_fa526_reset)
 	nop
 	nop
 	mov	pc, r0
+ENDPROC(cpu_fa526_reset)
+	.popsection
 
 /*
  * cpu_fa526_do_idle()
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 8a6c2f78c1c3..ba3c500584ac 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -98,6 +98,7 @@ ENTRY(cpu_feroceon_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_feroceon_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -110,6 +111,8 @@ ENTRY(cpu_feroceon_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_feroceon_reset)
+	.popsection
 
 /*
  * cpu_feroceon_do_idle()
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index db52b0fb14a0..cdfedc5b8ad8 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -69,6 +69,7 @@ ENTRY(cpu_mohawk_proc_fin)
  * (same as arm926)
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_mohawk_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -79,6 +80,8 @@ ENTRY(cpu_mohawk_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_mohawk_reset)
+	.popsection
 
 /*
  * cpu_mohawk_do_idle()
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index d50ada26edd6..775d70fba937 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -62,6 +62,7 @@ ENTRY(cpu_sa110_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_sa110_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -74,6 +75,8 @@ ENTRY(cpu_sa110_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_sa110_reset)
+	.popsection
 
 /*
  * cpu_sa110_do_idle(type)
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 7d91545d089b..3aa0da11fd84 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -70,6 +70,7 @@ ENTRY(cpu_sa1100_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_sa1100_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -82,6 +83,8 @@ ENTRY(cpu_sa1100_reset)
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_sa1100_reset)
+	.popsection
 
 /*
  * cpu_sa1100_do_idle(type)
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index d061d2fa5506..5900cd520e84 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -55,6 +55,7 @@ ENTRY(cpu_v6_proc_fin)
  * - loc - location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_v6_reset)
 	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
 	bic	r1, r1, #0x1			@ ...............m
@@ -62,6 +63,8 @@ ENTRY(cpu_v6_reset)
 	mov	r1, #0
 	mcr	p15, 0, r1, c7, c5, 4		@ ISB
 	mov	pc, r0
+ENDPROC(cpu_v6_reset)
+	.popsection
 
 /*
  * cpu_v6_do_idle()
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 2c559ac38142..66a185f018a0 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -63,6 +63,7 @@ ENDPROC(cpu_v7_proc_fin)
  * caches disabled.
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_v7_reset)
 	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
 	bic	r1, r1, #0x1			@ ...............m
@@ -71,6 +72,7 @@ ENTRY(cpu_v7_reset)
 	isb
 	mov	pc, r0
 ENDPROC(cpu_v7_reset)
+	.popsection
 
 /*
  * cpu_v7_do_idle()
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index abf0507a08ae..b0d57869da2d 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -105,6 +105,7 @@ ENTRY(cpu_xsc3_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_xsc3_reset)
 	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
 	msr	cpsr_c, r1			@ reset CPSR
@@ -119,6 +120,8 @@ ENTRY(cpu_xsc3_reset)
 	@ already containing those two last instructions to survive.
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
 	mov	pc, r0
+ENDPROC(cpu_xsc3_reset)
+	.popsection
 
 /*
  * cpu_xsc3_do_idle()
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 3277904bebaf..4ffebaa595ee 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -142,6 +142,7 @@ ENTRY(cpu_xscale_proc_fin)
  * Beware PXA270 erratum E7.
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_xscale_reset)
 	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
 	msr	cpsr_c, r1			@ reset CPSR
@@ -160,6 +161,8 @@ ENTRY(cpu_xscale_reset)
 	@ already containing those two last instructions to survive.
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
 	mov	pc, r0
+ENDPROC(cpu_xscale_reset)
+	.popsection
 
 /*
  * cpu_xscale_do_idle()