Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--  arch/arm/kernel/smp.c  107
1 files changed, 107 insertions, 0 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ecc8c3332408..45ed036336e0 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -24,6 +24,9 @@
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
@@ -37,6 +40,13 @@ cpumask_t cpu_present_mask;
 cpumask_t cpu_online_map;
 
 /*
+ * as from 2.5, kernels no longer have an init_tasks structure
+ * so we need some other way of telling a new secondary core
+ * where to place its SVC stack
+ */
+struct secondary_data secondary_data;
+
+/*
  * structures for inter-processor calls
  * - A collection of single bit ipi messages.
  */
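
The type of secondary_data is defined in <asm/smp.h>, which is outside this diff. A minimal sketch of what such a descriptor has to carry, inferred from the two fields __cpu_up() fills in below; the field order and exact types here are assumptions for illustration only:

    struct secondary_data {
        unsigned long pgdir;    /* physical address of the temporary page tables */
        void *stack;            /* SVC-mode stack the incoming core should adopt */
    };

    extern struct secondary_data secondary_data;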
@@ -71,6 +81,8 @@ static DEFINE_SPINLOCK(smp_call_function_lock);
 int __init __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
+	pgd_t *pgd;
+	pmd_t *pmd;
 	int ret;
 
 	/*
@@ -84,9 +96,54 @@ int __init __cpu_up(unsigned int cpu)
 	}
 
 	/*
+	 * Allocate initial page tables to allow the new CPU to
+	 * enable the MMU safely. This essentially means a set
+	 * of our "standard" page tables, with the addition of
+	 * a 1:1 mapping for the physical address of the kernel.
+	 */
+	pgd = pgd_alloc(&init_mm);
+	pmd = pmd_offset(pgd, PHYS_OFFSET);
+	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
+		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
+
+	/*
+	 * We need to tell the secondary core where to find
+	 * its stack and the page tables.
+	 */
+	secondary_data.stack = (void *)idle->thread_info + THREAD_SIZE - 8;
+	secondary_data.pgdir = virt_to_phys(pgd);
+	wmb();
+
+	/*
 	 * Now bring the CPU into our world.
 	 */
 	ret = boot_secondary(cpu, idle);
+	if (ret == 0) {
+		unsigned long timeout;
+
+		/*
+		 * CPU was successfully started, wait for it
+		 * to come online or time out.
+		 */
+		timeout = jiffies + HZ;
+		while (time_before(jiffies, timeout)) {
+			if (cpu_online(cpu))
+				break;
+
+			udelay(10);
+			barrier();
+		}
+
+		if (!cpu_online(cpu))
+			ret = -EIO;
+	}
+
+	secondary_data.stack = 0;
+	secondary_data.pgdir = 0;
+
+	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
+	pgd_free(pgd);
+
 	if (ret) {
 		printk(KERN_CRIT "cpu_up: processor %d failed to boot\n", cpu);
 		/*
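
The "1:1 mapping" above is what lets the incoming core enable its MMU while it is still fetching instructions from physical addresses: a single first-level section entry maps the region containing the kernel at a virtual address equal to its physical address. A condensed sketch of that step as a stand-alone helper, reusing the exact expression from the hunk; the helper name is hypothetical, and error handling is omitted just as it is in the patch:

    #include <asm/pgalloc.h>    /* pgd_alloc(), pgd_free() */
    #include <asm/pgtable.h>    /* pmd_offset(), PMD_TYPE_SECT, PMD_SECT_AP_WRITE */

    /* Build temporary page tables for a secondary CPU: a copy of the
     * kernel's "standard" tables plus an identity section mapping of
     * the kernel's physical address. */
    static pgd_t *alloc_secondary_pgtables(void)
    {
        pgd_t *pgd = pgd_alloc(&init_mm);
        pmd_t *pmd = pmd_offset(pgd, PHYS_OFFSET);

        /* Section descriptor, supervisor read/write, covering the
         * first-level slot that holds the kernel's physical address. */
        *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
                     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
        return pgd;
    }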
@@ -98,6 +155,56 @@ int __init __cpu_up(unsigned int cpu)
 }
 
 /*
+ * This is the secondary CPU boot entry. We're using this CPUs
+ * idle thread stack, but a set of temporary page tables.
+ */
+asmlinkage void __init secondary_start_kernel(void)
+{
+	struct mm_struct *mm = &init_mm;
+	unsigned int cpu = smp_processor_id();
+
+	printk("CPU%u: Booted secondary processor\n", cpu);
+
+	/*
+	 * All kernel threads share the same mm context; grab a
+	 * reference and switch to it.
+	 */
+	atomic_inc(&mm->mm_users);
+	atomic_inc(&mm->mm_count);
+	current->active_mm = mm;
+	cpu_set(cpu, mm->cpu_vm_mask);
+	cpu_switch_mm(mm->pgd, mm);
+	enter_lazy_tlb(mm, current);
+
+	cpu_init();
+
+	/*
+	 * Give the platform a chance to do its own initialisation.
+	 */
+	platform_secondary_init(cpu);
+
+	/*
+	 * Enable local interrupts.
+	 */
+	local_irq_enable();
+	local_fiq_enable();
+
+	calibrate_delay();
+
+	smp_store_cpu_info(cpu);
+
+	/*
+	 * OK, now it's safe to let the boot CPU continue
+	 */
+	cpu_set(cpu, cpu_online_map);
+
+	/*
+	 * OK, it's off to the idle thread for us
+	 */
+	cpu_idle();
+}
+
+/*
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
  */
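
Taken together, the two new pieces form a small handshake: __cpu_up() publishes the stack and page-table pointers in secondary_data (the wmb() orders those stores ahead of whatever boot_secondary() does to release the core) and then polls cpu_online_map, while secondary_start_kernel() sets its bit in that mask once it has switched to the real page tables and finished per-CPU setup. The polling side, rewritten as a self-contained helper purely for illustration; wait_for_cpu_online() is a hypothetical name, since the patch open-codes this loop inside __cpu_up():

    #include <linux/compiler.h> /* barrier() */
    #include <linux/cpumask.h>  /* cpu_online() */
    #include <linux/delay.h>    /* udelay() */
    #include <linux/errno.h>
    #include <linux/jiffies.h>  /* jiffies, HZ, time_before() */

    /* Give the freshly started secondary roughly one second to mark
     * itself online; return 0 on success, -EIO if it never shows up. */
    static int wait_for_cpu_online(unsigned int cpu)
    {
        unsigned long timeout = jiffies + HZ;

        while (time_before(jiffies, timeout)) {
            if (cpu_online(cpu))
                return 0;   /* secondary_start_kernel() set its bit */

            udelay(10);
            barrier();      /* make sure cpu_online_map is re-read */
        }

        return -EIO;        /* the CPU never came up; caller reports the failure */
    }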