Diffstat (limited to 'arch/arm/kernel')

 -rw-r--r--  arch/arm/kernel/irq.c     |  31
 -rw-r--r--  arch/arm/kernel/process.c |   9
 -rw-r--r--  arch/arm/kernel/smp.c     | 109

3 files changed, 140 insertions, 9 deletions
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 3284118f356b..9def4404e1f2 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -1050,3 +1050,34 @@ static int __init noirqdebug_setup(char *str)
 }
 
 __setup("noirqdebug", noirqdebug_setup);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+        unsigned int i, cpu = smp_processor_id();
+
+        for (i = 0; i < NR_IRQS; i++) {
+                struct irqdesc *desc = irq_desc + i;
+
+                if (desc->cpu == cpu) {
+                        unsigned int newcpu = any_online_cpu(desc->affinity);
+
+                        if (newcpu == NR_CPUS) {
+                                if (printk_ratelimit())
+                                        printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+                                               i, cpu);
+
+                                cpus_setall(desc->affinity);
+                                newcpu = any_online_cpu(desc->affinity);
+                        }
+
+                        route_irq(desc, i, newcpu);
+                }
+        }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
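The core of migrate_irqs() is its fallback path: any_online_cpu() returns NR_CPUS when an IRQ's affinity mask permits no online CPU, in which case the mask is widened with cpus_setall() before picking a target again. A minimal standalone model of that decision, using a plain bitmask in place of cpumask_t (pick_online(), NCPUS and online_mask are invented stand-ins for any_online_cpu(), NR_CPUS and cpu_online_map):

/* Standalone sketch of the migrate_irqs() fallback logic. */
#include <stdio.h>

#define NCPUS 4

static unsigned online_mask = 0x5;              /* CPUs 0 and 2 online */

/* first online CPU allowed by affinity, or NCPUS if none */
static unsigned pick_online(unsigned affinity)
{
        unsigned cpu;

        for (cpu = 0; cpu < NCPUS; cpu++)
                if (affinity & online_mask & (1u << cpu))
                        return cpu;
        return NCPUS;
}

int main(void)
{
        unsigned affinity = 0x2;                /* bound to offline CPU 1 only */
        unsigned newcpu = pick_online(affinity);

        if (newcpu == NCPUS) {                  /* no permitted CPU online: widen */
                affinity = ~0u;                 /* models cpus_setall() */
                newcpu = pick_online(affinity);
        }
        printf("rerouted to CPU%u\n", newcpu);  /* prints CPU0 */
        return 0;
}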
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 409db6d5ec99..ba298277becd 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -105,6 +106,14 @@ void cpu_idle(void)
         /* endless idle loop with no priority at all */
         while (1) {
                 void (*idle)(void) = pm_idle;
+
+#ifdef CONFIG_HOTPLUG_CPU
+                if (cpu_is_offline(smp_processor_id())) {
+                        leds_event(led_idle_start);
+                        cpu_die();
+                }
+#endif
+
                 if (!idle)
                         idle = default_idle;
                 preempt_disable();
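The idle loop is the natural place for this check: once a CPU is marked offline the scheduler stops feeding it work, so it is guaranteed to end up in cpu_idle(), where cpu_die() can take over before the loop would otherwise drop into low-power idle. A self-contained model of that control flow (all names below are invented for illustration; nothing here is kernel API):

/* Sketch: the idle loop polls an offline flag each iteration and
 * takes the die path instead of idling once it is set. */
#include <stdio.h>
#include <stdlib.h>

static int offline_requested;           /* models cpu_is_offline() */

static void do_cpu_die(void)            /* models cpu_die(): never returns */
{
        printf("CPU going down\n");
        exit(0);
}

static void low_power_idle(int iter)    /* models default_idle()/pm_idle */
{
        printf("idle iteration %d\n", iter);
        if (iter == 2)
                offline_requested = 1;  /* hotplug request arrives */
}

int main(void)
{
        int iter = 0;

        while (1) {                     /* endless idle loop */
                if (offline_requested)
                        do_cpu_die();
                low_power_idle(iter++);
        }
}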
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 826164945747..edb5a406922f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -80,19 +80,23 @@ static DEFINE_SPINLOCK(smp_call_function_lock);
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-        struct task_struct *idle;
+        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
+        struct task_struct *idle = ci->idle;
         pgd_t *pgd;
         pmd_t *pmd;
         int ret;
 
         /*
-         * Spawn a new process manually.  Grab a pointer to
-         * its task struct so we can mess with it
+         * Spawn a new process manually, if not already done.
+         * Grab a pointer to its task struct so we can mess with it
          */
-        idle = fork_idle(cpu);
-        if (IS_ERR(idle)) {
-                printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
-                return PTR_ERR(idle);
+        if (!idle) {
+                idle = fork_idle(cpu);
+                if (IS_ERR(idle)) {
+                        printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+                        return PTR_ERR(idle);
+                }
+                ci->idle = idle;
         }
 
         /*
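__cpu_up() now checks a per-CPU cache before forking: the idle task created on a CPU's first bring-up is stored in per_cpu(cpu_data, cpu).idle and reused on every later bring-up, and the smp_prepare_boot_cpu() hunk further down seeds the cache for the boot CPU with current. Reduced to a standalone cache-on-first-use sketch (make_idle() and idle_cache[] are invented stand-ins for fork_idle() and the per-CPU data):

/* Sketch: create the per-CPU idle task once, reuse it on every
 * subsequent hotplug of the same CPU. */
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

struct task { int cpu; };

static struct task *idle_cache[NCPUS];  /* models per_cpu(cpu_data, n).idle */

static struct task *make_idle(int cpu)  /* models fork_idle() */
{
        struct task *t = malloc(sizeof(*t));

        if (t)
                t->cpu = cpu;
        return t;
}

static struct task *get_idle(int cpu)
{
        if (!idle_cache[cpu])           /* first bring-up: create */
                idle_cache[cpu] = make_idle(cpu);
        return idle_cache[cpu];         /* replug: reuse, no second fork */
}

int main(void)
{
        struct task *a = get_idle(1);
        struct task *b = get_idle(1);

        printf("%s\n", a == b ? "reused" : "reforked");  /* prints "reused" */
        return 0;
}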
@@ -155,6 +159,91 @@ int __cpuinit __cpu_up(unsigned int cpu)
         return ret;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpuexit __cpu_disable(void)
+{
+        unsigned int cpu = smp_processor_id();
+        struct task_struct *p;
+        int ret;
+
+        ret = mach_cpu_disable(cpu);
+        if (ret)
+                return ret;
+
+        /*
+         * Take this CPU offline.  Once we clear this, we can't return,
+         * and we must not schedule until we're ready to give up the cpu.
+         */
+        cpu_clear(cpu, cpu_online_map);
+
+        /*
+         * OK - migrate IRQs away from this CPU
+         */
+        migrate_irqs();
+
+        /*
+         * Flush user cache and TLB mappings, and then remove this CPU
+         * from the vm mask set of all processes.
+         */
+        flush_cache_all();
+        local_flush_tlb_all();
+
+        read_lock(&tasklist_lock);
+        for_each_process(p) {
+                if (p->mm)
+                        cpu_clear(cpu, p->mm->cpu_vm_mask);
+        }
+        read_unlock(&tasklist_lock);
+
+        return 0;
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpuexit __cpu_die(unsigned int cpu)
+{
+        if (!platform_cpu_kill(cpu))
+                printk("CPU%u: unable to kill\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller.  This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __cpuexit cpu_die(void)
+{
+        unsigned int cpu = smp_processor_id();
+
+        local_irq_disable();
+        idle_task_exit();
+
+        /*
+         * actual CPU shutdown procedure is at least platform (if not
+         * CPU) specific
+         */
+        platform_cpu_die(cpu);
+
+        /*
+         * Do not return to the idle loop - jump back to the secondary
+         * cpu initialisation.  There's some initialisation which needs
+         * to be repeated to undo the effects of taking the CPU offline.
+         */
+        __asm__("mov    sp, %0\n"
+        "       b       secondary_start_kernel"
+                :
+                : "r" ((void *)current->thread_info + THREAD_SIZE - 8));
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /*
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
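Three pieces cooperate in this hunk: __cpu_disable() runs on the dying CPU and detaches it (offline bit, IRQ migration, cache/TLB flush, mm mask cleanup); __cpu_die() runs on the CPU that requested the unplug and asks the platform to confirm the kill; and cpu_die() is reached from the idle loop and never returns, because its closing asm abandons the current call stack and branches back into secondary initialisation. The sp value it installs is the top of the idle thread's stack area minus the 8 bytes ARM reserves at the stack top. A standalone model of just that stack-top arithmetic (the THREAD_SIZE value is an assumption, 8 KB being typical for ARM of this era):

/* Sketch: compute an initial stack pointer at the top of a
 * THREAD_SIZE-sized stack area, minus ARM's 8 reserved bytes,
 * as cpu_die() does before branching to secondary_start_kernel. */
#include <stdio.h>

#define THREAD_SIZE 8192        /* assumed; matches a common ARM config */

int main(void)
{
        static unsigned char stack[THREAD_SIZE]; /* stands in for the idle thread's stack */
        void *sp = (void *)(stack + THREAD_SIZE - 8);

        printf("stack base %p, initial sp %p\n", (void *)stack, sp);
        return 0;
}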
@@ -236,6 +325,8 @@ void __init smp_prepare_boot_cpu(void)
 {
         unsigned int cpu = smp_processor_id();
 
+        per_cpu(cpu_data, cpu).idle = current;
+
         cpu_set(cpu, cpu_possible_map);
         cpu_set(cpu, cpu_present_map);
         cpu_set(cpu, cpu_online_map);
@@ -309,8 +400,8 @@ int smp_call_function_on_cpu(void (*func)(void *info), void *info, int retry,
                 printk(KERN_CRIT
                        "CPU%u: smp_call_function timeout for %p(%p)\n"
                        "       callmap %lx pending %lx, %swait\n",
-                       smp_processor_id(), func, info, callmap, data.pending,
-                       wait ? "" : "no ");
+                       smp_processor_id(), func, info, *cpus_addr(callmap),
+                       *cpus_addr(data.pending), wait ? "" : "no ");
 
         /*
          * TRACE
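The printk change fixes a type error rather than cosmetics: callmap and data.pending are cpumask_t values, i.e. structs wrapping an array of unsigned long, so passing them straight through varargs to a %lx conversion is undefined. cpus_addr() yields the underlying array, and dereferencing it prints the first word of the mask. A standalone model (mask_t and mask_addr() are invented stand-ins for cpumask_t and cpus_addr()):

/* Sketch: a cpumask-like struct cannot be printed with %lx
 * directly; print the first word of its bit array instead. */
#include <stdio.h>

#define MASK_WORDS 2

typedef struct { unsigned long bits[MASK_WORDS]; } mask_t;

#define mask_addr(m) ((m).bits)         /* models cpus_addr() */

int main(void)
{
        mask_t callmap = { { 0x6UL, 0x0UL } };  /* CPUs 1 and 2 set */

        /* printf("%lx", callmap) would be undefined behaviour;
         * dereference the array address instead: */
        printf("callmap %lx\n", *mask_addr(callmap));
        return 0;
}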
