Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--  arch/arm/kernel/smp.c | 69
1 file changed, 0 insertions(+), 69 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 4dc883a77adc..54aa994c4b20 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -41,7 +41,6 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
-#include <asm/localtimer.h>
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
@@ -133,8 +132,6 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void percpu_timer_stop(void);
-
 static int platform_cpu_kill(unsigned int cpu)
 {
 	if (smp_ops.cpu_kill)
@@ -178,11 +175,6 @@ int __cpuinit __cpu_disable(void)
 	migrate_irqs();
 
 	/*
-	 * Stop the local timer for this CPU.
-	 */
-	percpu_timer_stop();
-
-	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
 	 * from the vm mask set of all processes.
 	 *
@@ -303,8 +295,6 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
 	store_cpu_topology(cpuid);
 }
 
-static void percpu_timer_setup(void);
-
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -359,11 +349,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	set_cpu_online(cpu, true);
 	complete(&cpu_running);
 
-	/*
-	 * Setup the percpu timer for this CPU.
-	 */
-	percpu_timer_setup();
-
 	local_irq_enable();
 	local_fiq_enable();
 
@@ -410,12 +395,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = ncores;
 	if (ncores > 1 && max_cpus) {
 		/*
-		 * Enable the local timer or broadcast device for the
-		 * boot CPU, but only if we have more than one CPU.
-		 */
-		percpu_timer_setup();
-
-		/*
 		 * Initialise the present map, which describes the set of CPUs
 		 * actually populated at the present time. A platform should
 		 * re-initialize the map in the platforms smp_prepare_cpus()
@@ -491,11 +470,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	return sum;
 }
 
-/*
- * Timer (local or broadcast) support
- */
-static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
@@ -503,49 +477,6 @@ void tick_broadcast(const struct cpumask *mask)
 }
 #endif
 
-static struct local_timer_ops *lt_ops;
-
-#ifdef CONFIG_LOCAL_TIMERS
-int local_timer_register(struct local_timer_ops *ops)
-{
-	if (!is_smp() || !setup_max_cpus)
-		return -ENXIO;
-
-	if (lt_ops)
-		return -EBUSY;
-
-	lt_ops = ops;
-	return 0;
-}
-#endif
-
-static void __cpuinit percpu_timer_setup(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-	evt->cpumask = cpumask_of(cpu);
-
-	if (lt_ops)
-		lt_ops->setup(evt);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * The generic clock events code purposely does not stop the local timer
- * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
- * manually here.
- */
-static void percpu_timer_stop(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-	if (lt_ops)
-		lt_ops->stop(evt);
-}
-#endif
-
 static DEFINE_RAW_SPINLOCK(stop_lock);
 
 /*