Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--  arch/arm/kernel/smp.c | 87 -------------------------------------
 1 file changed, 0 insertions(+), 87 deletions(-)
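
For orientation before the hunks: this commit deletes the ARM core's local timer plumbing from smp.c — the percpu_timer_setup()/percpu_timer_stop() calls, the local_timer_register() hook behind CONFIG_LOCAL_TIMERS, and the rating-100 dummy broadcast clockevent fallback. As a hedged illustration of the API being removed (not code from this commit), the sketch below shows how a platform per-CPU timer driver would have registered with it. Every example_* identifier is hypothetical; local_timer_register() and the setup/stop callbacks of struct local_timer_ops are taken from the deleted code visible further down, assuming the pre-removal <asm/localtimer.h> definitions.

/* Hedged sketch, not part of this commit: a platform per-CPU timer
 * driver hooking into the local timer API that the hunks below
 * remove.  All example_* names are hypothetical. */
#include <linux/clockchips.h>
#include <linux/init.h>
#include <asm/localtimer.h>

static int example_timer_setup(struct clock_event_device *evt)
{
	/* Runs on each CPU from percpu_timer_setup(): program the
	 * hardware, fill in the clockevent, and register it. */
	evt->name	= "example_percpu_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
	evt->rating	= 300;	/* outranks the rating-100 dummy fallback */
	/* A real driver also sets set_mode/set_next_event and the
	 * frequency fields here before registering. */
	clockevents_register_device(evt);
	return 0;	/* non-zero makes the core fall back to broadcast */
}

static void example_timer_stop(struct clock_event_device *evt)
{
	/* Called via percpu_timer_stop() when the CPU is hot-unplugged:
	 * quiesce the per-CPU timer hardware. */
}

static struct local_timer_ops example_timer_ops = {
	.setup	= example_timer_setup,
	.stop	= example_timer_stop,
};

static int __init example_timer_init(void)
{
	/* Per the deleted local_timer_register(): returns -ENXIO on
	 * UP/nosmp boots and -EBUSY if another driver won the race. */
	return local_timer_register(&example_timer_ops);
}

A non-zero return from .setup is what sends the core down the broadcast_timer_setup() fallback path visible in the last hunk.
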
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 92d10e503746..72024ea8a3a6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -41,7 +41,6 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
-#include <asm/localtimer.h>
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
@@ -156,8 +155,6 @@ int platform_can_cpu_hotplug(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void percpu_timer_stop(void);
-
 static int platform_cpu_kill(unsigned int cpu)
 {
 	if (smp_ops.cpu_kill)
@@ -201,11 +198,6 @@ int __cpu_disable(void)
 	migrate_irqs();
 
 	/*
-	 * Stop the local timer for this CPU.
-	 */
-	percpu_timer_stop();
-
-	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
 	 * from the vm mask set of all processes.
 	 *
@@ -326,8 +318,6 @@ static void smp_store_cpu_info(unsigned int cpuid)
 	store_cpu_topology(cpuid);
 }
 
-static void percpu_timer_setup(void);
-
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -382,11 +372,6 @@ asmlinkage void secondary_start_kernel(void)
 	set_cpu_online(cpu, true);
 	complete(&cpu_running);
 
-	/*
-	 * Setup the percpu timer for this CPU.
-	 */
-	percpu_timer_setup();
-
 	local_irq_enable();
 	local_fiq_enable();
 
@@ -424,12 +409,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = ncores;
 	if (ncores > 1 && max_cpus) {
 		/*
-		 * Enable the local timer or broadcast device for the
-		 * boot CPU, but only if we have more than one CPU.
-		 */
-		percpu_timer_setup();
-
-		/*
 		 * Initialise the present map, which describes the set of CPUs
 		 * actually populated at the present time. A platform should
 		 * re-initialize the map in the platforms smp_prepare_cpus()
@@ -505,11 +484,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	return sum;
 }
 
-/*
- * Timer (local or broadcast) support
- */
-static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
@@ -517,67 +491,6 @@ void tick_broadcast(const struct cpumask *mask)
 }
 #endif
 
-static void broadcast_timer_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *evt)
-{
-}
-
-static void broadcast_timer_setup(struct clock_event_device *evt)
-{
-	evt->name	= "dummy_timer";
-	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
-			  CLOCK_EVT_FEAT_PERIODIC |
-			  CLOCK_EVT_FEAT_DUMMY;
-	evt->rating	= 100;
-	evt->mult	= 1;
-	evt->set_mode	= broadcast_timer_set_mode;
-
-	clockevents_register_device(evt);
-}
-
-static struct local_timer_ops *lt_ops;
-
-#ifdef CONFIG_LOCAL_TIMERS
-int local_timer_register(struct local_timer_ops *ops)
-{
-	if (!is_smp() || !setup_max_cpus)
-		return -ENXIO;
-
-	if (lt_ops)
-		return -EBUSY;
-
-	lt_ops = ops;
-	return 0;
-}
-#endif
-
-static void percpu_timer_setup(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-	evt->cpumask = cpumask_of(cpu);
-
-	if (!lt_ops || lt_ops->setup(evt))
-		broadcast_timer_setup(evt);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * The generic clock events code purposely does not stop the local timer
- * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
- * manually here.
- */
-static void percpu_timer_stop(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-	if (lt_ops)
-		lt_ops->stop(evt);
-}
-#endif
-
 static DEFINE_RAW_SPINLOCK(stop_lock);
 
 /*