author    Richard Henderson <rth@twiddle.net>    2013-07-14 13:57:34 -0400
committer Matt Turner <mattst88@gmail.com>       2013-11-16 19:33:19 -0500
commit    a1659d6d128a7e0c2985bce7c957b66af1f71181 (patch)
tree      9f53e3fc589023cde0a573a23855535225848146 /arch
parent    db2d3260617ae8c9076ef12e6de06bd5b3d82cd3 (diff)
alpha: Switch to GENERIC_CLOCKEVENTS
This allows us to get rid of some hacky code for SMP, and of the cycle
counter hackery that is now handled by generic code via clocksource +
clock_event_device objects.

Signed-off-by: Richard Henderson <rth@twiddle.net>
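In outline, the patch trades the hand-rolled tick bookkeeping for the
kernel's standard periodic clockevent pattern. A minimal sketch of that
pattern, for orientation only (identifiers prefixed demo_ are invented
here for illustration; the real versions are in the
arch/alpha/kernel/time.c hunk below):

    #include <linux/clockchips.h>
    #include <linux/interrupt.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(struct clock_event_device, demo_ce);

    /* Hardware tick handler: instead of updating jiffies and process
       times by hand, forward to the handler the tick core installed.
       For a periodic-only device that is tick_handle_periodic(), which
       runs do_timer(), update_process_times() and profile_tick() for
       us -- exactly the bookkeeping this patch deletes. */
    static irqreturn_t demo_timer_interrupt(int irq, void *dev)
    {
            struct clock_event_device *ce =
                    &per_cpu(demo_ce, smp_processor_id());

            if (ce->mode == CLOCK_EVT_MODE_PERIODIC)
                    ce->event_handler(ce);
            return IRQ_HANDLED;
    }

    /* A pure periodic source still needs the mode hook, but there is
       nothing to program on mode changes. */
    static void demo_ce_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *ce)
    {
    }

    /* Run once per CPU.  freq == HZ with zero min/max deltas tells
       the core this is a fixed-rate periodic source. */
    static void __init demo_init_clockevent(void)
    {
            int cpu = smp_processor_id();
            struct clock_event_device *ce = &per_cpu(demo_ce, cpu);

            ce->name     = "demo-rtc";
            ce->features = CLOCK_EVT_FEAT_PERIODIC;
            ce->rating   = 100;
            ce->cpumask  = cpumask_of(cpu);
            ce->set_mode = demo_ce_set_mode;
            clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
    }

On Alpha, the role of demo_timer_interrupt is played by timer_interrupt()
wired to the RTC interrupt, and init_clockevent() runs once per CPU: from
time_init() on the boot CPU and from smp_callin() on the secondaries.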
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/Kconfig             |   1
-rw-r--r--  arch/alpha/kernel/irq_alpha.c  |  14
-rw-r--r--  arch/alpha/kernel/proto.h      |   2
-rw-r--r--  arch/alpha/kernel/smp.c        |  33
-rw-r--r--  arch/alpha/kernel/time.c       | 112
5 files changed, 53 insertions, 109 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5d863d171b94..d39dc9b95a2c 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -16,6 +16,7 @@ config ALPHA
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select GENERIC_CLOCKEVENTS
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 28e4429596f3..6990ddc0fbaf 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -66,21 +66,7 @@ do_entInt(unsigned long type, unsigned long vector,
 		break;
 	case 1:
 		old_regs = set_irq_regs(regs);
-#ifdef CONFIG_SMP
-	  {
-		long cpu;
-
-		smp_percpu_timer_interrupt(regs);
-		cpu = smp_processor_id();
-		if (cpu != boot_cpuid) {
-			kstat_incr_irqs_this_cpu(RTC_IRQ, irq_to_desc(RTC_IRQ));
-		} else {
-			handle_irq(RTC_IRQ);
-		}
-	  }
-#else
 		handle_irq(RTC_IRQ);
-#endif
 		set_irq_regs(old_regs);
 		return;
 	case 2:
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 3b250fa5f2c1..bc806893afe0 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -135,13 +135,13 @@ extern void unregister_srm_console(void);
 /* smp.c */
 extern void setup_smp(void);
 extern void handle_ipi(struct pt_regs *);
-extern void smp_percpu_timer_interrupt(struct pt_regs *);
 
 /* bios32.c */
 /* extern void reset_for_srm(void); */
 
 /* time.c */
 extern irqreturn_t timer_interrupt(int irq, void *dev);
+extern void init_clockevent(void);
 extern void common_init_rtc(void);
 extern unsigned long est_cycle_freq;
 
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 9dbbcb3b9146..99ac36d5de4e 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -138,9 +138,11 @@ smp_callin(void)
 
 	/* Get our local ticker going. */
 	smp_setup_percpu_timer(cpuid);
+	init_clockevent();
 
 	/* Call platform-specific callin, if specified */
-	if (alpha_mv.smp_callin) alpha_mv.smp_callin();
+	if (alpha_mv.smp_callin)
+		alpha_mv.smp_callin();
 
 	/* All kernel threads share the same mm context. */
 	atomic_inc(&init_mm.mm_count);
@@ -498,35 +500,6 @@ smp_cpus_done(unsigned int max_cpus)
 	       ((bogosum + 2500) / (5000/HZ)) % 100);
 }
 
-
-void
-smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs;
-	int cpu = smp_processor_id();
-	unsigned long user = user_mode(regs);
-	struct cpuinfo_alpha *data = &cpu_data[cpu];
-
-	old_regs = set_irq_regs(regs);
-
-	/* Record kernel PC.  */
-	profile_tick(CPU_PROFILING);
-
-	if (!--data->prof_counter) {
-		/* We need to make like a normal interrupt -- otherwise
-		   timer interrupts ignore the global interrupt lock,
-		   which would be a Bad Thing.  */
-		irq_enter();
-
-		update_process_times(user);
-
-		data->prof_counter = data->prof_multiplier;
-
-		irq_exit();
-	}
-	set_irq_regs(old_regs);
-}
-
 int
 setup_profiling_timer(unsigned int multiplier)
 {
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 0d72e2df4b0e..08ff3f502a76 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -42,6 +42,7 @@
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <linux/clocksource.h>
+#include <linux/clockchips.h>
 
 #include "proto.h"
 #include "irq_impl.h"
@@ -49,25 +50,6 @@
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-#define TICK_SIZE (tick_nsec / 1000)
-
-/*
- * Shift amount by which scaled_ticks_per_cycle is scaled.  Shifting
- * by 48 gives us 16 bits for HZ while keeping the accuracy good even
- * for large CPU clock rates.
- */
-#define FIX_SHIFT	48
-
-/* lump static variables together for more efficient access: */
-static struct {
-	/* cycle counter last time it got invoked */
-	__u32 last_time;
-	/* ticks/cycle * 2^48 */
-	unsigned long scaled_ticks_per_cycle;
-	/* partial unused tick */
-	unsigned long partial_tick;
-} state;
-
 unsigned long est_cycle_freq;
 
 #ifdef CONFIG_IRQ_WORK
@@ -96,49 +78,64 @@ static inline __u32 rpcc(void)
 	return __builtin_alpha_rpcc();
 }
 
+
+
 /*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * The RTC as a clock_event_device primitive.
  */
-irqreturn_t timer_interrupt(int irq, void *dev)
-{
-	unsigned long delta;
-	__u32 now;
-	long nticks;
 
-#ifndef CONFIG_SMP
-	/* Not SMP, do kernel PC profiling here.  */
-	profile_tick(CPU_PROFILING);
-#endif
+static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);
 
-	/*
-	 * Calculate how many ticks have passed since the last update,
-	 * including any previous partial leftover.  Save any resulting
-	 * fraction for the next pass.
-	 */
-	now = rpcc();
-	delta = now - state.last_time;
-	state.last_time = now;
-	delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
-	state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
-	nticks = delta >> FIX_SHIFT;
+irqreturn_t
+timer_interrupt(int irq, void *dev)
+{
+	int cpu = smp_processor_id();
+	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
 
-	if (nticks)
-		xtime_update(nticks);
+	/* Don't run the hook for UNUSED or SHUTDOWN.  */
+	if (likely(ce->mode == CLOCK_EVT_MODE_PERIODIC))
+		ce->event_handler(ce);
 
 	if (test_irq_work_pending()) {
 		clear_irq_work_pending();
 		irq_work_run();
 	}
 
-#ifndef CONFIG_SMP
-	while (nticks--)
-		update_process_times(user_mode(get_irq_regs()));
-#endif
-
 	return IRQ_HANDLED;
 }
 
+static void
+rtc_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
+{
+	/* The mode member of CE is updated in generic code.
+	   Since we only support periodic events, nothing to do.  */
+}
+
+static int
+rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
+{
+	/* This hook is for oneshot mode, which we don't support.  */
+	return -EINVAL;
+}
+
+void __init
+init_clockevent(void)
+{
+	int cpu = smp_processor_id();
+	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
+
+	*ce = (struct clock_event_device){
+		.name = "rtc",
+		.features = CLOCK_EVT_FEAT_PERIODIC,
+		.rating = 100,
+		.cpumask = cpumask_of(cpu),
+		.set_mode = rtc_ce_set_mode,
+		.set_next_event = rtc_ce_set_next_event,
+	};
+
+	clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
+}
+
 void __init
 common_init_rtc(void)
 {
@@ -372,22 +369,9 @@ time_init(void)
 	clocksource_register_hz(&clocksource_rpcc, cycle_freq);
 #endif
 
-	/* From John Bowman <bowman@math.ualberta.ca>: allow the values
-	   to settle, as the Update-In-Progress bit going low isn't good
-	   enough on some hardware.  2ms is our guess; we haven't found
-	   bogomips yet, but this is close on a 500Mhz box.  */
-	__delay(1000000);
-
-	if (HZ > (1<<16)) {
-		extern void __you_loose (void);
-		__you_loose();
-	}
-
-	state.last_time = cc1;
-	state.scaled_ticks_per_cycle
-		= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
-	state.partial_tick = 0L;
-
 	/* Startup the timer source. */
 	alpha_mv.init_rtc();
+
+	/* Start up the clock event device. */
+	init_clockevent();
 }