author    Stephen Boyd <sboyd@codeaurora.org>  2013-02-15 20:31:31 -0500
committer Stephen Boyd <sboyd@codeaurora.org>  2013-06-24 20:46:50 -0400
commit    4d70c59bb5be9e41a06b9f11ecfba75c14f9fea7 (patch)
tree      ab2cd4ce9e2f95a3d8392f97d1dd8ed4b39deb78 /arch/arm/mach-msm/timer.c
parent    05a6548556d007143fcb291bfdfa6b2bb3e63e95 (diff)
ARM: msm: Divorce msm_timer from local timer API
Separate the msm_timer from the local timer API. This will allow
us to remove ARM local timer support in the near future and get
us closer to moving this driver to drivers/clocksource.
Acked-by: David Brown <davidb@codeaurora.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Daniel Walker <dwalker@fifo99.com>
Cc: Bryan Huntsman <bryanh@codeaurora.org>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Diffstat (limited to 'arch/arm/mach-msm/timer.c')
-rw-r--r--  arch/arm/mach-msm/timer.c | 126
1 file changed, 70 insertions, 56 deletions
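
The patch below replaces the driver's local_timer_ops registration with a per-CPU clock_event_device driven by a CPU-hotplug notifier. As a rough, self-contained sketch of that pattern (not taken from the patch), the following shows how a timer driver of this kernel generation might wire per-CPU clockevents to CPU_STARTING/CPU_DYING notifications. The my_* identifiers and the 32768 Hz rate are hypothetical placeholders, and the set_next_event/set_mode hooks are stubbed where real hardware programming would go.

/*
 * Illustrative sketch only -- not part of the patch. Identifiers prefixed
 * with my_ and the 32768 Hz example rate are hypothetical.
 */
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static struct clock_event_device __percpu *my_evt;

/* Stubbed hardware hooks; a real driver would program its timer here. */
static int my_set_next_event(unsigned long cycles, struct clock_event_device *evt)
{
        return 0;
}

static void my_set_mode(enum clock_event_mode mode, struct clock_event_device *evt)
{
}

/* Fill in and register the clockevent for whichever CPU this runs on. */
static void my_local_timer_setup(struct clock_event_device *evt)
{
        evt->name = "my_timer";
        evt->features = CLOCK_EVT_FEAT_ONESHOT;
        evt->rating = 200;
        evt->set_mode = my_set_mode;
        evt->set_next_event = my_set_next_event;
        evt->cpumask = cpumask_of(smp_processor_id());
        /* 32768 Hz is an example rate; a real driver uses its timer clock. */
        clockevents_config_and_register(evt, 32768, 4, 0xffffffff);
}

/* Mark the local clockevent unused when its CPU goes down. */
static void my_local_timer_stop(struct clock_event_device *evt)
{
        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
}

static int my_timer_cpu_notify(struct notifier_block *self,
                               unsigned long action, void *hcpu)
{
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:
                /* Runs on the CPU that is coming up, with preemption off. */
                my_local_timer_setup(this_cpu_ptr(my_evt));
                break;
        case CPU_DYING:
                /* Runs on the CPU that is going down. */
                my_local_timer_stop(this_cpu_ptr(my_evt));
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block my_timer_cpu_nb = {
        .notifier_call = my_timer_cpu_notify,
};

/* Called from early init, while only the boot CPU is running. */
static int __init my_timer_driver_init(void)
{
        my_evt = alloc_percpu(struct clock_event_device);
        if (!my_evt)
                return -ENOMEM;

        /* The boot CPU is already up, so configure it directly... */
        my_local_timer_setup(this_cpu_ptr(my_evt));

        /* ...and let the notifier handle CPUs that come online afterwards. */
        return register_cpu_notifier(&my_timer_cpu_nb);
}

The boot CPU is configured directly from the init path because CPU_STARTING is only delivered to CPUs that come online later; the patch does the same thing when msm_timer_init() calls msm_local_timer_setup() in its last hunk.
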
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index b6418fd5fe0d..dacdcfdbc342 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -16,6 +16,7 @@
 
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
@@ -26,7 +27,6 @@
 #include <linux/sched_clock.h>
 
 #include <asm/mach/time.h>
-#include <asm/localtimer.h>
 
 #include "common.h"
 
@@ -49,7 +49,7 @@ static void __iomem *sts_base;
 
 static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
 {
-        struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+        struct clock_event_device *evt = dev_id;
         /* Stop the timer tick */
         if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
                 u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
@@ -101,18 +101,7 @@ static void msm_timer_set_mode(enum clock_event_mode mode,
         writel_relaxed(ctrl, event_base + TIMER_ENABLE);
 }
 
-static struct clock_event_device msm_clockevent = {
-        .name = "gp_timer",
-        .features = CLOCK_EVT_FEAT_ONESHOT,
-        .rating = 200,
-        .set_next_event = msm_timer_set_next_event,
-        .set_mode = msm_timer_set_mode,
-};
-
-static union {
-        struct clock_event_device *evt;
-        struct clock_event_device * __percpu *percpu_evt;
-} msm_evt;
+static struct clock_event_device __percpu *msm_evt;
 
 static void __iomem *source_base;
 
@@ -138,37 +127,65 @@ static struct clocksource msm_clocksource = {
         .flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-#ifdef CONFIG_LOCAL_TIMERS
+static int msm_timer_irq;
+static int msm_timer_has_ppi;
+
 static int __cpuinit msm_local_timer_setup(struct clock_event_device *evt)
 {
-        /* Use existing clock_event for cpu 0 */
-        if (!smp_processor_id())
-                return 0;
-
-        evt->irq = msm_clockevent.irq;
-        evt->name = "local_timer";
-        evt->features = msm_clockevent.features;
-        evt->rating = msm_clockevent.rating;
+        int cpu = smp_processor_id();
+        int err;
+
+        evt->irq = msm_timer_irq;
+        evt->name = "msm_timer";
+        evt->features = CLOCK_EVT_FEAT_ONESHOT;
+        evt->rating = 200;
         evt->set_mode = msm_timer_set_mode;
         evt->set_next_event = msm_timer_set_next_event;
+        evt->cpumask = cpumask_of(cpu);
+
+        clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);
+
+        if (msm_timer_has_ppi) {
+                enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
+        } else {
+                err = request_irq(evt->irq, msm_timer_interrupt,
+                                  IRQF_TIMER | IRQF_NOBALANCING |
+                                  IRQF_TRIGGER_RISING, "gp_timer", evt);
+                if (err)
+                        pr_err("request_irq failed\n");
+        }
 
-        *__this_cpu_ptr(msm_evt.percpu_evt) = evt;
-        clockevents_config_and_register(evt, GPT_HZ, 4, 0xf0000000);
-        enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
         return 0;
 }
 
-static void msm_local_timer_stop(struct clock_event_device *evt)
+static void __cpuinit msm_local_timer_stop(struct clock_event_device *evt)
 {
         evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
         disable_percpu_irq(evt->irq);
 }
 
-static struct local_timer_ops msm_local_timer_ops __cpuinitdata = {
-        .setup = msm_local_timer_setup,
-        .stop = msm_local_timer_stop,
+static int __cpuinit msm_timer_cpu_notify(struct notifier_block *self,
+                                          unsigned long action, void *hcpu)
+{
+        /*
+         * Grab cpu pointer in each case to avoid spurious
+         * preemptible warnings
+         */
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_STARTING:
+                msm_local_timer_setup(this_cpu_ptr(msm_evt));
+                break;
+        case CPU_DYING:
+                msm_local_timer_stop(this_cpu_ptr(msm_evt));
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block msm_timer_cpu_nb __cpuinitdata = {
+        .notifier_call = msm_timer_cpu_notify,
 };
-#endif /* CONFIG_LOCAL_TIMERS */
 
 static notrace u32 msm_sched_clock_read(void)
 {
@@ -178,38 +195,35 @@ static notrace u32 msm_sched_clock_read(void)
 static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
                                   bool percpu)
 {
-        struct clock_event_device *ce = &msm_clockevent;
         struct clocksource *cs = &msm_clocksource;
-        int res;
+        int res = 0;
+
+        msm_timer_irq = irq;
+        msm_timer_has_ppi = percpu;
+
+        msm_evt = alloc_percpu(struct clock_event_device);
+        if (!msm_evt) {
+                pr_err("memory allocation failed for clockevents\n");
+                goto err;
+        }
 
-        ce->cpumask = cpumask_of(0);
-        ce->irq = irq;
+        if (percpu)
+                res = request_percpu_irq(irq, msm_timer_interrupt,
+                                         "gp_timer", msm_evt);
 
-        clockevents_config_and_register(ce, GPT_HZ, 4, 0xffffffff);
-        if (percpu) {
-                msm_evt.percpu_evt = alloc_percpu(struct clock_event_device *);
-                if (!msm_evt.percpu_evt) {
-                        pr_err("memory allocation failed for %s\n", ce->name);
+        if (res) {
+                pr_err("request_percpu_irq failed\n");
+        } else {
+                res = register_cpu_notifier(&msm_timer_cpu_nb);
+                if (res) {
+                        free_percpu_irq(irq, msm_evt);
                         goto err;
                 }
-                *__this_cpu_ptr(msm_evt.percpu_evt) = ce;
-                res = request_percpu_irq(ce->irq, msm_timer_interrupt,
-                                         ce->name, msm_evt.percpu_evt);
-                if (!res) {
-                        enable_percpu_irq(ce->irq, IRQ_TYPE_EDGE_RISING);
-#ifdef CONFIG_LOCAL_TIMERS
-                        local_timer_register(&msm_local_timer_ops);
-#endif
-                }
-        } else {
-                msm_evt.evt = ce;
-                res = request_irq(ce->irq, msm_timer_interrupt,
-                                  IRQF_TIMER | IRQF_NOBALANCING |
-                                  IRQF_TRIGGER_RISING, ce->name, &msm_evt.evt);
+
+                /* Immediately configure the timer on the boot CPU */
+                msm_local_timer_setup(__this_cpu_ptr(msm_evt));
         }
 
-        if (res)
-                pr_err("request_irq failed for %s\n", ce->name);
 err:
         writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
         res = clocksource_register_hz(cs, dgt_hz);