author		Stephen Boyd <sboyd@codeaurora.org>	2011-11-08 13:34:07 -0500
committer	David Brown <davidb@codeaurora.org>	2011-11-10 13:36:33 -0500
commit		2a00c1068b2c1ae451e230ef8bd010d7b2f56f54 (patch)
tree		a419230baad3776586482b070f4723ddec38a305 /arch/arm/mach-msm
parent		a850c3f6446d30b47c984d3f9e45c935385fd592 (diff)
msm: timer: Remove msm_clocks[] and simplify code
We can simplify the timer code now that we only use the DGT for the
clocksource and the GPT for the clockevent. Get rid of the msm_clocks[]
array and propagate the changes throughout the code. This reduces the
lines of code in this file and improves readability.

Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: David Brown <davidb@codeaurora.org>
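For reference, this is the shape of the driver after the patch, condensed from the new side of the diff below; it is only a sketch (the interrupt handler and init code are elided), and every identifier in it appears in the diff itself, nothing new is introduced. The GPT, reached through event_base, backs the single clockevent; the DGT, reached through source_base, backs the single clocksource.

/* Condensed sketch only; see the full diff below for the real code. */
static void __iomem *event_base;   /* GPT registers: drive the clockevent */
static void __iomem *source_base;  /* DGT registers: drive the clocksource */

static struct clock_event_device msm_clockevent = {
	.name		= "gp_timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 32,
	.rating		= 200,
	.set_next_event	= msm_timer_set_next_event,
	.set_mode	= msm_timer_set_mode,
};

static struct clocksource msm_clocksource = {
	.name	= "dg_timer",
	.rating	= 300,
	.read	= msm_read_timer_count,	/* reads source_base, shifted by MSM_DGT_SHIFT */
	.mask	= CLOCKSOURCE_MASK(32 - MSM_DGT_SHIFT),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};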
Diffstat (limited to 'arch/arm/mach-msm')
-rw-r--r--	arch/arm/mach-msm/timer.c	221
1 file changed, 76 insertions(+), 145 deletions(-)
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 9f3671a43314..fc0646442e09 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -40,8 +40,6 @@
 
 #define GPT_HZ 32768
 
-#define MSM_GLOBAL_TIMER MSM_CLOCK_GPT
-
 /* TODO: Remove these ifdefs */
 #if defined(CONFIG_ARCH_QSD8X50)
 #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
@@ -57,31 +55,7 @@
 #define MSM_DGT_SHIFT (5)
 #endif
 
-struct msm_clock {
-	struct clock_event_device clockevent;
-	struct clocksource clocksource;
-	unsigned int irq;
-	void __iomem *regbase;
-	uint32_t freq;
-	uint32_t shift;
-	void __iomem *global_counter;
-	void __iomem *local_counter;
-	union {
-		struct clock_event_device *evt;
-		struct clock_event_device __percpu **percpu_evt;
-	};
-};
-
-enum {
-	MSM_CLOCK_GPT,
-	MSM_CLOCK_DGT,
-	NR_TIMERS,
-};
-
-
-static struct msm_clock msm_clocks[];
-
-static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt);
+static void __iomem *event_base;
 
 static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
 {
@@ -90,59 +64,31 @@ static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
 		return IRQ_HANDLED;
 	/* Stop the timer tick */
 	if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
-		struct msm_clock *clock = clockevent_to_clock(evt);
-		u32 ctrl = readl_relaxed(clock->regbase + TIMER_ENABLE);
+		u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
 		ctrl &= ~TIMER_ENABLE_EN;
-		writel_relaxed(ctrl, clock->regbase + TIMER_ENABLE);
+		writel_relaxed(ctrl, event_base + TIMER_ENABLE);
 	}
 	evt->event_handler(evt);
 	return IRQ_HANDLED;
 }
 
-static cycle_t msm_read_timer_count(struct clocksource *cs)
-{
-	struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
-
-	/*
-	 * Shift timer count down by a constant due to unreliable lower bits
-	 * on some targets.
-	 */
-	return readl(clk->global_counter) >> clk->shift;
-}
-
-static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
-{
-#ifdef CONFIG_SMP
-	int i;
-	for (i = 0; i < NR_TIMERS; i++)
-		if (evt == &(msm_clocks[i].clockevent))
-			return &msm_clocks[i];
-	return &msm_clocks[MSM_GLOBAL_TIMER];
-#else
-	return container_of(evt, struct msm_clock, clockevent);
-#endif
-}
-
 static int msm_timer_set_next_event(unsigned long cycles,
 				    struct clock_event_device *evt)
 {
-	struct msm_clock *clock = clockevent_to_clock(evt);
-	u32 match = cycles << clock->shift;
-	u32 ctrl = readl_relaxed(clock->regbase + TIMER_ENABLE);
+	u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
 
-	writel_relaxed(0, clock->regbase + TIMER_CLEAR);
-	writel_relaxed(match, clock->regbase + TIMER_MATCH_VAL);
-	writel_relaxed(ctrl | TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE);
+	writel_relaxed(0, event_base + TIMER_CLEAR);
+	writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
+	writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
 	return 0;
 }
 
 static void msm_timer_set_mode(enum clock_event_mode mode,
 			       struct clock_event_device *evt)
 {
-	struct msm_clock *clock = clockevent_to_clock(evt);
 	u32 ctrl;
 
-	ctrl = readl_relaxed(clock->regbase + TIMER_ENABLE);
+	ctrl = readl_relaxed(event_base + TIMER_ENABLE);
 	ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
 
 	switch (mode) {
@@ -156,59 +102,61 @@ static void msm_timer_set_mode(enum clock_event_mode mode,
 	case CLOCK_EVT_MODE_SHUTDOWN:
 		break;
 	}
-	writel_relaxed(ctrl, clock->regbase + TIMER_ENABLE);
+	writel_relaxed(ctrl, event_base + TIMER_ENABLE);
 }
 
-static struct msm_clock msm_clocks[] = {
-	[MSM_CLOCK_GPT] = {
-		.clockevent = {
-			.name = "gp_timer",
-			.features = CLOCK_EVT_FEAT_ONESHOT,
-			.shift = 32,
-			.rating = 200,
-			.set_next_event = msm_timer_set_next_event,
-			.set_mode = msm_timer_set_mode,
-		},
-		.irq = INT_GP_TIMER_EXP,
-		.freq = GPT_HZ,
-	},
-	[MSM_CLOCK_DGT] = {
-		.clocksource = {
-			.name = "dg_timer",
-			.rating = 300,
-			.read = msm_read_timer_count,
-			.mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)),
-			.flags = CLOCK_SOURCE_IS_CONTINUOUS,
-		},
-		.freq = DGT_HZ >> MSM_DGT_SHIFT,
-		.shift = MSM_DGT_SHIFT,
-	}
+static struct clock_event_device msm_clockevent = {
+	.name = "gp_timer",
+	.features = CLOCK_EVT_FEAT_ONESHOT,
+	.shift = 32,
+	.rating = 200,
+	.set_next_event = msm_timer_set_next_event,
+	.set_mode = msm_timer_set_mode,
+};
+
+static union {
+	struct clock_event_device *evt;
+	struct clock_event_device __percpu **percpu_evt;
+} msm_evt;
+
+static void __iomem *source_base;
+
+static cycle_t msm_read_timer_count(struct clocksource *cs)
+{
+	/*
+	 * Shift timer count down by a constant due to unreliable lower bits
+	 * on some targets.
+	 */
+	return readl_relaxed(source_base + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT;
+}
+
+static struct clocksource msm_clocksource = {
+	.name = "dg_timer",
+	.rating = 300,
+	.read = msm_read_timer_count,
+	.mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)),
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static void __init msm_timer_init(void)
 {
-	struct msm_clock *clock;
-	struct clock_event_device *ce = &msm_clocks[MSM_CLOCK_GPT].clockevent;
-	struct clocksource *cs = &msm_clocks[MSM_CLOCK_DGT].clocksource;
+	struct clock_event_device *ce = &msm_clockevent;
+	struct clocksource *cs = &msm_clocksource;
 	int res;
-	int global_offset = 0;
-
 
 	if (cpu_is_msm7x01()) {
-		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE;
-		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x10;
+		event_base = MSM_CSR_BASE;
+		source_base = MSM_CSR_BASE + 0x10;
 	} else if (cpu_is_msm7x30()) {
-		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE + 0x04;
-		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x24;
+		event_base = MSM_CSR_BASE + 0x04;
+		source_base = MSM_CSR_BASE + 0x24;
	} else if (cpu_is_qsd8x50()) {
-		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE;
-		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x10;
+		event_base = MSM_CSR_BASE;
+		source_base = MSM_CSR_BASE + 0x10;
 	} else if (cpu_is_msm8x60() || cpu_is_msm8960()) {
-		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_TMR_BASE + 0x04;
-		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_TMR_BASE + 0x24;
-
-		/* Use CPU0's timer as the global timer. */
-		global_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
+		event_base = MSM_TMR_BASE + 0x04;
+		/* Use CPU0's timer as the global clock source. */
+		source_base = MSM_TMR0_BASE + 0x24;
 	} else
 		BUG();
 
@@ -216,88 +164,71 @@ static void __init msm_timer_init(void)
 	writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 #endif
 
-	clock = &msm_clocks[MSM_CLOCK_GPT];
-	clock->local_counter = clock->regbase + TIMER_COUNT_VAL;
-
-	writel_relaxed(0, clock->regbase + TIMER_ENABLE);
-	writel_relaxed(0, clock->regbase + TIMER_CLEAR);
-	writel_relaxed(~0, clock->regbase + TIMER_MATCH_VAL);
-	ce->mult = div_sc(clock->freq, NSEC_PER_SEC, ce->shift);
+	writel_relaxed(0, event_base + TIMER_ENABLE);
+	writel_relaxed(0, event_base + TIMER_CLEAR);
+	writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
+	ce->mult = div_sc(GPT_HZ, NSEC_PER_SEC, ce->shift);
 	/*
 	 * allow at least 10 seconds to notice that the timer
 	 * wrapped
 	 */
-	ce->max_delta_ns =
-		clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
+	ce->max_delta_ns = clockevent_delta2ns(0xf0000000, ce);
 	/* 4 gets rounded down to 3 */
 	ce->min_delta_ns = clockevent_delta2ns(4, ce);
 	ce->cpumask = cpumask_of(0);
 
-	ce->irq = clock->irq;
+	ce->irq = INT_GP_TIMER_EXP;
 	if (cpu_is_msm8x60() || cpu_is_msm8960()) {
-		clock->percpu_evt = alloc_percpu(struct clock_event_device *);
-		if (!clock->percpu_evt) {
+		msm_evt.percpu_evt = alloc_percpu(struct clock_event_device *);
+		if (!msm_evt.percpu_evt) {
 			pr_err("memory allocation failed for %s\n", ce->name);
 			goto err;
 		}
-
-		*__this_cpu_ptr(clock->percpu_evt) = ce;
+		*__this_cpu_ptr(msm_evt.percpu_evt) = ce;
 		res = request_percpu_irq(ce->irq, msm_timer_interrupt,
-					 ce->name, clock->percpu_evt);
+					 ce->name, msm_evt.percpu_evt);
 		if (!res)
 			enable_percpu_irq(ce->irq, 0);
 	} else {
-		clock->evt = ce;
+		msm_evt.evt = ce;
 		res = request_irq(ce->irq, msm_timer_interrupt,
 				  IRQF_TIMER | IRQF_NOBALANCING |
-				  IRQF_TRIGGER_RISING, ce->name, &clock->evt);
+				  IRQF_TRIGGER_RISING, ce->name, &msm_evt.evt);
 	}
 
 	if (res)
 		pr_err("request_irq failed for %s\n", ce->name);
-
 	clockevents_register_device(ce);
 err:
-	clock = &msm_clocks[MSM_CLOCK_DGT];
-	clock->local_counter = clock->regbase + TIMER_COUNT_VAL;
-	clock->global_counter = clock->local_counter + global_offset;
-	writel_relaxed(TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE);
-	res = clocksource_register_hz(cs, clock->freq);
+	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
+	res = clocksource_register_hz(cs, DGT_HZ >> MSM_DGT_SHIFT);
 	if (res)
-		pr_err("clocksource_register failed for %s\n", cs->name);
+		pr_err("clocksource_register failed\n");
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
 int __cpuinit local_timer_setup(struct clock_event_device *evt)
 {
-	static bool local_timer_inited;
-	struct msm_clock *clock = &msm_clocks[MSM_GLOBAL_TIMER];
-
 	/* Use existing clock_event for cpu 0 */
 	if (!smp_processor_id())
 		return 0;
 
-	if (!local_timer_inited) {
-		writel(0, clock->regbase + TIMER_ENABLE);
-		writel(0, clock->regbase + TIMER_CLEAR);
-		writel(~0, clock->regbase + TIMER_MATCH_VAL);
-		local_timer_inited = true;
-	}
-	evt->irq = clock->irq;
+	writel_relaxed(0, event_base + TIMER_ENABLE);
+	writel_relaxed(0, event_base + TIMER_CLEAR);
+	writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
+	evt->irq = msm_clockevent.irq;
 	evt->name = "local_timer";
-	evt->features = CLOCK_EVT_FEAT_ONESHOT;
-	evt->rating = clock->clockevent.rating;
+	evt->features = msm_clockevent.features;
+	evt->rating = msm_clockevent.rating;
 	evt->set_mode = msm_timer_set_mode;
 	evt->set_next_event = msm_timer_set_next_event;
-	evt->shift = clock->clockevent.shift;
-	evt->mult = div_sc(clock->freq, NSEC_PER_SEC, evt->shift);
-	evt->max_delta_ns =
-		clockevent_delta2ns(0xf0000000 >> clock->shift, evt);
+	evt->shift = msm_clockevent.shift;
+	evt->mult = div_sc(GPT_HZ, NSEC_PER_SEC, evt->shift);
+	evt->max_delta_ns = clockevent_delta2ns(0xf0000000, evt);
 	evt->min_delta_ns = clockevent_delta2ns(4, evt);
 
-	*__this_cpu_ptr(clock->percpu_evt) = evt;
+	*__this_cpu_ptr(msm_evt.percpu_evt) = evt;
 	enable_percpu_irq(evt->irq, 0);
-
 	clockevents_register_device(evt);
 	return 0;
 }