author     Linus Torvalds <torvalds@linux-foundation.org>  2016-12-15 17:15:17 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-15 17:15:17 -0500
commit     d25b6af91ec600faaff3a7e863f19d3e16593e52
tree       30aaf6a1fa9b767779afd478189d5be58a0a2f4c /drivers/clocksource
parent     179a7ba6806805bd4cd7a5e4574b83353c5615ad
parent     7badf6fefca8278e749e82411fdb98b123cca50e
Merge tag 'arc-4.10-rc1-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC updates from Vineet Gupta:
 "These are mostly timer/clocksource driver updates which were
  Reviewed/Acked by Daniel but had to be merged via ARC tree due to
  dependencies. I will follow up with another pull request with actual
  ARC changes early next week !

  Summary:

   - Moving ARC timer driver into drivers/clocksource
   - EZChip timer driver updates [Noam]
   - ARC AXS103 and HAPS platform updates [Alexey]"

* tag 'arc-4.10-rc1-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: axs10x: really enable ARC PGU
  ARC: rename Zebu platform support to HAPS
  clocksource: nps: avoid maybe-uninitialized warning
  clocksource: Add clockevent support to NPS400 driver
  clocksource: update "fn" at CLOCKSOURCE_OF_DECLARE() of nps400 timer
  soc: Support for NPS HW scheduling
  clocksource: import ARC timer driver
  ARC: breakout timer include code into separate header
  ...
  ARC: move mcip.h into include/soc and adjust the includes
  ARC: breakout aux handling into a separate header
  ARC: time: move time_init() out of the driver
  ARC: timer: gfrc, rtc: build under same option (64-bit timers)
  ARC: timer: gfrc, rtc: Read BCR to detect whether hardware exists
  ...
  ARC: timer: gfrc, rtc: deuglify big endian code
Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/Kconfig     |  20
-rw-r--r--  drivers/clocksource/Makefile    |   1
-rw-r--r--  drivers/clocksource/arc_timer.c | 336
-rw-r--r--  drivers/clocksource/timer-nps.c | 224
4 files changed, 561 insertions, 20 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index e2c6e43cf8ca..4866f7aa32e6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -282,6 +282,26 @@ config CLKSRC_MPS2
 	select CLKSRC_MMIO
 	select CLKSRC_OF
 
+config ARC_TIMERS
+	bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_OF
+	help
+	  These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
+	  (ARC700 as well as ARC HS38).
+	  TIMER0 serves as clockevent while TIMER1 provides clocksource
+
+config ARC_TIMERS_64BIT
+	bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	depends on ARC_TIMERS
+	select CLKSRC_OF
+	help
+	  This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP)
+	  RTC is implemented inside the core, while GFRC sits outside the core in
+	  ARConnect IP block. Driver automatically picks one of them for clocksource
+	  as appropriate.
+
 config ARM_ARCH_TIMER
 	bool
 	select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index cf87f407f1ad..a14111e1f087 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
 obj-$(CONFIG_CLKSRC_NPS)	+= timer-nps.o
 obj-$(CONFIG_OXNAS_RPS_TIMER)	+= timer-oxnas-rps.o
 
+obj-$(CONFIG_ARC_TIMERS)	+= arc_timer.o
 obj-$(CONFIG_ARM_ARCH_TIMER)	+= arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)	+= arm_global_timer.o
 obj-$(CONFIG_ARMV7M_SYSTICK)	+= armv7m_systick.o
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
new file mode 100644
index 000000000000..a49748d826c0
--- /dev/null
+++ b/drivers/clocksource/arc_timer.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1, Each can be
+ * programmed to go from @count to @limit and optionally interrupt.
+ * We've designated TIMER0 for clockevents and TIMER1 for clocksource
+ *
+ * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP)
+ * which are suitable for UP and SMP based clocksources respectively
+ */
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <soc/arc/timers.h>
+#include <soc/arc/mcip.h>
+
+
+static unsigned long arc_timer_freq;
+
+static int noinline arc_get_timer_clk(struct device_node *node)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = of_clk_get(node, 0);
+	if (IS_ERR(clk)) {
+		pr_err("timer missing clk");
+		return PTR_ERR(clk);
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		pr_err("Couldn't enable parent clk\n");
+		return ret;
+	}
+
+	arc_timer_freq = clk_get_rate(clk);
+
+	return 0;
+}
+
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_TIMERS_64BIT
+
+static cycle_t arc_read_gfrc(struct clocksource *cs)
+{
+	unsigned long flags;
+	u32 l, h;
+
+	local_irq_save(flags);
+
+	__mcip_cmd(CMD_GFRC_READ_LO, 0);
+	l = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+	__mcip_cmd(CMD_GFRC_READ_HI, 0);
+	h = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+	local_irq_restore(flags);
+
+	return (((cycle_t)h) << 32) | l;
+}
+
+static struct clocksource arc_counter_gfrc = {
+	.name   = "ARConnect GFRC",
+	.rating = 400,
+	.read   = arc_read_gfrc,
+	.mask   = CLOCKSOURCE_MASK(64),
+	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_gfrc(struct device_node *node)
+{
+	struct mcip_bcr mp;
+	int ret;
+
+	READ_BCR(ARC_REG_MCIP_BCR, mp);
+	if (!mp.gfrc) {
+		pr_warn("Global-64-bit-Ctr clocksource not detected");
+		return -ENXIO;
+	}
+
+	ret = arc_get_timer_clk(node);
+	if (ret)
+		return ret;
+
+	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
+
+#define AUX_RTC_CTRL	0x103
+#define AUX_RTC_LOW	0x104
+#define AUX_RTC_HIGH	0x105
+
+static cycle_t arc_read_rtc(struct clocksource *cs)
+{
+	unsigned long status;
+	u32 l, h;
+
+	/*
+	 * hardware has an internal state machine which tracks readout of
+	 * low/high and updates the CTRL.status if
+	 *  - interrupt/exception taken between the two reads
+	 *  - high increments after low has been read
+	 */
+	do {
+		l = read_aux_reg(AUX_RTC_LOW);
+		h = read_aux_reg(AUX_RTC_HIGH);
+		status = read_aux_reg(AUX_RTC_CTRL);
+	} while (!(status & _BITUL(31)));
+
+	return (((cycle_t)h) << 32) | l;
+}
+
+static struct clocksource arc_counter_rtc = {
+	.name   = "ARCv2 RTC",
+	.rating = 350,
+	.read   = arc_read_rtc,
+	.mask   = CLOCKSOURCE_MASK(64),
+	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_rtc(struct device_node *node)
+{
+	struct bcr_timer timer;
+	int ret;
+
+	READ_BCR(ARC_REG_TIMERS_BCR, timer);
+	if (!timer.rtc) {
+		pr_warn("Local-64-bit-Ctr clocksource not detected");
+		return -ENXIO;
+	}
+
+	/* Local to CPU hence not usable in SMP */
+	if (IS_ENABLED(CONFIG_SMP)) {
+		pr_warn("Local-64-bit-Ctr not usable in SMP");
+		return -EINVAL;
+	}
+
+	ret = arc_get_timer_clk(node);
+	if (ret)
+		return ret;
+
+	write_aux_reg(AUX_RTC_CTRL, 1);
+
+	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
+
+#endif
+
+/*
+ * 32bit TIMER1 to keep counting monotonically and wraparound
+ */
+
+static cycle_t arc_read_timer1(struct clocksource *cs)
+{
+	return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+static struct clocksource arc_counter_timer1 = {
+	.name   = "ARC Timer1",
+	.rating = 300,
+	.read   = arc_read_timer1,
+	.mask   = CLOCKSOURCE_MASK(32),
+	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_timer1(struct device_node *node)
+{
+	int ret;
+
+	/* Local to CPU hence not usable in SMP */
+	if (IS_ENABLED(CONFIG_SMP))
+		return -EINVAL;
+
+	ret = arc_get_timer_clk(node);
+	if (ret)
+		return ret;
+
+	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
+	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+}
+
+/********** Clock Event Device *********/
+
+static int arc_timer_irq;
+
+/*
+ * Arm the timer to interrupt after @cycles
+ * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
+ */
+static void arc_timer_event_setup(unsigned int cycles)
+{
+	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
+	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */
+
+	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+}
+
+
+static int arc_clkevent_set_next_event(unsigned long delta,
+				       struct clock_event_device *dev)
+{
+	arc_timer_event_setup(delta);
+	return 0;
+}
+
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
+{
+	/*
+	 * At X Hz, 1 sec = 1000ms -> X cycles;
+	 * 10ms -> X / 100 cycles
+	 */
+	arc_timer_event_setup(arc_timer_freq / HZ);
+	return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
+	.name			= "ARC Timer0",
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
+	.rating			= 300,
+	.set_next_event		= arc_clkevent_set_next_event,
+	.set_state_periodic	= arc_clkevent_set_periodic,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+	/*
+	 * Note that generic IRQ core could have passed @evt for @dev_id if
+	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
+	 */
+	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+	int irq_reenable = clockevent_state_periodic(evt);
+
+	/*
+	 * Any write to the CTRL reg ACKs the interrupt; we rewrite the
+	 * [N]ot [H]alted bit to keep counting, and re-arm it via the
+	 * [I]nterrupt [E]nable bit if the clockevent is periodic
+	 */
+	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+
+static int arc_timer_starting_cpu(unsigned int cpu)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+
+	evt->cpumask = cpumask_of(smp_processor_id());
+
+	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
+	enable_percpu_irq(arc_timer_irq, 0);
+	return 0;
+}
+
+static int arc_timer_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(arc_timer_irq);
+	return 0;
+}
+
+/*
+ * clockevent setup for boot CPU
+ */
+static int __init arc_clockevent_setup(struct device_node *node)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+	int ret;
+
+	arc_timer_irq = irq_of_parse_and_map(node, 0);
+	if (arc_timer_irq <= 0) {
+		pr_err("clockevent: missing irq");
+		return -EINVAL;
+	}
+
+	ret = arc_get_timer_clk(node);
+	if (ret) {
+		pr_err("clockevent: missing clk");
+		return ret;
+	}
+
+	/* Needs apriori irq_set_percpu_devid() done in intc map function */
+	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
+				 "Timer0 (per-cpu-tick)", evt);
+	if (ret) {
+		pr_err("clockevent: unable to request irq\n");
+		return ret;
+	}
+
+	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+				"AP_ARC_TIMER_STARTING",
+				arc_timer_starting_cpu,
+				arc_timer_dying_cpu);
+	if (ret) {
+		pr_err("Failed to setup hotplug state");
+		return ret;
+	}
+	return 0;
+}
+
+static int __init arc_of_timer_init(struct device_node *np)
+{
+	static int init_count = 0;
+	int ret;
+
+	if (!init_count) {
+		init_count = 1;
+		ret = arc_clockevent_setup(np);
+	} else {
+		ret = arc_cs_setup_timer1(np);
+	}
+
+	return ret;
+}
+CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index 70c149af8ee0..8da5e93b6810 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -46,7 +46,36 @@
 /* This array is per cluster of CPUs (Each NPS400 cluster got 256 CPUs) */
 static void *nps_msu_reg_low_addr[NPS_CLUSTER_NUM] __read_mostly;
 
-static unsigned long nps_timer_rate;
+static int __init nps_get_timer_clk(struct device_node *node,
+				    unsigned long *timer_freq,
+				    struct clk **clk)
+{
+	int ret;
+
+	*clk = of_clk_get(node, 0);
+	ret = PTR_ERR_OR_ZERO(*clk);
+	if (ret) {
+		pr_err("timer missing clk");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(*clk);
+	if (ret) {
+		pr_err("Couldn't enable parent clk\n");
+		clk_put(*clk);
+		return ret;
+	}
+
+	*timer_freq = clk_get_rate(*clk);
+	if (!(*timer_freq)) {
+		pr_err("Couldn't get clk rate\n");
+		clk_disable_unprepare(*clk);
+		clk_put(*clk);
+		return -EINVAL;
+	}
+
+	return 0;
+}
 
 static cycle_t nps_clksrc_read(struct clocksource *clksrc)
 {
@@ -55,26 +84,24 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
 	return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
 }
 
-static int __init nps_setup_clocksource(struct device_node *node,
-					struct clk *clk)
+static int __init nps_setup_clocksource(struct device_node *node)
 {
 	int ret, cluster;
+	struct clk *clk;
+	unsigned long nps_timer1_freq;
+
 
 	for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++)
 		nps_msu_reg_low_addr[cluster] =
 			nps_host_reg((cluster << NPS_CLUSTER_OFFSET),
 				     NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
 
-	ret = clk_prepare_enable(clk);
-	if (ret) {
-		pr_err("Couldn't enable parent clock\n");
+	ret = nps_get_timer_clk(node, &nps_timer1_freq, &clk);
+	if (ret)
 		return ret;
-	}
-
-	nps_timer_rate = clk_get_rate(clk);
 
-	ret = clocksource_mmio_init(nps_msu_reg_low_addr, "EZnps-tick",
-				    nps_timer_rate, 301, 32, nps_clksrc_read);
+	ret = clocksource_mmio_init(nps_msu_reg_low_addr, "nps-tick",
+				    nps_timer1_freq, 300, 32, nps_clksrc_read);
 	if (ret) {
 		pr_err("Couldn't register clock source.\n");
 		clk_disable_unprepare(clk);
@@ -83,18 +110,175 @@ static int __init nps_setup_clocksource(struct device_node *node,
 	return ret;
 }
 
-static int __init nps_timer_init(struct device_node *node)
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
+		       nps_setup_clocksource);
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_src, "ezchip,nps400-timer1",
+		       nps_setup_clocksource);
+
+#ifdef CONFIG_EZNPS_MTM_EXT
+#include <soc/nps/mtm.h>
+
+/* Timer related Aux registers */
+#define NPS_REG_TIMER0_TSI	0xFFFFF850
+#define NPS_REG_TIMER0_LIMIT	0x23
+#define NPS_REG_TIMER0_CTRL	0x22
+#define NPS_REG_TIMER0_CNT	0x21
+
+/*
+ * Interrupt Enabled (IE) - re-arm the timer
+ * Not Halted (NH) - is cleared when working with JTAG (for debug)
+ */
+#define TIMER0_CTRL_IE		BIT(0)
+#define TIMER0_CTRL_NH		BIT(1)
+
+static unsigned long nps_timer0_freq;
+static unsigned long nps_timer0_irq;
+
+static void nps_clkevent_rm_thread(void)
+{
+	int thread;
+	unsigned int cflags, enabled_threads;
+
+	hw_schd_save(&cflags);
+
+	enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
+
+	/* remove thread from TSI1 */
+	thread = read_aux_reg(CTOP_AUX_THREAD_ID);
+	enabled_threads &= ~(1 << thread);
+	write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
+
+	/* Acknowledge and if needed re-arm the timer */
+	if (!enabled_threads)
+		write_aux_reg(NPS_REG_TIMER0_CTRL, TIMER0_CTRL_NH);
+	else
+		write_aux_reg(NPS_REG_TIMER0_CTRL,
+			      TIMER0_CTRL_IE | TIMER0_CTRL_NH);
+
+	hw_schd_restore(cflags);
+}
+
+static void nps_clkevent_add_thread(unsigned long delta)
+{
+	int thread;
+	unsigned int cflags, enabled_threads;
+
+	hw_schd_save(&cflags);
+
+	/* add thread to TSI1 */
+	thread = read_aux_reg(CTOP_AUX_THREAD_ID);
+	enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
+	enabled_threads |= (1 << thread);
+	write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
+
+	/* set next timer event */
+	write_aux_reg(NPS_REG_TIMER0_LIMIT, delta);
+	write_aux_reg(NPS_REG_TIMER0_CNT, 0);
+	write_aux_reg(NPS_REG_TIMER0_CTRL,
+		      TIMER0_CTRL_IE | TIMER0_CTRL_NH);
+
+	hw_schd_restore(cflags);
+}
+
+/*
+ * Whenever anyone tries to change modes, we just mask interrupts
+ * and wait for the next event to get set.
+ */
+static int nps_clkevent_set_state(struct clock_event_device *dev)
+{
+	nps_clkevent_rm_thread();
+	disable_percpu_irq(nps_timer0_irq);
+
+	return 0;
+}
+
+static int nps_clkevent_set_next_event(unsigned long delta,
+				       struct clock_event_device *dev)
+{
+	nps_clkevent_add_thread(delta);
+	enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
+
+	return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, nps_clockevent_device) = {
+	.name				= "NPS Timer0",
+	.features			= CLOCK_EVT_FEAT_ONESHOT,
+	.rating				= 300,
+	.set_next_event			= nps_clkevent_set_next_event,
+	.set_state_oneshot		= nps_clkevent_set_state,
+	.set_state_oneshot_stopped	= nps_clkevent_set_state,
+	.set_state_shutdown		= nps_clkevent_set_state,
+	.tick_resume			= nps_clkevent_set_state,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+
+	nps_clkevent_rm_thread();
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static int nps_timer_starting_cpu(unsigned int cpu)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&nps_clockevent_device);
+
+	evt->cpumask = cpumask_of(smp_processor_id());
+
+	clockevents_config_and_register(evt, nps_timer0_freq, 0, ULONG_MAX);
+	enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
+
+	return 0;
+}
+
+static int nps_timer_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(nps_timer0_irq);
+	return 0;
+}
+
+static int __init nps_setup_clockevent(struct device_node *node)
 {
 	struct clk *clk;
+	int ret;
 
-	clk = of_clk_get(node, 0);
-	if (IS_ERR(clk)) {
-		pr_err("Can't get timer clock.\n");
-		return PTR_ERR(clk);
+	nps_timer0_irq = irq_of_parse_and_map(node, 0);
+	if (nps_timer0_irq <= 0) {
+		pr_err("clockevent: missing irq");
+		return -EINVAL;
 	}
 
-	return nps_setup_clocksource(node, clk);
+	ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk);
+	if (ret)
+		return ret;
+
+	/* Needs apriori irq_set_percpu_devid() done in intc map function */
+	ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler,
+				 "Timer0 (per-cpu-tick)",
+				 &nps_clockevent_device);
+	if (ret) {
+		pr_err("Couldn't request irq\n");
+		clk_disable_unprepare(clk);
+		return ret;
+	}
+
+	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+				"clockevents/nps:starting",
+				nps_timer_starting_cpu,
+				nps_timer_dying_cpu);
+	if (ret) {
+		pr_err("Failed to setup hotplug state");
+		clk_disable_unprepare(clk);
+		free_percpu_irq(nps_timer0_irq, &nps_clockevent_device);
+		return ret;
+	}
+
+	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
-		       nps_timer_init);
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_evt, "ezchip,nps400-timer0",
+		       nps_setup_clockevent);
+#endif /* CONFIG_EZNPS_MTM_EXT */
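
For context on how the CLOCKSOURCE_OF_DECLARE() entries above get invoked: a minimal, illustrative sketch (not part of this merge, and its exact location in the ARC tree is outside this diff) of an architecture time_init() using the v4.10-era generic API. of_clk_init() registers the device-tree clocks that of_clk_get(node, 0) in these drivers expects, and clocksource_probe() walks the device tree, matches compatibles such as "snps,arc-timer" or "ezchip,nps400-timer0", and calls the corresponding init functions. Treat the placement and surrounding code as an assumption; only the two generic calls are the point.

#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/init.h>

void __init time_init(void)
{
	/* Register DT-described clocks so of_clk_get(node, 0) in the
	 * timer drivers above can succeed. */
	of_clk_init(NULL);

	/* Scan the DT, match CLOCKSOURCE_OF_DECLARE() compatibles and
	 * run their init callbacks (arc_of_timer_init() etc.). */
	clocksource_probe();
}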