author		Linus Torvalds <torvalds@linux-foundation.org>	2013-09-06 16:21:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-06 16:21:16 -0400
commit		8e73e367f7dc50f1d1bc22a63e5764bb4eea9b48 (patch)
tree		9bf593c1fc7612bcdd64b9ba46e41d340f9e94d3 /drivers/clocksource
parent		d2f3e9eb7c9e12e89f0ac5f0dbc7a9aed0ea925d (diff)
parent		7323f219533e01cc075ba45a76f3e5b214adb23f (diff)
Merge tag 'cleanup-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC cleanups from Olof Johansson:
"This branch contains code cleanups, moves and removals for 3.12.
There's a large number of various cleanups, and a nice net removal of
13500 lines of code.
Highlights worth mentioning are:
- A series of patches from Stephen Boyd removing the ARM local timer
API.
- Move of Qualcomm MSM IOMMU code to drivers/iommu.
- Samsung PWM driver cleanups from Tomasz Figa, removing legacy PWM
driver and switching over to the drivers/pwm one.
- Removal of some unused auto-generated headers for OMAP2+ (PRM/CM).
There's also a move of a header file out of include/linux/i2c/ to
platform_data, where it really belongs. It touches mostly ARM
platform code for include changes so we took it through our tree"
* tag 'cleanup-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (83 commits)
ARM: OMAP2+: Add back the define for AM33XX_RST_GLOBAL_WARM_SW_MASK
gpio: (gpio-pca953x) move header to linux/platform_data/
arm: zynq: hotplug: Remove unreachable code
ARM: SAMSUNG: Remove unnecessary exynos4_default_sdhci*()
tegra: simplify use of devm_ioremap_resource
ARM: SAMSUNG: Remove plat/regs-timer.h header
ARM: SAMSUNG: Remove remaining uses of plat/regs-timer.h header
ARM: SAMSUNG: Remove pwm-clock infrastructure
ARM: SAMSUNG: Remove old PWM timer platform devices
pwm: Remove superseded pwm-samsung-legacy driver
ARM: SAMSUNG: Modify board files to use new PWM platform device
ARM: SAMSUNG: Rework private data handling in dev-backlight
pwm: Add new pwm-samsung driver
ARM: mach-mvebu: remove redundant DT parsing and validation
ARM: msm: Only compile io.c on platforms that use it
iommu/msm: Move mach includes to iommu directory
ARM: msm: Remove devices-iommu.c
ARM: msm: Move mach/board.h contents to common.h
ARM: msm: Migrate msm_timer to CLOCKSOURCE_OF_DECLARE
ARM: msm: Remove TMR and TMR0 static mappings
...
Diffstat (limited to 'drivers/clocksource')
-rw-r--r--	drivers/clocksource/Kconfig              |   1
-rw-r--r--	drivers/clocksource/exynos_mct.c         |  58
-rw-r--r--	drivers/clocksource/samsung_pwm_timer.c  | 108
-rw-r--r--	drivers/clocksource/time-armada-370-xp.c |  92
-rw-r--r--	drivers/clocksource/timer-marco.c        |  98
5 files changed, 197 insertions, 160 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index b7b9b040a89b..41c69469ce20 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -99,7 +99,6 @@ config CLKSRC_EXYNOS_MCT
 
 config CLKSRC_SAMSUNG_PWM
 	bool
-	select CLKSRC_MMIO
 	help
 	  This is a new clocksource driver for the PWM timer found in
 	  Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index b2bbc415f120..5b34768f4d7c 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/clockchips.h>
+#include <linux/cpu.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/percpu.h>
@@ -24,7 +25,6 @@
 #include <linux/of_address.h>
 #include <linux/clocksource.h>
 
-#include <asm/localtimer.h>
 #include <asm/mach/time.h>
 
 #define EXYNOS4_MCTREG(x)		(x)
@@ -80,7 +80,7 @@ static unsigned int mct_int_type;
 static int mct_irqs[MCT_NR_IRQS];
 
 struct mct_clock_event_device {
-	struct clock_event_device *evt;
+	struct clock_event_device evt;
 	unsigned long base;
 	char name[10];
 };
@@ -295,8 +295,6 @@ static void exynos4_clockevent_init(void)
 	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
 }
 
-#ifdef CONFIG_LOCAL_TIMERS
-
 static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
 
 /* Clock event handling */
@@ -369,7 +367,7 @@ static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
 
 static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
 {
-	struct clock_event_device *evt = mevt->evt;
+	struct clock_event_device *evt = &mevt->evt;
 
 	/*
 	 * This is for supporting oneshot mode.
@@ -391,7 +389,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
 static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
 {
 	struct mct_clock_event_device *mevt = dev_id;
-	struct clock_event_device *evt = mevt->evt;
+	struct clock_event_device *evt = &mevt->evt;
 
 	exynos4_mct_tick_clear(mevt);
 
@@ -405,8 +403,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
 	struct mct_clock_event_device *mevt;
 	unsigned int cpu = smp_processor_id();
 
-	mevt = this_cpu_ptr(&percpu_mct_tick);
-	mevt->evt = evt;
+	mevt = container_of(evt, struct mct_clock_event_device, evt);
 
 	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
 	sprintf(mevt->name, "mct_tick%d", cpu);
@@ -448,14 +445,37 @@ static void exynos4_local_timer_stop(struct clock_event_device *evt)
 	disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
 }
 
-static struct local_timer_ops exynos4_mct_tick_ops = {
-	.setup	= exynos4_local_timer_setup,
-	.stop	= exynos4_local_timer_stop,
+static int exynos4_mct_cpu_notify(struct notifier_block *self,
+				  unsigned long action, void *hcpu)
+{
+	struct mct_clock_event_device *mevt;
+
+	/*
+	 * Grab cpu pointer in each case to avoid spurious
+	 * preemptible warnings
+	 */
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		mevt = this_cpu_ptr(&percpu_mct_tick);
+		exynos4_local_timer_setup(&mevt->evt);
+		break;
+	case CPU_DYING:
+		mevt = this_cpu_ptr(&percpu_mct_tick);
+		exynos4_local_timer_stop(&mevt->evt);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block exynos4_mct_cpu_nb = {
+	.notifier_call = exynos4_mct_cpu_notify,
 };
-#endif /* CONFIG_LOCAL_TIMERS */
 
 static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
 {
+	int err;
+	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
 	struct clk *mct_clk, *tick_clk;
 
 	tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
@@ -473,9 +493,7 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
 	if (!reg_base)
 		panic("%s: unable to ioremap mct address space\n", __func__);
 
-#ifdef CONFIG_LOCAL_TIMERS
 	if (mct_int_type == MCT_INT_PPI) {
-		int err;
 
 		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
 					 exynos4_mct_tick_isr, "MCT",
@@ -484,8 +502,16 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
 				mct_irqs[MCT_L0_IRQ], err);
 	}
 
-	local_timer_register(&exynos4_mct_tick_ops);
-#endif /* CONFIG_LOCAL_TIMERS */
+	err = register_cpu_notifier(&exynos4_mct_cpu_nb);
+	if (err)
+		goto out_irq;
+
+	/* Immediately configure the timer on the boot CPU */
+	exynos4_local_timer_setup(&mevt->evt);
+	return;
+
+out_irq:
+	free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
 }
 
 void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1)
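Taken together, the exynos_mct hunks show the shape of the "ARM local timer API removal" the merge text highlights: the per-CPU clock_event_device is embedded in a per-CPU structure, and bring-up/tear-down moves from a struct local_timer_ops into a CPU notifier. Below is a minimal, hedged sketch of that pattern; the foo_* names and the stub bodies are placeholders, while the notifier pieces themselves (register_cpu_notifier(), CPU_STARTING, CPU_DYING, NOTIFY_OK) are the same ones the hunk uses.

```c
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>

/* One clock event device per CPU; the real driver embeds it in a larger
 * per-CPU structure and uses container_of() to get back to it. */
static DEFINE_PER_CPU(struct clock_event_device, foo_tick);

static int foo_tick_setup(struct clock_event_device *evt)
{
	/* program the per-CPU timer and register evt here (driver specific) */
	return 0;
}

static void foo_tick_stop(struct clock_event_device *evt)
{
	/* mask the per-CPU timer interrupt here (driver specific) */
}

static int foo_cpu_notify(struct notifier_block *self,
			  unsigned long action, void *hcpu)
{
	/* CPU_STARTING/CPU_DYING run on the affected CPU itself, so
	 * this_cpu_ptr() picks that CPU's device without preemption warnings. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		foo_tick_setup(this_cpu_ptr(&foo_tick));
		break;
	case CPU_DYING:
		foo_tick_stop(this_cpu_ptr(&foo_tick));
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_nb = {
	.notifier_call = foo_cpu_notify,
};

static void __init foo_timer_init(void)
{
	/* Secondary CPUs are handled by the notifier; the boot CPU is already
	 * past CPU_STARTING, so it is configured immediately. */
	register_cpu_notifier(&foo_cpu_nb);
	foo_tick_setup(this_cpu_ptr(&foo_tick));
}
```

The Armada 370/XP and SiRF Marco diffs below repeat the same shape with driver-specific setup and stop bodies.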
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 584b5472eea3..ac60f8b8a5f7 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -44,16 +44,28 @@
 #define TCFG1_SHIFT(x)		((x) * 4)
 #define TCFG1_MUX_MASK		0xf
 
+/*
+ * Each channel occupies 4 bits in TCON register, but there is a gap of 4
+ * bits (one channel) after channel 0, so channels have different numbering
+ * when accessing TCON register.
+ *
+ * In addition, the location of autoreload bit for channel 4 (TCON channel 5)
+ * in its set of bits is 2 as opposed to 3 for other channels.
+ */
 #define TCON_START(chan)		(1 << (4 * (chan) + 0))
 #define TCON_MANUALUPDATE(chan)		(1 << (4 * (chan) + 1))
 #define TCON_INVERT(chan)		(1 << (4 * (chan) + 2))
-#define TCON_AUTORELOAD(chan)		(1 << (4 * (chan) + 3))
+#define _TCON_AUTORELOAD(chan)		(1 << (4 * (chan) + 3))
+#define _TCON_AUTORELOAD4(chan)		(1 << (4 * (chan) + 2))
+#define TCON_AUTORELOAD(chan)		\
+	((chan < 5) ? _TCON_AUTORELOAD(chan) : _TCON_AUTORELOAD4(chan))
 
 DEFINE_SPINLOCK(samsung_pwm_lock);
 EXPORT_SYMBOL(samsung_pwm_lock);
 
 struct samsung_pwm_clocksource {
 	void __iomem *base;
+	void __iomem *source_reg;
 	unsigned int irq[SAMSUNG_PWM_NUM];
 	struct samsung_pwm_variant variant;
 
@@ -195,17 +207,6 @@ static int samsung_set_next_event(unsigned long cycles,
 	return 0;
 }
 
-static void samsung_timer_resume(void)
-{
-	/* event timer restart */
-	samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1);
-	samsung_time_start(pwm.event_id, true);
-
-	/* source timer restart */
-	samsung_time_setup(pwm.source_id, pwm.tcnt_max);
-	samsung_time_start(pwm.source_id, true);
-}
-
 static void samsung_set_mode(enum clock_event_mode mode,
 				struct clock_event_device *evt)
 {
@@ -222,20 +223,29 @@ static void samsung_set_mode(enum clock_event_mode mode,
 
 	case CLOCK_EVT_MODE_UNUSED:
 	case CLOCK_EVT_MODE_SHUTDOWN:
-		break;
-
 	case CLOCK_EVT_MODE_RESUME:
-		samsung_timer_resume();
 		break;
 	}
 }
 
+static void samsung_clockevent_resume(struct clock_event_device *cev)
+{
+	samsung_timer_set_prescale(pwm.event_id, pwm.tscaler_div);
+	samsung_timer_set_divisor(pwm.event_id, pwm.tdiv);
+
+	if (pwm.variant.has_tint_cstat) {
+		u32 mask = (1 << pwm.event_id);
+		writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
+	}
+}
+
 static struct clock_event_device time_event_device = {
 	.name		= "samsung_event_timer",
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.rating		= 200,
 	.set_next_event	= samsung_set_next_event,
 	.set_mode	= samsung_set_mode,
+	.resume		= samsung_clockevent_resume,
 };
 
 static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
@@ -286,23 +296,34 @@ static void __init samsung_clockevent_init(void)
 	}
 }
 
-static void __iomem *samsung_timer_reg(void)
+static void samsung_clocksource_suspend(struct clocksource *cs)
 {
-	switch (pwm.source_id) {
-	case 0:
-	case 1:
-	case 2:
-	case 3:
-		return pwm.base + pwm.source_id * 0x0c + 0x14;
-
-	case 4:
-		return pwm.base + 0x40;
-
-	default:
-		BUG();
-	}
+	samsung_time_stop(pwm.source_id);
 }
 
+static void samsung_clocksource_resume(struct clocksource *cs)
+{
+	samsung_timer_set_prescale(pwm.source_id, pwm.tscaler_div);
+	samsung_timer_set_divisor(pwm.source_id, pwm.tdiv);
+
+	samsung_time_setup(pwm.source_id, pwm.tcnt_max);
+	samsung_time_start(pwm.source_id, true);
+}
+
+static cycle_t samsung_clocksource_read(struct clocksource *c)
+{
+	return ~readl_relaxed(pwm.source_reg);
+}
+
+static struct clocksource samsung_clocksource = {
+	.name		= "samsung_clocksource_timer",
+	.rating		= 250,
+	.read		= samsung_clocksource_read,
+	.suspend	= samsung_clocksource_suspend,
+	.resume		= samsung_clocksource_resume,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
 /*
  * Override the global weak sched_clock symbol with this
  * local implementation which uses the clocksource to get some
@@ -312,17 +333,11 @@ static void __iomem *samsung_timer_reg(void)
  */
 static u32 notrace samsung_read_sched_clock(void)
 {
-	void __iomem *reg = samsung_timer_reg();
-
-	if (!reg)
-		return 0;
-
-	return ~__raw_readl(reg);
+	return samsung_clocksource_read(NULL);
 }
 
 static void __init samsung_clocksource_init(void)
 {
-	void __iomem *reg = samsung_timer_reg();
 	unsigned long pclk;
 	unsigned long clock_rate;
 	int ret;
@@ -337,12 +352,16 @@ static void __init samsung_clocksource_init(void)
 	samsung_time_setup(pwm.source_id, pwm.tcnt_max);
 	samsung_time_start(pwm.source_id, true);
 
+	if (pwm.source_id == 4)
+		pwm.source_reg = pwm.base + 0x40;
+	else
+		pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14;
+
 	setup_sched_clock(samsung_read_sched_clock,
 					pwm.variant.bits, clock_rate);
 
-	ret = clocksource_mmio_init(reg, "samsung_clocksource_timer",
-				    clock_rate, 250, pwm.variant.bits,
-				    clocksource_mmio_readl_down);
+	samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
+	ret = clocksource_register_hz(&samsung_clocksource, clock_rate);
 	if (ret)
 		panic("samsung_clocksource_timer: can't register clocksource\n");
 }
@@ -404,7 +423,6 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
 static void __init samsung_pwm_alloc(struct device_node *np,
 				     const struct samsung_pwm_variant *variant)
 {
-	struct resource res;
 	struct property *prop;
 	const __be32 *cur;
 	u32 val;
@@ -423,17 +441,9 @@ static void __init samsung_pwm_alloc(struct device_node *np,
 		pwm.variant.output_mask |= 1 << val;
 	}
 
-	of_address_to_resource(np, 0, &res);
-	if (!request_mem_region(res.start,
-				resource_size(&res), "samsung-pwm")) {
-		pr_err("%s: failed to request IO mem region\n", __func__);
-		return;
-	}
-
-	pwm.base = ioremap(res.start, resource_size(&res));
+	pwm.base = of_iomap(np, 0);
 	if (!pwm.base) {
 		pr_err("%s: failed to map PWM registers\n", __func__);
-		release_mem_region(res.start, resource_size(&res));
 		return;
 	}
 
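The comment block added at the top of samsung_pwm_timer.c documents the one non-obvious piece of bit twiddling in this file: TCON slots are 4 bits wide, channel 0 keeps slot 0 but channels 1-4 are shifted up by one slot, and the slot used by hardware channel 4 keeps its autoreload flag at bit offset 2 instead of 3. A throwaway user-space check of that arithmetic is below; the to_tcon_channel() mapping is an assumption paraphrased from the driver's description, and the macros simply mirror the ones added by the hunk.

```c
#include <stdio.h>

/* Assumed hardware-channel -> TCON-slot mapping: channel 0 stays at slot 0,
 * channels 1..4 shift up by one slot because of the gap after channel 0. */
static unsigned int to_tcon_channel(unsigned int channel)
{
	return (channel == 0) ? 0 : (channel + 1);
}

/* Mirrors the macros added by the hunk above. */
#define _TCON_AUTORELOAD(chan)	(1u << (4 * (chan) + 3))
#define _TCON_AUTORELOAD4(chan)	(1u << (4 * (chan) + 2))
#define TCON_AUTORELOAD(chan) \
	((chan) < 5 ? _TCON_AUTORELOAD(chan) : _TCON_AUTORELOAD4(chan))

int main(void)
{
	unsigned int hw;

	for (hw = 0; hw < 5; hw++) {
		unsigned int tcon = to_tcon_channel(hw);

		/* Hardware channel 4 lands in TCON slot 5, whose autoreload
		 * bit is 4 * 5 + 2 = 22, not 23 -- hence the special case. */
		printf("hw channel %u -> TCON slot %u, autoreload mask 0x%08x\n",
		       hw, tcon, TCON_AUTORELOAD(tcon));
	}
	return 0;
}
```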
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 1b04b7e1d39b..847cab6f6e31 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/cpu.h>
 #include <linux/timer.h>
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
@@ -28,9 +29,9 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/sched_clock.h>
-
-#include <asm/localtimer.h>
 #include <linux/percpu.h>
+#include <linux/time-armada-370-xp.h>
+
 /*
  * Timer block registers.
  */
@@ -69,7 +70,7 @@ static bool timer25Mhz = true;
  */
 static u32 ticks_per_jiffy;
 
-static struct clock_event_device __percpu **percpu_armada_370_xp_evt;
+static struct clock_event_device __percpu *armada_370_xp_evt;
 
 static u32 notrace armada_370_xp_read_sched_clock(void)
 {
@@ -142,21 +143,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
 	}
 }
 
-static struct clock_event_device armada_370_xp_clkevt = {
-	.name		= "armada_370_xp_per_cpu_tick",
-	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.shift		= 32,
-	.rating		= 300,
-	.set_next_event	= armada_370_xp_clkevt_next_event,
-	.set_mode	= armada_370_xp_clkevt_mode,
-};
+static int armada_370_xp_clkevt_irq;
 
 static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
 {
 	/*
 	 * ACK timer interrupt and call event handler.
 	 */
-	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+	struct clock_event_device *evt = dev_id;
 
 	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
 	evt->event_handler(evt);
@@ -172,42 +166,55 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt)
 	u32 u;
 	int cpu = smp_processor_id();
 
-	/* Use existing clock_event for cpu 0 */
-	if (!smp_processor_id())
-		return 0;
-
 	u = readl(local_base + TIMER_CTRL_OFF);
 	if (timer25Mhz)
 		writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
 	else
 		writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
 
-	evt->name		= armada_370_xp_clkevt.name;
-	evt->irq		= armada_370_xp_clkevt.irq;
-	evt->features		= armada_370_xp_clkevt.features;
-	evt->shift		= armada_370_xp_clkevt.shift;
-	evt->rating		= armada_370_xp_clkevt.rating,
-	evt->set_next_event	= armada_370_xp_clkevt_next_event,
-	evt->set_mode		= armada_370_xp_clkevt_mode,
+	evt->name		= "armada_370_xp_per_cpu_tick",
+	evt->features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC;
+	evt->shift		= 32,
+	evt->rating		= 300,
+	evt->set_next_event	= armada_370_xp_clkevt_next_event,
+	evt->set_mode		= armada_370_xp_clkevt_mode,
+	evt->irq		= armada_370_xp_clkevt_irq;
 	evt->cpumask		= cpumask_of(cpu);
 
-	*__this_cpu_ptr(percpu_armada_370_xp_evt) = evt;
-
 	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
 	enable_percpu_irq(evt->irq, 0);
 
 	return 0;
 }
 
 static void armada_370_xp_timer_stop(struct clock_event_device *evt)
 {
 	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
 	disable_percpu_irq(evt->irq);
 }
 
-static struct local_timer_ops armada_370_xp_local_timer_ops = {
-	.setup	= armada_370_xp_timer_setup,
-	.stop	= armada_370_xp_timer_stop,
+static int armada_370_xp_timer_cpu_notify(struct notifier_block *self,
+					  unsigned long action, void *hcpu)
+{
+	/*
+	 * Grab cpu pointer in each case to avoid spurious
+	 * preemptible warnings
+	 */
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+		break;
+	case CPU_DYING:
+		armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt));
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block armada_370_xp_timer_cpu_nb = {
+	.notifier_call = armada_370_xp_timer_cpu_notify,
 };
 
 void __init armada_370_xp_timer_init(void)
@@ -223,9 +230,6 @@ void __init armada_370_xp_timer_init(void)
 
 	if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
 		/* The fixed 25MHz timer is available so let's use it */
-		u = readl(local_base + TIMER_CTRL_OFF);
-		writel(u | TIMER0_25MHZ,
-		       local_base + TIMER_CTRL_OFF);
 		u = readl(timer_base + TIMER_CTRL_OFF);
 		writel(u | TIMER0_25MHZ,
 		       timer_base + TIMER_CTRL_OFF);
@@ -235,9 +239,6 @@ void __init armada_370_xp_timer_init(void)
 		struct clk *clk = of_clk_get(np, 0);
 		WARN_ON(IS_ERR(clk));
 		rate = clk_get_rate(clk);
-		u = readl(local_base + TIMER_CTRL_OFF);
-		writel(u & ~(TIMER0_25MHZ),
-		       local_base + TIMER_CTRL_OFF);
 
 		u = readl(timer_base + TIMER_CTRL_OFF);
 		writel(u & ~(TIMER0_25MHZ),
@@ -251,7 +252,7 @@ void __init armada_370_xp_timer_init(void)
 	 * We use timer 0 as clocksource, and private(local) timer 0
 	 * for clockevents
 	 */
-	armada_370_xp_clkevt.irq = irq_of_parse_and_map(np, 4);
+	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);
 
 	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
 
@@ -276,26 +277,19 @@ void __init armada_370_xp_timer_init(void)
 				    "armada_370_xp_clocksource",
 				    timer_clk, 300, 32, clocksource_mmio_readl_down);
 
-	/* Register the clockevent on the private timer of CPU 0 */
-	armada_370_xp_clkevt.cpumask = cpumask_of(0);
-	clockevents_config_and_register(&armada_370_xp_clkevt,
-					timer_clk, 1, 0xfffffffe);
+	register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
 
-	percpu_armada_370_xp_evt = alloc_percpu(struct clock_event_device *);
+	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
 
 
 	/*
 	 * Setup clockevent timer (interrupt-driven).
 	 */
-	*__this_cpu_ptr(percpu_armada_370_xp_evt) = &armada_370_xp_clkevt;
-	res = request_percpu_irq(armada_370_xp_clkevt.irq,
+	res = request_percpu_irq(armada_370_xp_clkevt_irq,
 				 armada_370_xp_timer_interrupt,
-				 armada_370_xp_clkevt.name,
-				 percpu_armada_370_xp_evt);
-	if (!res) {
-		enable_percpu_irq(armada_370_xp_clkevt.irq, 0);
-#ifdef CONFIG_LOCAL_TIMERS
-		local_timer_register(&armada_370_xp_local_timer_ops);
-#endif
-	}
+				 "armada_370_xp_per_cpu_tick",
+				 armada_370_xp_evt);
+	/* Immediately configure the timer on the boot CPU */
+	if (!res)
+		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
 }
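Besides moving to the CPU notifier, the Armada conversion changes how the per-CPU interrupt handler finds its clock_event_device: the area returned by alloc_percpu() is handed to request_percpu_irq() as the dev_id, so the handler receives this CPU's instance directly instead of going through a percpu pointer-to-pointer. A condensed sketch of that flow, with hypothetical foo_* names and the hardware acknowledge left as a comment:

```c
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

static struct clock_event_device __percpu *foo_evt;
static int foo_irq;	/* parsed from the device tree in the real driver */

static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
	/* dev_id is this CPU's slice of the percpu allocation, resolved by
	 * the genirq core before the handler is called. */
	struct clock_event_device *evt = dev_id;

	/* ...acknowledge the per-CPU timer interrupt in hardware here... */
	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static void __init foo_timer_init(void)
{
	int res;

	foo_evt = alloc_percpu(struct clock_event_device);
	if (!foo_evt)
		return;

	/* The percpu base pointer is registered once for the whole IRQ. */
	res = request_percpu_irq(foo_irq, foo_timer_interrupt,
				 "foo_per_cpu_tick", foo_evt);
	if (res) {
		pr_err("foo: failed to request per-cpu timer irq\n");
		return;
	}

	/* Each CPU then fills in and registers its own element (and calls
	 * enable_percpu_irq()) from CPU_STARTING; the boot CPU does so
	 * immediately, as in the hunk above. */
}
```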
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index 62876baa3ab9..09a17d9a6594 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
 #include <linux/clocksource.h>
+#include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/irq.h>
 #include <linux/clk.h>
@@ -18,7 +19,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/sched_clock.h>
-#include <asm/localtimer.h>
 #include <asm/mach/time.h>
 
 #define SIRFSOC_TIMER_32COUNTER_0_CTRL	0x0000
@@ -151,13 +151,7 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs)
 	       BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
 }
 
-static struct clock_event_device sirfsoc_clockevent = {
-	.name = "sirfsoc_clockevent",
-	.rating = 200,
-	.features = CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode = sirfsoc_timer_set_mode,
-	.set_next_event = sirfsoc_timer_set_next_event,
-};
+static struct clock_event_device __percpu *sirfsoc_clockevent;
 
 static struct clocksource sirfsoc_clocksource = {
 	.name = "sirfsoc_clocksource",
@@ -173,11 +167,8 @@ static struct irqaction sirfsoc_timer_irq = {
 	.name = "sirfsoc_timer0",
 	.flags = IRQF_TIMER | IRQF_NOBALANCING,
 	.handler = sirfsoc_timer_interrupt,
-	.dev_id = &sirfsoc_clockevent,
 };
 
-#ifdef CONFIG_LOCAL_TIMERS
-
 static struct irqaction sirfsoc_timer1_irq = {
 	.name = "sirfsoc_timer1",
 	.flags = IRQF_TIMER | IRQF_NOBALANCING,
@@ -186,24 +177,28 @@ static struct irqaction sirfsoc_timer1_irq = {
 
 static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
 {
-	/* Use existing clock_event for cpu 0 */
-	if (!smp_processor_id())
-		return 0;
+	int cpu = smp_processor_id();
+	struct irqaction *action;
+
+	if (cpu == 0)
+		action = &sirfsoc_timer_irq;
+	else
+		action = &sirfsoc_timer1_irq;
 
-	ce->irq = sirfsoc_timer1_irq.irq;
+	ce->irq = action->irq;
 	ce->name = "local_timer";
-	ce->features = sirfsoc_clockevent.features;
-	ce->rating = sirfsoc_clockevent.rating;
+	ce->features = CLOCK_EVT_FEAT_ONESHOT;
+	ce->rating = 200;
 	ce->set_mode = sirfsoc_timer_set_mode;
 	ce->set_next_event = sirfsoc_timer_set_next_event;
-	ce->shift = sirfsoc_clockevent.shift;
-	ce->mult = sirfsoc_clockevent.mult;
-	ce->max_delta_ns = sirfsoc_clockevent.max_delta_ns;
-	ce->min_delta_ns = sirfsoc_clockevent.min_delta_ns;
+	clockevents_calc_mult_shift(ce, CLOCK_TICK_RATE, 60);
+	ce->max_delta_ns = clockevent_delta2ns(-2, ce);
+	ce->min_delta_ns = clockevent_delta2ns(2, ce);
+	ce->cpumask = cpumask_of(cpu);
 
-	sirfsoc_timer1_irq.dev_id = ce;
-	BUG_ON(setup_irq(ce->irq, &sirfsoc_timer1_irq));
-	irq_set_affinity(sirfsoc_timer1_irq.irq, cpumask_of(1));
+	action->dev_id = ce;
+	BUG_ON(setup_irq(ce->irq, action));
+	irq_set_affinity(action->irq, cpumask_of(cpu));
 
 	clockevents_register_device(ce);
 	return 0;
@@ -211,31 +206,48 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
 
 static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
 {
+	int cpu = smp_processor_id();
+
 	sirfsoc_timer_count_disable(1);
 
-	remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+	if (cpu == 0)
+		remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
+	else
+		remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
 }
 
-static struct local_timer_ops sirfsoc_local_timer_ops = {
-	.setup	= sirfsoc_local_timer_setup,
-	.stop	= sirfsoc_local_timer_stop,
+static int sirfsoc_cpu_notify(struct notifier_block *self,
+			      unsigned long action, void *hcpu)
+{
+	/*
+	 * Grab cpu pointer in each case to avoid spurious
+	 * preemptible warnings
+	 */
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+		break;
+	case CPU_DYING:
+		sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent));
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sirfsoc_cpu_nb = {
+	.notifier_call = sirfsoc_cpu_notify,
 };
-#endif /* CONFIG_LOCAL_TIMERS */
 
 static void __init sirfsoc_clockevent_init(void)
 {
-	clockevents_calc_mult_shift(&sirfsoc_clockevent, CLOCK_TICK_RATE, 60);
-
-	sirfsoc_clockevent.max_delta_ns =
-		clockevent_delta2ns(-2, &sirfsoc_clockevent);
-	sirfsoc_clockevent.min_delta_ns =
-		clockevent_delta2ns(2, &sirfsoc_clockevent);
-
-	sirfsoc_clockevent.cpumask = cpumask_of(0);
-	clockevents_register_device(&sirfsoc_clockevent);
-#ifdef CONFIG_LOCAL_TIMERS
-	local_timer_register(&sirfsoc_local_timer_ops);
-#endif
+	sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
+	BUG_ON(!sirfsoc_clockevent);
+
+	BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
+
+	/* Immediately configure the timer on the boot CPU */
+	sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
 }
 
 /* initialize the kernel jiffy timer source */
@@ -273,8 +285,6 @@ static void __init sirfsoc_marco_timer_init(void)
 
 	BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
 
-	BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
-
 	sirfsoc_clockevent_init();
 }
 
@@ -288,11 +298,9 @@ static void __init sirfsoc_of_timer_init(struct device_node *np)
 	if (!sirfsoc_timer_irq.irq)
 		panic("No irq passed for timer0 via DT\n");
 
-#ifdef CONFIG_LOCAL_TIMERS
 	sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
 	if (!sirfsoc_timer1_irq.irq)
 		panic("No irq passed for timer1 via DT\n");
-#endif
 
 	sirfsoc_marco_timer_init();
 }
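One difference from the Armada driver is visible in sirfsoc_local_timer_setup(): instead of clockevents_config_and_register(), the Marco conversion keeps the manual rate math it inherited, now executed per CPU. The sketch below isolates that configuration sequence; the foo_* names, the rate parameter (the driver hard-codes CLOCK_TICK_RATE) and the empty callbacks are placeholders, while the helper calls are the ones used in the hunk.

```c
#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/* Placeholder callbacks; the real ones program the SiRF timer hardware. */
static void foo_set_mode(enum clock_event_mode mode,
			 struct clock_event_device *ce)
{
}

static int foo_set_next_event(unsigned long delta,
			      struct clock_event_device *ce)
{
	return 0;
}

static void foo_clockevent_setup(struct clock_event_device *ce,
				 unsigned long rate)
{
	int cpu = smp_processor_id();

	ce->name = "foo_local_timer";
	ce->features = CLOCK_EVT_FEAT_ONESHOT;
	ce->rating = 200;
	ce->set_mode = foo_set_mode;
	ce->set_next_event = foo_set_next_event;

	/* Derive mult/shift for ns -> ticks conversion, valid for intervals
	 * of up to roughly 60 seconds. */
	clockevents_calc_mult_shift(ce, rate, 60);
	/* On 32-bit ARM, -2 wraps to 0xfffffffe, the widest programmable
	 * delta; 2 ticks is the narrowest. */
	ce->max_delta_ns = clockevent_delta2ns(-2, ce);
	ce->min_delta_ns = clockevent_delta2ns(2, ce);
	ce->cpumask = cpumask_of(cpu);

	clockevents_register_device(ce);
}
```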