author		Thomas Gleixner <tglx@linutronix.de>	2014-05-27 04:03:39 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2014-05-27 04:06:05 -0400
commit		331b483f42fb4d04d52ce920ae10a71411c859a4 (patch)
tree		1f6399d3dc5a6a93b54b01bc2bb46b5f26c7dbab
parent		309179fabddd074f7da63c5602bc32cb6de677f9 (diff)
parent		2529c3a330797000d699d70c9a65b8525c6652de (diff)
Merge branch 'clockevents/3.16' of git://git.linaro.org/people/daniel.lezcano/linux into timers/core
This pull request contains the following changes:

* Laurent Pinchart did a lot of modifications to prepare the DT support.
  These modifications include a lot of cleanup (structure renaming,
  preparation to support multiple channels, kzalloc usage, ...) and finish
  by switching from the old code to the new one.
* Jingoo Han removed the dev_err() call when an allocation fails, because
  this error is already reported by the mm subsystem.
* Matthew Leach added the ARM global timer for vexpress, enabled the ARM
  global timer on the Cortex-A5 and added the definition in the DT. He
  also fixed an invalid check when looking for a usable ARM global timer
  on the Cortex-A9.
* Maxime Ripard added support for the Allwinner A31 to the sun4i timer
  and made the timer reset optional through the DT.
* Stephen Boyd used the msm timer for udelay().
* Uwe Kleine-König fixed the non-standard 'compatible' binding for efm32.
* Xiubo Li clarified the types for the clocksource_mmio_read*() accessors
  and added a new FlexTimer Module (FTM) driver with its bindings.
* Yang Wei added the 'notrace' attribute to 'read_sched_clock' for the
  dw_apb_timer.
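Several of the changes merged below lean on the same small set of timekeeping
APIs: clocksource_mmio_init() with the clocksource_mmio_readl_up() accessor,
sched_clock_register(), and the 'notrace' attribute on the sched_clock read
callback. As a minimal sketch of how these pieces combine, assuming a
hypothetical memory-mapped free-running up-counter (timer_base, timer_rate_hz,
TIMER_CNT and the example_* names are illustrative placeholders, not code from
this merge):

	/* Hypothetical sketch; not part of this merge. */
	#include <linux/clocksource.h>
	#include <linux/io.h>
	#include <linux/sched_clock.h>

	#define TIMER_CNT	0x04		/* placeholder counter offset */

	static void __iomem *timer_base;	/* placeholder, mapped during init */
	static unsigned long timer_rate_hz;	/* placeholder input clock rate */

	/* notrace: sched_clock() can be called from the function tracer itself */
	static u64 notrace example_read_sched_clock(void)
	{
		return readl_relaxed(timer_base + TIMER_CNT);
	}

	static int __init example_clocksource_init(void)
	{
		/* 32-bit up-counter, rating 300, generic MMIO read accessor */
		sched_clock_register(example_read_sched_clock, 32, timer_rate_hz);
		return clocksource_mmio_init(timer_base + TIMER_CNT, "example",
					     timer_rate_hz, 300, 32,
					     clocksource_mmio_readl_up);
	}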
-rw-r--r--	Documentation/devicetree/bindings/arm/global_timer.txt	7
-rw-r--r--	Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt	4
-rw-r--r--	Documentation/devicetree/bindings/timer/energymicro,efm32-timer.txt (renamed from Documentation/devicetree/bindings/timer/efm32,timer.txt)	4
-rw-r--r--	Documentation/devicetree/bindings/timer/fsl,ftm-timer.txt	31
-rw-r--r--	arch/arm/boot/dts/sun6i-a31.dtsi	11
-rw-r--r--	arch/arm/boot/dts/vexpress-v2p-ca5s.dts	10
-rw-r--r--	arch/arm/boot/dts/vf610.dtsi	13
-rw-r--r--	arch/arm/mach-vexpress/Kconfig	1
-rw-r--r--	drivers/clocksource/Kconfig	5
-rw-r--r--	drivers/clocksource/Makefile	1
-rw-r--r--	drivers/clocksource/arm_global_timer.c	5
-rw-r--r--	drivers/clocksource/dw_apb_timer_of.c	2
-rw-r--r--	drivers/clocksource/em_sti.c	4
-rw-r--r--	drivers/clocksource/fsl_ftm_timer.c	367
-rw-r--r--	drivers/clocksource/mmio.c	8
-rw-r--r--	drivers/clocksource/qcom-timer.c	13
-rw-r--r--	drivers/clocksource/sh_cmt.c	958
-rw-r--r--	drivers/clocksource/sh_mtu2.c	490
-rw-r--r--	drivers/clocksource/sh_tmu.c	543
-rw-r--r--	drivers/clocksource/time-efm32.c	3
-rw-r--r--	drivers/clocksource/timer-sun5i.c	6
-rw-r--r--	include/linux/sh_timer.h	1
22 files changed, 1785 insertions, 702 deletions
diff --git a/Documentation/devicetree/bindings/arm/global_timer.txt b/Documentation/devicetree/bindings/arm/global_timer.txt
index 1e548981eda4..bdae3a818793 100644
--- a/Documentation/devicetree/bindings/arm/global_timer.txt
+++ b/Documentation/devicetree/bindings/arm/global_timer.txt
@@ -4,8 +4,11 @@
 
 ** Timer node required properties:
 
-- compatible : Should be "arm,cortex-a9-global-timer"
-		Driver supports versions r2p0 and above.
+- compatible : should contain
+	 * "arm,cortex-a5-global-timer" for Cortex-A5 global timers.
+	 * "arm,cortex-a9-global-timer" for Cortex-A9 global
+	   timers or any compatible implementation. Note: driver
+	   supports versions r2p0 and above.
 
 - interrupts : One interrupt to each core
 
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt
index 7c26154b8bbb..27cfc7d7ccd7 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt
@@ -9,6 +9,9 @@ Required properties:
 	     one)
 - clocks: phandle to the source clock (usually the AHB clock)
 
+Optional properties:
+- resets: phandle to a reset controller asserting the timer
+
 Example:
 
 timer@01c60000 {
@@ -19,4 +22,5 @@ timer@01c60000 {
 		     <0 53 1>,
 		     <0 54 1>;
 	clocks = <&ahb1_gates 19>;
+	resets = <&ahb1rst 19>;
 };
diff --git a/Documentation/devicetree/bindings/timer/efm32,timer.txt b/Documentation/devicetree/bindings/timer/energymicro,efm32-timer.txt
index 97a568f696c9..e502c11b2211 100644
--- a/Documentation/devicetree/bindings/timer/efm32,timer.txt
+++ b/Documentation/devicetree/bindings/timer/energymicro,efm32-timer.txt
@@ -6,7 +6,7 @@ channels and can be used as PWM or Quadrature Decoder. Available clock sources
 are the cpu's HFPERCLK (with a 10-bit prescaler) or an external pin.
 
 Required properties:
-- compatible : Should be efm32,timer
+- compatible : Should be "energymicro,efm32-timer"
 - reg : Address and length of the register set
 - clocks : Should contain a reference to the HFPERCLK
 
@@ -16,7 +16,7 @@ Optional properties:
 Example:
 
 timer@40010c00 {
-	compatible = "efm32,timer";
+	compatible = "energymicro,efm32-timer";
 	reg = <0x40010c00 0x400>;
 	interrupts = <14>;
 	clocks = <&cmu clk_HFPERCLKTIMER3>;
diff --git a/Documentation/devicetree/bindings/timer/fsl,ftm-timer.txt b/Documentation/devicetree/bindings/timer/fsl,ftm-timer.txt
new file mode 100644
index 000000000000..aa8c40230e5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/fsl,ftm-timer.txt
@@ -0,0 +1,31 @@
+Freescale FlexTimer Module (FTM) Timer
+
+Required properties:
+
+- compatible : should be "fsl,ftm-timer"
+- reg : Specifies base physical address and size of the register sets for the
+  clock event device and clock source device.
+- interrupts : Should be the clock event device interrupt.
+- clocks : The clocks provided by the SoC to drive the timer, must contain an
+  entry for each entry in clock-names.
+- clock-names : Must include the following entries:
+  o "ftm-evt"
+  o "ftm-src"
+  o "ftm-evt-counter-en"
+  o "ftm-src-counter-en"
+- big-endian : Boolean property; if present, the device registers are accessed
+  in big endian mode, otherwise little endian mode is used.
+
+Example:
+ftm: ftm@400b8000 {
+	compatible = "fsl,ftm-timer";
+	reg = <0x400b8000 0x1000 0x400b9000 0x1000>;
+	interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+	clock-names = "ftm-evt", "ftm-src",
+		"ftm-evt-counter-en", "ftm-src-counter-en";
+	clocks = <&clks VF610_CLK_FTM2>,
+		<&clks VF610_CLK_FTM3>,
+		<&clks VF610_CLK_FTM2_EXT_FIX_EN>,
+		<&clks VF610_CLK_FTM3_EXT_FIX_EN>;
+	big-endian;
+};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index d45efa74827c..8cee8a15b90b 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -428,6 +428,17 @@
 			status = "disabled";
 		};
 
+		timer@01c60000 {
+			compatible = "allwinner,sun6i-a31-hstimer", "allwinner,sun7i-a20-hstimer";
+			reg = <0x01c60000 0x1000>;
+			interrupts = <0 51 4>,
+				     <0 52 4>,
+				     <0 53 4>,
+				     <0 54 4>;
+			clocks = <&ahb1_gates 19>;
+			resets = <&ahb1_rst 19>;
+		};
+
 		spi0: spi@01c68000 {
 			compatible = "allwinner,sun6i-a31-spi";
 			reg = <0x01c68000 0x1000>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index c544a5504591..d2709b73316b 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -88,6 +88,14 @@
 		interrupts = <1 13 0x304>;
 	};
 
+	timer@2c000200 {
+		compatible = "arm,cortex-a5-global-timer",
+			     "arm,cortex-a9-global-timer";
+		reg = <0x2c000200 0x20>;
+		interrupts = <1 11 0x304>;
+		clocks = <&oscclk0>;
+	};
+
 	watchdog@2c000620 {
 		compatible = "arm,cortex-a5-twd-wdt";
 		reg = <0x2c000620 0x20>;
@@ -120,7 +128,7 @@
 		compatible = "arm,vexpress,config-bus";
 		arm,vexpress,config-bridge = <&v2m_sysreg>;
 
-		osc@0 {
+		oscclk0: osc@0 {
 			/* CPU and internal AXI reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 0>;
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index b8ce0aa7b157..3c91b84066a1 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -347,6 +347,19 @@
 			status = "disabled";
 		};
 
+		ftm: ftm@400b8000 {
+			compatible = "fsl,ftm-timer";
+			reg = <0x400b8000 0x1000 0x400b9000 0x1000>;
+			interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "ftm-evt", "ftm-src",
+				"ftm-evt-counter-en", "ftm-src-counter-en";
+			clocks = <&clks VF610_CLK_FTM2>,
+				<&clks VF610_CLK_FTM3>,
+				<&clks VF610_CLK_FTM2_EXT_FIX_EN>,
+				<&clks VF610_CLK_FTM3_EXT_FIX_EN>;
+			status = "disabled";
+		};
+
 		fec0: ethernet@400d0000 {
 			compatible = "fsl,mvf600-fec";
 			reg = <0x400d0000 0x1000>;
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 657d52d0391f..e9811a07829a 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -4,6 +4,7 @@ config ARCH_VEXPRESS
 	select ARCH_SUPPORTS_BIG_ENDIAN
 	select ARM_AMBA
 	select ARM_GIC
+	select ARM_GLOBAL_TIMER
 	select ARM_TIMER_SP804
 	select COMMON_CLK_VERSATILE
 	select HAVE_ARM_SCU if SMP
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 96918e1f26a3..04377675c3fa 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -136,6 +136,11 @@ config CLKSRC_SAMSUNG_PWM
 	  for all devicetree enabled platforms. This driver will be
 	  needed only on systems that do not have the Exynos MCT available.
 
+config FSL_FTM_TIMER
+	bool
+	help
+	  Support for Freescale FlexTimer Module (FTM) timer.
+
 config VF_PIT_TIMER
 	bool
 	help
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 98cb6c51aa87..0770916a818e 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
 obj-$(CONFIG_CLKSRC_EFM32)	+= time-efm32.o
 obj-$(CONFIG_CLKSRC_EXYNOS_MCT)	+= exynos_mct.o
 obj-$(CONFIG_CLKSRC_SAMSUNG_PWM)	+= samsung_pwm_timer.o
+obj-$(CONFIG_FSL_FTM_TIMER)	+= fsl_ftm_timer.o
 obj-$(CONFIG_VF_PIT_TIMER)	+= vf_pit_timer.o
 obj-$(CONFIG_CLKSRC_QCOM)	+= qcom-timer.o
 
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 0fc31d029e52..60e5a170c4d2 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -246,11 +246,12 @@ static void __init global_timer_of_register(struct device_node *np)
 	int err = 0;
 
 	/*
-	 * In r2p0 the comparators for each processor with the global timer
+	 * In A9 r2p0 the comparators for each processor with the global timer
 	 * fire when the timer value is greater than or equal to. In previous
 	 * revisions the comparators fired when the timer value was equal to.
 	 */
-	if ((read_cpuid_id() & 0xf0000f) < 0x200000) {
+	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9
+	    && (read_cpuid_id() & 0xf0000f) < 0x200000) {
 		pr_warn("global-timer: non support for this cpu version.\n");
 		return;
 	}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 2a2ea2717f3a..d305fb089767 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -106,7 +106,7 @@ static void __init add_clocksource(struct device_node *source_timer)
 		sched_rate = rate;
 }
 
-static u64 read_sched_clock(void)
+static u64 notrace read_sched_clock(void)
 {
 	return ~__raw_readl(sched_io_base);
 }
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 9d170834fcf3..d0a7bd66b8b9 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -318,10 +318,8 @@ static int em_sti_probe(struct platform_device *pdev)
 	int irq;
 
 	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
-	if (p == NULL) {
-		dev_err(&pdev->dev, "failed to allocate driver data\n");
+	if (p == NULL)
 		return -ENOMEM;
-	}
 
 	p->pdev = pdev;
 	platform_set_drvdata(pdev, p);
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
new file mode 100644
index 000000000000..454227d4f895
--- /dev/null
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -0,0 +1,367 @@
+/*
+ * Freescale FlexTimer Module (FTM) timer driver.
+ *
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#define FTM_SC		0x00
+#define FTM_SC_CLK_SHIFT	3
+#define FTM_SC_CLK_MASK	(0x3 << FTM_SC_CLK_SHIFT)
+#define FTM_SC_CLK(c)	((c) << FTM_SC_CLK_SHIFT)
+#define FTM_SC_PS_MASK	0x7
+#define FTM_SC_TOIE	BIT(6)
+#define FTM_SC_TOF	BIT(7)
+
+#define FTM_CNT		0x04
+#define FTM_MOD		0x08
+#define FTM_CNTIN	0x4C
+
+#define FTM_PS_MAX	7
+
+struct ftm_clock_device {
+	void __iomem *clksrc_base;
+	void __iomem *clkevt_base;
+	unsigned long periodic_cyc;
+	unsigned long ps;
+	bool big_endian;
+};
+
+static struct ftm_clock_device *priv;
+
+static inline u32 ftm_readl(void __iomem *addr)
+{
+	if (priv->big_endian)
+		return ioread32be(addr);
+	else
+		return ioread32(addr);
+}
+
+static inline void ftm_writel(u32 val, void __iomem *addr)
+{
+	if (priv->big_endian)
+		iowrite32be(val, addr);
+	else
+		iowrite32(val, addr);
+}
+
+static inline void ftm_counter_enable(void __iomem *base)
+{
+	u32 val;
+
+	/* select and enable counter clock source */
+	val = ftm_readl(base + FTM_SC);
+	val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
+	val |= priv->ps | FTM_SC_CLK(1);
+	ftm_writel(val, base + FTM_SC);
+}
+
+static inline void ftm_counter_disable(void __iomem *base)
+{
+	u32 val;
+
+	/* disable counter clock source */
+	val = ftm_readl(base + FTM_SC);
+	val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
+	ftm_writel(val, base + FTM_SC);
+}
+
+static inline void ftm_irq_acknowledge(void __iomem *base)
+{
+	u32 val;
+
+	val = ftm_readl(base + FTM_SC);
+	val &= ~FTM_SC_TOF;
+	ftm_writel(val, base + FTM_SC);
+}
+
+static inline void ftm_irq_enable(void __iomem *base)
+{
+	u32 val;
+
+	val = ftm_readl(base + FTM_SC);
+	val |= FTM_SC_TOIE;
+	ftm_writel(val, base + FTM_SC);
+}
+
+static inline void ftm_irq_disable(void __iomem *base)
+{
+	u32 val;
+
+	val = ftm_readl(base + FTM_SC);
+	val &= ~FTM_SC_TOIE;
+	ftm_writel(val, base + FTM_SC);
+}
+
+static inline void ftm_reset_counter(void __iomem *base)
+{
+	/*
+	 * The CNT register contains the FTM counter value.
+	 * Reset clears the CNT register. Writing any value to COUNT
+	 * updates the counter with its initial value, CNTIN.
+	 */
+	ftm_writel(0x00, base + FTM_CNT);
+}
+
+static u64 ftm_read_sched_clock(void)
+{
+	return ftm_readl(priv->clksrc_base + FTM_CNT);
+}
+
+static int ftm_set_next_event(unsigned long delta,
+				struct clock_event_device *unused)
+{
+	/*
+	 * The CNTIN and MOD are both double buffered registers: writing
+	 * to the MOD register latches the value into a buffer. The MOD
+	 * register is updated with the value of its write buffer in the
+	 * following scenario:
+	 * a, the counter source clock is disabled.
+	 */
+	ftm_counter_disable(priv->clkevt_base);
+
+	/* Force the value of CNTIN to be loaded into the FTM counter */
+	ftm_reset_counter(priv->clkevt_base);
+
+	/*
+	 * The counter increments until the value of MOD is reached,
+	 * at which point the counter is reloaded with the value of CNTIN.
+	 * The TOF (the overflow flag) bit is set when the FTM counter
+	 * changes from MOD to CNTIN. So we should use delta - 1.
+	 */
+	ftm_writel(delta - 1, priv->clkevt_base + FTM_MOD);
+
+	ftm_counter_enable(priv->clkevt_base);
+
+	ftm_irq_enable(priv->clkevt_base);
+
+	return 0;
+}
+
+static void ftm_set_mode(enum clock_event_mode mode,
+				struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		ftm_set_next_event(priv->periodic_cyc, evt);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		ftm_counter_disable(priv->clkevt_base);
+		break;
+	default:
+		return;
+	}
+}
+
+static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+
+	ftm_irq_acknowledge(priv->clkevt_base);
+
+	if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT)) {
+		ftm_irq_disable(priv->clkevt_base);
+		ftm_counter_disable(priv->clkevt_base);
+	}
+
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static struct clock_event_device ftm_clockevent = {
+	.name		= "Freescale ftm timer",
+	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= ftm_set_mode,
+	.set_next_event	= ftm_set_next_event,
+	.rating		= 300,
+};
+
+static struct irqaction ftm_timer_irq = {
+	.name		= "Freescale ftm timer",
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
+	.handler	= ftm_evt_interrupt,
+	.dev_id		= &ftm_clockevent,
+};
+
+static int __init ftm_clockevent_init(unsigned long freq, int irq)
+{
+	int err;
+
+	ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN);
+	ftm_writel(~0UL, priv->clkevt_base + FTM_MOD);
+
+	ftm_reset_counter(priv->clkevt_base);
+
+	err = setup_irq(irq, &ftm_timer_irq);
+	if (err) {
+		pr_err("ftm: setup irq failed: %d\n", err);
+		return err;
+	}
+
+	ftm_clockevent.cpumask = cpumask_of(0);
+	ftm_clockevent.irq = irq;
+
+	clockevents_config_and_register(&ftm_clockevent,
+					freq / (1 << priv->ps),
+					1, 0xffff);
+
+	ftm_counter_enable(priv->clkevt_base);
+
+	return 0;
+}
+
+static int __init ftm_clocksource_init(unsigned long freq)
+{
+	int err;
+
+	ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN);
+	ftm_writel(~0UL, priv->clksrc_base + FTM_MOD);
+
+	ftm_reset_counter(priv->clksrc_base);
+
+	sched_clock_register(ftm_read_sched_clock, 16, freq / (1 << priv->ps));
+	err = clocksource_mmio_init(priv->clksrc_base + FTM_CNT, "fsl-ftm",
+				    freq / (1 << priv->ps), 300, 16,
+				    clocksource_mmio_readl_up);
+	if (err) {
+		pr_err("ftm: init clock source mmio failed: %d\n", err);
+		return err;
+	}
+
+	ftm_counter_enable(priv->clksrc_base);
+
+	return 0;
+}
+
+static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,
+				 char *ftm_name)
+{
+	struct clk *clk;
+	int err;
+
+	clk = of_clk_get_by_name(np, cnt_name);
+	if (IS_ERR(clk)) {
+		pr_err("ftm: Cannot get \"%s\": %ld\n", cnt_name, PTR_ERR(clk));
+		return PTR_ERR(clk);
+	}
+	err = clk_prepare_enable(clk);
+	if (err) {
+		pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n",
+			cnt_name, err);
+		return err;
+	}
+
+	clk = of_clk_get_by_name(np, ftm_name);
+	if (IS_ERR(clk)) {
+		pr_err("ftm: Cannot get \"%s\": %ld\n", ftm_name, PTR_ERR(clk));
+		return PTR_ERR(clk);
+	}
+	err = clk_prepare_enable(clk);
+	if (err)
+		pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n",
+			ftm_name, err);
+
+	return clk_get_rate(clk);
+}
+
+static unsigned long __init ftm_clk_init(struct device_node *np)
+{
+	unsigned long freq;
+
+	freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
+	if (freq <= 0)
+		return 0;
+
+	freq = __ftm_clk_init(np, "ftm-src-counter-en", "ftm-src");
+	if (freq <= 0)
+		return 0;
+
+	return freq;
+}
+
+static int __init ftm_calc_closest_round_cyc(unsigned long freq)
+{
+	priv->ps = 0;
+
+	/* The counter register is only using the lower 16 bits, and
+	 * if the 'freq' value is too big here, then the periodic_cyc
+	 * may exceed 0xFFFF.
+	 */
+	do {
+		priv->periodic_cyc = DIV_ROUND_CLOSEST(freq,
+						HZ * (1 << priv->ps++));
+	} while (priv->periodic_cyc > 0xFFFF);
+
+	if (priv->ps > FTM_PS_MAX) {
+		pr_err("ftm: the prescaler is %lu > %d\n",
+			priv->ps, FTM_PS_MAX);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void __init ftm_timer_init(struct device_node *np)
+{
+	unsigned long freq;
+	int irq;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return;
+
+	priv->clkevt_base = of_iomap(np, 0);
+	if (!priv->clkevt_base) {
+		pr_err("ftm: unable to map event timer registers\n");
+		goto err;
+	}
+
+	priv->clksrc_base = of_iomap(np, 1);
+	if (!priv->clksrc_base) {
+		pr_err("ftm: unable to map source timer registers\n");
+		goto err;
+	}
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq <= 0) {
+		pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
+		goto err;
+	}
+
+	priv->big_endian = of_property_read_bool(np, "big-endian");
+
+	freq = ftm_clk_init(np);
+	if (!freq)
+		goto err;
+
+	if (ftm_calc_closest_round_cyc(freq))
+		goto err;
+
+	if (ftm_clocksource_init(freq))
+		goto err;
+
+	if (ftm_clockevent_init(freq, irq))
+		goto err;
+
+	return;
+
+err:
+	kfree(priv);
+}
+CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
diff --git a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c
index c0e25125a55e..1593ade2a815 100644
--- a/drivers/clocksource/mmio.c
+++ b/drivers/clocksource/mmio.c
@@ -22,22 +22,22 @@ static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
 
 cycle_t clocksource_mmio_readl_up(struct clocksource *c)
 {
-	return readl_relaxed(to_mmio_clksrc(c)->reg);
+	return (cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg);
 }
 
 cycle_t clocksource_mmio_readl_down(struct clocksource *c)
 {
-	return ~readl_relaxed(to_mmio_clksrc(c)->reg);
+	return ~(cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
 }
 
 cycle_t clocksource_mmio_readw_up(struct clocksource *c)
 {
-	return readw_relaxed(to_mmio_clksrc(c)->reg);
+	return (cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg);
 }
 
 cycle_t clocksource_mmio_readw_down(struct clocksource *c)
 {
-	return ~(unsigned)readw_relaxed(to_mmio_clksrc(c)->reg);
+	return ~(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
 }
 
 /**
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index e807acf4c665..8d115db1e651 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -26,6 +26,8 @@
 #include <linux/of_irq.h>
 #include <linux/sched_clock.h>
 
+#include <asm/delay.h>
+
 #define TIMER_MATCH_VAL			0x0000
 #define TIMER_COUNT_VAL			0x0004
 #define TIMER_ENABLE			0x0008
@@ -179,6 +181,15 @@ static u64 notrace msm_sched_clock_read(void)
 	return msm_clocksource.read(&msm_clocksource);
 }
 
+static unsigned long msm_read_current_timer(void)
+{
+	return msm_clocksource.read(&msm_clocksource);
+}
+
+static struct delay_timer msm_delay_timer = {
+	.read_current_timer = msm_read_current_timer,
+};
+
 static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
 				  bool percpu)
 {
@@ -217,6 +228,8 @@ err:
 	if (res)
 		pr_err("clocksource_register failed\n");
 	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
+	msm_delay_timer.freq = dgt_hz;
+	register_current_timer_delay(&msm_delay_timer);
 }
 
 #ifdef CONFIG_ARCH_QCOM
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 0b1836a6c539..dfa780396b91 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -11,40 +11,93 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/err.h>
 #include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
 #include <linux/interrupt.h>
-#include <linux/ioport.h>
 #include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/ioport.h>
 #include <linux/irq.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/sh_timer.h>
-#include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
+#include <linux/sh_timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct sh_cmt_device;
+
+/*
+ * The CMT comes in 5 different identified flavours, depending not only on the
+ * SoC but also on the particular instance. The following table lists the main
+ * characteristics of those flavours.
+ *
+ *			16B	32B	32B-F	48B	48B-2
+ * -----------------------------------------------------------------------------
+ * Channels		2	1/4	1	6	2/8
+ * Control Width	16	16	16	16	32
+ * Counter Width	16	32	32	32/48	32/48
+ * Shared Start/Stop	Y	Y	Y	Y	N
+ *
+ * The 48-bit gen2 version has a per-channel start/stop register located in the
+ * channel registers block. All other versions have a shared start/stop register
+ * located in the global space.
+ *
+ * Channels are indexed from 0 to N-1 in the documentation. The channel index
+ * infers the start/stop bit position in the control register and the channel
+ * registers block address. Some CMT instances have a subset of channels
+ * available, in which case the index in the documentation doesn't match the
+ * "real" index as implemented in hardware. This is for instance the case with
+ * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
+ * in the documentation but using start/stop bit 5 and having its registers
+ * block at 0x60.
+ *
+ * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
+ * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
+ */
+
+enum sh_cmt_model {
+	SH_CMT_16BIT,
+	SH_CMT_32BIT,
+	SH_CMT_32BIT_FAST,
+	SH_CMT_48BIT,
+	SH_CMT_48BIT_GEN2,
+};
+
+struct sh_cmt_info {
+	enum sh_cmt_model model;
 
-struct sh_cmt_priv {
-	void __iomem *mapbase;
-	void __iomem *mapbase_str;
-	struct clk *clk;
 	unsigned long width; /* 16 or 32 bit version of hardware block */
 	unsigned long overflow_bit;
 	unsigned long clear_bits;
-	struct irqaction irqaction;
-	struct platform_device *pdev;
 
+	/* callbacks for CMSTR and CMCSR access */
+	unsigned long (*read_control)(void __iomem *base, unsigned long offs);
+	void (*write_control)(void __iomem *base, unsigned long offs,
+			      unsigned long value);
+
+	/* callbacks for CMCNT and CMCOR access */
+	unsigned long (*read_count)(void __iomem *base, unsigned long offs);
+	void (*write_count)(void __iomem *base, unsigned long offs,
+			    unsigned long value);
+};
+
+struct sh_cmt_channel {
+	struct sh_cmt_device *cmt;
+
+	unsigned int index;	/* Index in the documentation */
+	unsigned int hwidx;	/* Real hardware index */
+
+	void __iomem *iostart;
+	void __iomem *ioctrl;
+
+	unsigned int timer_bit;
 	unsigned long flags;
 	unsigned long match_value;
 	unsigned long next_match_value;
@@ -55,38 +108,52 @@ struct sh_cmt_priv {
 	struct clocksource cs;
 	unsigned long total_cycles;
 	bool cs_enabled;
+};
 
-	/* callbacks for CMSTR and CMCSR access */
-	unsigned long (*read_control)(void __iomem *base, unsigned long offs);
-	void (*write_control)(void __iomem *base, unsigned long offs,
-			      unsigned long value);
+struct sh_cmt_device {
+	struct platform_device *pdev;
 
-	/* callbacks for CMCNT and CMCOR access */
-	unsigned long (*read_count)(void __iomem *base, unsigned long offs);
-	void (*write_count)(void __iomem *base, unsigned long offs,
-			    unsigned long value);
+	const struct sh_cmt_info *info;
+	bool legacy;
+
+	void __iomem *mapbase_ch;
+	void __iomem *mapbase;
+	struct clk *clk;
+
+	struct sh_cmt_channel *channels;
+	unsigned int num_channels;
+
+	bool has_clockevent;
+	bool has_clocksource;
 };
 
-/* Examples of supported CMT timer register layouts and I/O access widths:
- *
- * "16-bit counter and 16-bit control" as found on sh7263:
- *	CMSTR 0xfffec000 16-bit
- *	CMCSR 0xfffec002 16-bit
- *	CMCNT 0xfffec004 16-bit
- *	CMCOR 0xfffec006 16-bit
- *
- * "32-bit counter and 16-bit control" as found on sh7372, sh73a0, r8a7740:
- *	CMSTR 0xffca0000 16-bit
- *	CMCSR 0xffca0060 16-bit
- *	CMCNT 0xffca0064 32-bit
- *	CMCOR 0xffca0068 32-bit
- *
- * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790:
- *	CMSTR 0xffca0500 32-bit
- *	CMCSR 0xffca0510 32-bit
- *	CMCNT 0xffca0514 32-bit
- *	CMCOR 0xffca0518 32-bit
- */
+#define SH_CMT16_CMCSR_CMF		(1 << 7)
+#define SH_CMT16_CMCSR_CMIE		(1 << 6)
+#define SH_CMT16_CMCSR_CKS8		(0 << 0)
+#define SH_CMT16_CMCSR_CKS32		(1 << 0)
+#define SH_CMT16_CMCSR_CKS128		(2 << 0)
+#define SH_CMT16_CMCSR_CKS512		(3 << 0)
+#define SH_CMT16_CMCSR_CKS_MASK		(3 << 0)
+
+#define SH_CMT32_CMCSR_CMF		(1 << 15)
+#define SH_CMT32_CMCSR_OVF		(1 << 14)
+#define SH_CMT32_CMCSR_WRFLG		(1 << 13)
+#define SH_CMT32_CMCSR_STTF		(1 << 12)
+#define SH_CMT32_CMCSR_STPF		(1 << 11)
+#define SH_CMT32_CMCSR_SSIE		(1 << 10)
+#define SH_CMT32_CMCSR_CMS		(1 << 9)
+#define SH_CMT32_CMCSR_CMM		(1 << 8)
+#define SH_CMT32_CMCSR_CMTOUT_IE	(1 << 7)
+#define SH_CMT32_CMCSR_CMR_NONE		(0 << 4)
+#define SH_CMT32_CMCSR_CMR_DMA		(1 << 4)
+#define SH_CMT32_CMCSR_CMR_IRQ		(2 << 4)
+#define SH_CMT32_CMCSR_CMR_MASK		(3 << 4)
+#define SH_CMT32_CMCSR_DBGIVD		(1 << 3)
+#define SH_CMT32_CMCSR_CKS_RCLK8	(4 << 0)
+#define SH_CMT32_CMCSR_CKS_RCLK32	(5 << 0)
+#define SH_CMT32_CMCSR_CKS_RCLK128	(6 << 0)
+#define SH_CMT32_CMCSR_CKS_RCLK1	(7 << 0)
+#define SH_CMT32_CMCSR_CKS_MASK		(7 << 0)
 
 static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
 {
@@ -110,64 +177,123 @@ static void sh_cmt_write32(void __iomem *base, unsigned long offs,
 	iowrite32(value, base + (offs << 2));
 }
 
+static const struct sh_cmt_info sh_cmt_info[] = {
+	[SH_CMT_16BIT] = {
+		.model = SH_CMT_16BIT,
+		.width = 16,
+		.overflow_bit = SH_CMT16_CMCSR_CMF,
+		.clear_bits = ~SH_CMT16_CMCSR_CMF,
+		.read_control = sh_cmt_read16,
+		.write_control = sh_cmt_write16,
+		.read_count = sh_cmt_read16,
+		.write_count = sh_cmt_write16,
+	},
+	[SH_CMT_32BIT] = {
+		.model = SH_CMT_32BIT,
+		.width = 32,
+		.overflow_bit = SH_CMT32_CMCSR_CMF,
+		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
+		.read_control = sh_cmt_read16,
+		.write_control = sh_cmt_write16,
+		.read_count = sh_cmt_read32,
+		.write_count = sh_cmt_write32,
+	},
+	[SH_CMT_32BIT_FAST] = {
+		.model = SH_CMT_32BIT_FAST,
+		.width = 32,
+		.overflow_bit = SH_CMT32_CMCSR_CMF,
+		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
+		.read_control = sh_cmt_read16,
+		.write_control = sh_cmt_write16,
+		.read_count = sh_cmt_read32,
+		.write_count = sh_cmt_write32,
+	},
+	[SH_CMT_48BIT] = {
+		.model = SH_CMT_48BIT,
+		.width = 32,
+		.overflow_bit = SH_CMT32_CMCSR_CMF,
+		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
+		.read_control = sh_cmt_read32,
+		.write_control = sh_cmt_write32,
+		.read_count = sh_cmt_read32,
+		.write_count = sh_cmt_write32,
+	},
+	[SH_CMT_48BIT_GEN2] = {
+		.model = SH_CMT_48BIT_GEN2,
+		.width = 32,
+		.overflow_bit = SH_CMT32_CMCSR_CMF,
+		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
+		.read_control = sh_cmt_read32,
+		.write_control = sh_cmt_write32,
+		.read_count = sh_cmt_read32,
+		.write_count = sh_cmt_write32,
+	},
+};
+
 #define CMCSR 0 /* channel register */
 #define CMCNT 1 /* channel register */
 #define CMCOR 2 /* channel register */
 
-static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
+static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
 {
-	return p->read_control(p->mapbase_str, 0);
+	if (ch->iostart)
+		return ch->cmt->info->read_control(ch->iostart, 0);
+	else
+		return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
 }
 
-static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
+static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
+				      unsigned long value)
 {
-	return p->read_control(p->mapbase, CMCSR);
+	if (ch->iostart)
+		ch->cmt->info->write_control(ch->iostart, 0, value);
+	else
+		ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
 }
 
-static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
+static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
 {
-	return p->read_count(p->mapbase, CMCNT);
+	return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
 }
 
-static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
+static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
 				      unsigned long value)
 {
-	p->write_control(p->mapbase_str, 0, value);
+	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
 }
 
-static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
-				      unsigned long value)
+static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
 {
-	p->write_control(p->mapbase, CMCSR, value);
+	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
 }
 
-static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
+static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
 				      unsigned long value)
 {
-	p->write_count(p->mapbase, CMCNT, value);
+	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
 }
 
-static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
+static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
 				      unsigned long value)
 {
-	p->write_count(p->mapbase, CMCOR, value);
+	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
 }
 
-static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
+static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
 					int *has_wrapped)
 {
 	unsigned long v1, v2, v3;
 	int o1, o2;
 
-	o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
+	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
 
 	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
 	do {
 		o2 = o1;
-		v1 = sh_cmt_read_cmcnt(p);
-		v2 = sh_cmt_read_cmcnt(p);
-		v3 = sh_cmt_read_cmcnt(p);
-		o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
+		v1 = sh_cmt_read_cmcnt(ch);
+		v2 = sh_cmt_read_cmcnt(ch);
+		v3 = sh_cmt_read_cmcnt(ch);
+		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
 	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
 			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
 
@@ -177,52 +303,56 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
 
 static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
 
-static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
+static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	unsigned long flags, value;
 
 	/* start stop register shared by multiple timer channels */
 	raw_spin_lock_irqsave(&sh_cmt_lock, flags);
-	value = sh_cmt_read_cmstr(p);
+	value = sh_cmt_read_cmstr(ch);
 
 	if (start)
-		value |= 1 << cfg->timer_bit;
+		value |= 1 << ch->timer_bit;
 	else
-		value &= ~(1 << cfg->timer_bit);
+		value &= ~(1 << ch->timer_bit);
 
-	sh_cmt_write_cmstr(p, value);
+	sh_cmt_write_cmstr(ch, value);
 	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
 }
 
-static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
+static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)
 {
 	int k, ret;
 
-	pm_runtime_get_sync(&p->pdev->dev);
-	dev_pm_syscore_device(&p->pdev->dev, true);
+	pm_runtime_get_sync(&ch->cmt->pdev->dev);
+	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
 
 	/* enable clock */
-	ret = clk_enable(p->clk);
+	ret = clk_enable(ch->cmt->clk);
 	if (ret) {
-		dev_err(&p->pdev->dev, "cannot enable clock\n");
+		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
+			ch->index);
 		goto err0;
 	}
 
 	/* make sure channel is disabled */
-	sh_cmt_start_stop_ch(p, 0);
+	sh_cmt_start_stop_ch(ch, 0);
 
 	/* configure channel, periodic mode and maximum timeout */
-	if (p->width == 16) {
-		*rate = clk_get_rate(p->clk) / 512;
-		sh_cmt_write_cmcsr(p, 0x43);
+	if (ch->cmt->info->width == 16) {
+		*rate = clk_get_rate(ch->cmt->clk) / 512;
+		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
+				   SH_CMT16_CMCSR_CKS512);
 	} else {
-		*rate = clk_get_rate(p->clk) / 8;
-		sh_cmt_write_cmcsr(p, 0x01a4);
+		*rate = clk_get_rate(ch->cmt->clk) / 8;
+		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
+				   SH_CMT32_CMCSR_CMTOUT_IE |
+				   SH_CMT32_CMCSR_CMR_IRQ |
+				   SH_CMT32_CMCSR_CKS_RCLK8);
 	}
 
-	sh_cmt_write_cmcor(p, 0xffffffff);
-	sh_cmt_write_cmcnt(p, 0);
+	sh_cmt_write_cmcor(ch, 0xffffffff);
+	sh_cmt_write_cmcnt(ch, 0);
 
 	/*
 	 * According to the sh73a0 user's manual, as CMCNT can be operated
@@ -236,41 +366,42 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 	 * take RCLKx2 at maximum.
 	 */
 	for (k = 0; k < 100; k++) {
-		if (!sh_cmt_read_cmcnt(p))
+		if (!sh_cmt_read_cmcnt(ch))
 			break;
 		udelay(1);
 	}
 
-	if (sh_cmt_read_cmcnt(p)) {
-		dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
+	if (sh_cmt_read_cmcnt(ch)) {
+		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
+			ch->index);
 		ret = -ETIMEDOUT;
 		goto err1;
 	}
 
 	/* enable channel */
-	sh_cmt_start_stop_ch(p, 1);
+	sh_cmt_start_stop_ch(ch, 1);
 	return 0;
  err1:
 	/* stop clock */
-	clk_disable(p->clk);
+	clk_disable(ch->cmt->clk);
 
  err0:
 	return ret;
 }
 
-static void sh_cmt_disable(struct sh_cmt_priv *p)
+static void sh_cmt_disable(struct sh_cmt_channel *ch)
 {
 	/* disable channel */
-	sh_cmt_start_stop_ch(p, 0);
+	sh_cmt_start_stop_ch(ch, 0);
 
 	/* disable interrupts in CMT block */
-	sh_cmt_write_cmcsr(p, 0);
+	sh_cmt_write_cmcsr(ch, 0);
 
 	/* stop clock */
-	clk_disable(p->clk);
+	clk_disable(ch->cmt->clk);
 
-	dev_pm_syscore_device(&p->pdev->dev, false);
-	pm_runtime_put(&p->pdev->dev);
+	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
+	pm_runtime_put(&ch->cmt->pdev->dev);
 }
 
 /* private flags */
@@ -280,24 +411,24 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
 #define FLAG_SKIPEVENT (1 << 3)
 #define FLAG_IRQCONTEXT (1 << 4)
 
-static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
+static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
 					      int absolute)
 {
 	unsigned long new_match;
-	unsigned long value = p->next_match_value;
+	unsigned long value = ch->next_match_value;
 	unsigned long delay = 0;
 	unsigned long now = 0;
 	int has_wrapped;
 
-	now = sh_cmt_get_counter(p, &has_wrapped);
-	p->flags |= FLAG_REPROGRAM; /* force reprogram */
+	now = sh_cmt_get_counter(ch, &has_wrapped);
+	ch->flags |= FLAG_REPROGRAM; /* force reprogram */
 
 	if (has_wrapped) {
 		/* we're competing with the interrupt handler.
 		 *  -> let the interrupt handler reprogram the timer.
 		 *  -> interrupt number two handles the event.
 		 */
-		p->flags |= FLAG_SKIPEVENT;
+		ch->flags |= FLAG_SKIPEVENT;
 		return;
 	}
 
@@ -309,20 +440,20 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
 	 * but don't save the new match value yet.
 	 */
 	new_match = now + value + delay;
-	if (new_match > p->max_match_value)
-		new_match = p->max_match_value;
+	if (new_match > ch->max_match_value)
+		new_match = ch->max_match_value;
 
-	sh_cmt_write_cmcor(p, new_match);
+	sh_cmt_write_cmcor(ch, new_match);
 
-	now = sh_cmt_get_counter(p, &has_wrapped);
-	if (has_wrapped && (new_match > p->match_value)) {
+	now = sh_cmt_get_counter(ch, &has_wrapped);
+	if (has_wrapped && (new_match > ch->match_value)) {
 		/* we are changing to a greater match value,
 		 * so this wrap must be caused by the counter
 		 * matching the old value.
 		 * -> first interrupt reprograms the timer.
 		 * -> interrupt number two handles the event.
 		 */
-		p->flags |= FLAG_SKIPEVENT;
+		ch->flags |= FLAG_SKIPEVENT;
 		break;
 	}
 
@@ -333,7 +464,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
 		 * -> save programmed match value.
 		 * -> let isr handle the event.
 		 */
-		p->match_value = new_match;
+		ch->match_value = new_match;
 		break;
 	}
 
@@ -344,7 +475,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
 		 * -> save programmed match value.
 		 * -> let isr handle the event.
 		 */
-		p->match_value = new_match;
+		ch->match_value = new_match;
 		break;
 	}
 
@@ -360,138 +491,141 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
 			delay = 1;
 
 		if (!delay)
-			dev_warn(&p->pdev->dev, "too long delay\n");
+			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
+				 ch->index);
 
 	} while (delay);
 }
 
-static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
+static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
 {
-	if (delta > p->max_match_value)
-		dev_warn(&p->pdev->dev, "delta out of range\n");
+	if (delta > ch->max_match_value)
+		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
+			 ch->index);
 
-	p->next_match_value = delta;
-	sh_cmt_clock_event_program_verify(p, 0);
+	ch->next_match_value = delta;
+	sh_cmt_clock_event_program_verify(ch, 0);
 }
 
-static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
+static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&p->lock, flags);
-	__sh_cmt_set_next(p, delta);
-	raw_spin_unlock_irqrestore(&p->lock, flags);
+	raw_spin_lock_irqsave(&ch->lock, flags);
+	__sh_cmt_set_next(ch, delta);
+	raw_spin_unlock_irqrestore(&ch->lock, flags);
 }
 
 static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
 {
-	struct sh_cmt_priv *p = dev_id;
+	struct sh_cmt_channel *ch = dev_id;
 
 	/* clear flags */
-	sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);
+	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
+			   ch->cmt->info->clear_bits);
 
 	/* update clock source counter to begin with if enabled
 	 * the wrap flag should be cleared by the timer specific
 	 * isr before we end up here.
 	 */
-	if (p->flags & FLAG_CLOCKSOURCE)
-		p->total_cycles += p->match_value + 1;
+	if (ch->flags & FLAG_CLOCKSOURCE)
+		ch->total_cycles += ch->match_value + 1;
 
-	if (!(p->flags & FLAG_REPROGRAM))
-		p->next_match_value = p->max_match_value;
+	if (!(ch->flags & FLAG_REPROGRAM))
+		ch->next_match_value = ch->max_match_value;
 
-	p->flags |= FLAG_IRQCONTEXT;
+	ch->flags |= FLAG_IRQCONTEXT;
 
-	if (p->flags & FLAG_CLOCKEVENT) {
-		if (!(p->flags & FLAG_SKIPEVENT)) {
-			if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
-				p->next_match_value = p->max_match_value;
-				p->flags |= FLAG_REPROGRAM;
+	if (ch->flags & FLAG_CLOCKEVENT) {
+		if (!(ch->flags & FLAG_SKIPEVENT)) {
+			if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
+				ch->next_match_value = ch->max_match_value;
+				ch->flags |= FLAG_REPROGRAM;
 			}
 
-			p->ced.event_handler(&p->ced);
+			ch->ced.event_handler(&ch->ced);
 		}
 	}
 
-	p->flags &= ~FLAG_SKIPEVENT;
+	ch->flags &= ~FLAG_SKIPEVENT;
 
-	if (p->flags & FLAG_REPROGRAM) {
-		p->flags &= ~FLAG_REPROGRAM;
-		sh_cmt_clock_event_program_verify(p, 1);
+	if (ch->flags & FLAG_REPROGRAM) {
+		ch->flags &= ~FLAG_REPROGRAM;
+		sh_cmt_clock_event_program_verify(ch, 1);
 
-		if (p->flags & FLAG_CLOCKEVENT)
-			if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
-			    || (p->match_value == p->next_match_value))
-				p->flags &= ~FLAG_REPROGRAM;
+		if (ch->flags & FLAG_CLOCKEVENT)
+			if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
+			    || (ch->match_value == ch->next_match_value))
+				ch->flags &= ~FLAG_REPROGRAM;
 	}
 
-	p->flags &= ~FLAG_IRQCONTEXT;
+	ch->flags &= ~FLAG_IRQCONTEXT;
 
 	return IRQ_HANDLED;
 }
 
-static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
+static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
 {
 	int ret = 0;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&p->lock, flags);
+	raw_spin_lock_irqsave(&ch->lock, flags);
 
-	if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
-		ret = sh_cmt_enable(p, &p->rate);
+	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+		ret = sh_cmt_enable(ch, &ch->rate);
 
 	if (ret)
 		goto out;
-	p->flags |= flag;
+	ch->flags |= flag;
 
 	/* setup timeout if no clockevent */
-	if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
-		__sh_cmt_set_next(p, p->max_match_value);
+	if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
+		__sh_cmt_set_next(ch, ch->max_match_value);
  out:
-	raw_spin_unlock_irqrestore(&p->lock, flags);
+	raw_spin_unlock_irqrestore(&ch->lock, flags);
 
 	return ret;
 }
 
-static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
+static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
 {
 	unsigned long flags;
 	unsigned long f;
 
-	raw_spin_lock_irqsave(&p->lock, flags);
+	raw_spin_lock_irqsave(&ch->lock, flags);
 
-	f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
-	p->flags &= ~flag;
+	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
+	ch->flags &= ~flag;
 
-	if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
-		sh_cmt_disable(p);
+	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+		sh_cmt_disable(ch);
 
 	/* adjust the timeout to maximum if only clocksource left */
-	if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
-		__sh_cmt_set_next(p, p->max_match_value);
+	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
+		__sh_cmt_set_next(ch, ch->max_match_value);
 
-	raw_spin_unlock_irqrestore(&p->lock, flags);
+	raw_spin_unlock_irqrestore(&ch->lock, flags);
 }
 
-static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
+static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
 {
-	return container_of(cs, struct sh_cmt_priv, cs);
+	return container_of(cs, struct sh_cmt_channel, cs);
 }
 
 static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
 {
-	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 	unsigned long flags, raw;
 	unsigned long value;
 	int has_wrapped;
 
-	raw_spin_lock_irqsave(&p->lock, flags);
-	value = p->total_cycles;
-	raw = sh_cmt_get_counter(p, &has_wrapped);
+	raw_spin_lock_irqsave(&ch->lock, flags);
+	value = ch->total_cycles;
+	raw = sh_cmt_get_counter(ch, &has_wrapped);
 
 	if (unlikely(has_wrapped))
-		raw += p->match_value + 1;
-	raw_spin_unlock_irqrestore(&p->lock, flags);
+		raw += ch->match_value + 1;
+	raw_spin_unlock_irqrestore(&ch->lock, flags);
 
 	return value + raw;
 }
@@ -499,53 +633,53 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
 static int sh_cmt_clocksource_enable(struct clocksource *cs)
 {
 	int ret;
-	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
-	WARN_ON(p->cs_enabled);
+	WARN_ON(ch->cs_enabled);
 
-	p->total_cycles = 0;
+	ch->total_cycles = 0;
 
-	ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
+	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
 	if (!ret) {
-		__clocksource_updatefreq_hz(cs, p->rate);
-		p->cs_enabled = true;
+		__clocksource_updatefreq_hz(cs, ch->rate);
+		ch->cs_enabled = true;
 	}
 	return ret;
 }
 
 static void sh_cmt_clocksource_disable(struct clocksource *cs)
 {
-	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
-	WARN_ON(!p->cs_enabled);
+	WARN_ON(!ch->cs_enabled);
 
-	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
-	p->cs_enabled = false;
+	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+	ch->cs_enabled = false;
 }
 
 static void sh_cmt_clocksource_suspend(struct clocksource *cs)
 {
-	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
-	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
-	pm_genpd_syscore_poweroff(&p->pdev->dev);
+	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
 }
 
 static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
-	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
-	pm_genpd_syscore_poweron(&p->pdev->dev);
-	sh_cmt_start(p, FLAG_CLOCKSOURCE);
+	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
+	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
 }
 
-static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
-				       char *name, unsigned long rating)
+static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
+				       const char *name)
 {
-	struct clocksource *cs = &p->cs;
+	struct clocksource *cs = &ch->cs;
 
 	cs->name = name;
-	cs->rating = rating;
+	cs->rating = 125;
 	cs->read = sh_cmt_clocksource_read;
 	cs->enable = sh_cmt_clocksource_enable;
 	cs->disable = sh_cmt_clocksource_disable;
@@ -554,47 +688,48 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
554 cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); 688 cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
555 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; 689 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
556 690
557 dev_info(&p->pdev->dev, "used as clock source\n"); 691 dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
692 ch->index);
558 693
559 /* Register with dummy 1 Hz value, gets updated in ->enable() */ 694 /* Register with dummy 1 Hz value, gets updated in ->enable() */
560 clocksource_register_hz(cs, 1); 695 clocksource_register_hz(cs, 1);
561 return 0; 696 return 0;
562} 697}
563 698
564static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced) 699static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
565{ 700{
566 return container_of(ced, struct sh_cmt_priv, ced); 701 return container_of(ced, struct sh_cmt_channel, ced);
567} 702}
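
cs_to_sh_cmt() and ced_to_sh_cmt() are the embedded-member idiom: the clocksource and clock_event_device structures live inside struct sh_cmt_channel, so container_of() recovers the channel from the pointer the timekeeping core hands back, without any lookup table. A standalone sketch of the idiom (simplified types, illustrative names):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct clocksource { const char *name; };

struct channel {
        unsigned int index;
        struct clocksource cs;           /* embedded, not pointed to */
};

static struct channel *cs_to_channel(struct clocksource *cs)
{
        return container_of(cs, struct channel, cs);
}

int main(void)
{
        struct channel ch = { .index = 2 };
        struct clocksource *cs = &ch.cs; /* what the core would pass back */

        printf("channel %u\n", cs_to_channel(cs)->index); /* channel 2 */
        return 0;
}
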
568 703
569static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic) 704static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
570{ 705{
571 struct clock_event_device *ced = &p->ced; 706 struct clock_event_device *ced = &ch->ced;
572 707
573 sh_cmt_start(p, FLAG_CLOCKEVENT); 708 sh_cmt_start(ch, FLAG_CLOCKEVENT);
574 709
575 /* TODO: calculate good shift from rate and counter bit width */ 710 /* TODO: calculate good shift from rate and counter bit width */
576 711
577 ced->shift = 32; 712 ced->shift = 32;
578 ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); 713 ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift);
579 ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced); 714 ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
580 ced->min_delta_ns = clockevent_delta2ns(0x1f, ced); 715 ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
581 716
582 if (periodic) 717 if (periodic)
583 sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1); 718 sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1);
584 else 719 else
585 sh_cmt_set_next(p, p->max_match_value); 720 sh_cmt_set_next(ch, ch->max_match_value);
586} 721}
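
The fixed shift of 32 above feeds the usual mult/shift trick: div_sc() computes mult = (rate << shift) / NSEC_PER_SEC once, so converting an event delay to counter ticks on the hot path is a multiply and a shift instead of a 64-bit division. A sketch of the arithmetic with an assumed 33 MHz rate (the real driver bounds the delay by min/max_delta_ns):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint64_t rate = 33000000;       /* counter ticks per second, assumed */
        unsigned int shift = 32;

        /* precomputed once, as div_sc() does */
        uint64_t mult = (rate << shift) / NSEC_PER_SEC;

        uint64_t ns = 1000000;          /* program an event 1 ms out */
        uint64_t ticks = (ns * mult) >> shift;  /* ~33000, no division here */

        printf("mult=%llu ticks=%llu\n",
               (unsigned long long)mult, (unsigned long long)ticks);
        return 0;
}
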
587 722
588static void sh_cmt_clock_event_mode(enum clock_event_mode mode, 723static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
589 struct clock_event_device *ced) 724 struct clock_event_device *ced)
590{ 725{
591 struct sh_cmt_priv *p = ced_to_sh_cmt(ced); 726 struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
592 727
593 /* deal with old setting first */ 728 /* deal with old setting first */
594 switch (ced->mode) { 729 switch (ced->mode) {
595 case CLOCK_EVT_MODE_PERIODIC: 730 case CLOCK_EVT_MODE_PERIODIC:
596 case CLOCK_EVT_MODE_ONESHOT: 731 case CLOCK_EVT_MODE_ONESHOT:
597 sh_cmt_stop(p, FLAG_CLOCKEVENT); 732 sh_cmt_stop(ch, FLAG_CLOCKEVENT);
598 break; 733 break;
599 default: 734 default:
600 break; 735 break;
@@ -602,16 +737,18 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
602 737
603 switch (mode) { 738 switch (mode) {
604 case CLOCK_EVT_MODE_PERIODIC: 739 case CLOCK_EVT_MODE_PERIODIC:
605 dev_info(&p->pdev->dev, "used for periodic clock events\n"); 740 dev_info(&ch->cmt->pdev->dev,
606 sh_cmt_clock_event_start(p, 1); 741 "ch%u: used for periodic clock events\n", ch->index);
742 sh_cmt_clock_event_start(ch, 1);
607 break; 743 break;
608 case CLOCK_EVT_MODE_ONESHOT: 744 case CLOCK_EVT_MODE_ONESHOT:
609 dev_info(&p->pdev->dev, "used for oneshot clock events\n"); 745 dev_info(&ch->cmt->pdev->dev,
610 sh_cmt_clock_event_start(p, 0); 746 "ch%u: used for oneshot clock events\n", ch->index);
747 sh_cmt_clock_event_start(ch, 0);
611 break; 748 break;
612 case CLOCK_EVT_MODE_SHUTDOWN: 749 case CLOCK_EVT_MODE_SHUTDOWN:
613 case CLOCK_EVT_MODE_UNUSED: 750 case CLOCK_EVT_MODE_UNUSED:
614 sh_cmt_stop(p, FLAG_CLOCKEVENT); 751 sh_cmt_stop(ch, FLAG_CLOCKEVENT);
615 break; 752 break;
616 default: 753 default:
617 break; 754 break;
@@ -621,196 +758,341 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
621static int sh_cmt_clock_event_next(unsigned long delta, 758static int sh_cmt_clock_event_next(unsigned long delta,
622 struct clock_event_device *ced) 759 struct clock_event_device *ced)
623{ 760{
624 struct sh_cmt_priv *p = ced_to_sh_cmt(ced); 761 struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
625 762
626 BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); 763 BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
627 if (likely(p->flags & FLAG_IRQCONTEXT)) 764 if (likely(ch->flags & FLAG_IRQCONTEXT))
628 p->next_match_value = delta - 1; 765 ch->next_match_value = delta - 1;
629 else 766 else
630 sh_cmt_set_next(p, delta - 1); 767 sh_cmt_set_next(ch, delta - 1);
631 768
632 return 0; 769 return 0;
633} 770}
634 771
635static void sh_cmt_clock_event_suspend(struct clock_event_device *ced) 772static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
636{ 773{
637 struct sh_cmt_priv *p = ced_to_sh_cmt(ced); 774 struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
638 775
639 pm_genpd_syscore_poweroff(&p->pdev->dev); 776 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
640 clk_unprepare(p->clk); 777 clk_unprepare(ch->cmt->clk);
641} 778}
642 779
643static void sh_cmt_clock_event_resume(struct clock_event_device *ced) 780static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
644{ 781{
645 struct sh_cmt_priv *p = ced_to_sh_cmt(ced); 782 struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
646 783
647 clk_prepare(p->clk); 784 clk_prepare(ch->cmt->clk);
648 pm_genpd_syscore_poweron(&p->pdev->dev); 785 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
649} 786}
650 787
651static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, 788static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
652 char *name, unsigned long rating) 789 const char *name)
653{ 790{
654 struct clock_event_device *ced = &p->ced; 791 struct clock_event_device *ced = &ch->ced;
792 int irq;
793 int ret;
655 794
656 memset(ced, 0, sizeof(*ced)); 795 irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index);
796 if (irq < 0) {
797 dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
798 ch->index);
799 return irq;
800 }
801
802 ret = request_irq(irq, sh_cmt_interrupt,
803 IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
804 dev_name(&ch->cmt->pdev->dev), ch);
805 if (ret) {
806 dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
807 ch->index, irq);
808 return ret;
809 }
657 810
658 ced->name = name; 811 ced->name = name;
659 ced->features = CLOCK_EVT_FEAT_PERIODIC; 812 ced->features = CLOCK_EVT_FEAT_PERIODIC;
660 ced->features |= CLOCK_EVT_FEAT_ONESHOT; 813 ced->features |= CLOCK_EVT_FEAT_ONESHOT;
661 ced->rating = rating; 814 ced->rating = 125;
662 ced->cpumask = cpumask_of(0); 815 ced->cpumask = cpu_possible_mask;
663 ced->set_next_event = sh_cmt_clock_event_next; 816 ced->set_next_event = sh_cmt_clock_event_next;
664 ced->set_mode = sh_cmt_clock_event_mode; 817 ced->set_mode = sh_cmt_clock_event_mode;
665 ced->suspend = sh_cmt_clock_event_suspend; 818 ced->suspend = sh_cmt_clock_event_suspend;
666 ced->resume = sh_cmt_clock_event_resume; 819 ced->resume = sh_cmt_clock_event_resume;
667 820
668 dev_info(&p->pdev->dev, "used for clock events\n"); 821 dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
822 ch->index);
669 clockevents_register_device(ced); 823 clockevents_register_device(ced);
824
825 return 0;
670} 826}
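
Structurally, this hunk is where the driver drops setup_irq() plus a long-lived struct irqaction in favour of one platform_get_irq() / request_irq() pair per channel, with the IRQ index following the channel index on non-legacy devices. A kernel-style sketch of the pattern, not standalone code (error reporting trimmed, helper name illustrative):

/* Kernel-style sketch: one IRQ per channel instead of one per device. */
static int request_channel_irq(struct platform_device *pdev,
                               unsigned int index, irq_handler_t handler,
                               void *ch)
{
        int irq = platform_get_irq(pdev, index);

        if (irq < 0)
                return irq;

        /*
         * Timer interrupt: kept out of IRQ balancing and allowed to
         * take part in irqpoll recovery.
         */
        return request_irq(irq, handler,
                           IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
                           dev_name(&pdev->dev), ch);
}
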
671 827
672static int sh_cmt_register(struct sh_cmt_priv *p, char *name, 828static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
673 unsigned long clockevent_rating, 829 bool clockevent, bool clocksource)
674 unsigned long clocksource_rating)
675{ 830{
676 if (clockevent_rating) 831 int ret;
677 sh_cmt_register_clockevent(p, name, clockevent_rating);
678 832
679 if (clocksource_rating) 833 if (clockevent) {
680 sh_cmt_register_clocksource(p, name, clocksource_rating); 834 ch->cmt->has_clockevent = true;
835 ret = sh_cmt_register_clockevent(ch, name);
836 if (ret < 0)
837 return ret;
838 }
839
840 if (clocksource) {
841 ch->cmt->has_clocksource = true;
842 sh_cmt_register_clocksource(ch, name);
843 }
681 844
682 return 0; 845 return 0;
683} 846}
684 847
685static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) 848static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
849 unsigned int hwidx, bool clockevent,
850 bool clocksource, struct sh_cmt_device *cmt)
686{ 851{
687 struct sh_timer_config *cfg = pdev->dev.platform_data; 852 int ret;
688 struct resource *res, *res2;
689 int irq, ret;
690 ret = -ENXIO;
691 853
692 memset(p, 0, sizeof(*p)); 854 /* Skip unused channels. */
693 p->pdev = pdev; 855 if (!clockevent && !clocksource)
856 return 0;
694 857
695 if (!cfg) { 858 ch->cmt = cmt;
696 dev_err(&p->pdev->dev, "missing platform data\n"); 859 ch->index = index;
697 goto err0; 860 ch->hwidx = hwidx;
861
862 /*
863 * Compute the address of the channel control register block. For the
864 * timers with a per-channel start/stop register, compute its address
865 * as well.
866 *
867 * For legacy configuration the address has been mapped explicitly.
868 */
869 if (cmt->legacy) {
870 ch->ioctrl = cmt->mapbase_ch;
871 } else {
872 switch (cmt->info->model) {
873 case SH_CMT_16BIT:
874 ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
875 break;
876 case SH_CMT_32BIT:
877 case SH_CMT_48BIT:
878 ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
879 break;
880 case SH_CMT_32BIT_FAST:
881 /*
882 * The 32-bit "fast" timer has a single channel at hwidx
883 * 5 but is located at offset 0x40 instead of 0x60 for
884 * some reason.
885 */
886 ch->ioctrl = cmt->mapbase + 0x40;
887 break;
888 case SH_CMT_48BIT_GEN2:
889 ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
890 ch->ioctrl = ch->iostart + 0x10;
891 break;
892 }
698 } 893 }
699 894
700 res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); 895 if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
701 if (!res) { 896 ch->max_match_value = ~0;
702 dev_err(&p->pdev->dev, "failed to get I/O memory\n"); 897 else
703 goto err0; 898 ch->max_match_value = (1 << cmt->info->width) - 1;
899
900 ch->match_value = ch->max_match_value;
901 raw_spin_lock_init(&ch->lock);
902
903 if (cmt->legacy) {
904 ch->timer_bit = ch->hwidx;
905 } else {
906 ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2
907 ? 0 : ch->hwidx;
704 } 908 }
705 909
706 /* optional resource for the shared timer start/stop register */ 910 ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
707 res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1); 911 clockevent, clocksource);
912 if (ret) {
913 dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
914 ch->index);
915 return ret;
916 }
917 ch->cs_enabled = false;
708 918
709 irq = platform_get_irq(p->pdev, 0); 919 return 0;
710 if (irq < 0) { 920}
711 dev_err(&p->pdev->dev, "failed to get irq\n"); 921
712 goto err0; 922static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
923{
924 struct resource *mem;
925
926 mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
927 if (!mem) {
928 dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
929 return -ENXIO;
713 } 930 }
714 931
715 /* map memory, let mapbase point to our channel */ 932 cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
716 p->mapbase = ioremap_nocache(res->start, resource_size(res)); 933 if (cmt->mapbase == NULL) {
717 if (p->mapbase == NULL) { 934 dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
718 dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); 935 return -ENXIO;
719 goto err0;
720 } 936 }
721 937
722 /* map second resource for CMSTR */ 938 return 0;
723 p->mapbase_str = ioremap_nocache(res2 ? res2->start : 939}
724 res->start - cfg->channel_offset, 940
725 res2 ? resource_size(res2) : 2); 941static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt)
726 if (p->mapbase_str == NULL) { 942{
727 dev_err(&p->pdev->dev, "failed to remap I/O second memory\n"); 943 struct sh_timer_config *cfg = cmt->pdev->dev.platform_data;
728 goto err1; 944 struct resource *res, *res2;
945
946 /* map memory, let mapbase_ch point to our channel */
947 res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
948 if (!res) {
949 dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
950 return -ENXIO;
729 } 951 }
730 952
731 /* request irq using setup_irq() (too early for request_irq()) */ 953 cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res));
732 p->irqaction.name = dev_name(&p->pdev->dev); 954 if (cmt->mapbase_ch == NULL) {
733 p->irqaction.handler = sh_cmt_interrupt; 955 dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
734 p->irqaction.dev_id = p; 956 return -ENXIO;
735 p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
736
737 /* get hold of clock */
738 p->clk = clk_get(&p->pdev->dev, "cmt_fck");
739 if (IS_ERR(p->clk)) {
740 dev_err(&p->pdev->dev, "cannot get clock\n");
741 ret = PTR_ERR(p->clk);
742 goto err2;
743 } 957 }
744 958
745 ret = clk_prepare(p->clk); 959 /* optional resource for the shared timer start/stop register */
746 if (ret < 0) 960 res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1);
747 goto err3;
748 961
749 if (res2 && (resource_size(res2) == 4)) { 962 /* map second resource for CMSTR */
750 /* assume both CMSTR and CMCSR to be 32-bit */ 963 cmt->mapbase = ioremap_nocache(res2 ? res2->start :
751 p->read_control = sh_cmt_read32; 964 res->start - cfg->channel_offset,
752 p->write_control = sh_cmt_write32; 965 res2 ? resource_size(res2) : 2);
753 } else { 966 if (cmt->mapbase == NULL) {
754 p->read_control = sh_cmt_read16; 967 dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n");
755 p->write_control = sh_cmt_write16; 968 iounmap(cmt->mapbase_ch);
969 return -ENXIO;
756 } 970 }
757 971
758 if (resource_size(res) == 6) { 972 /* identify the model based on the resources */
759 p->width = 16; 973 if (resource_size(res) == 6)
760 p->read_count = sh_cmt_read16; 974 cmt->info = &sh_cmt_info[SH_CMT_16BIT];
761 p->write_count = sh_cmt_write16; 975 else if (res2 && (resource_size(res2) == 4))
762 p->overflow_bit = 0x80; 976 cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2];
763 p->clear_bits = ~0x80; 977 else
764 } else { 978 cmt->info = &sh_cmt_info[SH_CMT_32BIT];
765 p->width = 32; 979
766 p->read_count = sh_cmt_read32; 980 return 0;
767 p->write_count = sh_cmt_write32; 981}
768 p->overflow_bit = 0x8000; 982
769 p->clear_bits = ~0xc000; 983static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt)
984{
985 iounmap(cmt->mapbase);
986 if (cmt->mapbase_ch)
987 iounmap(cmt->mapbase_ch);
988}
989
990static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
991{
992 struct sh_timer_config *cfg = pdev->dev.platform_data;
993 const struct platform_device_id *id = pdev->id_entry;
994 unsigned int hw_channels;
995 int ret;
996
997 memset(cmt, 0, sizeof(*cmt));
998 cmt->pdev = pdev;
999
1000 if (!cfg) {
1001 dev_err(&cmt->pdev->dev, "missing platform data\n");
1002 return -ENXIO;
770 } 1003 }
771 1004
772 if (p->width == (sizeof(p->max_match_value) * 8)) 1005 cmt->info = (const struct sh_cmt_info *)id->driver_data;
773 p->max_match_value = ~0; 1006 cmt->legacy = cmt->info ? false : true;
1007
1008 /* Get hold of clock. */
1009 cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck");
1010 if (IS_ERR(cmt->clk)) {
1011 dev_err(&cmt->pdev->dev, "cannot get clock\n");
1012 return PTR_ERR(cmt->clk);
1013 }
1014
1015 ret = clk_prepare(cmt->clk);
1016 if (ret < 0)
1017 goto err_clk_put;
1018
1019 /*
1020 * Map the memory resource(s). We need to support both the legacy
1021 * platform device configuration (with one device per channel) and the
1022 * new version (with multiple channels per device).
1023 */
1024 if (cmt->legacy)
1025 ret = sh_cmt_map_memory_legacy(cmt);
774 else 1026 else
775 p->max_match_value = (1 << p->width) - 1; 1027 ret = sh_cmt_map_memory(cmt);
776 1028
777 p->match_value = p->max_match_value; 1029 if (ret < 0)
778 raw_spin_lock_init(&p->lock); 1030 goto err_clk_unprepare;
779 1031
780 ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev), 1032 /* Allocate and setup the channels. */
781 cfg->clockevent_rating, 1033 if (cmt->legacy) {
782 cfg->clocksource_rating); 1034 cmt->num_channels = 1;
783 if (ret) { 1035 hw_channels = 0;
784 dev_err(&p->pdev->dev, "registration failed\n"); 1036 } else {
785 goto err4; 1037 cmt->num_channels = hweight8(cfg->channels_mask);
1038 hw_channels = cfg->channels_mask;
786 } 1039 }
787 p->cs_enabled = false;
788 1040
789 ret = setup_irq(irq, &p->irqaction); 1041 cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels),
790 if (ret) { 1042 GFP_KERNEL);
791 dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); 1043 if (cmt->channels == NULL) {
792 goto err4; 1044 ret = -ENOMEM;
1045 goto err_unmap;
793 } 1046 }
794 1047
795 platform_set_drvdata(pdev, p); 1048 if (cmt->legacy) {
1049 ret = sh_cmt_setup_channel(&cmt->channels[0],
1050 cfg->timer_bit, cfg->timer_bit,
1051 cfg->clockevent_rating != 0,
1052 cfg->clocksource_rating != 0, cmt);
1053 if (ret < 0)
1054 goto err_unmap;
1055 } else {
1056 unsigned int mask = hw_channels;
1057 unsigned int i;
1058
1059 /*
1060 * Use the first channel as a clock event device and the second
1061 * channel as a clock source. If only one channel is available
1062 * use it for both.
1063 */
1064 for (i = 0; i < cmt->num_channels; ++i) {
1065 unsigned int hwidx = ffs(mask) - 1;
1066 bool clocksource = i == 1 || cmt->num_channels == 1;
1067 bool clockevent = i == 0;
1068
1069 ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
1070 clockevent, clocksource,
1071 cmt);
1072 if (ret < 0)
1073 goto err_unmap;
1074
1075 mask &= ~(1 << hwidx);
1076 }
1077 }
1078
1079 platform_set_drvdata(pdev, cmt);
796 1080
797 return 0; 1081 return 0;
798err4: 1082
799 clk_unprepare(p->clk); 1083err_unmap:
800err3: 1084 kfree(cmt->channels);
801 clk_put(p->clk); 1085 sh_cmt_unmap_memory(cmt);
802err2: 1086err_clk_unprepare:
803 iounmap(p->mapbase_str); 1087 clk_unprepare(cmt->clk);
804err1: 1088err_clk_put:
805 iounmap(p->mapbase); 1089 clk_put(cmt->clk);
806err0:
807 return ret; 1090 return ret;
808} 1091}
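
The role assignment in the loop above is worth isolating: channels_mask is walked with ffs(), so the i-th enabled channel maps to the i-th set bit, the first channel becomes the clock event device, and the second (or the only one) becomes the clocksource. A standalone sketch of the mask walk (the mask value is an example):

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int mask = 0x60;       /* example: hardware channels 5 and 6 */
        unsigned int num = __builtin_popcount(mask);    /* hweight analogue */
        unsigned int i;

        for (i = 0; mask != 0; i++) {
                unsigned int hwidx = ffs(mask) - 1;     /* lowest set bit */
                int clockevent = (i == 0);
                int clocksource = (i == 1 || num == 1);

                printf("ch%u -> hw%u%s%s\n", i, hwidx,
                       clockevent ? " [clockevent]" : "",
                       clocksource ? " [clocksource]" : "");

                mask &= ~(1u << hwidx);
        }
        return 0;
}
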
809 1092
810static int sh_cmt_probe(struct platform_device *pdev) 1093static int sh_cmt_probe(struct platform_device *pdev)
811{ 1094{
812 struct sh_cmt_priv *p = platform_get_drvdata(pdev); 1095 struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
813 struct sh_timer_config *cfg = pdev->dev.platform_data;
814 int ret; 1096 int ret;
815 1097
816 if (!is_early_platform_device(pdev)) { 1098 if (!is_early_platform_device(pdev)) {
@@ -818,20 +1100,18 @@ static int sh_cmt_probe(struct platform_device *pdev)
818 pm_runtime_enable(&pdev->dev); 1100 pm_runtime_enable(&pdev->dev);
819 } 1101 }
820 1102
821 if (p) { 1103 if (cmt) {
822 dev_info(&pdev->dev, "kept as earlytimer\n"); 1104 dev_info(&pdev->dev, "kept as earlytimer\n");
823 goto out; 1105 goto out;
824 } 1106 }
825 1107
826 p = kmalloc(sizeof(*p), GFP_KERNEL); 1108 cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
827 if (p == NULL) { 1109 if (cmt == NULL)
828 dev_err(&pdev->dev, "failed to allocate driver data\n");
829 return -ENOMEM; 1110 return -ENOMEM;
830 }
831 1111
832 ret = sh_cmt_setup(p, pdev); 1112 ret = sh_cmt_setup(cmt, pdev);
833 if (ret) { 1113 if (ret) {
834 kfree(p); 1114 kfree(cmt);
835 pm_runtime_idle(&pdev->dev); 1115 pm_runtime_idle(&pdev->dev);
836 return ret; 1116 return ret;
837 } 1117 }
@@ -839,7 +1119,7 @@ static int sh_cmt_probe(struct platform_device *pdev)
839 return 0; 1119 return 0;
840 1120
841 out: 1121 out:
842 if (cfg->clockevent_rating || cfg->clocksource_rating) 1122 if (cmt->has_clockevent || cmt->has_clocksource)
843 pm_runtime_irq_safe(&pdev->dev); 1123 pm_runtime_irq_safe(&pdev->dev);
844 else 1124 else
845 pm_runtime_idle(&pdev->dev); 1125 pm_runtime_idle(&pdev->dev);
@@ -852,12 +1132,24 @@ static int sh_cmt_remove(struct platform_device *pdev)
852 return -EBUSY; /* cannot unregister clockevent and clocksource */ 1132 return -EBUSY; /* cannot unregister clockevent and clocksource */
853} 1133}
854 1134
1135static const struct platform_device_id sh_cmt_id_table[] = {
1136 { "sh_cmt", 0 },
1137 { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
1138 { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
1139 { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
1140 { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
1141 { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
1142 { }
1143};
1144MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
1145
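
The id table doubles as the model selector: each name carries a pointer to the matching sh_cmt_info in driver_data, while the bare "sh_cmt" entry leaves it 0, which sh_cmt_setup() above reads back as the legacy configuration. A kernel-style sketch of the consuming side (not standalone, helper name illustrative):

/* Sketch: recover the model that matched this platform device. */
static const struct sh_cmt_info *sh_cmt_model(struct platform_device *pdev)
{
        /*
         * id_entry points at the sh_cmt_id_table row that matched;
         * NULL driver_data ("sh_cmt") selects the legacy code paths.
         */
        return (const struct sh_cmt_info *)pdev->id_entry->driver_data;
}
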
855static struct platform_driver sh_cmt_device_driver = { 1146static struct platform_driver sh_cmt_device_driver = {
856 .probe = sh_cmt_probe, 1147 .probe = sh_cmt_probe,
857 .remove = sh_cmt_remove, 1148 .remove = sh_cmt_remove,
858 .driver = { 1149 .driver = {
859 .name = "sh_cmt", 1150 .name = "sh_cmt",
860 } 1151 },
1152 .id_table = sh_cmt_id_table,
861}; 1153};
862 1154
863static int __init sh_cmt_init(void) 1155static int __init sh_cmt_init(void)
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index e30d76e0a6fa..188d4e092efc 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -11,37 +11,48 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 14 */
19 15
16#include <linux/clk.h>
17#include <linux/clockchips.h>
18#include <linux/delay.h>
19#include <linux/err.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h> 21#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/delay.h>
26#include <linux/io.h> 22#include <linux/io.h>
27#include <linux/clk.h> 23#include <linux/ioport.h>
28#include <linux/irq.h> 24#include <linux/irq.h>
29#include <linux/err.h>
30#include <linux/clockchips.h>
31#include <linux/sh_timer.h>
32#include <linux/slab.h>
33#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/platform_device.h>
34#include <linux/pm_domain.h> 27#include <linux/pm_domain.h>
35#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
29#include <linux/sh_timer.h>
30#include <linux/slab.h>
31#include <linux/spinlock.h>
32
33struct sh_mtu2_device;
34
35struct sh_mtu2_channel {
36 struct sh_mtu2_device *mtu;
37 unsigned int index;
38
39 void __iomem *base;
40 int irq;
41
42 struct clock_event_device ced;
43};
44
45struct sh_mtu2_device {
46 struct platform_device *pdev;
36 47
37struct sh_mtu2_priv {
38 void __iomem *mapbase; 48 void __iomem *mapbase;
39 struct clk *clk; 49 struct clk *clk;
40 struct irqaction irqaction; 50
41 struct platform_device *pdev; 51 struct sh_mtu2_channel *channels;
42 unsigned long rate; 52 unsigned int num_channels;
43 unsigned long periodic; 53
44 struct clock_event_device ced; 54 bool legacy;
55 bool has_clockevent;
45}; 56};
46 57
47static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); 58static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
@@ -55,6 +66,88 @@ static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
55#define TCNT 5 /* channel register */ 66#define TCNT 5 /* channel register */
56#define TGR 6 /* channel register */ 67#define TGR 6 /* channel register */
57 68
69#define TCR_CCLR_NONE (0 << 5)
70#define TCR_CCLR_TGRA (1 << 5)
71#define TCR_CCLR_TGRB (2 << 5)
72#define TCR_CCLR_SYNC (3 << 5)
73#define TCR_CCLR_TGRC (5 << 5)
74#define TCR_CCLR_TGRD (6 << 5)
75#define TCR_CCLR_MASK (7 << 5)
76#define TCR_CKEG_RISING (0 << 3)
77#define TCR_CKEG_FALLING (1 << 3)
78#define TCR_CKEG_BOTH (2 << 3)
79#define TCR_CKEG_MASK (3 << 3)
80/* Values 4 to 7 are channel-dependent */
81#define TCR_TPSC_P1 (0 << 0)
82#define TCR_TPSC_P4 (1 << 0)
83#define TCR_TPSC_P16 (2 << 0)
84#define TCR_TPSC_P64 (3 << 0)
85#define TCR_TPSC_CH0_TCLKA (4 << 0)
86#define TCR_TPSC_CH0_TCLKB (5 << 0)
87#define TCR_TPSC_CH0_TCLKC (6 << 0)
88#define TCR_TPSC_CH0_TCLKD (7 << 0)
89#define TCR_TPSC_CH1_TCLKA (4 << 0)
90#define TCR_TPSC_CH1_TCLKB (5 << 0)
91#define TCR_TPSC_CH1_P256 (6 << 0)
92#define TCR_TPSC_CH1_TCNT2 (7 << 0)
93#define TCR_TPSC_CH2_TCLKA (4 << 0)
94#define TCR_TPSC_CH2_TCLKB (5 << 0)
95#define TCR_TPSC_CH2_TCLKC (6 << 0)
96#define TCR_TPSC_CH2_P1024 (7 << 0)
97#define TCR_TPSC_CH34_P256 (4 << 0)
98#define TCR_TPSC_CH34_P1024 (5 << 0)
99#define TCR_TPSC_CH34_TCLKA (6 << 0)
100#define TCR_TPSC_CH34_TCLKB (7 << 0)
101#define TCR_TPSC_MASK (7 << 0)
102
103#define TMDR_BFE (1 << 6)
104#define TMDR_BFB (1 << 5)
105#define TMDR_BFA (1 << 4)
106#define TMDR_MD_NORMAL (0 << 0)
107#define TMDR_MD_PWM_1 (2 << 0)
108#define TMDR_MD_PWM_2 (3 << 0)
109#define TMDR_MD_PHASE_1 (4 << 0)
110#define TMDR_MD_PHASE_2 (5 << 0)
111#define TMDR_MD_PHASE_3 (6 << 0)
112#define TMDR_MD_PHASE_4 (7 << 0)
113#define TMDR_MD_PWM_SYNC (8 << 0)
114#define TMDR_MD_PWM_COMP_CREST (13 << 0)
115#define TMDR_MD_PWM_COMP_TROUGH (14 << 0)
116#define TMDR_MD_PWM_COMP_BOTH (15 << 0)
117#define TMDR_MD_MASK (15 << 0)
118
119#define TIOC_IOCH(n) ((n) << 4)
120#define TIOC_IOCL(n) ((n) << 0)
121#define TIOR_OC_RETAIN (0 << 0)
122#define TIOR_OC_0_CLEAR (1 << 0)
123#define TIOR_OC_0_SET (2 << 0)
124#define TIOR_OC_0_TOGGLE (3 << 0)
125#define TIOR_OC_1_CLEAR (5 << 0)
126#define TIOR_OC_1_SET (6 << 0)
127#define TIOR_OC_1_TOGGLE (7 << 0)
128#define TIOR_IC_RISING (8 << 0)
129#define TIOR_IC_FALLING (9 << 0)
130#define TIOR_IC_BOTH (10 << 0)
131#define TIOR_IC_TCNT (12 << 0)
132#define TIOR_MASK (15 << 0)
133
134#define TIER_TTGE (1 << 7)
135#define TIER_TTGE2 (1 << 6)
136#define TIER_TCIEU (1 << 5)
137#define TIER_TCIEV (1 << 4)
138#define TIER_TGIED (1 << 3)
139#define TIER_TGIEC (1 << 2)
140#define TIER_TGIEB (1 << 1)
141#define TIER_TGIEA (1 << 0)
142
143#define TSR_TCFD (1 << 7)
144#define TSR_TCFU (1 << 5)
145#define TSR_TCFV (1 << 4)
146#define TSR_TGFD (1 << 3)
147#define TSR_TGFC (1 << 2)
148#define TSR_TGFB (1 << 1)
149#define TSR_TGFA (1 << 0)
150
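
These named bits replace magic numbers in the code below; for instance the old sh_mtu2_write(p, TCR, 0x23) becomes TCR_CCLR_TGRA | TCR_TPSC_P64 in sh_mtu2_enable(). A two-line standalone check of that equivalence:

#include <assert.h>

#define TCR_CCLR_TGRA   (1 << 5)
#define TCR_TPSC_P64    (3 << 0)

int main(void)
{
        /* (1 << 5) | 3 == 0x23: same register value, now self-describing */
        assert((TCR_CCLR_TGRA | TCR_TPSC_P64) == 0x23);
        return 0;
}
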
58static unsigned long mtu2_reg_offs[] = { 151static unsigned long mtu2_reg_offs[] = {
59 [TCR] = 0, 152 [TCR] = 0,
60 [TMDR] = 1, 153 [TMDR] = 1,
@@ -65,135 +158,143 @@ static unsigned long mtu2_reg_offs[] = {
65 [TGR] = 8, 158 [TGR] = 8,
66}; 159};
67 160
68static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr) 161static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
69{ 162{
70 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
71 void __iomem *base = p->mapbase;
72 unsigned long offs; 163 unsigned long offs;
73 164
74 if (reg_nr == TSTR) 165 if (reg_nr == TSTR) {
75 return ioread8(base + cfg->channel_offset); 166 if (ch->mtu->legacy)
167 return ioread8(ch->mtu->mapbase);
168 else
169 return ioread8(ch->mtu->mapbase + 0x280);
170 }
76 171
77 offs = mtu2_reg_offs[reg_nr]; 172 offs = mtu2_reg_offs[reg_nr];
78 173
79 if ((reg_nr == TCNT) || (reg_nr == TGR)) 174 if ((reg_nr == TCNT) || (reg_nr == TGR))
80 return ioread16(base + offs); 175 return ioread16(ch->base + offs);
81 else 176 else
82 return ioread8(base + offs); 177 return ioread8(ch->base + offs);
83} 178}
84 179
85static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr, 180static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
86 unsigned long value) 181 unsigned long value)
87{ 182{
88 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
89 void __iomem *base = p->mapbase;
90 unsigned long offs; 183 unsigned long offs;
91 184
92 if (reg_nr == TSTR) { 185 if (reg_nr == TSTR) {
93 iowrite8(value, base + cfg->channel_offset); 186 if (ch->mtu->legacy)
94 return; 187 return iowrite8(value, ch->mtu->mapbase);
188 else
189 return iowrite8(value, ch->mtu->mapbase + 0x280);
95 } 190 }
96 191
97 offs = mtu2_reg_offs[reg_nr]; 192 offs = mtu2_reg_offs[reg_nr];
98 193
99 if ((reg_nr == TCNT) || (reg_nr == TGR)) 194 if ((reg_nr == TCNT) || (reg_nr == TGR))
100 iowrite16(value, base + offs); 195 iowrite16(value, ch->base + offs);
101 else 196 else
102 iowrite8(value, base + offs); 197 iowrite8(value, ch->base + offs);
103} 198}
104 199
105static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) 200static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
106{ 201{
107 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
108 unsigned long flags, value; 202 unsigned long flags, value;
109 203
110 /* start stop register shared by multiple timer channels */ 204 /* start stop register shared by multiple timer channels */
111 raw_spin_lock_irqsave(&sh_mtu2_lock, flags); 205 raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
112 value = sh_mtu2_read(p, TSTR); 206 value = sh_mtu2_read(ch, TSTR);
113 207
114 if (start) 208 if (start)
115 value |= 1 << cfg->timer_bit; 209 value |= 1 << ch->index;
116 else 210 else
117 value &= ~(1 << cfg->timer_bit); 211 value &= ~(1 << ch->index);
118 212
119 sh_mtu2_write(p, TSTR, value); 213 sh_mtu2_write(ch, TSTR, value);
120 raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); 214 raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
121} 215}
122 216
123static int sh_mtu2_enable(struct sh_mtu2_priv *p) 217static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
124{ 218{
219 unsigned long periodic;
220 unsigned long rate;
125 int ret; 221 int ret;
126 222
127 pm_runtime_get_sync(&p->pdev->dev); 223 pm_runtime_get_sync(&ch->mtu->pdev->dev);
128 dev_pm_syscore_device(&p->pdev->dev, true); 224 dev_pm_syscore_device(&ch->mtu->pdev->dev, true);
129 225
130 /* enable clock */ 226 /* enable clock */
131 ret = clk_enable(p->clk); 227 ret = clk_enable(ch->mtu->clk);
132 if (ret) { 228 if (ret) {
133 dev_err(&p->pdev->dev, "cannot enable clock\n"); 229 dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
230 ch->index);
134 return ret; 231 return ret;
135 } 232 }
136 233
137 /* make sure channel is disabled */ 234 /* make sure channel is disabled */
138 sh_mtu2_start_stop_ch(p, 0); 235 sh_mtu2_start_stop_ch(ch, 0);
139 236
140 p->rate = clk_get_rate(p->clk) / 64; 237 rate = clk_get_rate(ch->mtu->clk) / 64;
141 p->periodic = (p->rate + HZ/2) / HZ; 238 periodic = (rate + HZ/2) / HZ;
142 239
143 /* "Periodic Counter Operation" */ 240 /*
144 sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */ 241 * "Periodic Counter Operation"
145 sh_mtu2_write(p, TIOR, 0); 242 * Clear on TGRA compare match, divide clock by 64.
146 sh_mtu2_write(p, TGR, p->periodic); 243 */
147 sh_mtu2_write(p, TCNT, 0); 244 sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
148 sh_mtu2_write(p, TMDR, 0); 245 sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
149 sh_mtu2_write(p, TIER, 0x01); 246 TIOC_IOCL(TIOR_OC_0_CLEAR));
247 sh_mtu2_write(ch, TGR, periodic);
248 sh_mtu2_write(ch, TCNT, 0);
249 sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
250 sh_mtu2_write(ch, TIER, TIER_TGIEA);
150 251
151 /* enable channel */ 252 /* enable channel */
152 sh_mtu2_start_stop_ch(p, 1); 253 sh_mtu2_start_stop_ch(ch, 1);
153 254
154 return 0; 255 return 0;
155} 256}
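
The reload value computed above, (rate + HZ/2) / HZ, rounds to the nearest tick rather than truncating, keeping the periodic interval as close to 1/HZ as the divided clock allows. A small numeric illustration (the rate value is an example):

#include <stdio.h>

#define HZ 100

int main(void)
{
        unsigned long rate = 32768;     /* example divided clock rate */

        unsigned long truncated = rate / HZ;            /* 327 */
        unsigned long rounded = (rate + HZ / 2) / HZ;   /* 328, nearer 327.68 */

        printf("%lu vs %lu ticks per period\n", truncated, rounded);
        return 0;
}
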
156 257
157static void sh_mtu2_disable(struct sh_mtu2_priv *p) 258static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
158{ 259{
159 /* disable channel */ 260 /* disable channel */
160 sh_mtu2_start_stop_ch(p, 0); 261 sh_mtu2_start_stop_ch(ch, 0);
161 262
162 /* stop clock */ 263 /* stop clock */
163 clk_disable(p->clk); 264 clk_disable(ch->mtu->clk);
164 265
165 dev_pm_syscore_device(&p->pdev->dev, false); 266 dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
166 pm_runtime_put(&p->pdev->dev); 267 pm_runtime_put(&ch->mtu->pdev->dev);
167} 268}
168 269
169static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id) 270static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
170{ 271{
171 struct sh_mtu2_priv *p = dev_id; 272 struct sh_mtu2_channel *ch = dev_id;
172 273
173 /* acknowledge interrupt */ 274 /* acknowledge interrupt */
174 sh_mtu2_read(p, TSR); 275 sh_mtu2_read(ch, TSR);
175 sh_mtu2_write(p, TSR, 0xfe); 276 sh_mtu2_write(ch, TSR, ~TSR_TGFA);
176 277
177 /* notify clockevent layer */ 278 /* notify clockevent layer */
178 p->ced.event_handler(&p->ced); 279 ch->ced.event_handler(&ch->ced);
179 return IRQ_HANDLED; 280 return IRQ_HANDLED;
180} 281}
181 282
182static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced) 283static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
183{ 284{
184 return container_of(ced, struct sh_mtu2_priv, ced); 285 return container_of(ced, struct sh_mtu2_channel, ced);
185} 286}
186 287
187static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, 288static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
188 struct clock_event_device *ced) 289 struct clock_event_device *ced)
189{ 290{
190 struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced); 291 struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
191 int disabled = 0; 292 int disabled = 0;
192 293
193 /* deal with old setting first */ 294 /* deal with old setting first */
194 switch (ced->mode) { 295 switch (ced->mode) {
195 case CLOCK_EVT_MODE_PERIODIC: 296 case CLOCK_EVT_MODE_PERIODIC:
196 sh_mtu2_disable(p); 297 sh_mtu2_disable(ch);
197 disabled = 1; 298 disabled = 1;
198 break; 299 break;
199 default: 300 default:
@@ -202,12 +303,13 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
202 303
203 switch (mode) { 304 switch (mode) {
204 case CLOCK_EVT_MODE_PERIODIC: 305 case CLOCK_EVT_MODE_PERIODIC:
205 dev_info(&p->pdev->dev, "used for periodic clock events\n"); 306 dev_info(&ch->mtu->pdev->dev,
206 sh_mtu2_enable(p); 307 "ch%u: used for periodic clock events\n", ch->index);
308 sh_mtu2_enable(ch);
207 break; 309 break;
208 case CLOCK_EVT_MODE_UNUSED: 310 case CLOCK_EVT_MODE_UNUSED:
209 if (!disabled) 311 if (!disabled)
210 sh_mtu2_disable(p); 312 sh_mtu2_disable(ch);
211 break; 313 break;
212 case CLOCK_EVT_MODE_SHUTDOWN: 314 case CLOCK_EVT_MODE_SHUTDOWN:
213 default: 315 default:
@@ -217,125 +319,207 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
217 319
218static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced) 320static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
219{ 321{
220 pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev); 322 pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
221} 323}
222 324
223static void sh_mtu2_clock_event_resume(struct clock_event_device *ced) 325static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
224{ 326{
225 pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev); 327 pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
226} 328}
227 329
228static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, 330static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
229 char *name, unsigned long rating) 331 const char *name)
230{ 332{
231 struct clock_event_device *ced = &p->ced; 333 struct clock_event_device *ced = &ch->ced;
232 int ret; 334 int ret;
233 335
234 memset(ced, 0, sizeof(*ced));
235
236 ced->name = name; 336 ced->name = name;
237 ced->features = CLOCK_EVT_FEAT_PERIODIC; 337 ced->features = CLOCK_EVT_FEAT_PERIODIC;
238 ced->rating = rating; 338 ced->rating = 200;
239 ced->cpumask = cpumask_of(0); 339 ced->cpumask = cpu_possible_mask;
240 ced->set_mode = sh_mtu2_clock_event_mode; 340 ced->set_mode = sh_mtu2_clock_event_mode;
241 ced->suspend = sh_mtu2_clock_event_suspend; 341 ced->suspend = sh_mtu2_clock_event_suspend;
242 ced->resume = sh_mtu2_clock_event_resume; 342 ced->resume = sh_mtu2_clock_event_resume;
243 343
244 dev_info(&p->pdev->dev, "used for clock events\n"); 344 dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
345 ch->index);
245 clockevents_register_device(ced); 346 clockevents_register_device(ced);
246 347
247 ret = setup_irq(p->irqaction.irq, &p->irqaction); 348 ret = request_irq(ch->irq, sh_mtu2_interrupt,
349 IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
350 dev_name(&ch->mtu->pdev->dev), ch);
248 if (ret) { 351 if (ret) {
249 dev_err(&p->pdev->dev, "failed to request irq %d\n", 352 dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
250 p->irqaction.irq); 353 ch->index, ch->irq);
251 return; 354 return;
252 } 355 }
253} 356}
254 357
255static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name, 358static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name,
256 unsigned long clockevent_rating) 359 bool clockevent)
257{ 360{
258 if (clockevent_rating) 361 if (clockevent) {
259 sh_mtu2_register_clockevent(p, name, clockevent_rating); 362 ch->mtu->has_clockevent = true;
363 sh_mtu2_register_clockevent(ch, name);
364 }
260 365
261 return 0; 366 return 0;
262} 367}
263 368
264static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) 369static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
370 struct sh_mtu2_device *mtu)
265{ 371{
266 struct sh_timer_config *cfg = pdev->dev.platform_data; 372 static const unsigned int channel_offsets[] = {
267 struct resource *res; 373 0x300, 0x380, 0x000,
268 int irq, ret; 374 };
269 ret = -ENXIO; 375 bool clockevent;
376
377 ch->mtu = mtu;
378
379 if (mtu->legacy) {
380 struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
381
382 clockevent = cfg->clockevent_rating != 0;
270 383
271 memset(p, 0, sizeof(*p)); 384 ch->irq = platform_get_irq(mtu->pdev, 0);
272 p->pdev = pdev; 385 ch->base = mtu->mapbase - cfg->channel_offset;
386 ch->index = cfg->timer_bit;
387 } else {
388 char name[6];
273 389
274 if (!cfg) { 390 clockevent = true;
275 dev_err(&p->pdev->dev, "missing platform data\n"); 391
276 goto err0; 392 sprintf(name, "tgi%ua", index);
393 ch->irq = platform_get_irq_byname(mtu->pdev, name);
394 ch->base = mtu->mapbase + channel_offsets[index];
395 ch->index = index;
277 } 396 }
278 397
279 platform_set_drvdata(pdev, p); 398 if (ch->irq < 0) {
399 /* Skip channels with no declared interrupt. */
400 if (!mtu->legacy)
401 return 0;
402
403 dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n",
404 ch->index);
405 return ch->irq;
406 }
407
408 return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent);
409}
280 410
281 res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); 411static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
412{
413 struct resource *res;
414
415 res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
282 if (!res) { 416 if (!res) {
283 dev_err(&p->pdev->dev, "failed to get I/O memory\n"); 417 dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
284 goto err0; 418 return -ENXIO;
419 }
420
421 mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
422 if (mtu->mapbase == NULL)
423 return -ENXIO;
424
425 /*
426 * In legacy platform device configuration (with one device per channel)
427 * the resource points to the channel base address.
428 */
429 if (mtu->legacy) {
430 struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
431 mtu->mapbase += cfg->channel_offset;
285 } 432 }
286 433
287 irq = platform_get_irq(p->pdev, 0); 434 return 0;
288 if (irq < 0) { 435}
289 dev_err(&p->pdev->dev, "failed to get irq\n"); 436
290 goto err0; 437static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu)
438{
439 if (mtu->legacy) {
440 struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
441 mtu->mapbase -= cfg->channel_offset;
291 } 442 }
292 443
293 /* map memory, let mapbase point to our channel */ 444 iounmap(mtu->mapbase);
294 p->mapbase = ioremap_nocache(res->start, resource_size(res)); 445}
295 if (p->mapbase == NULL) { 446
296 dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); 447static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
297 goto err0; 448 struct platform_device *pdev)
449{
450 struct sh_timer_config *cfg = pdev->dev.platform_data;
451 const struct platform_device_id *id = pdev->id_entry;
452 unsigned int i;
453 int ret;
454
455 mtu->pdev = pdev;
456 mtu->legacy = id->driver_data;
457
458 if (mtu->legacy && !cfg) {
459 dev_err(&mtu->pdev->dev, "missing platform data\n");
460 return -ENXIO;
298 } 461 }
299 462
300 /* setup data for setup_irq() (too early for request_irq()) */ 463 /* Get hold of clock. */
301 p->irqaction.name = dev_name(&p->pdev->dev); 464 mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck");
302 p->irqaction.handler = sh_mtu2_interrupt; 465 if (IS_ERR(mtu->clk)) {
303 p->irqaction.dev_id = p; 466 dev_err(&mtu->pdev->dev, "cannot get clock\n");
304 p->irqaction.irq = irq; 467 return PTR_ERR(mtu->clk);
305 p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
306
307 /* get hold of clock */
308 p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
309 if (IS_ERR(p->clk)) {
310 dev_err(&p->pdev->dev, "cannot get clock\n");
311 ret = PTR_ERR(p->clk);
312 goto err1;
313 } 468 }
314 469
315 ret = clk_prepare(p->clk); 470 ret = clk_prepare(mtu->clk);
316 if (ret < 0) 471 if (ret < 0)
317 goto err2; 472 goto err_clk_put;
318 473
319 ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), 474 /* Map the memory resource. */
320 cfg->clockevent_rating); 475 ret = sh_mtu2_map_memory(mtu);
321 if (ret < 0) 476 if (ret < 0) {
322 goto err3; 477 dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
478 goto err_clk_unprepare;
479 }
480
481 /* Allocate and setup the channels. */
482 if (mtu->legacy)
483 mtu->num_channels = 1;
484 else
485 mtu->num_channels = 3;
486
487 mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
488 GFP_KERNEL);
489 if (mtu->channels == NULL) {
490 ret = -ENOMEM;
491 goto err_unmap;
492 }
493
494 if (mtu->legacy) {
495 ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu);
496 if (ret < 0)
497 goto err_unmap;
498 } else {
499 for (i = 0; i < mtu->num_channels; ++i) {
500 ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
501 if (ret < 0)
502 goto err_unmap;
503 }
504 }
505
506 platform_set_drvdata(pdev, mtu);
323 507
324 return 0; 508 return 0;
325 err3: 509
326 clk_unprepare(p->clk); 510err_unmap:
327 err2: 511 kfree(mtu->channels);
328 clk_put(p->clk); 512 sh_mtu2_unmap_memory(mtu);
329 err1: 513err_clk_unprepare:
330 iounmap(p->mapbase); 514 clk_unprepare(mtu->clk);
331 err0: 515err_clk_put:
516 clk_put(mtu->clk);
332 return ret; 517 return ret;
333} 518}
334 519
335static int sh_mtu2_probe(struct platform_device *pdev) 520static int sh_mtu2_probe(struct platform_device *pdev)
336{ 521{
337 struct sh_mtu2_priv *p = platform_get_drvdata(pdev); 522 struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
338 struct sh_timer_config *cfg = pdev->dev.platform_data;
339 int ret; 523 int ret;
340 524
341 if (!is_early_platform_device(pdev)) { 525 if (!is_early_platform_device(pdev)) {
@@ -343,20 +527,18 @@ static int sh_mtu2_probe(struct platform_device *pdev)
343 pm_runtime_enable(&pdev->dev); 527 pm_runtime_enable(&pdev->dev);
344 } 528 }
345 529
346 if (p) { 530 if (mtu) {
347 dev_info(&pdev->dev, "kept as earlytimer\n"); 531 dev_info(&pdev->dev, "kept as earlytimer\n");
348 goto out; 532 goto out;
349 } 533 }
350 534
351 p = kmalloc(sizeof(*p), GFP_KERNEL); 535 mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
352 if (p == NULL) { 536 if (mtu == NULL)
353 dev_err(&pdev->dev, "failed to allocate driver data\n");
354 return -ENOMEM; 537 return -ENOMEM;
355 }
356 538
357 ret = sh_mtu2_setup(p, pdev); 539 ret = sh_mtu2_setup(mtu, pdev);
358 if (ret) { 540 if (ret) {
359 kfree(p); 541 kfree(mtu);
360 pm_runtime_idle(&pdev->dev); 542 pm_runtime_idle(&pdev->dev);
361 return ret; 543 return ret;
362 } 544 }
@@ -364,7 +546,7 @@ static int sh_mtu2_probe(struct platform_device *pdev)
364 return 0; 546 return 0;
365 547
366 out: 548 out:
367 if (cfg->clockevent_rating) 549 if (mtu->has_clockevent)
368 pm_runtime_irq_safe(&pdev->dev); 550 pm_runtime_irq_safe(&pdev->dev);
369 else 551 else
370 pm_runtime_idle(&pdev->dev); 552 pm_runtime_idle(&pdev->dev);
@@ -377,12 +559,20 @@ static int sh_mtu2_remove(struct platform_device *pdev)
377 return -EBUSY; /* cannot unregister clockevent */ 559 return -EBUSY; /* cannot unregister clockevent */
378} 560}
379 561
562static const struct platform_device_id sh_mtu2_id_table[] = {
563 { "sh_mtu2", 1 },
564 { "sh-mtu2", 0 },
565 { },
566};
567MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);
568
380static struct platform_driver sh_mtu2_device_driver = { 569static struct platform_driver sh_mtu2_device_driver = {
381 .probe = sh_mtu2_probe, 570 .probe = sh_mtu2_probe,
382 .remove = sh_mtu2_remove, 571 .remove = sh_mtu2_remove,
383 .driver = { 572 .driver = {
384 .name = "sh_mtu2", 573 .name = "sh_mtu2",
385 } 574 },
575 .id_table = sh_mtu2_id_table,
386}; 576};
387 577
388static int __init sh_mtu2_init(void) 578static int __init sh_mtu2_init(void)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index ecd7b60bfdfa..6bd17a8f3dd4 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -11,35 +11,41 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 14 */
19 15
16#include <linux/clk.h>
17#include <linux/clockchips.h>
18#include <linux/clocksource.h>
19#include <linux/delay.h>
20#include <linux/err.h>
20#include <linux/init.h> 21#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h> 22#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/delay.h>
26#include <linux/io.h> 23#include <linux/io.h>
27#include <linux/clk.h> 24#include <linux/ioport.h>
28#include <linux/irq.h> 25#include <linux/irq.h>
29#include <linux/err.h>
30#include <linux/clocksource.h>
31#include <linux/clockchips.h>
32#include <linux/sh_timer.h>
33#include <linux/slab.h>
34#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/platform_device.h>
35#include <linux/pm_domain.h> 28#include <linux/pm_domain.h>
36#include <linux/pm_runtime.h> 29#include <linux/pm_runtime.h>
30#include <linux/sh_timer.h>
31#include <linux/slab.h>
32#include <linux/spinlock.h>
33
34enum sh_tmu_model {
35 SH_TMU_LEGACY,
36 SH_TMU,
37 SH_TMU_SH3,
38};
39
40struct sh_tmu_device;
41
42struct sh_tmu_channel {
43 struct sh_tmu_device *tmu;
44 unsigned int index;
45
46 void __iomem *base;
47 int irq;
37 48
38struct sh_tmu_priv {
39 void __iomem *mapbase;
40 struct clk *clk;
41 struct irqaction irqaction;
42 struct platform_device *pdev;
43 unsigned long rate; 49 unsigned long rate;
44 unsigned long periodic; 50 unsigned long periodic;
45 struct clock_event_device ced; 51 struct clock_event_device ced;
@@ -48,6 +54,21 @@ struct sh_tmu_priv {
48 unsigned int enable_count; 54 unsigned int enable_count;
49}; 55};
50 56
57struct sh_tmu_device {
58 struct platform_device *pdev;
59
60 void __iomem *mapbase;
61 struct clk *clk;
62
63 enum sh_tmu_model model;
64
65 struct sh_tmu_channel *channels;
66 unsigned int num_channels;
67
68 bool has_clockevent;
69 bool has_clocksource;
70};
71
51static DEFINE_RAW_SPINLOCK(sh_tmu_lock); 72static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
52 73
53#define TSTR -1 /* shared register */ 74#define TSTR -1 /* shared register */
@@ -55,189 +76,208 @@ static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
55#define TCNT 1 /* channel register */ 76#define TCNT 1 /* channel register */
56#define TCR 2 /* channel register */ 77#define TCR 2 /* channel register */
57 78
58static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr) 79#define TCR_UNF (1 << 8)
80#define TCR_UNIE (1 << 5)
81#define TCR_TPSC_CLK4 (0 << 0)
82#define TCR_TPSC_CLK16 (1 << 0)
83#define TCR_TPSC_CLK64 (2 << 0)
84#define TCR_TPSC_CLK256 (3 << 0)
85#define TCR_TPSC_CLK1024 (4 << 0)
86#define TCR_TPSC_MASK (7 << 0)
87
88static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
59{ 89{
60 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
61 void __iomem *base = p->mapbase;
62 unsigned long offs; 90 unsigned long offs;
63 91
64 if (reg_nr == TSTR) 92 if (reg_nr == TSTR) {
65 return ioread8(base - cfg->channel_offset); 93 switch (ch->tmu->model) {
94 case SH_TMU_LEGACY:
95 return ioread8(ch->tmu->mapbase);
96 case SH_TMU_SH3:
97 return ioread8(ch->tmu->mapbase + 2);
98 case SH_TMU:
99 return ioread8(ch->tmu->mapbase + 4);
100 }
101 }
66 102
67 offs = reg_nr << 2; 103 offs = reg_nr << 2;
68 104
69 if (reg_nr == TCR) 105 if (reg_nr == TCR)
70 return ioread16(base + offs); 106 return ioread16(ch->base + offs);
71 else 107 else
72 return ioread32(base + offs); 108 return ioread32(ch->base + offs);
73} 109}
74 110
75static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr, 111static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
76 unsigned long value) 112 unsigned long value)
77{ 113{
78 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
79 void __iomem *base = p->mapbase;
80 unsigned long offs; 114 unsigned long offs;
81 115
82 if (reg_nr == TSTR) { 116 if (reg_nr == TSTR) {
83 iowrite8(value, base - cfg->channel_offset); 117 switch (ch->tmu->model) {
84 return; 118 case SH_TMU_LEGACY:
119 return iowrite8(value, ch->tmu->mapbase);
120 case SH_TMU_SH3:
121 return iowrite8(value, ch->tmu->mapbase + 2);
122 case SH_TMU:
123 return iowrite8(value, ch->tmu->mapbase + 4);
124 }
85 } 125 }
86 126
87 offs = reg_nr << 2; 127 offs = reg_nr << 2;
88 128
89 if (reg_nr == TCR) 129 if (reg_nr == TCR)
90 iowrite16(value, base + offs); 130 iowrite16(value, ch->base + offs);
91 else 131 else
92 iowrite32(value, base + offs); 132 iowrite32(value, ch->base + offs);
93} 133}
94 134
95static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) 135static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
96{ 136{
97 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
98 unsigned long flags, value; 137 unsigned long flags, value;
99 138
100 /* start stop register shared by multiple timer channels */ 139 /* start stop register shared by multiple timer channels */
101 raw_spin_lock_irqsave(&sh_tmu_lock, flags); 140 raw_spin_lock_irqsave(&sh_tmu_lock, flags);
102 value = sh_tmu_read(p, TSTR); 141 value = sh_tmu_read(ch, TSTR);
103 142
104 if (start) 143 if (start)
105 value |= 1 << cfg->timer_bit; 144 value |= 1 << ch->index;
106 else 145 else
107 value &= ~(1 << cfg->timer_bit); 146 value &= ~(1 << ch->index);
108 147
109 sh_tmu_write(p, TSTR, value); 148 sh_tmu_write(ch, TSTR, value);
110 raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); 149 raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
111} 150}
112 151
113static int __sh_tmu_enable(struct sh_tmu_priv *p) 152static int __sh_tmu_enable(struct sh_tmu_channel *ch)
114{ 153{
115 int ret; 154 int ret;
116 155
117 /* enable clock */ 156 /* enable clock */
118 ret = clk_enable(p->clk); 157 ret = clk_enable(ch->tmu->clk);
119 if (ret) { 158 if (ret) {
120 dev_err(&p->pdev->dev, "cannot enable clock\n"); 159 dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
160 ch->index);
121 return ret; 161 return ret;
122 } 162 }
123 163
124 /* make sure channel is disabled */ 164 /* make sure channel is disabled */
125 sh_tmu_start_stop_ch(p, 0); 165 sh_tmu_start_stop_ch(ch, 0);
126 166
127 /* maximum timeout */ 167 /* maximum timeout */
128 sh_tmu_write(p, TCOR, 0xffffffff); 168 sh_tmu_write(ch, TCOR, 0xffffffff);
129 sh_tmu_write(p, TCNT, 0xffffffff); 169 sh_tmu_write(ch, TCNT, 0xffffffff);
130 170
131 /* configure channel to parent clock / 4, irq off */ 171 /* configure channel to parent clock / 4, irq off */
132 p->rate = clk_get_rate(p->clk) / 4; 172 ch->rate = clk_get_rate(ch->tmu->clk) / 4;
133 sh_tmu_write(p, TCR, 0x0000); 173 sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
134 174
135 /* enable channel */ 175 /* enable channel */
136 sh_tmu_start_stop_ch(p, 1); 176 sh_tmu_start_stop_ch(ch, 1);
137 177
138 return 0; 178 return 0;
139} 179}
140 180
141static int sh_tmu_enable(struct sh_tmu_priv *p) 181static int sh_tmu_enable(struct sh_tmu_channel *ch)
142{ 182{
143 if (p->enable_count++ > 0) 183 if (ch->enable_count++ > 0)
144 return 0; 184 return 0;
145 185
146 pm_runtime_get_sync(&p->pdev->dev); 186 pm_runtime_get_sync(&ch->tmu->pdev->dev);
147 dev_pm_syscore_device(&p->pdev->dev, true); 187 dev_pm_syscore_device(&ch->tmu->pdev->dev, true);
148 188
149 return __sh_tmu_enable(p); 189 return __sh_tmu_enable(ch);
150} 190}
151 191
152static void __sh_tmu_disable(struct sh_tmu_priv *p) 192static void __sh_tmu_disable(struct sh_tmu_channel *ch)
153{ 193{
154 /* disable channel */ 194 /* disable channel */
155 sh_tmu_start_stop_ch(p, 0); 195 sh_tmu_start_stop_ch(ch, 0);
156 196
157 /* disable interrupts in TMU block */ 197 /* disable interrupts in TMU block */
158 sh_tmu_write(p, TCR, 0x0000); 198 sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
159 199
160 /* stop clock */ 200 /* stop clock */
161 clk_disable(p->clk); 201 clk_disable(ch->tmu->clk);
162} 202}
163 203
164static void sh_tmu_disable(struct sh_tmu_priv *p) 204static void sh_tmu_disable(struct sh_tmu_channel *ch)
165{ 205{
166 if (WARN_ON(p->enable_count == 0)) 206 if (WARN_ON(ch->enable_count == 0))
167 return; 207 return;
168 208
169 if (--p->enable_count > 0) 209 if (--ch->enable_count > 0)
170 return; 210 return;
171 211
172 __sh_tmu_disable(p); 212 __sh_tmu_disable(ch);
173 213
174 dev_pm_syscore_device(&p->pdev->dev, false); 214 dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
175 pm_runtime_put(&p->pdev->dev); 215 pm_runtime_put(&ch->tmu->pdev->dev);
176} 216}
177 217
178static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, 218static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
179 int periodic) 219 int periodic)
180{ 220{
181 /* stop timer */ 221 /* stop timer */
182 sh_tmu_start_stop_ch(p, 0); 222 sh_tmu_start_stop_ch(ch, 0);
183 223
184 /* acknowledge interrupt */ 224 /* acknowledge interrupt */
185 sh_tmu_read(p, TCR); 225 sh_tmu_read(ch, TCR);
186 226
187 /* enable interrupt */ 227 /* enable interrupt */
188 sh_tmu_write(p, TCR, 0x0020); 228 sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
189 229
190 /* reload delta value in case of periodic timer */ 230 /* reload delta value in case of periodic timer */
191 if (periodic) 231 if (periodic)
192 sh_tmu_write(p, TCOR, delta); 232 sh_tmu_write(ch, TCOR, delta);
193 else 233 else
194 sh_tmu_write(p, TCOR, 0xffffffff); 234 sh_tmu_write(ch, TCOR, 0xffffffff);
195 235
196 sh_tmu_write(p, TCNT, delta); 236 sh_tmu_write(ch, TCNT, delta);
197 237
198 /* start timer */ 238 /* start timer */
199 sh_tmu_start_stop_ch(p, 1); 239 sh_tmu_start_stop_ch(ch, 1);
200} 240}
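
TCOR is the auto-reload register: on underflow the hardware copies TCOR back into TCNT, so the periodic case above reloads delta every period while the oneshot case parks the reload at the maximum and lets the interrupt handler shut the interrupt off. A toy model of that reload rule (not the hardware interface, just the counting behaviour):

#include <stdint.h>
#include <stdio.h>

struct tmu { uint32_t tcor, tcnt; };

static int tick(struct tmu *t)          /* returns 1 on underflow */
{
        if (t->tcnt == 0) {
                t->tcnt = t->tcor;      /* hardware reloads from TCOR */
                return 1;
        }
        t->tcnt--;
        return 0;
}

int main(void)
{
        struct tmu periodic = { .tcor = 4, .tcnt = 4 };
        unsigned int i, fired = 0;

        for (i = 0; i < 20; i++)
                fired += tick(&periodic);

        printf("fired %u times\n", fired);      /* 4: once every 5 ticks */
        return 0;
}
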
201 241
202static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id) 242static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
203{ 243{
204 struct sh_tmu_priv *p = dev_id; 244 struct sh_tmu_channel *ch = dev_id;
205 245
206 /* disable or acknowledge interrupt */ 246 /* disable or acknowledge interrupt */
207 if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) 247 if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
208 sh_tmu_write(p, TCR, 0x0000); 248 sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
209 else 249 else
210 sh_tmu_write(p, TCR, 0x0020); 250 sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
211 251
212 /* notify clockevent layer */ 252 /* notify clockevent layer */
213 p->ced.event_handler(&p->ced); 253 ch->ced.event_handler(&ch->ced);
214 return IRQ_HANDLED; 254 return IRQ_HANDLED;
215} 255}
216 256
217static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs) 257static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
218{ 258{
219 return container_of(cs, struct sh_tmu_priv, cs); 259 return container_of(cs, struct sh_tmu_channel, cs);
220} 260}
221 261
222static cycle_t sh_tmu_clocksource_read(struct clocksource *cs) 262static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
223{ 263{
224 struct sh_tmu_priv *p = cs_to_sh_tmu(cs); 264 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
225 265
226 return sh_tmu_read(p, TCNT) ^ 0xffffffff; 266 return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
227} 267}
228 268
229static int sh_tmu_clocksource_enable(struct clocksource *cs) 269static int sh_tmu_clocksource_enable(struct clocksource *cs)
230{ 270{
231 struct sh_tmu_priv *p = cs_to_sh_tmu(cs); 271 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
232 int ret; 272 int ret;
233 273
234 if (WARN_ON(p->cs_enabled)) 274 if (WARN_ON(ch->cs_enabled))
235 return 0; 275 return 0;
236 276
237 ret = sh_tmu_enable(p); 277 ret = sh_tmu_enable(ch);
238 if (!ret) { 278 if (!ret) {
239 __clocksource_updatefreq_hz(cs, p->rate); 279 __clocksource_updatefreq_hz(cs, ch->rate);
240 p->cs_enabled = true; 280 ch->cs_enabled = true;
241 } 281 }
242 282
243 return ret; 283 return ret;
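
Running through these hunks, the sh_tmu_priv to sh_tmu_channel rename is more than cosmetic: per-channel state moves into its own structure with a backpointer to a shared per-device structure, which is why every pdev and clk access gains a tmu-> hop, and the magic TCR values 0x0000/0x0020 become symbolic (0x0020 is bit 5, the underflow interrupt enable). Note also that the clocksource read XORs the down-counting TCNT with 0xffffffff so consumers see an up-counting value. The sketch below reconstructs the pieces from the accesses visible in this diff alone; field order and anything not touched here are assumptions, not the verbatim driver source.

/* Reconstruction from this diff only, not the authoritative header. */
#define TCR_UNIE	(1 << 5)	/* underflow interrupt enable (old magic 0x0020) */
#define TCR_TPSC_CLK4	(0 << 0)	/* prescaler select: input clock / 4 (old 0x0000) */

enum sh_tmu_model {
	SH_TMU_LEGACY,	/* one platform device per channel ("sh_tmu") */
	SH_TMU,		/* one device per TMU block ("sh-tmu") */
	SH_TMU_SH3,	/* SH3-style register layout ("sh-tmu-sh3") */
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;	/* backpointer to the owning device */
	unsigned int index;		/* channel number, used in "ch%u" messages */
	void __iomem *base;		/* per-channel register window */
	int irq;
	unsigned long rate;		/* clk_get_rate() / 4, sampled on enable */
	unsigned long periodic;		/* reload value for periodic mode */
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;
	void __iomem *mapbase;
	struct clk *clk;
	enum sh_tmu_model model;	/* from the id table later in this diff */
	struct sh_tmu_channel *channels;
	unsigned int num_channels;
	bool has_clockevent;
	bool has_clocksource;
};
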
@@ -245,48 +285,48 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs)
245 285
246static void sh_tmu_clocksource_disable(struct clocksource *cs) 286static void sh_tmu_clocksource_disable(struct clocksource *cs)
247{ 287{
248 struct sh_tmu_priv *p = cs_to_sh_tmu(cs); 288 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
249 289
250 if (WARN_ON(!p->cs_enabled)) 290 if (WARN_ON(!ch->cs_enabled))
251 return; 291 return;
252 292
253 sh_tmu_disable(p); 293 sh_tmu_disable(ch);
254 p->cs_enabled = false; 294 ch->cs_enabled = false;
255} 295}
256 296
257static void sh_tmu_clocksource_suspend(struct clocksource *cs) 297static void sh_tmu_clocksource_suspend(struct clocksource *cs)
258{ 298{
259 struct sh_tmu_priv *p = cs_to_sh_tmu(cs); 299 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
260 300
261 if (!p->cs_enabled) 301 if (!ch->cs_enabled)
262 return; 302 return;
263 303
264 if (--p->enable_count == 0) { 304 if (--ch->enable_count == 0) {
265 __sh_tmu_disable(p); 305 __sh_tmu_disable(ch);
266 pm_genpd_syscore_poweroff(&p->pdev->dev); 306 pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
267 } 307 }
268} 308}
269 309
270static void sh_tmu_clocksource_resume(struct clocksource *cs) 310static void sh_tmu_clocksource_resume(struct clocksource *cs)
271{ 311{
272 struct sh_tmu_priv *p = cs_to_sh_tmu(cs); 312 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
273 313
274 if (!p->cs_enabled) 314 if (!ch->cs_enabled)
275 return; 315 return;
276 316
277 if (p->enable_count++ == 0) { 317 if (ch->enable_count++ == 0) {
278 pm_genpd_syscore_poweron(&p->pdev->dev); 318 pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
279 __sh_tmu_enable(p); 319 __sh_tmu_enable(ch);
280 } 320 }
281} 321}
282 322
283static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, 323static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
284 char *name, unsigned long rating) 324 const char *name)
285{ 325{
286 struct clocksource *cs = &p->cs; 326 struct clocksource *cs = &ch->cs;
287 327
288 cs->name = name; 328 cs->name = name;
289 cs->rating = rating; 329 cs->rating = 200;
290 cs->read = sh_tmu_clocksource_read; 330 cs->read = sh_tmu_clocksource_read;
291 cs->enable = sh_tmu_clocksource_enable; 331 cs->enable = sh_tmu_clocksource_enable;
292 cs->disable = sh_tmu_clocksource_disable; 332 cs->disable = sh_tmu_clocksource_disable;
@@ -295,43 +335,44 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
295 cs->mask = CLOCKSOURCE_MASK(32); 335 cs->mask = CLOCKSOURCE_MASK(32);
296 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; 336 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
297 337
298 dev_info(&p->pdev->dev, "used as clock source\n"); 338 dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
339 ch->index);
299 340
300 /* Register with dummy 1 Hz value, gets updated in ->enable() */ 341 /* Register with dummy 1 Hz value, gets updated in ->enable() */
301 clocksource_register_hz(cs, 1); 342 clocksource_register_hz(cs, 1);
302 return 0; 343 return 0;
303} 344}
304 345
305static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced) 346static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
306{ 347{
307 return container_of(ced, struct sh_tmu_priv, ced); 348 return container_of(ced, struct sh_tmu_channel, ced);
308} 349}
309 350
310static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic) 351static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
311{ 352{
312 struct clock_event_device *ced = &p->ced; 353 struct clock_event_device *ced = &ch->ced;
313 354
314 sh_tmu_enable(p); 355 sh_tmu_enable(ch);
315 356
316 clockevents_config(ced, p->rate); 357 clockevents_config(ced, ch->rate);
317 358
318 if (periodic) { 359 if (periodic) {
319 p->periodic = (p->rate + HZ/2) / HZ; 360 ch->periodic = (ch->rate + HZ/2) / HZ;
320 sh_tmu_set_next(p, p->periodic, 1); 361 sh_tmu_set_next(ch, ch->periodic, 1);
321 } 362 }
322} 363}
323 364
324static void sh_tmu_clock_event_mode(enum clock_event_mode mode, 365static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
325 struct clock_event_device *ced) 366 struct clock_event_device *ced)
326{ 367{
327 struct sh_tmu_priv *p = ced_to_sh_tmu(ced); 368 struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
328 int disabled = 0; 369 int disabled = 0;
329 370
330 /* deal with old setting first */ 371 /* deal with old setting first */
331 switch (ced->mode) { 372 switch (ced->mode) {
332 case CLOCK_EVT_MODE_PERIODIC: 373 case CLOCK_EVT_MODE_PERIODIC:
333 case CLOCK_EVT_MODE_ONESHOT: 374 case CLOCK_EVT_MODE_ONESHOT:
334 sh_tmu_disable(p); 375 sh_tmu_disable(ch);
335 disabled = 1; 376 disabled = 1;
336 break; 377 break;
337 default: 378 default:
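
Both registration paths in this driver defer the real frequency: the clocksource is registered at a dummy 1 Hz and corrected with __clocksource_updatefreq_hz() in ->enable(), and the clock event device is reconfigured with clockevents_config() each time it starts, because clk_get_rate() is only meaningful once the clock is running. Worked numbers for the periodic reload computed above, under an assumed clock:

/* Illustrative figures only: a 33 MHz peripheral clock and HZ = 100. */
unsigned long rate   = 33000000 / 4;              /* 8250000 Hz after TCR_TPSC_CLK4 */
unsigned long reload = (8250000 + 100 / 2) / 100; /* 82500 counts per 10 ms tick */
/* The + HZ/2 term rounds to the nearest count instead of truncating. */
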
@@ -340,16 +381,18 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
340 381
341 switch (mode) { 382 switch (mode) {
342 case CLOCK_EVT_MODE_PERIODIC: 383 case CLOCK_EVT_MODE_PERIODIC:
343 dev_info(&p->pdev->dev, "used for periodic clock events\n"); 384 dev_info(&ch->tmu->pdev->dev,
344 sh_tmu_clock_event_start(p, 1); 385 "ch%u: used for periodic clock events\n", ch->index);
386 sh_tmu_clock_event_start(ch, 1);
345 break; 387 break;
346 case CLOCK_EVT_MODE_ONESHOT: 388 case CLOCK_EVT_MODE_ONESHOT:
347 dev_info(&p->pdev->dev, "used for oneshot clock events\n"); 389 dev_info(&ch->tmu->pdev->dev,
348 sh_tmu_clock_event_start(p, 0); 390 "ch%u: used for oneshot clock events\n", ch->index);
391 sh_tmu_clock_event_start(ch, 0);
349 break; 392 break;
350 case CLOCK_EVT_MODE_UNUSED: 393 case CLOCK_EVT_MODE_UNUSED:
351 if (!disabled) 394 if (!disabled)
352 sh_tmu_disable(p); 395 sh_tmu_disable(ch);
353 break; 396 break;
354 case CLOCK_EVT_MODE_SHUTDOWN: 397 case CLOCK_EVT_MODE_SHUTDOWN:
355 default: 398 default:
@@ -360,147 +403,234 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
360static int sh_tmu_clock_event_next(unsigned long delta, 403static int sh_tmu_clock_event_next(unsigned long delta,
361 struct clock_event_device *ced) 404 struct clock_event_device *ced)
362{ 405{
363 struct sh_tmu_priv *p = ced_to_sh_tmu(ced); 406 struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
364 407
365 BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); 408 BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
366 409
367 /* program new delta value */ 410 /* program new delta value */
368 sh_tmu_set_next(p, delta, 0); 411 sh_tmu_set_next(ch, delta, 0);
369 return 0; 412 return 0;
370} 413}
371 414
372static void sh_tmu_clock_event_suspend(struct clock_event_device *ced) 415static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
373{ 416{
374 pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev); 417 pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
375} 418}
376 419
377static void sh_tmu_clock_event_resume(struct clock_event_device *ced) 420static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
378{ 421{
379 pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev); 422 pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
380} 423}
381 424
382static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, 425static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
383 char *name, unsigned long rating) 426 const char *name)
384{ 427{
385 struct clock_event_device *ced = &p->ced; 428 struct clock_event_device *ced = &ch->ced;
386 int ret; 429 int ret;
387 430
388 memset(ced, 0, sizeof(*ced));
389
390 ced->name = name; 431 ced->name = name;
391 ced->features = CLOCK_EVT_FEAT_PERIODIC; 432 ced->features = CLOCK_EVT_FEAT_PERIODIC;
392 ced->features |= CLOCK_EVT_FEAT_ONESHOT; 433 ced->features |= CLOCK_EVT_FEAT_ONESHOT;
393 ced->rating = rating; 434 ced->rating = 200;
394 ced->cpumask = cpumask_of(0); 435 ced->cpumask = cpumask_of(0);
395 ced->set_next_event = sh_tmu_clock_event_next; 436 ced->set_next_event = sh_tmu_clock_event_next;
396 ced->set_mode = sh_tmu_clock_event_mode; 437 ced->set_mode = sh_tmu_clock_event_mode;
397 ced->suspend = sh_tmu_clock_event_suspend; 438 ced->suspend = sh_tmu_clock_event_suspend;
398 ced->resume = sh_tmu_clock_event_resume; 439 ced->resume = sh_tmu_clock_event_resume;
399 440
400 dev_info(&p->pdev->dev, "used for clock events\n"); 441 dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
442 ch->index);
401 443
402 clockevents_config_and_register(ced, 1, 0x300, 0xffffffff); 444 clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
403 445
404 ret = setup_irq(p->irqaction.irq, &p->irqaction); 446 ret = request_irq(ch->irq, sh_tmu_interrupt,
447 IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
448 dev_name(&ch->tmu->pdev->dev), ch);
405 if (ret) { 449 if (ret) {
406 dev_err(&p->pdev->dev, "failed to request irq %d\n", 450 dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
407 p->irqaction.irq); 451 ch->index, ch->irq);
408 return; 452 return;
409 } 453 }
410} 454}
411 455
412static int sh_tmu_register(struct sh_tmu_priv *p, char *name, 456static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
413 unsigned long clockevent_rating, 457 bool clockevent, bool clocksource)
414 unsigned long clocksource_rating)
415{ 458{
416 if (clockevent_rating) 459 if (clockevent) {
417 sh_tmu_register_clockevent(p, name, clockevent_rating); 460 ch->tmu->has_clockevent = true;
418 else if (clocksource_rating) 461 sh_tmu_register_clockevent(ch, name);
419 sh_tmu_register_clocksource(p, name, clocksource_rating); 462 } else if (clocksource) {
463 ch->tmu->has_clocksource = true;
464 sh_tmu_register_clocksource(ch, name);
465 }
420 466
421 return 0; 467 return 0;
422} 468}
423 469
424static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) 470static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
471 bool clockevent, bool clocksource,
472 struct sh_tmu_device *tmu)
425{ 473{
426 struct sh_timer_config *cfg = pdev->dev.platform_data; 474 /* Skip unused channels. */
427 struct resource *res; 475 if (!clockevent && !clocksource)
428 int irq, ret; 476 return 0;
429 ret = -ENXIO;
430 477
431 memset(p, 0, sizeof(*p)); 478 ch->tmu = tmu;
432 p->pdev = pdev; 479
480 if (tmu->model == SH_TMU_LEGACY) {
481 struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
482
483 /*
484 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
485 * channel register blocks at base + 2 + 12 * index, while all
486 * other variants map them at base + 4 + 12 * index. We can
487 * compute the index by just dividing by 12, the 2 bytes or 4
488 * bytes offset being hidden by the integer division.
489 */
490 ch->index = cfg->channel_offset / 12;
491 ch->base = tmu->mapbase + cfg->channel_offset;
492 } else {
493 ch->index = index;
494
495 if (tmu->model == SH_TMU_SH3)
496 ch->base = tmu->mapbase + 4 + ch->index * 12;
497 else
498 ch->base = tmu->mapbase + 8 + ch->index * 12;
499 }
433 500
434 if (!cfg) { 501 ch->irq = platform_get_irq(tmu->pdev, index);
435 dev_err(&p->pdev->dev, "missing platform data\n"); 502 if (ch->irq < 0) {
436 goto err0; 503 dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
504 ch->index);
505 return ch->irq;
437 } 506 }
438 507
439 platform_set_drvdata(pdev, p); 508 ch->cs_enabled = false;
509 ch->enable_count = 0;
510
511 return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
512 clockevent, clocksource);
513}
514
515static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
516{
517 struct resource *res;
440 518
441 res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); 519 res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
442 if (!res) { 520 if (!res) {
443 dev_err(&p->pdev->dev, "failed to get I/O memory\n"); 521 dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
444 goto err0; 522 return -ENXIO;
523 }
524
525 tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
526 if (tmu->mapbase == NULL)
527 return -ENXIO;
528
529 /*
530 * In legacy platform device configuration (with one device per channel)
531 * the resource points to the channel base address.
532 */
533 if (tmu->model == SH_TMU_LEGACY) {
534 struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
535 tmu->mapbase -= cfg->channel_offset;
445 } 536 }
446 537
447 irq = platform_get_irq(p->pdev, 0); 538 return 0;
448 if (irq < 0) { 539}
449 dev_err(&p->pdev->dev, "failed to get irq\n"); 540
450 goto err0; 541static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
542{
543 if (tmu->model == SH_TMU_LEGACY) {
544 struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
545 tmu->mapbase += cfg->channel_offset;
451 } 546 }
452 547
453 /* map memory, let mapbase point to our channel */ 548 iounmap(tmu->mapbase);
454 p->mapbase = ioremap_nocache(res->start, resource_size(res)); 549}
455 if (p->mapbase == NULL) { 550
456 dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); 551static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
457 goto err0; 552{
553 struct sh_timer_config *cfg = pdev->dev.platform_data;
554 const struct platform_device_id *id = pdev->id_entry;
555 unsigned int i;
556 int ret;
557
558 if (!cfg) {
559 dev_err(&tmu->pdev->dev, "missing platform data\n");
560 return -ENXIO;
458 } 561 }
459 562
460 /* setup data for setup_irq() (too early for request_irq()) */ 563 tmu->pdev = pdev;
461 p->irqaction.name = dev_name(&p->pdev->dev); 564 tmu->model = id->driver_data;
462 p->irqaction.handler = sh_tmu_interrupt; 565
463 p->irqaction.dev_id = p; 566 /* Get hold of clock. */
464 p->irqaction.irq = irq; 567 tmu->clk = clk_get(&tmu->pdev->dev,
465 p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING; 568 tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
466 569 if (IS_ERR(tmu->clk)) {
467 /* get hold of clock */ 570 dev_err(&tmu->pdev->dev, "cannot get clock\n");
468 p->clk = clk_get(&p->pdev->dev, "tmu_fck"); 571 return PTR_ERR(tmu->clk);
469 if (IS_ERR(p->clk)) {
470 dev_err(&p->pdev->dev, "cannot get clock\n");
471 ret = PTR_ERR(p->clk);
472 goto err1;
473 } 572 }
474 573
475 ret = clk_prepare(p->clk); 574 ret = clk_prepare(tmu->clk);
476 if (ret < 0) 575 if (ret < 0)
477 goto err2; 576 goto err_clk_put;
478 577
479 p->cs_enabled = false; 578 /* Map the memory resource. */
480 p->enable_count = 0; 579 ret = sh_tmu_map_memory(tmu);
580 if (ret < 0) {
581 dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
582 goto err_clk_unprepare;
583 }
481 584
482 ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), 585 /* Allocate and setup the channels. */
483 cfg->clockevent_rating, 586 if (tmu->model == SH_TMU_LEGACY)
484 cfg->clocksource_rating); 587 tmu->num_channels = 1;
485 if (ret < 0) 588 else
486 goto err3; 589 tmu->num_channels = hweight8(cfg->channels_mask);
590
591 tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
592 GFP_KERNEL);
593 if (tmu->channels == NULL) {
594 ret = -ENOMEM;
595 goto err_unmap;
596 }
597
598 if (tmu->model == SH_TMU_LEGACY) {
599 ret = sh_tmu_channel_setup(&tmu->channels[0], 0,
600 cfg->clockevent_rating != 0,
601 cfg->clocksource_rating != 0, tmu);
602 if (ret < 0)
603 goto err_unmap;
604 } else {
605 /*
606 * Use the first channel as a clock event device and the second
607 * channel as a clock source.
608 */
609 for (i = 0; i < tmu->num_channels; ++i) {
610 ret = sh_tmu_channel_setup(&tmu->channels[i], i,
611 i == 0, i == 1, tmu);
612 if (ret < 0)
613 goto err_unmap;
614 }
615 }
616
617 platform_set_drvdata(pdev, tmu);
487 618
488 return 0; 619 return 0;
489 620
490 err3: 621err_unmap:
491 clk_unprepare(p->clk); 622 kfree(tmu->channels);
492 err2: 623 sh_tmu_unmap_memory(tmu);
493 clk_put(p->clk); 624err_clk_unprepare:
494 err1: 625 clk_unprepare(tmu->clk);
495 iounmap(p->mapbase); 626err_clk_put:
496 err0: 627 clk_put(tmu->clk);
497 return ret; 628 return ret;
498} 629}
499 630
500static int sh_tmu_probe(struct platform_device *pdev) 631static int sh_tmu_probe(struct platform_device *pdev)
501{ 632{
502 struct sh_tmu_priv *p = platform_get_drvdata(pdev); 633 struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
503 struct sh_timer_config *cfg = pdev->dev.platform_data;
504 int ret; 634 int ret;
505 635
506 if (!is_early_platform_device(pdev)) { 636 if (!is_early_platform_device(pdev)) {
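
The error handling at the bottom of sh_tmu_setup() follows the kernel's unwind-ladder idiom: each label releases exactly what was acquired before the failing step, in reverse order, so one return path covers every partial failure. A minimal sketch of the idiom, with hypothetical acquire/release helpers standing in for clk_prepare(), sh_tmu_map_memory() and friends:

/* Sketch only; acquire_*() and release_*() are hypothetical stand-ins. */
static int example_setup(void)
{
	int ret;

	ret = acquire_a();		/* e.g. clk_prepare() */
	if (ret < 0)
		return ret;		/* nothing to undo yet */

	ret = acquire_b();		/* e.g. sh_tmu_map_memory() */
	if (ret < 0)
		goto err_release_a;

	ret = acquire_c();		/* e.g. channel setup */
	if (ret < 0)
		goto err_release_b;

	return 0;

err_release_b:				/* undo steps in reverse order */
	release_b();
err_release_a:
	release_a();
	return ret;
}
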
@@ -508,20 +638,18 @@ static int sh_tmu_probe(struct platform_device *pdev)
508 pm_runtime_enable(&pdev->dev); 638 pm_runtime_enable(&pdev->dev);
509 } 639 }
510 640
511 if (p) { 641 if (tmu) {
512 dev_info(&pdev->dev, "kept as earlytimer\n"); 642 dev_info(&pdev->dev, "kept as earlytimer\n");
513 goto out; 643 goto out;
514 } 644 }
515 645
516 p = kmalloc(sizeof(*p), GFP_KERNEL); 646 tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
517 if (p == NULL) { 647 if (tmu == NULL)
518 dev_err(&pdev->dev, "failed to allocate driver data\n");
519 return -ENOMEM; 648 return -ENOMEM;
520 }
521 649
522 ret = sh_tmu_setup(p, pdev); 650 ret = sh_tmu_setup(tmu, pdev);
523 if (ret) { 651 if (ret) {
524 kfree(p); 652 kfree(tmu);
525 pm_runtime_idle(&pdev->dev); 653 pm_runtime_idle(&pdev->dev);
526 return ret; 654 return ret;
527 } 655 }
@@ -529,7 +657,7 @@ static int sh_tmu_probe(struct platform_device *pdev)
529 return 0; 657 return 0;
530 658
531 out: 659 out:
532 if (cfg->clockevent_rating || cfg->clocksource_rating) 660 if (tmu->has_clockevent || tmu->has_clocksource)
533 pm_runtime_irq_safe(&pdev->dev); 661 pm_runtime_irq_safe(&pdev->dev);
534 else 662 else
535 pm_runtime_idle(&pdev->dev); 663 pm_runtime_idle(&pdev->dev);
@@ -542,12 +670,21 @@ static int sh_tmu_remove(struct platform_device *pdev)
542 return -EBUSY; /* cannot unregister clockevent and clocksource */ 670 return -EBUSY; /* cannot unregister clockevent and clocksource */
543} 671}
544 672
673static const struct platform_device_id sh_tmu_id_table[] = {
674 { "sh_tmu", SH_TMU_LEGACY },
675 { "sh-tmu", SH_TMU },
676 { "sh-tmu-sh3", SH_TMU_SH3 },
677 { }
678};
679MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
680
545static struct platform_driver sh_tmu_device_driver = { 681static struct platform_driver sh_tmu_device_driver = {
546 .probe = sh_tmu_probe, 682 .probe = sh_tmu_probe,
547 .remove = sh_tmu_remove, 683 .remove = sh_tmu_remove,
548 .driver = { 684 .driver = {
549 .name = "sh_tmu", 685 .name = "sh_tmu",
550 } 686 },
687 .id_table = sh_tmu_id_table,
551}; 688};
552 689
553static int __init sh_tmu_init(void) 690static int __init sh_tmu_init(void)
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index 1a6205b7bed3..bba62f9deefb 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -272,4 +272,5 @@ static void __init efm32_timer_init(struct device_node *np)
272 } 272 }
273 } 273 }
274} 274}
275CLOCKSOURCE_OF_DECLARE(efm32, "efm32,timer", efm32_timer_init); 275CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
276CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
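
Registering the timer under both strings keeps device trees that still use the non-standard "efm32,timer" compatible booting, while new ones move to the vendor-prefixed form. The same idiom for a hypothetical driver (all names illustrative):

/* Old compatible kept as a deprecated alias; names are hypothetical. */
CLOCKSOURCE_OF_DECLARE(foo_legacy, "foo-timer", foo_timer_init);
CLOCKSOURCE_OF_DECLARE(foo, "acme,foo-timer", foo_timer_init);
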
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index deebcd6469fc..02268448dc85 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -16,6 +16,7 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/irq.h> 17#include <linux/irq.h>
18#include <linux/irqreturn.h> 18#include <linux/irqreturn.h>
19#include <linux/reset.h>
19#include <linux/sched_clock.h> 20#include <linux/sched_clock.h>
20#include <linux/of.h> 21#include <linux/of.h>
21#include <linux/of_address.h> 22#include <linux/of_address.h>
@@ -143,6 +144,7 @@ static u64 sun5i_timer_sched_read(void)
143 144
144static void __init sun5i_timer_init(struct device_node *node) 145static void __init sun5i_timer_init(struct device_node *node)
145{ 146{
147 struct reset_control *rstc;
146 unsigned long rate; 148 unsigned long rate;
147 struct clk *clk; 149 struct clk *clk;
148 int ret, irq; 150 int ret, irq;
@@ -162,6 +164,10 @@ static void __init sun5i_timer_init(struct device_node *node)
162 clk_prepare_enable(clk); 164 clk_prepare_enable(clk);
163 rate = clk_get_rate(clk); 165 rate = clk_get_rate(clk);
164 166
167 rstc = of_reset_control_get(node, NULL);
168 if (!IS_ERR(rstc))
169 reset_control_deassert(rstc);
170
165 writel(~0, timer_base + TIMER_INTVAL_LO_REG(1)); 171 writel(~0, timer_base + TIMER_INTVAL_LO_REG(1));
166 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, 172 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
167 timer_base + TIMER_CTL_REG(1)); 173 timer_base + TIMER_CTL_REG(1));
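
The reset handling added here is deliberately tolerant; the same three lines are restated below with the intent spelled out in comments, not as new code:

rstc = of_reset_control_get(node, NULL);	/* ERR_PTR when the node has no reset */
if (!IS_ERR(rstc))
	reset_control_deassert(rstc);		/* take the timer out of reset */
/* No else branch: a missing reset line is not an error, the reset is optional. */
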
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
index 4d9dcd138315..8e1e036d6d45 100644
--- a/include/linux/sh_timer.h
+++ b/include/linux/sh_timer.h
@@ -7,6 +7,7 @@ struct sh_timer_config {
7 int timer_bit; 7 int timer_bit;
8 unsigned long clockevent_rating; 8 unsigned long clockevent_rating;
9 unsigned long clocksource_rating; 9 unsigned long clocksource_rating;
10 unsigned int channels_mask;
10}; 11};
11 12
12#endif /* __SH_TIMER_H__ */ 13#endif /* __SH_TIMER_H__ */
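
The new channels_mask field lets one platform device describe a whole TMU block: sh_tmu_setup() sizes the channel array with hweight8(cfg->channels_mask), then uses channel 0 as the clock event device and channel 1 as the clock source. A sketch of board data using it, values illustrative:

/* Hypothetical board file fragment: a TMU block with channels 0..2. */
static struct sh_timer_config tmu0_platform_data = {
	.channels_mask = 7,		/* 0b111: hweight8(7) == 3 channels */
};

static struct platform_device tmu0_device = {
	.name	= "sh-tmu",		/* selects SH_TMU via the id table */
	.id	= 0,
	.dev	= {
		.platform_data = &tmu0_platform_data,
	},
};
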