author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-05 20:46:42 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-05 20:46:42 -0400
commit    e7fda6c4c3c1a7d6996dd75fd84670fa0b5d448f (patch)
tree      daa51c16462c318b890acf7f01fba5827275dd74 /drivers
parent    08d69a25714429850cf9ef71f22d8cdc9189d93f (diff)
parent    953dec21aed4038464fec02f96a2f1b8701a5bce (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer and time updates from Thomas Gleixner:
 "A rather large update of timers, timekeeping & co

   - Core timekeeping code is year-2038 safe now for 32bit machines.
     Now we just need to fix all in kernel users and the gazillion of
     user space interfaces which rely on timespec/timeval :)

   - Better cache layout for the timekeeping internal data structures.

   - Proper nanosecond based interfaces for in kernel users.

   - Tree wide cleanup of code which wants nanoseconds but does hoops
     and loops to convert back and forth from timespecs.  Some of it
     definitely belongs into the ugly code museum.

   - Consolidation of the timekeeping interface zoo.

   - A fast NMI safe accessor to clock monotonic for tracing.  This is
     a long standing request to support correlated user/kernel space
     traces.  With proper NTP frequency correction it's also suitable
     for correlation of traces across separate machines.

   - Checkpoint/restart support for timerfd.

   - A few NOHZ[_FULL] improvements in the [hr]timer code.

   - Code move from kernel to kernel/time of all time* related code.

   - New clocksource/event drivers from the ARM universe.  I'm really
     impressed that despite an architected timer in the newer chips SoC
     manufacturers insist on inventing new and differently broken SoC
     specific timers.

 [ Ed. "Impressed"? I don't think that word means what you think it means ]

   - Another round of code move from arch to drivers.  Looks like most
     of the legacy mess in ARM regarding timers is sorted out except
     for a few obnoxious strongholds.

   - The usual updates and fixlets all over the place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits)
  timekeeping: Fixup typo in update_vsyscall_old definition
  clocksource: document some basic timekeeping concepts
  timekeeping: Use cached ntp_tick_length when accumulating error
  timekeeping: Rework frequency adjustments to work better w/ nohz
  timekeeping: Minor fixup for timespec64->timespec assignment
  ftrace: Provide trace clocks monotonic
  timekeeping: Provide fast and NMI safe access to CLOCK_MONOTONIC
  seqcount: Add raw_write_seqcount_latch()
  seqcount: Provide raw_read_seqcount()
  timekeeping: Use tk_read_base as argument for timekeeping_get_ns()
  timekeeping: Create struct tk_read_base and use it in struct timekeeper
  timekeeping: Restructure the timekeeper some more
  clocksource: Get rid of cycle_last
  clocksource: Move cycle_last validation to core code
  clocksource: Make delta calculation a function
  wireless: ath9k: Get rid of timespec conversions
  drm: vmwgfx: Use nsec based interfaces
  drm: i915: Use nsec based interfaces
  timekeeping: Provide ktime_get_raw()
  hangcheck-timer: Use ktime_get_ns()
  ...
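[ Editor's note: the "fast NMI safe accessor to clock monotonic" mentioned
above is ktime_get_mono_fast_ns(), introduced by the "timekeeping: Provide
fast and NMI safe access to CLOCK_MONOTONIC" commit in the list. A minimal
sketch of how a tracer might stamp events with it; the calling context here
is hypothetical: ]

	#include <linux/timekeeping.h>

	/* Usable even from NMI context: the latch-based seqcount lets the
	 * reader always make progress, at worst returning a timestamp based
	 * on a slightly stale copy of the timekeeper. */
	static u64 trace_stamp_ns(void)
	{
		return ktime_get_mono_fast_ns();
	}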
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hangcheck-timer.c                    33
-rw-r--r--  drivers/clocksource/Kconfig                       14
-rw-r--r--  drivers/clocksource/Makefile                       3
-rw-r--r--  drivers/clocksource/clps711x-timer.c             131
-rw-r--r--  drivers/clocksource/exynos_mct.c                  63
-rw-r--r--  drivers/clocksource/mtk_timer.c                  261
-rw-r--r--  drivers/clocksource/pxa_timer.c                  227
-rw-r--r--  drivers/clocksource/sh_cmt.c                     233
-rw-r--r--  drivers/clocksource/sh_mtu2.c                    146
-rw-r--r--  drivers/clocksource/sh_tmu.c                     127
-rw-r--r--  drivers/clocksource/timer-marco.c                  3
-rw-r--r--  drivers/clocksource/timer-prima2.c                 3
-rw-r--r--  drivers/connector/cn_proc.c                       36
-rw-r--r--  drivers/firewire/core-cdev.c                       6
-rw-r--r--  drivers/gpu/drm/drm_irq.c                         16
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                    2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                   33
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c                   12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h                4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_marker.c            44
-rw-r--r--  drivers/hwmon/ibmaem.c                             6
-rw-r--r--  drivers/input/evdev.c                              7
-rw-r--r--  drivers/mfd/cros_ec_spi.c                          8
-rw-r--r--  drivers/misc/ioc4.c                                7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c     16
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c                7
-rw-r--r--  drivers/of/address.c                              36
27 files changed, 973 insertions, 511 deletions
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index f953c96efc86..ebc4c73d8ca4 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -49,7 +49,7 @@
 #include <asm/uaccess.h>
 #include <linux/sysrq.h>
 #include <linux/timer.h>
-#include <linux/time.h>
+#include <linux/hrtimer.h>
 
 #define VERSION_STR "0.9.1"
 
@@ -117,24 +117,7 @@ __setup("hcheck_reboot", hangcheck_parse_reboot);
 __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
 #endif /* not MODULE */
 
-#if defined(CONFIG_S390)
-# define HAVE_MONOTONIC
-# define TIMER_FREQ 1000000000ULL
-#else
-# define TIMER_FREQ 1000000000ULL
-#endif
-
-#ifdef HAVE_MONOTONIC
-extern unsigned long long monotonic_clock(void);
-#else
-static inline unsigned long long monotonic_clock(void)
-{
-	struct timespec ts;
-	getrawmonotonic(&ts);
-	return timespec_to_ns(&ts);
-}
-#endif /* HAVE_MONOTONIC */
-
+#define TIMER_FREQ 1000000000ULL
 
 /* Last time scheduled */
 static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
@@ -143,12 +126,11 @@ static void hangcheck_fire(unsigned long);
 
 static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0);
 
-
 static void hangcheck_fire(unsigned long data)
 {
 	unsigned long long cur_tsc, tsc_diff;
 
-	cur_tsc = monotonic_clock();
+	cur_tsc = ktime_get_ns();
 
 	if (cur_tsc > hangcheck_tsc)
 		tsc_diff = cur_tsc - hangcheck_tsc;
@@ -177,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
 			tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
 #endif
 	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
-	hangcheck_tsc = monotonic_clock();
+	hangcheck_tsc = ktime_get_ns();
 }
 
 
@@ -185,16 +167,11 @@ static int __init hangcheck_init(void)
 {
 	printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
 	       VERSION_STR, hangcheck_tick, hangcheck_margin);
-#if defined (HAVE_MONOTONIC)
-	printk("Hangcheck: Using monotonic_clock().\n");
-#else
-	printk("Hangcheck: Using getrawmonotonic().\n");
-#endif /* HAVE_MONOTONIC */
 	hangcheck_tsc_margin =
 		(unsigned long long)(hangcheck_margin + hangcheck_tick);
 	hangcheck_tsc_margin *= (unsigned long long)TIMER_FREQ;
 
-	hangcheck_tsc = monotonic_clock();
+	hangcheck_tsc = ktime_get_ns();
 	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
 
 	return 0;
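[ Editor's note: TIMER_FREQ is now unconditionally 1000000000ULL, i.e.
nanoseconds per second, so the margin check is plain nanosecond arithmetic.
Note the conversion also moves the driver from the raw monotonic clock
(getrawmonotonic) to the NTP-adjusted CLOCK_MONOTONIC via ktime_get_ns().
A worked example of the setup above, assuming the driver's documented
defaults of hcheck_tick=180 and hcheck_margin=60: ]

	hangcheck_tsc_margin = (60ULL + 180ULL) * TIMER_FREQ;	/* 240 s in ns */
	/* hangcheck_fire() later compares tsc_diff, the nanoseconds elapsed
	 * since the timer last ran, against this bound and logs (or reboots,
	 * if hangcheck_reboot is set) when it is exceeded. */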
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 065131cbfcc0..cfd6519df661 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,3 +1,5 @@
+menu "Clock Source drivers"
+
 config CLKSRC_OF
 	bool
 
@@ -125,6 +127,7 @@ config CLKSRC_METAG_GENERIC
 
 config CLKSRC_EXYNOS_MCT
 	def_bool y if ARCH_EXYNOS
+	depends on !ARM64
 	help
 	  Support for Multi Core Timer controller on Exynos SoCs.
 
@@ -149,6 +152,11 @@ config VF_PIT_TIMER
 config SYS_SUPPORTS_SH_CMT
 	bool
 
+config MTK_TIMER
+	select CLKSRC_OF
+	select CLKSRC_MMIO
+	bool
+
 config SYS_SUPPORTS_SH_MTU2
 	bool
 
@@ -173,7 +181,7 @@ config SH_TIMER_MTU2
 	default SYS_SUPPORTS_SH_MTU2
 	help
 	  This enables build of a clockevent driver for the Multi-Function
-	  Timer Pulse Unit 2 (TMU2) hardware available on SoCs from Renesas.
+	  Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
 	  This hardware comes with 16 bit-timer registers.
 
 config SH_TIMER_TMU
@@ -187,7 +195,7 @@ config SH_TIMER_TMU
 
 config EM_TIMER_STI
 	bool "Renesas STI timer driver" if COMPILE_TEST
-	depends on GENERIC_CLOCKEVENTS
+	depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
 	default SYS_SUPPORTS_EM_STI
 	help
 	  This enables build of a clocksource and clockevent driver for
@@ -207,3 +215,5 @@ config CLKSRC_VERSATILE
 	  counter available in the "System Registers" block of
 	  ARM Versatile, RealView and Versatile Express reference
 	  platforms.
+
+endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 800b1303c236..7fd9fd1dff42 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -16,9 +16,11 @@ obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
 obj-$(CONFIG_ARMADA_370_XP_TIMER)	+= time-armada-370-xp.o
 obj-$(CONFIG_ORION_TIMER)	+= time-orion.o
 obj-$(CONFIG_ARCH_BCM2835)	+= bcm2835_timer.o
+obj-$(CONFIG_ARCH_CLPS711X)	+= clps711x-timer.o
 obj-$(CONFIG_ARCH_MARCO)	+= timer-marco.o
 obj-$(CONFIG_ARCH_MOXART)	+= moxart_timer.o
 obj-$(CONFIG_ARCH_MXS)		+= mxs_timer.o
+obj-$(CONFIG_ARCH_PXA)		+= pxa_timer.o
 obj-$(CONFIG_ARCH_PRIMA2)	+= timer-prima2.o
 obj-$(CONFIG_ARCH_U300)		+= timer-u300.o
 obj-$(CONFIG_SUN4I_TIMER)	+= sun4i_timer.o
@@ -34,6 +36,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM)	+= samsung_pwm_timer.o
 obj-$(CONFIG_FSL_FTM_TIMER)	+= fsl_ftm_timer.o
 obj-$(CONFIG_VF_PIT_TIMER)	+= vf_pit_timer.o
 obj-$(CONFIG_CLKSRC_QCOM)	+= qcom-timer.o
+obj-$(CONFIG_MTK_TIMER)		+= mtk_timer.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)		+= arm_global_timer.o
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
new file mode 100644
index 000000000000..d83ec1f2fddc
--- /dev/null
+++ b/drivers/clocksource/clps711x-timer.c
@@ -0,0 +1,131 @@
+/*
+ * Cirrus Logic CLPS711X clocksource driver
+ *
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+enum {
+	CLPS711X_CLKSRC_CLOCKSOURCE,
+	CLPS711X_CLKSRC_CLOCKEVENT,
+};
+
+static void __iomem *tcd;
+
+static u64 notrace clps711x_sched_clock_read(void)
+{
+	return ~readw(tcd);
+}
+
+static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
+{
+	unsigned long rate;
+
+	if (!base)
+		return -ENOMEM;
+	if (IS_ERR(clock))
+		return PTR_ERR(clock);
+
+	rate = clk_get_rate(clock);
+
+	tcd = base;
+
+	clocksource_mmio_init(tcd, "clps711x-clocksource", rate, 300, 16,
+			      clocksource_mmio_readw_down);
+
+	sched_clock_register(clps711x_sched_clock_read, 16, rate);
+
+	return 0;
+}
+
+static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static void clps711x_clockevent_set_mode(enum clock_event_mode mode,
+					 struct clock_event_device *evt)
+{
+}
+
+static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
+					unsigned int irq)
+{
+	struct clock_event_device *clkevt;
+	unsigned long rate;
+
+	if (!irq)
+		return -EINVAL;
+	if (!base)
+		return -ENOMEM;
+	if (IS_ERR(clock))
+		return PTR_ERR(clock);
+
+	clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+	if (!clkevt)
+		return -ENOMEM;
+
+	rate = clk_get_rate(clock);
+
+	/* Set Timer prescaler */
+	writew(DIV_ROUND_CLOSEST(rate, HZ), base);
+
+	clkevt->name = "clps711x-clockevent";
+	clkevt->rating = 300;
+	clkevt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_C3STOP;
+	clkevt->set_mode = clps711x_clockevent_set_mode;
+	clkevt->cpumask = cpumask_of(0);
+	clockevents_config_and_register(clkevt, HZ, 0, 0);
+
+	return request_irq(irq, clps711x_timer_interrupt, IRQF_TIMER,
+			   "clps711x-timer", clkevt);
+}
+
+void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
+				 unsigned int irq)
+{
+	struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL);
+	struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL);
+
+	BUG_ON(_clps711x_clksrc_init(tc1, tc1_base));
+	BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
+}
+
+#ifdef CONFIG_CLKSRC_OF
+static void __init clps711x_timer_init(struct device_node *np)
+{
+	unsigned int irq = irq_of_parse_and_map(np, 0);
+	struct clk *clock = of_clk_get(np, 0);
+	void __iomem *base = of_iomap(np, 0);
+
+	switch (of_alias_get_id(np, "timer")) {
+	case CLPS711X_CLKSRC_CLOCKSOURCE:
+		BUG_ON(_clps711x_clksrc_init(clock, base));
+		break;
+	case CLPS711X_CLKSRC_CLOCKEVENT:
+		BUG_ON(_clps711x_clkevt_init(clock, base, irq));
+		break;
+	default:
+		break;
+	}
+}
+CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
+#endif
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index ab51bf20a3ed..9403061a2acc 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -94,7 +94,7 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
 	u32 mask;
 	u32 i;
 
-	__raw_writel(value, reg_base + offset);
+	writel_relaxed(value, reg_base + offset);
 
 	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
 		stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
@@ -144,8 +144,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
 
 	/* Wait maximum 1 ms until written values are applied */
 	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
-		if (__raw_readl(reg_base + stat_addr) & mask) {
-			__raw_writel(mask, reg_base + stat_addr);
+		if (readl_relaxed(reg_base + stat_addr) & mask) {
+			writel_relaxed(mask, reg_base + stat_addr);
 			return;
 		}
 
@@ -157,28 +157,51 @@ static void exynos4_mct_frc_start(void)
 {
 	u32 reg;
 
-	reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+	reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
 	reg |= MCT_G_TCON_START;
 	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
 }
 
-static cycle_t notrace _exynos4_frc_read(void)
+/**
+ * exynos4_read_count_64 - Read all 64-bits of the global counter
+ *
+ * This will read all 64-bits of the global counter taking care to make sure
+ * that the upper and lower half match. Note that reading the MCT can be quite
+ * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
+ * only) version when possible.
+ *
+ * Returns the number of cycles in the global counter.
+ */
+static u64 exynos4_read_count_64(void)
 {
 	unsigned int lo, hi;
-	u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+	u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
 
 	do {
 		hi = hi2;
-		lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
-		hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+		lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
+		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);
 
 	return ((cycle_t)hi << 32) | lo;
 }
 
+/**
+ * exynos4_read_count_32 - Read the lower 32-bits of the global counter
+ *
+ * This will read just the lower 32-bits of the global counter. This is marked
+ * as notrace so it can be used by the scheduler clock.
+ *
+ * Returns the number of cycles in the global counter (lower 32 bits).
+ */
+static u32 notrace exynos4_read_count_32(void)
+{
+	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
+}
+
 static cycle_t exynos4_frc_read(struct clocksource *cs)
 {
-	return _exynos4_frc_read();
+	return exynos4_read_count_32();
 }
 
 static void exynos4_frc_resume(struct clocksource *cs)
@@ -190,21 +213,23 @@ struct clocksource mct_frc = {
 	.name		= "mct-frc",
 	.rating		= 400,
 	.read		= exynos4_frc_read,
-	.mask		= CLOCKSOURCE_MASK(64),
+	.mask		= CLOCKSOURCE_MASK(32),
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 	.resume		= exynos4_frc_resume,
 };
 
 static u64 notrace exynos4_read_sched_clock(void)
 {
-	return _exynos4_frc_read();
+	return exynos4_read_count_32();
 }
 
 static struct delay_timer exynos4_delay_timer;
 
 static cycles_t exynos4_read_current_timer(void)
 {
-	return _exynos4_frc_read();
+	BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
+			 "cycles_t needs to move to 32-bit for ARM64 usage");
+	return exynos4_read_count_32();
 }
 
 static void __init exynos4_clocksource_init(void)
@@ -218,14 +243,14 @@ static void __init exynos4_clocksource_init(void)
 	if (clocksource_register_hz(&mct_frc, clk_rate))
 		panic("%s: can't register clocksource\n", mct_frc.name);
 
-	sched_clock_register(exynos4_read_sched_clock, 64, clk_rate);
+	sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
 }
 
 static void exynos4_mct_comp0_stop(void)
 {
 	unsigned int tcon;
 
-	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
 	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
 
 	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
@@ -238,14 +263,14 @@ static void exynos4_mct_comp0_start(enum clock_event_mode mode,
 	unsigned int tcon;
 	cycle_t comp_cycle;
 
-	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
 
 	if (mode == CLOCK_EVT_MODE_PERIODIC) {
 		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
 		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
 	}
 
-	comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
+	comp_cycle = exynos4_read_count_64() + cycles;
 	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
 	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);
 
@@ -327,7 +352,7 @@ static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
 	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
 	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;
 
-	tmp = __raw_readl(reg_base + offset);
+	tmp = readl_relaxed(reg_base + offset);
 	if (tmp & mask) {
 		tmp &= ~mask;
 		exynos4_mct_write(tmp, offset);
@@ -349,7 +374,7 @@ static void exynos4_mct_tick_start(unsigned long cycles,
 	/* enable MCT tick interrupt */
 	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);
 
-	tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
+	tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
 	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
 	       MCT_L_TCON_INTERVAL_MODE;
 	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
@@ -401,7 +426,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
 	exynos4_mct_tick_stop(mevt);
 
 	/* Clear the MCT tick interrupt */
-	if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
+	if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
 		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
 		return 1;
 	} else {
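[ Editor's note: exynos4_read_count_64() above uses the classic split-read
loop for a 64-bit counter exposed as two 32-bit registers: read high, read
low, re-read high, and retry if the high word changed in between. A generic
sketch of the pattern, with placeholder register pointers: ]

	static u64 read_split_counter(void __iomem *lo_reg, void __iomem *hi_reg)
	{
		u32 lo, hi, hi2 = readl_relaxed(hi_reg);

		do {
			hi = hi2;
			lo = readl_relaxed(lo_reg);
			hi2 = readl_relaxed(hi_reg);
			/* hi != hi2 means the low word wrapped between the two
			 * high reads; retry so hi and lo form a consistent pair. */
		} while (hi != hi2);

		return ((u64)hi << 32) | lo;
	}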
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
new file mode 100644
index 000000000000..32a3d25795d3
--- /dev/null
+++ b/drivers/clocksource/mtk_timer.c
@@ -0,0 +1,261 @@
+/*
+ * Mediatek SoCs General-Purpose Timer handling.
+ *
+ * Copyright (C) 2014 Matthias Brugger
+ *
+ * Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#define GPT_IRQ_EN_REG		0x00
+#define GPT_IRQ_ENABLE(val)	BIT((val) - 1)
+#define GPT_IRQ_ACK_REG		0x08
+#define GPT_IRQ_ACK(val)	BIT((val) - 1)
+
+#define TIMER_CTRL_REG(val)	(0x10 * (val))
+#define TIMER_CTRL_OP(val)	(((val) & 0x3) << 4)
+#define TIMER_CTRL_OP_ONESHOT	(0)
+#define TIMER_CTRL_OP_REPEAT	(1)
+#define TIMER_CTRL_OP_FREERUN	(3)
+#define TIMER_CTRL_CLEAR	(2)
+#define TIMER_CTRL_ENABLE	(1)
+#define TIMER_CTRL_DISABLE	(0)
+
+#define TIMER_CLK_REG(val)	(0x04 + (0x10 * (val)))
+#define TIMER_CLK_SRC(val)	(((val) & 0x1) << 4)
+#define TIMER_CLK_SRC_SYS13M	(0)
+#define TIMER_CLK_SRC_RTC32K	(1)
+#define TIMER_CLK_DIV1		(0x0)
+#define TIMER_CLK_DIV2		(0x1)
+
+#define TIMER_CNT_REG(val)	(0x08 + (0x10 * (val)))
+#define TIMER_CMP_REG(val)	(0x0C + (0x10 * (val)))
+
+#define GPT_CLK_EVT	1
+#define GPT_CLK_SRC	2
+
+struct mtk_clock_event_device {
+	void __iomem *gpt_base;
+	u32 ticks_per_jiffy;
+	struct clock_event_device dev;
+};
+
+static inline struct mtk_clock_event_device *to_mtk_clk(
+				struct clock_event_device *c)
+{
+	return container_of(c, struct mtk_clock_event_device, dev);
+}
+
+static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer)
+{
+	u32 val;
+
+	val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
+	writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base +
+	       TIMER_CTRL_REG(timer));
+}
+
+static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt,
+				  unsigned long delay, u8 timer)
+{
+	writel(delay, evt->gpt_base + TIMER_CMP_REG(timer));
+}
+
+static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
+				  bool periodic, u8 timer)
+{
+	u32 val;
+
+	/* Acknowledge interrupt */
+	writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG);
+
+	val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
+
+	/* Clear 2 bit timer operation mode field */
+	val &= ~TIMER_CTRL_OP(0x3);
+
+	if (periodic)
+		val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT);
+	else
+		val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT);
+
+	writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR,
+	       evt->gpt_base + TIMER_CTRL_REG(timer));
+}
+
+static void mtk_clkevt_mode(enum clock_event_mode mode,
+			    struct clock_event_device *clk)
+{
+	struct mtk_clock_event_device *evt = to_mtk_clk(clk);
+
+	mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
+		mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* Timer is enabled in set_next_event */
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	default:
+		/* No more interrupts will occur as source is disabled */
+		break;
+	}
+}
+
+static int mtk_clkevt_next_event(unsigned long event,
+				 struct clock_event_device *clk)
+{
+	struct mtk_clock_event_device *evt = to_mtk_clk(clk);
+
+	mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
+	mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT);
+	mtk_clkevt_time_start(evt, false, GPT_CLK_EVT);
+
+	return 0;
+}
+
+static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
+{
+	struct mtk_clock_event_device *evt = dev_id;
+
+	/* Acknowledge timer0 irq */
+	writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG);
+	evt->dev.event_handler(&evt->dev);
+
+	return IRQ_HANDLED;
+}
+
+static void mtk_timer_global_reset(struct mtk_clock_event_device *evt)
+{
+	/* Disable all interrupts */
+	writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
+	/* Acknowledge all interrupts */
+	writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
+}
+
+static void
+mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
+{
+	writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
+	       evt->gpt_base + TIMER_CTRL_REG(timer));
+
+	writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1,
+	       evt->gpt_base + TIMER_CLK_REG(timer));
+
+	writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer));
+
+	writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE,
+	       evt->gpt_base + TIMER_CTRL_REG(timer));
+}
+
+static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
+{
+	u32 val;
+
+	val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
+	writel(val | GPT_IRQ_ENABLE(timer),
+	       evt->gpt_base + GPT_IRQ_EN_REG);
+}
+
+static void __init mtk_timer_init(struct device_node *node)
+{
+	struct mtk_clock_event_device *evt;
+	struct resource res;
+	unsigned long rate = 0;
+	struct clk *clk;
+
+	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+	if (!evt) {
+		pr_warn("Can't allocate mtk clock event driver struct");
+		return;
+	}
+
+	evt->dev.name = "mtk_tick";
+	evt->dev.rating = 300;
+	evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	evt->dev.set_mode = mtk_clkevt_mode;
+	evt->dev.set_next_event = mtk_clkevt_next_event;
+	evt->dev.cpumask = cpu_possible_mask;
+
+	evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
+	if (IS_ERR(evt->gpt_base)) {
+		pr_warn("Can't get resource\n");
+		return;
+	}
+
+	evt->dev.irq = irq_of_parse_and_map(node, 0);
+	if (evt->dev.irq <= 0) {
+		pr_warn("Can't parse IRQ");
+		goto err_mem;
+	}
+
+	clk = of_clk_get(node, 0);
+	if (IS_ERR(clk)) {
+		pr_warn("Can't get timer clock");
+		goto err_irq;
+	}
+
+	if (clk_prepare_enable(clk)) {
+		pr_warn("Can't prepare clock");
+		goto err_clk_put;
+	}
+	rate = clk_get_rate(clk);
+
+	if (request_irq(evt->dev.irq, mtk_timer_interrupt,
+			IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
+		pr_warn("failed to setup irq %d\n", evt->dev.irq);
+		goto err_clk_disable;
+	}
+
+	evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+
+	mtk_timer_global_reset(evt);
+
+	/* Configure clock source */
+	mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
+	clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
+			node->name, rate, 300, 32, clocksource_mmio_readl_up);
+
+	/* Configure clock event */
+	mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
+	mtk_timer_enable_irq(evt, GPT_CLK_EVT);
+
+	clockevents_config_and_register(&evt->dev, rate, 0x3,
+					0xffffffff);
+	return;
+
+err_clk_disable:
+	clk_disable_unprepare(clk);
+err_clk_put:
+	clk_put(clk);
+err_irq:
+	irq_dispose_mapping(evt->dev.irq);
+err_mem:
+	iounmap(evt->gpt_base);
+	of_address_to_resource(node, 0, &res);
+	release_mem_region(res.start, resource_size(&res));
+}
+CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
new file mode 100644
index 000000000000..941f3f344e08
--- /dev/null
+++ b/drivers/clocksource/pxa_timer.c
@@ -0,0 +1,227 @@
+/*
+ * arch/arm/mach-pxa/time.c
+ *
+ * PXA clocksource, clockevents, and OST interrupt handlers.
+ * Copyright (c) 2007 by Bill Gatliff <bgat@billgatliff.com>.
+ *
+ * Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001
+ * by MontaVista Software, Inc.  (Nico, your code rocks!)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#include <asm/div64.h>
+
+#define OSMR0		0x00	/* OS Timer 0 Match Register */
+#define OSMR1		0x04	/* OS Timer 1 Match Register */
+#define OSMR2		0x08	/* OS Timer 2 Match Register */
+#define OSMR3		0x0C	/* OS Timer 3 Match Register */
+
+#define OSCR		0x10	/* OS Timer Counter Register */
+#define OSSR		0x14	/* OS Timer Status Register */
+#define OWER		0x18	/* OS Timer Watchdog Enable Register */
+#define OIER		0x1C	/* OS Timer Interrupt Enable Register */
+
+#define OSSR_M3		(1 << 3)	/* Match status channel 3 */
+#define OSSR_M2		(1 << 2)	/* Match status channel 2 */
+#define OSSR_M1		(1 << 1)	/* Match status channel 1 */
+#define OSSR_M0		(1 << 0)	/* Match status channel 0 */
+
+#define OIER_E0		(1 << 0)	/* Interrupt enable channel 0 */
+
+/*
+ * This is PXA's sched_clock implementation. This has a resolution
+ * of at least 308 ns and a maximum value of 208 days.
+ *
+ * The return value is guaranteed to be monotonic in that range as
+ * long as there is always less than 582 seconds between successive
+ * calls to sched_clock() which should always be the case in practice.
+ */
+
+#define timer_readl(reg)	readl_relaxed(timer_base + (reg))
+#define timer_writel(val, reg)	writel_relaxed((val), timer_base + (reg))
+
+static void __iomem *timer_base;
+
+static u64 notrace pxa_read_sched_clock(void)
+{
+	return timer_readl(OSCR);
+}
+
+
+#define MIN_OSCR_DELTA 16
+
+static irqreturn_t
+pxa_ost0_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *c = dev_id;
+
+	/* Disarm the compare/match, signal the event. */
+	timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+	timer_writel(OSSR_M0, OSSR);
+	c->event_handler(c);
+
+	return IRQ_HANDLED;
+}
+
+static int
+pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
+{
+	unsigned long next, oscr;
+
+	timer_writel(timer_readl(OIER) | OIER_E0, OIER);
+	next = timer_readl(OSCR) + delta;
+	timer_writel(next, OSMR0);
+	oscr = timer_readl(OSCR);
+
+	return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
+}
+
+static void
+pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_ONESHOT:
+		timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+		timer_writel(OSSR_M0, OSSR);
+		break;
+
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		/* initializing, released, or preparing for suspend */
+		timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+		timer_writel(OSSR_M0, OSSR);
+		break;
+
+	case CLOCK_EVT_MODE_RESUME:
+	case CLOCK_EVT_MODE_PERIODIC:
+		break;
+	}
+}
+
+#ifdef CONFIG_PM
+static unsigned long osmr[4], oier, oscr;
+
+static void pxa_timer_suspend(struct clock_event_device *cedev)
+{
+	osmr[0] = timer_readl(OSMR0);
+	osmr[1] = timer_readl(OSMR1);
+	osmr[2] = timer_readl(OSMR2);
+	osmr[3] = timer_readl(OSMR3);
+	oier = timer_readl(OIER);
+	oscr = timer_readl(OSCR);
+}
+
+static void pxa_timer_resume(struct clock_event_device *cedev)
+{
+	/*
+	 * Ensure that we have at least MIN_OSCR_DELTA between match
+	 * register 0 and the OSCR, to guarantee that we will receive
+	 * the one-shot timer interrupt.  We adjust OSMR0 in preference
+	 * to OSCR to guarantee that OSCR is monotonically incrementing.
+	 */
+	if (osmr[0] - oscr < MIN_OSCR_DELTA)
+		osmr[0] += MIN_OSCR_DELTA;
+
+	timer_writel(osmr[0], OSMR0);
+	timer_writel(osmr[1], OSMR1);
+	timer_writel(osmr[2], OSMR2);
+	timer_writel(osmr[3], OSMR3);
+	timer_writel(oier, OIER);
+	timer_writel(oscr, OSCR);
+}
+#else
+#define pxa_timer_suspend NULL
+#define pxa_timer_resume NULL
+#endif
+
+static struct clock_event_device ckevt_pxa_osmr0 = {
+	.name		= "osmr0",
+	.features	= CLOCK_EVT_FEAT_ONESHOT,
+	.rating		= 200,
+	.set_next_event	= pxa_osmr0_set_next_event,
+	.set_mode	= pxa_osmr0_set_mode,
+	.suspend	= pxa_timer_suspend,
+	.resume		= pxa_timer_resume,
+};
+
+static struct irqaction pxa_ost0_irq = {
+	.name		= "ost0",
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
+	.handler	= pxa_ost0_interrupt,
+	.dev_id		= &ckevt_pxa_osmr0,
+};
+
+static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+{
+	timer_writel(0, OIER);
+	timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
+
+	sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate);
+
+	ckevt_pxa_osmr0.cpumask = cpumask_of(0);
+
+	setup_irq(irq, &pxa_ost0_irq);
+
+	clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
+			      32, clocksource_mmio_readl_up);
+	clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
+					MIN_OSCR_DELTA * 2, 0x7fffffff);
+}
+
+static void __init pxa_timer_dt_init(struct device_node *np)
+{
+	struct clk *clk;
+	int irq;
+
+	/* timer registers are shared with watchdog timer */
+	timer_base = of_iomap(np, 0);
+	if (!timer_base)
+		panic("%s: unable to map resource\n", np->name);
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		pr_crit("%s: unable to get clk\n", np->name);
+		return;
+	}
+	clk_prepare_enable(clk);
+
+	/* we are only interested in OS-timer0 irq */
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq <= 0) {
+		pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
+		return;
+	}
+
+	pxa_timer_common_init(irq, clk_get_rate(clk));
+}
+CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
+
+/*
+ * Legacy timer init for non device-tree boards.
+ */
+void __init pxa_timer_nodt_init(int irq, void __iomem *base,
+	unsigned long clock_tick_rate)
+{
+	struct clk *clk;
+
+	timer_base = base;
+	clk = clk_get(NULL, "OSTIMER0");
+	if (clk && !IS_ERR(clk))
+		clk_prepare_enable(clk);
+	else
+		pr_crit("%s: unable to get clk\n", __func__);
+
+	pxa_timer_common_init(irq, clock_tick_rate);
+}
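[ Editor's note: pxa_osmr0_set_next_event() above shows the standard guard
for match-register timers: after programming the match value, re-read the
free-running counter and report -ETIME if the deadline may already have
passed, so the clockevents core retries with a larger delta. A distilled
sketch, where read_counter() and write_match() are hypothetical stand-ins
for timer_readl(OSCR) and timer_writel(next, OSMR0): ]

	static int set_next_event_sketch(unsigned long delta)
	{
		unsigned long next, now;

		next = read_counter() + delta;	/* desired match value */
		write_match(next);
		now = read_counter();

		/* Signed, wraparound-safe compare: if fewer than
		 * MIN_OSCR_DELTA ticks remain, the match may already have
		 * been missed, so ask the core to try again. */
		return (signed)(next - now) <= MIN_OSCR_DELTA ? -ETIME : 0;
	}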
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index dfa780396b91..2bd13b53b727 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -24,6 +24,7 @@
 #include <linux/ioport.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
@@ -114,14 +115,15 @@ struct sh_cmt_device {
 	struct platform_device *pdev;
 
 	const struct sh_cmt_info *info;
-	bool legacy;
 
-	void __iomem *mapbase_ch;
 	void __iomem *mapbase;
 	struct clk *clk;
 
+	raw_spinlock_t lock; /* Protect the shared start/stop register */
+
 	struct sh_cmt_channel *channels;
 	unsigned int num_channels;
+	unsigned int hw_channels;
 
 	bool has_clockevent;
 	bool has_clocksource;
@@ -301,14 +303,12 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
 	return v2;
 }
 
-static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
-
 static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
 {
 	unsigned long flags, value;
 
 	/* start stop register shared by multiple timer channels */
-	raw_spin_lock_irqsave(&sh_cmt_lock, flags);
+	raw_spin_lock_irqsave(&ch->cmt->lock, flags);
 	value = sh_cmt_read_cmstr(ch);
 
 	if (start)
@@ -317,7 +317,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
 		value &= ~(1 << ch->timer_bit);
 
 	sh_cmt_write_cmstr(ch, value);
-	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
+	raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
 }
 
 static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)
@@ -792,7 +792,7 @@ static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
 	int irq;
 	int ret;
 
-	irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index);
+	irq = platform_get_irq(ch->cmt->pdev, ch->index);
 	if (irq < 0) {
 		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
 			ch->index);
@@ -863,33 +863,26 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
 	 * Compute the address of the channel control register block. For the
 	 * timers with a per-channel start/stop register, compute its address
 	 * as well.
-	 *
-	 * For legacy configuration the address has been mapped explicitly.
 	 */
-	if (cmt->legacy) {
-		ch->ioctrl = cmt->mapbase_ch;
-	} else {
-		switch (cmt->info->model) {
-		case SH_CMT_16BIT:
-			ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
-			break;
-		case SH_CMT_32BIT:
-		case SH_CMT_48BIT:
-			ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
-			break;
-		case SH_CMT_32BIT_FAST:
-			/*
-			 * The 32-bit "fast" timer has a single channel at hwidx
-			 * 5 but is located at offset 0x40 instead of 0x60 for
-			 * some reason.
-			 */
-			ch->ioctrl = cmt->mapbase + 0x40;
-			break;
-		case SH_CMT_48BIT_GEN2:
-			ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
-			ch->ioctrl = ch->iostart + 0x10;
-			break;
-		}
+	switch (cmt->info->model) {
+	case SH_CMT_16BIT:
+		ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
+		break;
+	case SH_CMT_32BIT:
+	case SH_CMT_48BIT:
+		ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
+		break;
+	case SH_CMT_32BIT_FAST:
+		/*
+		 * The 32-bit "fast" timer has a single channel at hwidx 5 but
+		 * is located at offset 0x40 instead of 0x60 for some reason.
+		 */
+		ch->ioctrl = cmt->mapbase + 0x40;
+		break;
+	case SH_CMT_48BIT_GEN2:
+		ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
+		ch->ioctrl = ch->iostart + 0x10;
+		break;
 	}
 
 	if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
@@ -900,12 +893,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
 	ch->match_value = ch->max_match_value;
 	raw_spin_lock_init(&ch->lock);
 
-	if (cmt->legacy) {
-		ch->timer_bit = ch->hwidx;
-	} else {
-		ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2
-			      ? 0 : ch->hwidx;
-	}
+	ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx;
 
 	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
 			      clockevent, clocksource);
@@ -938,75 +926,65 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
 	return 0;
 }
 
-static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt)
-{
-	struct sh_timer_config *cfg = cmt->pdev->dev.platform_data;
-	struct resource *res, *res2;
-
-	/* map memory, let mapbase_ch point to our channel */
-	res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
-		return -ENXIO;
-	}
-
-	cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res));
-	if (cmt->mapbase_ch == NULL) {
-		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
-		return -ENXIO;
-	}
-
-	/* optional resource for the shared timer start/stop register */
-	res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1);
-
-	/* map second resource for CMSTR */
-	cmt->mapbase = ioremap_nocache(res2 ? res2->start :
-				       res->start - cfg->channel_offset,
-				       res2 ? resource_size(res2) : 2);
-	if (cmt->mapbase == NULL) {
-		dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n");
-		iounmap(cmt->mapbase_ch);
-		return -ENXIO;
-	}
-
-	/* identify the model based on the resources */
-	if (resource_size(res) == 6)
-		cmt->info = &sh_cmt_info[SH_CMT_16BIT];
-	else if (res2 && (resource_size(res2) == 4))
-		cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2];
-	else
-		cmt->info = &sh_cmt_info[SH_CMT_32BIT];
-
-	return 0;
-}
+static const struct platform_device_id sh_cmt_id_table[] = {
+	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
+	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
+	{ "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
+	{ "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
+	{ "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
 
-static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt)
-{
-	iounmap(cmt->mapbase);
-	if (cmt->mapbase_ch)
-		iounmap(cmt->mapbase_ch);
-}
+static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
+	{ .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] },
+	{ .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] },
+	{ .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] },
+	{ .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
+
+static int sh_cmt_parse_dt(struct sh_cmt_device *cmt)
+{
+	struct device_node *np = cmt->pdev->dev.of_node;
+
+	return of_property_read_u32(np, "renesas,channels-mask",
+				    &cmt->hw_channels);
+}
 
 static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
 {
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
-	const struct platform_device_id *id = pdev->id_entry;
-	unsigned int hw_channels;
+	unsigned int mask;
+	unsigned int i;
 	int ret;
 
 	memset(cmt, 0, sizeof(*cmt));
 	cmt->pdev = pdev;
+	raw_spin_lock_init(&cmt->lock);
+
+	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+		const struct of_device_id *id;
+
+		id = of_match_node(sh_cmt_of_table, pdev->dev.of_node);
+		cmt->info = id->data;
 
-	if (!cfg) {
+		ret = sh_cmt_parse_dt(cmt);
+		if (ret < 0)
+			return ret;
+	} else if (pdev->dev.platform_data) {
+		struct sh_timer_config *cfg = pdev->dev.platform_data;
+		const struct platform_device_id *id = pdev->id_entry;
+
+		cmt->info = (const struct sh_cmt_info *)id->driver_data;
+		cmt->hw_channels = cfg->channels_mask;
+	} else {
 		dev_err(&cmt->pdev->dev, "missing platform data\n");
 		return -ENXIO;
 	}
 
-	cmt->info = (const struct sh_cmt_info *)id->driver_data;
-	cmt->legacy = cmt->info ? false : true;
-
 	/* Get hold of clock. */
-	cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck");
+	cmt->clk = clk_get(&cmt->pdev->dev, "fck");
 	if (IS_ERR(cmt->clk)) {
 		dev_err(&cmt->pdev->dev, "cannot get clock\n");
 		return PTR_ERR(cmt->clk);
@@ -1016,28 +994,13 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
 	if (ret < 0)
 		goto err_clk_put;
 
-	/*
-	 * Map the memory resource(s). We need to support both the legacy
-	 * platform device configuration (with one device per channel) and the
-	 * new version (with multiple channels per device).
-	 */
-	if (cmt->legacy)
-		ret = sh_cmt_map_memory_legacy(cmt);
-	else
-		ret = sh_cmt_map_memory(cmt);
-
+	/* Map the memory resource(s). */
+	ret = sh_cmt_map_memory(cmt);
 	if (ret < 0)
 		goto err_clk_unprepare;
 
 	/* Allocate and setup the channels. */
-	if (cmt->legacy) {
-		cmt->num_channels = 1;
-		hw_channels = 0;
-	} else {
-		cmt->num_channels = hweight8(cfg->channels_mask);
-		hw_channels = cfg->channels_mask;
-	}
-
+	cmt->num_channels = hweight8(cmt->hw_channels);
 	cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels),
 				GFP_KERNEL);
 	if (cmt->channels == NULL) {
@@ -1045,35 +1008,21 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
 		goto err_unmap;
 	}
 
-	if (cmt->legacy) {
-		ret = sh_cmt_setup_channel(&cmt->channels[0],
-					   cfg->timer_bit, cfg->timer_bit,
-					   cfg->clockevent_rating != 0,
-					   cfg->clocksource_rating != 0, cmt);
+	/*
+	 * Use the first channel as a clock event device and the second channel
+	 * as a clock source. If only one channel is available use it for both.
+	 */
+	for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
+		unsigned int hwidx = ffs(mask) - 1;
+		bool clocksource = i == 1 || cmt->num_channels == 1;
+		bool clockevent = i == 0;
+
+		ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
+					   clockevent, clocksource, cmt);
 		if (ret < 0)
 			goto err_unmap;
-	} else {
-		unsigned int mask = hw_channels;
-		unsigned int i;
 
-		/*
-		 * Use the first channel as a clock event device and the second
-		 * channel as a clock source. If only one channel is available
-		 * use it for both.
-		 */
-		for (i = 0; i < cmt->num_channels; ++i) {
-			unsigned int hwidx = ffs(mask) - 1;
-			bool clocksource = i == 1 || cmt->num_channels == 1;
-			bool clockevent = i == 0;
-
-			ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
-						   clockevent, clocksource,
-						   cmt);
-			if (ret < 0)
-				goto err_unmap;
-
-			mask &= ~(1 << hwidx);
-		}
+		mask &= ~(1 << hwidx);
 	}
 
 	platform_set_drvdata(pdev, cmt);
@@ -1082,7 +1031,7 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
 
 err_unmap:
 	kfree(cmt->channels);
-	sh_cmt_unmap_memory(cmt);
+	iounmap(cmt->mapbase);
 err_clk_unprepare:
 	clk_unprepare(cmt->clk);
 err_clk_put:
@@ -1132,22 +1081,12 @@ static int sh_cmt_remove(struct platform_device *pdev)
 	return -EBUSY; /* cannot unregister clockevent and clocksource */
 }
 
-static const struct platform_device_id sh_cmt_id_table[] = {
-	{ "sh_cmt", 0 },
-	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
-	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
-	{ "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
-	{ "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
-	{ "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
-	{ }
-};
-MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
-
 static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
 	.remove		= sh_cmt_remove,
 	.driver		= {
 		.name	= "sh_cmt",
+		.of_match_table = of_match_ptr(sh_cmt_of_table),
 	},
 	.id_table	= sh_cmt_id_table,
 };
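[ Editor's note: the DT-enabled setup above walks the "renesas,channels-mask"
bitmask with ffs(), peeling off the lowest set bit each iteration so channel
i maps to the i-th set hardware channel. A standalone illustration of the
idiom, where setup_channel() is a hypothetical stand-in for
sh_cmt_setup_channel(): ]

	unsigned int mask = 0x60;	/* e.g. hardware channels 5 and 6 */

	while (mask) {
		unsigned int hwidx = ffs(mask) - 1;	/* lowest set bit: 5, then 6 */

		setup_channel(hwidx);
		mask &= ~(1 << hwidx);
	}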
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 188d4e092efc..3d88698cf2b8 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -23,6 +23,7 @@
 #include <linux/ioport.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
@@ -37,7 +38,6 @@ struct sh_mtu2_channel {
 	unsigned int index;
 
 	void __iomem *base;
-	int irq;
 
 	struct clock_event_device ced;
 };
@@ -48,15 +48,14 @@ struct sh_mtu2_device {
 	void __iomem *mapbase;
 	struct clk *clk;
 
+	raw_spinlock_t lock; /* Protect the shared registers */
+
 	struct sh_mtu2_channel *channels;
 	unsigned int num_channels;
 
-	bool legacy;
 	bool has_clockevent;
 };
 
-static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
-
 #define TSTR -1 /* shared register */
 #define TCR 0 /* channel register */
 #define TMDR 1 /* channel register */
@@ -162,12 +161,8 @@ static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
 {
 	unsigned long offs;
 
-	if (reg_nr == TSTR) {
-		if (ch->mtu->legacy)
-			return ioread8(ch->mtu->mapbase);
-		else
-			return ioread8(ch->mtu->mapbase + 0x280);
-	}
+	if (reg_nr == TSTR)
+		return ioread8(ch->mtu->mapbase + 0x280);
 
 	offs = mtu2_reg_offs[reg_nr];
 
@@ -182,12 +177,8 @@ static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
 {
 	unsigned long offs;
 
-	if (reg_nr == TSTR) {
-		if (ch->mtu->legacy)
-			return iowrite8(value, ch->mtu->mapbase);
-		else
-			return iowrite8(value, ch->mtu->mapbase + 0x280);
-	}
+	if (reg_nr == TSTR)
+		return iowrite8(value, ch->mtu->mapbase + 0x280);
 
 	offs = mtu2_reg_offs[reg_nr];
 
@@ -202,7 +193,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
 	unsigned long flags, value;
 
 	/* start stop register shared by multiple timer channels */
-	raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
+	raw_spin_lock_irqsave(&ch->mtu->lock, flags);
 	value = sh_mtu2_read(ch, TSTR);
 
 	if (start)
@@ -211,7 +202,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
 		value &= ~(1 << ch->index);
 
 	sh_mtu2_write(ch, TSTR, value);
-	raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
+	raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);
 }
 
 static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
@@ -331,7 +322,6 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
 					const char *name)
 {
 	struct clock_event_device *ced = &ch->ced;
-	int ret;
 
 	ced->name = name;
 	ced->features = CLOCK_EVT_FEAT_PERIODIC;
@@ -344,24 +334,12 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
 	dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
 		 ch->index);
 	clockevents_register_device(ced);
-
-	ret = request_irq(ch->irq, sh_mtu2_interrupt,
-			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
-			  dev_name(&ch->mtu->pdev->dev), ch);
-	if (ret) {
-		dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
-			ch->index, ch->irq);
-		return;
-	}
 }
 
-static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name,
-			    bool clockevent)
+static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
 {
-	if (clockevent) {
-		ch->mtu->has_clockevent = true;
-		sh_mtu2_register_clockevent(ch, name);
-	}
+	ch->mtu->has_clockevent = true;
+	sh_mtu2_register_clockevent(ch, name);
 
 	return 0;
 }
@@ -372,40 +350,32 @@ static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
 	static const unsigned int channel_offsets[] = {
 		0x300, 0x380, 0x000,
 	};
-	bool clockevent;
+	char name[6];
+	int irq;
+	int ret;
 
 	ch->mtu = mtu;
 
-	if (mtu->legacy) {
-		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
-
-		clockevent = cfg->clockevent_rating != 0;
-
-		ch->irq = platform_get_irq(mtu->pdev, 0);
-		ch->base = mtu->mapbase - cfg->channel_offset;
-		ch->index = cfg->timer_bit;
-	} else {
-		char name[6];
-
-		clockevent = true;
-
-		sprintf(name, "tgi%ua", index);
-		ch->irq = platform_get_irq_byname(mtu->pdev, name);
-		ch->base = mtu->mapbase + channel_offsets[index];
-		ch->index = index;
-	}
-
-	if (ch->irq < 0) {
+	sprintf(name, "tgi%ua", index);
+	irq = platform_get_irq_byname(mtu->pdev, name);
+	if (irq < 0) {
 		/* Skip channels with no declared interrupt. */
-		if (!mtu->legacy)
-			return 0;
-
-		dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n",
-			ch->index);
-		return ch->irq;
+		return 0;
+	}
+
+	ret = request_irq(irq, sh_mtu2_interrupt,
+			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+			  dev_name(&ch->mtu->pdev->dev), ch);
+	if (ret) {
+		dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
			index, irq);
+		return ret;
 	}
 
-	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent);
+	ch->base = mtu->mapbase + channel_offsets[index];
+	ch->index = index;
+
+	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
 }
 
 static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
@@ -422,46 +392,21 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
 	if (mtu->mapbase == NULL)
 		return -ENXIO;
 
-	/*
-	 * In legacy platform device configuration (with one device per channel)
-	 * the resource points to the channel base address.
-	 */
-	if (mtu->legacy) {
-		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
-		mtu->mapbase += cfg->channel_offset;
-	}
-
 	return 0;
 }
 
-static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu)
-{
-	if (mtu->legacy) {
-		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
-		mtu->mapbase -= cfg->channel_offset;
-	}
-
-	iounmap(mtu->mapbase);
-}
-
 static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
 			 struct platform_device *pdev)
 {
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
-	const struct platform_device_id *id = pdev->id_entry;
 	unsigned int i;
 	int ret;
 
 	mtu->pdev = pdev;
-	mtu->legacy = id->driver_data;
 
-	if (mtu->legacy && !cfg) {
-		dev_err(&mtu->pdev->dev, "missing platform data\n");
-		return -ENXIO;
-	}
+	raw_spin_lock_init(&mtu->lock);
 
 	/* Get hold of clock. */
-	mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck");
+	mtu->clk = clk_get(&mtu->pdev->dev, "fck");
 	if (IS_ERR(mtu->clk)) {
 		dev_err(&mtu->pdev->dev, "cannot get clock\n");
 		return PTR_ERR(mtu->clk);
@@ -479,10 +424,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
 	}
 
 	/* Allocate and setup the channels. */
-	if (mtu->legacy)
-		mtu->num_channels = 1;
-	else
-		mtu->num_channels = 3;
+	mtu->num_channels = 3;
 
 	mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
 				GFP_KERNEL);
@@ -491,16 +433,10 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
 		goto err_unmap;
 	}
 
-	if (mtu->legacy) {
-		ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu);
-		if (ret < 0)
-			goto err_unmap;
-	} else {
-		for (i = 0; i < mtu->num_channels; ++i) {
-			ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
-			if (ret < 0)
-				goto err_unmap;
-		}
+	for (i = 0; i < mtu->num_channels; ++i) {
+		ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
+		if (ret < 0)
+			goto err_unmap;
 	}
 
 	platform_set_drvdata(pdev, mtu);
@@ -509,7 +445,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
 
 err_unmap:
 	kfree(mtu->channels);
-	sh_mtu2_unmap_memory(mtu);
+	iounmap(mtu->mapbase);
 err_clk_unprepare:
 	clk_unprepare(mtu->clk);
 err_clk_put:
@@ -560,17 +496,23 @@ static int sh_mtu2_remove(struct platform_device *pdev)
 }
 
 static const struct platform_device_id sh_mtu2_id_table[] = {
-	{ "sh_mtu2", 1 },
 	{ "sh-mtu2", 0 },
 	{ },
 };
 MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);
 
+static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = {
+	{ .compatible = "renesas,mtu2" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sh_mtu2_of_table);
+
 static struct platform_driver sh_mtu2_device_driver = {
 	.probe = sh_mtu2_probe,
 	.remove = sh_mtu2_remove,
 	.driver = {
 		.name = "sh_mtu2",
+		.of_match_table = of_match_ptr(sh_mtu2_of_table),
 	},
 	.id_table = sh_mtu2_id_table,
 };
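
The sh_mtu2 conversion above and the sh_tmu one below share a locking change: the TSTR start/stop register is common to every channel of one timer instance, so the lock serializing it moves from a driver-global DEFINE_RAW_SPINLOCK into the per-device structure, and two instances no longer contend on a single lock. A minimal sketch of the pattern — timer_device and timer_channel are illustrative names, not the driver's own, and the 0x280 offset is taken from the MTU2 register layout shown above:

/* Sketch: per-device lock for a start/stop register shared by channels. */
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct timer_device {
	void __iomem *mapbase;
	raw_spinlock_t lock;	/* protects the shared TSTR register */
};

struct timer_channel {
	struct timer_device *dev;
	unsigned int index;
};

static void timer_start_stop(struct timer_channel *ch, bool start)
{
	unsigned long flags;
	u8 value;

	raw_spin_lock_irqsave(&ch->dev->lock, flags);
	value = ioread8(ch->dev->mapbase + 0x280);
	if (start)
		value |= 1 << ch->index;	/* each channel owns one bit */
	else
		value &= ~(1 << ch->index);
	iowrite8(value, ch->dev->mapbase + 0x280);
	raw_spin_unlock_irqrestore(&ch->dev->lock, flags);
}
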
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 6bd17a8f3dd4..0f665b8f2461 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -24,6 +24,7 @@
 #include <linux/ioport.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
@@ -32,7 +33,6 @@
 #include <linux/spinlock.h>
 
 enum sh_tmu_model {
-	SH_TMU_LEGACY,
 	SH_TMU,
 	SH_TMU_SH3,
 };
@@ -62,6 +62,8 @@ struct sh_tmu_device {
 
 	enum sh_tmu_model model;
 
+	raw_spinlock_t lock; /* Protect the shared start/stop register */
+
 	struct sh_tmu_channel *channels;
 	unsigned int num_channels;
 
@@ -69,8 +71,6 @@ struct sh_tmu_device {
 	bool has_clocksource;
 };
 
-static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
-
 #define TSTR -1 /* shared register */
 #define TCOR 0 /* channel register */
 #define TCNT 1 /* channel register */
@@ -91,8 +91,6 @@ static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
 
 	if (reg_nr == TSTR) {
 		switch (ch->tmu->model) {
-		case SH_TMU_LEGACY:
-			return ioread8(ch->tmu->mapbase);
 		case SH_TMU_SH3:
 			return ioread8(ch->tmu->mapbase + 2);
 		case SH_TMU:
@@ -115,8 +113,6 @@ static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
 
 	if (reg_nr == TSTR) {
 		switch (ch->tmu->model) {
-		case SH_TMU_LEGACY:
-			return iowrite8(value, ch->tmu->mapbase);
 		case SH_TMU_SH3:
 			return iowrite8(value, ch->tmu->mapbase + 2);
 		case SH_TMU:
@@ -137,7 +133,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
 	unsigned long flags, value;
 
 	/* start stop register shared by multiple timer channels */
-	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
+	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
 	value = sh_tmu_read(ch, TSTR);
 
 	if (start)
@@ -146,7 +142,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
 		value &= ~(1 << ch->index);
 
 	sh_tmu_write(ch, TSTR, value);
-	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
+	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
 }
 
 static int __sh_tmu_enable(struct sh_tmu_channel *ch)
@@ -476,27 +472,12 @@ static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
 		return 0;
 
 	ch->tmu = tmu;
+	ch->index = index;
 
-	if (tmu->model == SH_TMU_LEGACY) {
-		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
-
-		/*
-		 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
-		 * channel registers blocks at base + 2 + 12 * index, while all
-		 * other variants map them at base + 4 + 12 * index. We can
-		 * compute the index by just dividing by 12, the 2 bytes or 4
-		 * bytes offset being hidden by the integer division.
-		 */
-		ch->index = cfg->channel_offset / 12;
-		ch->base = tmu->mapbase + cfg->channel_offset;
-	} else {
-		ch->index = index;
-
-		if (tmu->model == SH_TMU_SH3)
-			ch->base = tmu->mapbase + 4 + ch->index * 12;
-		else
-			ch->base = tmu->mapbase + 8 + ch->index * 12;
-	}
+	if (tmu->model == SH_TMU_SH3)
+		ch->base = tmu->mapbase + 4 + ch->index * 12;
+	else
+		ch->base = tmu->mapbase + 8 + ch->index * 12;
 
 	ch->irq = platform_get_irq(tmu->pdev, index);
 	if (ch->irq < 0) {
@@ -526,46 +507,53 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
 	if (tmu->mapbase == NULL)
 		return -ENXIO;
 
-	/*
-	 * In legacy platform device configuration (with one device per channel)
-	 * the resource points to the channel base address.
-	 */
-	if (tmu->model == SH_TMU_LEGACY) {
-		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
-		tmu->mapbase -= cfg->channel_offset;
-	}
-
 	return 0;
 }
 
-static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
+static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
 {
-	if (tmu->model == SH_TMU_LEGACY) {
-		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
-		tmu->mapbase += cfg->channel_offset;
+	struct device_node *np = tmu->pdev->dev.of_node;
+
+	tmu->model = SH_TMU;
+	tmu->num_channels = 3;
+
+	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);
+
+	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
+		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
+			tmu->num_channels);
+		return -EINVAL;
 	}
 
-	iounmap(tmu->mapbase);
+	return 0;
 }
 
 static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
 {
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
-	const struct platform_device_id *id = pdev->id_entry;
 	unsigned int i;
 	int ret;
 
-	if (!cfg) {
+	tmu->pdev = pdev;
+
+	raw_spin_lock_init(&tmu->lock);
+
+	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+		ret = sh_tmu_parse_dt(tmu);
+		if (ret < 0)
+			return ret;
+	} else if (pdev->dev.platform_data) {
+		const struct platform_device_id *id = pdev->id_entry;
+		struct sh_timer_config *cfg = pdev->dev.platform_data;
+
+		tmu->model = id->driver_data;
+		tmu->num_channels = hweight8(cfg->channels_mask);
+	} else {
 		dev_err(&tmu->pdev->dev, "missing platform data\n");
 		return -ENXIO;
 	}
 
-	tmu->pdev = pdev;
-	tmu->model = id->driver_data;
-
 	/* Get hold of clock. */
-	tmu->clk = clk_get(&tmu->pdev->dev,
-			   tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
+	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
 	if (IS_ERR(tmu->clk)) {
 		dev_err(&tmu->pdev->dev, "cannot get clock\n");
 		return PTR_ERR(tmu->clk);
@@ -583,11 +571,6 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
 	}
 
 	/* Allocate and setup the channels. */
-	if (tmu->model == SH_TMU_LEGACY)
-		tmu->num_channels = 1;
-	else
-		tmu->num_channels = hweight8(cfg->channels_mask);
-
 	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
 				GFP_KERNEL);
 	if (tmu->channels == NULL) {
@@ -595,23 +578,15 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
 		goto err_unmap;
 	}
 
-	if (tmu->model == SH_TMU_LEGACY) {
-		ret = sh_tmu_channel_setup(&tmu->channels[0], 0,
-					   cfg->clockevent_rating != 0,
-					   cfg->clocksource_rating != 0, tmu);
+	/*
+	 * Use the first channel as a clock event device and the second channel
+	 * as a clock source.
+	 */
+	for (i = 0; i < tmu->num_channels; ++i) {
+		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
+					   i == 0, i == 1, tmu);
 		if (ret < 0)
 			goto err_unmap;
-	} else {
-		/*
-		 * Use the first channel as a clock event device and the second
-		 * channel as a clock source.
-		 */
-		for (i = 0; i < tmu->num_channels; ++i) {
-			ret = sh_tmu_channel_setup(&tmu->channels[i], i,
-						   i == 0, i == 1, tmu);
-			if (ret < 0)
-				goto err_unmap;
-		}
 	}
 
 	platform_set_drvdata(pdev, tmu);
@@ -620,7 +595,7 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
 
 err_unmap:
 	kfree(tmu->channels);
-	sh_tmu_unmap_memory(tmu);
+	iounmap(tmu->mapbase);
 err_clk_unprepare:
 	clk_unprepare(tmu->clk);
 err_clk_put:
@@ -671,18 +646,24 @@ static int sh_tmu_remove(struct platform_device *pdev)
 }
 
 static const struct platform_device_id sh_tmu_id_table[] = {
-	{ "sh_tmu", SH_TMU_LEGACY },
 	{ "sh-tmu", SH_TMU },
 	{ "sh-tmu-sh3", SH_TMU_SH3 },
 	{ }
 };
 MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
 
+static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
+	{ .compatible = "renesas,tmu" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
+
 static struct platform_driver sh_tmu_device_driver = {
 	.probe = sh_tmu_probe,
 	.remove = sh_tmu_remove,
 	.driver = {
 		.name = "sh_tmu",
+		.of_match_table = of_match_ptr(sh_tmu_of_table),
 	},
 	.id_table = sh_tmu_id_table,
 };
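
With the legacy one-device-per-channel mode gone, sh_tmu probes either from device tree or from platform data. A condensed sketch of that probe-time branching; timer_pdata is an illustrative stand-in for struct sh_timer_config, and the property name mirrors the hunk above:

/* Sketch: configuration from DT when present, else from platform data. */
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct timer_pdata {
	u8 channels_mask;
};

static int timer_get_config(struct platform_device *pdev,
			    unsigned int *num_channels)
{
	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		/* DT case: default to 3 channels, allow a property override. */
		*num_channels = 3;
		of_property_read_u32(pdev->dev.of_node, "#renesas,channels",
				     num_channels);
		return (*num_channels == 2 || *num_channels == 3) ? 0 : -EINVAL;
	}

	if (pdev->dev.platform_data) {
		/* Board-file case: count the channels set in the mask. */
		struct timer_pdata *cfg = pdev->dev.platform_data;

		*num_channels = hweight8(cfg->channels_mask);
		return 0;
	}

	dev_err(&pdev->dev, "missing platform data\n");
	return -ENXIO;
}

The IS_ENABLED(CONFIG_OF) guard lets the compiler drop the DT branch entirely on non-OF builds while still type-checking it.
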
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index dbd30398222a..330e93064692 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -260,6 +260,9 @@ static void __init sirfsoc_marco_timer_init(struct device_node *np)
 
 	clk = of_clk_get(np, 0);
 	BUG_ON(IS_ERR(clk));
+
+	BUG_ON(clk_prepare_enable(clk));
+
 	rate = clk_get_rate(clk);
 
 	BUG_ON(rate < MARCO_CLOCK_FREQ);
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index a722aac7ac02..ce18d570e1cd 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -200,6 +200,9 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
 
 	clk = of_clk_get(np, 0);
 	BUG_ON(IS_ERR(clk));
+
+	BUG_ON(clk_prepare_enable(clk));
+
 	rate = clk_get_rate(clk);
 
 	BUG_ON(rate < PRIMA2_CLOCK_FREQ);
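
Both SiRF timer fixes are the same one-liner: the clock is prepared and enabled before its rate is read, since a rate reported for a clock that was never enabled may not reflect the tree as actually configured. The shape of the pattern, as a sketch:

/* Sketch: enable a clock before relying on clk_get_rate(). */
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>

static unsigned long timer_clock_rate(struct device_node *np)
{
	struct clk *clk = of_clk_get(np, 0);

	BUG_ON(IS_ERR(clk));
	BUG_ON(clk_prepare_enable(clk));	/* a system timer cannot limp on */
	return clk_get_rate(clk);
}
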
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index ccdd4c7e748b..15d06fcf0b50 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -69,7 +69,6 @@ void proc_fork_connector(struct task_struct *task)
 	struct cn_msg *msg;
 	struct proc_event *ev;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
-	struct timespec ts;
 	struct task_struct *parent;
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
@@ -79,8 +78,7 @@ void proc_fork_connector(struct task_struct *task)
 	ev = (struct proc_event *)msg->data;
 	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
-	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	ev->timestamp_ns = ktime_get_ns();
 	ev->what = PROC_EVENT_FORK;
 	rcu_read_lock();
 	parent = rcu_dereference(task->real_parent);
@@ -102,7 +100,6 @@ void proc_exec_connector(struct task_struct *task)
 {
 	struct cn_msg *msg;
 	struct proc_event *ev;
-	struct timespec ts;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
@@ -112,8 +109,7 @@ void proc_exec_connector(struct task_struct *task)
 	ev = (struct proc_event *)msg->data;
 	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
-	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	ev->timestamp_ns = ktime_get_ns();
 	ev->what = PROC_EVENT_EXEC;
 	ev->event_data.exec.process_pid = task->pid;
 	ev->event_data.exec.process_tgid = task->tgid;
@@ -130,7 +126,6 @@ void proc_id_connector(struct task_struct *task, int which_id)
 	struct cn_msg *msg;
 	struct proc_event *ev;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
-	struct timespec ts;
 	const struct cred *cred;
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
@@ -156,8 +151,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
 	}
 	rcu_read_unlock();
 	get_seq(&msg->seq, &ev->cpu);
-	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	ev->timestamp_ns = ktime_get_ns();
 
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
@@ -170,7 +164,6 @@ void proc_sid_connector(struct task_struct *task)
 {
 	struct cn_msg *msg;
 	struct proc_event *ev;
-	struct timespec ts;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
@@ -180,8 +173,7 @@ void proc_sid_connector(struct task_struct *task)
 	ev = (struct proc_event *)msg->data;
 	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
-	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	ev->timestamp_ns = ktime_get_ns();
 	ev->what = PROC_EVENT_SID;
 	ev->event_data.sid.process_pid = task->pid;
 	ev->event_data.sid.process_tgid = task->tgid;
@@ -197,7 +189,6 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
 {
 	struct cn_msg *msg;
 	struct proc_event *ev;
-	struct timespec ts;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
@@ -207,8 +198,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
 	ev = (struct proc_event *)msg->data;
 	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
-	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	ev->timestamp_ns = ktime_get_ns();
 	ev->what = PROC_EVENT_PTRACE;
 	ev->event_data.ptrace.process_pid = task->pid;
 	ev->event_data.ptrace.process_tgid = task->tgid;
@@ -232,7 +222,6 @@ void proc_comm_connector(struct task_struct *task)
 {
 	struct cn_msg *msg;
 	struct proc_event *ev;
-	struct timespec ts;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
@@ -242,8 +231,7 @@ void proc_comm_connector(struct task_struct *task)
 	ev = (struct proc_event *)msg->data;
 	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
-	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	ev->timestamp_ns = ktime_get_ns();
 	ev->what = PROC_EVENT_COMM;
 	ev->event_data.comm.process_pid = task->pid;
 	ev->event_data.comm.process_tgid = task->tgid;
@@ -261,7 +249,6 @@ void proc_coredump_connector(struct task_struct *task)
 	struct cn_msg *msg;
 	struct proc_event *ev;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
-	struct timespec ts;
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
 		return;
@@ -270,8 +257,7 @@ void proc_coredump_connector(struct task_struct *task)
 	ev = (struct proc_event *)msg->data;
 	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
-	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	ev->timestamp_ns = ktime_get_ns();
 	ev->what = PROC_EVENT_COREDUMP;
 	ev->event_data.coredump.process_pid = task->pid;
 	ev->event_data.coredump.process_tgid = task->tgid;
@@ -288,7 +274,6 @@ void proc_exit_connector(struct task_struct *task)
 	struct cn_msg *msg;
 	struct proc_event *ev;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
-	struct timespec ts;
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
 		return;
@@ -297,8 +282,7 @@ void proc_exit_connector(struct task_struct *task)
 	ev = (struct proc_event *)msg->data;
 	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
-	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	ev->timestamp_ns = ktime_get_ns();
 	ev->what = PROC_EVENT_EXIT;
 	ev->event_data.exit.process_pid = task->pid;
 	ev->event_data.exit.process_tgid = task->tgid;
@@ -325,7 +309,6 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
 	struct cn_msg *msg;
 	struct proc_event *ev;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
-	struct timespec ts;
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
 		return;
@@ -334,8 +317,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
 	ev = (struct proc_event *)msg->data;
 	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	msg->seq = rcvd_seq;
-	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	ev->timestamp_ns = ktime_get_ns();
 	ev->cpu = -1;
 	ev->what = PROC_EVENT_NONE;
 	ev->event_data.ack.err = err;
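
Every connector path above repeated the same three lines: declare a timespec, fill it with ktime_get_ts(), convert with timespec_to_ns(). The new ktime_get_ns() accessor collapses that to one call on the same monotonic clock — the whole tree-wide cleanup in miniature, sketched here:

/* Sketch: one-call nanosecond timestamp, no on-stack timespec. */
#include <linux/ktime.h>

static void stamp_event(u64 *timestamp_ns)
{
	/*
	 * Equivalent to the old ktime_get_ts() + timespec_to_ns() pair,
	 * minus the intermediate struct timespec and its split/rejoin cost.
	 */
	*timestamp_ns = ktime_get_ns();
}
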
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index d7d5c8af92b9..5d997a33907e 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1214,9 +1214,9 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
 	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
 
 	switch (a->clk_id) {
 	case CLOCK_REALTIME: getnstimeofday(&ts); break;
-	case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break;
+	case CLOCK_MONOTONIC: ktime_get_ts(&ts); break;
 	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
 	default:
 		ret = -EINVAL;
 	}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0de123afdb34..08ba1209228e 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -542,8 +542,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 					  const struct drm_crtc *refcrtc,
 					  const struct drm_display_mode *mode)
 {
-	ktime_t stime, etime, mono_time_offset;
 	struct timeval tv_etime;
+	ktime_t stime, etime;
 	int vbl_status;
 	int vpos, hpos, i;
 	int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
@@ -588,13 +588,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 	vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
 						       &hpos, &stime, &etime);
 
-	/*
-	 * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
-	 * CLOCK_REALTIME is requested.
-	 */
-	if (!drm_timestamp_monotonic)
-		mono_time_offset = ktime_get_monotonic_offset();
-
 	/* Return as no-op if scanout query unsupported or failed. */
 	if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
 		DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@@ -633,7 +626,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 	delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
 
 	if (!drm_timestamp_monotonic)
-		etime = ktime_sub(etime, mono_time_offset);
+		etime = ktime_mono_to_real(etime);
 
 	/* save this only for debugging purposes */
 	tv_etime = ktime_to_timeval(etime);
@@ -664,10 +657,7 @@ static struct timeval get_drm_timestamp(void)
 {
 	ktime_t now;
 
-	now = ktime_get();
-	if (!drm_timestamp_monotonic)
-		now = ktime_sub(now, ktime_get_monotonic_offset());
-
+	now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
 	return ktime_to_timeval(now);
 }
 
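
Rather than caching the CLOCK_MONOTONIC-to-CLOCK_REALTIME offset early and subtracting it later — which can go stale if the realtime clock is stepped in between — the DRM code converts at use time with ktime_mono_to_real(). A sketch of the idea:

/* Sketch: convert a monotonic capture to realtime at use time. */
#include <linux/ktime.h>

static ktime_t scanout_timestamp(ktime_t etime, bool want_monotonic)
{
	if (!want_monotonic)
		etime = ktime_mono_to_real(etime);	/* current offset applied here */
	return etime;
}
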
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 374f964323ad..1f7700897dfc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -931,7 +931,7 @@ struct intel_ilk_power_mgmt {
 	unsigned long last_time1;
 	unsigned long chipset_power;
 	u64 last_count2;
-	struct timespec last_time2;
+	u64 last_time2;
 	unsigned long gfx_power;
 	u8 corr;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d893e4da5dce..f247d922e44a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1149,16 +1149,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 			unsigned reset_counter,
 			bool interruptible,
-			struct timespec *timeout,
+			s64 *timeout,
 			struct drm_i915_file_private *file_priv)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
-	struct timespec before, now;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
+	s64 before, now;
 	int ret;
 
 	WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
@@ -1166,7 +1166,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
 
-	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
+	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
 
 	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
 		gen6_rps_boost(dev_priv);
@@ -1181,7 +1181,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 
 	/* Record current time in case interrupted by signal, or wedged */
 	trace_i915_gem_request_wait_begin(ring, seqno);
-	getrawmonotonic(&before);
+	before = ktime_get_raw_ns();
 	for (;;) {
 		struct timer_list timer;
 
@@ -1230,7 +1230,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 			destroy_timer_on_stack(&timer);
 		}
 	}
-	getrawmonotonic(&now);
+	now = ktime_get_raw_ns();
 	trace_i915_gem_request_wait_end(ring, seqno);
 
 	if (!irq_test_in_progress)
@@ -1239,10 +1239,9 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 	finish_wait(&ring->irq_queue, &wait);
 
 	if (timeout) {
-		struct timespec sleep_time = timespec_sub(now, before);
-		*timeout = timespec_sub(*timeout, sleep_time);
-		if (!timespec_valid(timeout)) /* i.e. negative time remains */
-			set_normalized_timespec(timeout, 0, 0);
+		s64 tres = *timeout - (now - before);
+
+		*timeout = tres < 0 ? 0 : tres;
 	}
 
 	return ret;
@@ -2746,16 +2745,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
 	struct intel_engine_cs *ring = NULL;
-	struct timespec timeout_stack, *timeout = NULL;
 	unsigned reset_counter;
 	u32 seqno = 0;
 	int ret = 0;
 
-	if (args->timeout_ns >= 0) {
-		timeout_stack = ns_to_timespec(args->timeout_ns);
-		timeout = &timeout_stack;
-	}
-
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -2780,9 +2773,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		goto out;
 
 	/* Do this after OLR check to make sure we make forward progress polling
-	 * on this IOCTL with a 0 timeout (like busy ioctl)
+	 * on this IOCTL with a timeout <=0 (like busy ioctl)
 	 */
-	if (!args->timeout_ns) {
+	if (args->timeout_ns <= 0) {
 		ret = -ETIME;
 		goto out;
 	}
@@ -2791,10 +2784,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
-	if (timeout)
-		args->timeout_ns = timespec_to_ns(timeout);
-	return ret;
+	return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
+			    file->driver_priv);
 
 out:
 	drm_gem_object_unreference(&obj->base);
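
The i915 wait path now carries the remaining timeout as signed nanoseconds end to end: the jiffies deadline comes from nsecs_to_jiffies(), the elapsed time from two ktime_get_raw_ns() samples, and the remainder is clamped at zero before being handed back to userspace. A sketch of the bookkeeping around a hypothetical blocking step:

/* Sketch: s64 nanosecond timeout bookkeeping around a wait. */
#include <linux/ktime.h>

static void account_wait_ns(s64 *timeout_ns, void (*block)(void))
{
	s64 before, remaining;

	before = ktime_get_raw_ns();
	block();		/* sleep until the event or the deadline */
	remaining = *timeout_ns - (ktime_get_raw_ns() - before);

	*timeout_ns = remaining < 0 ? 0 : remaining;	/* never report negative */
}

Keeping the value in the ioctl's own s64 field also removes the ns_to_timespec()/timespec_to_ns() round-trip at the syscall boundary.
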
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ee72807069e4..f1233f544f3e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2993,7 +2993,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
 	I915_READ(0x112e0);
 	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
 	dev_priv->ips.last_count2 = I915_READ(0x112f4);
-	getrawmonotonic(&dev_priv->ips.last_time2);
+	dev_priv->ips.last_time2 = ktime_get_raw_ns();
 
 	spin_unlock_irq(&mchdev_lock);
 }
@@ -4314,18 +4314,16 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 
 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
-	struct timespec now, diff1;
-	u64 diff;
-	unsigned long diffms;
+	u64 now, diff, diffms;
 	u32 count;
 
 	assert_spin_locked(&mchdev_lock);
 
-	getrawmonotonic(&now);
-	diff1 = timespec_sub(now, dev_priv->ips.last_time2);
+	now = ktime_get_raw_ns();
+	diffms = now - dev_priv->ips.last_time2;
+	do_div(diffms, NSEC_PER_MSEC);
 
 	/* Don't divide by 0 */
-	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
 	if (!diffms)
 		return;
 
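
The gfx power bookkeeping keeps its last sample as raw-monotonic nanoseconds and derives milliseconds with do_div(), the portable 64-bit division for 32-bit kernels. In sketch form:

/* Sketch: millisecond delta between two raw-monotonic ns samples. */
#include <asm/div64.h>
#include <linux/ktime.h>

static u64 elapsed_ms(u64 last_ns)
{
	u64 diffms = ktime_get_raw_ns() - last_ns;

	do_div(diffms, NSEC_PER_MSEC);	/* 64-by-32 divide, safe on 32-bit */
	return diffms;
}
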
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6b252a887ae2..c886c024c637 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -159,8 +159,8 @@ struct vmw_surface {
 
 struct vmw_marker_queue {
 	struct list_head head;
-	struct timespec lag;
-	struct timespec lag_time;
+	u64 lag;
+	u64 lag_time;
 	spinlock_t lock;
 };
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
index 8a8725c2716c..efd1ffd68185 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -31,14 +31,14 @@
 struct vmw_marker {
 	struct list_head head;
 	uint32_t seqno;
-	struct timespec submitted;
+	u64 submitted;
 };
 
 void vmw_marker_queue_init(struct vmw_marker_queue *queue)
 {
 	INIT_LIST_HEAD(&queue->head);
-	queue->lag = ns_to_timespec(0);
-	getrawmonotonic(&queue->lag_time);
+	queue->lag = 0;
+	queue->lag_time = ktime_get_raw_ns();
 	spin_lock_init(&queue->lock);
 }
 
@@ -62,7 +62,7 @@ int vmw_marker_push(struct vmw_marker_queue *queue,
 		return -ENOMEM;
 
 	marker->seqno = seqno;
-	getrawmonotonic(&marker->submitted);
+	marker->submitted = ktime_get_raw_ns();
 	spin_lock(&queue->lock);
 	list_add_tail(&marker->head, &queue->head);
 	spin_unlock(&queue->lock);
@@ -74,14 +74,14 @@ int vmw_marker_pull(struct vmw_marker_queue *queue,
 		    uint32_t signaled_seqno)
 {
 	struct vmw_marker *marker, *next;
-	struct timespec now;
 	bool updated = false;
+	u64 now;
 
 	spin_lock(&queue->lock);
-	getrawmonotonic(&now);
+	now = ktime_get_raw_ns();
 
 	if (list_empty(&queue->head)) {
-		queue->lag = ns_to_timespec(0);
+		queue->lag = 0;
 		queue->lag_time = now;
 		updated = true;
 		goto out_unlock;
@@ -91,7 +91,7 @@ int vmw_marker_pull(struct vmw_marker_queue *queue,
 		if (signaled_seqno - marker->seqno > (1 << 30))
 			continue;
 
-		queue->lag = timespec_sub(now, marker->submitted);
+		queue->lag = now - marker->submitted;
 		queue->lag_time = now;
 		updated = true;
 		list_del(&marker->head);
@@ -104,27 +104,13 @@ out_unlock:
 	return (updated) ? 0 : -EBUSY;
 }
 
-static struct timespec vmw_timespec_add(struct timespec t1,
-					struct timespec t2)
+static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
 {
-	t1.tv_sec += t2.tv_sec;
-	t1.tv_nsec += t2.tv_nsec;
-	if (t1.tv_nsec >= 1000000000L) {
-		t1.tv_sec += 1;
-		t1.tv_nsec -= 1000000000L;
-	}
-
-	return t1;
-}
-
-static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
-{
-	struct timespec now;
+	u64 now;
 
 	spin_lock(&queue->lock);
-	getrawmonotonic(&now);
-	queue->lag = vmw_timespec_add(queue->lag,
-				      timespec_sub(now, queue->lag_time));
+	now = ktime_get_raw_ns();
+	queue->lag += now - queue->lag_time;
 	queue->lag_time = now;
 	spin_unlock(&queue->lock);
 	return queue->lag;
@@ -134,11 +120,9 @@ static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
 static bool vmw_lag_lt(struct vmw_marker_queue *queue,
 		       uint32_t us)
 {
-	struct timespec lag, cond;
+	u64 cond = (u64) us * NSEC_PER_USEC;
 
-	cond = ns_to_timespec((s64) us * 1000);
-	lag = vmw_fifo_lag(queue);
-	return (timespec_compare(&lag, &cond) < 1);
+	return vmw_fifo_lag(queue) <= cond;
 }
 
 int vmw_wait_lag(struct vmw_private *dev_priv,
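
With lag held in u64 nanoseconds, the hand-rolled vmw_timespec_add() helper disappears entirely; accumulation and comparison become plain integer arithmetic. A sketch of the same accounting with the locking elided for brevity (the driver keeps its spinlock):

/* Sketch: fifo-lag accounting in plain u64 nanoseconds. */
#include <linux/ktime.h>

struct lag_state {
	u64 lag;	/* accumulated lag, ns */
	u64 lag_time;	/* last sample, raw monotonic ns */
};

static bool lag_below(struct lag_state *s, u32 us)
{
	u64 now = ktime_get_raw_ns();

	s->lag += now - s->lag_time;	/* was vmw_timespec_add() */
	s->lag_time = now;
	return s->lag <= (u64)us * NSEC_PER_USEC;
}
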
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 632f1dc0fe1f..7a8a6fbf11ff 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -842,11 +842,10 @@ static ssize_t aem_show_power(struct device *dev,
 	struct aem_data *data = dev_get_drvdata(dev);
 	u64 before, after, delta, time;
 	signed long leftover;
-	struct timespec b, a;
 
 	mutex_lock(&data->lock);
 	update_aem_energy_one(data, attr->index);
-	getnstimeofday(&b);
+	time = ktime_get_ns();
 	before = data->energy[attr->index];
 
 	leftover = schedule_timeout_interruptible(
@@ -858,11 +857,10 @@ static ssize_t aem_show_power(struct device *dev,
 	}
 
 	update_aem_energy_one(data, attr->index);
-	getnstimeofday(&a);
+	time = ktime_get_ns() - time;
 	after = data->energy[attr->index];
 	mutex_unlock(&data->lock);
 
-	time = timespec_to_ns(&a) - timespec_to_ns(&b);
 	delta = (after - before) * UJ_PER_MJ;
 
 	return sprintf(buf, "%llu\n",
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index fd325ec9f064..de055451d1af 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -108,9 +108,8 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
 	struct input_event ev;
 	ktime_t time;
 
-	time = ktime_get();
-	if (client->clkid != CLOCK_MONOTONIC)
-		time = ktime_sub(time, ktime_get_monotonic_offset());
+	time = (client->clkid == CLOCK_MONOTONIC) ?
+		ktime_get() : ktime_get_real();
 
 	ev.time = ktime_to_timeval(time);
 	ev.type = EV_SYN;
@@ -202,7 +201,7 @@ static void evdev_events(struct input_handle *handle,
 	ktime_t time_mono, time_real;
 
 	time_mono = ktime_get();
-	time_real = ktime_sub(time_mono, ktime_get_monotonic_offset());
+	time_real = ktime_mono_to_real(time_mono);
 
 	rcu_read_lock();
 
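
evdev now picks the clock for an event timestamp directly instead of starting from CLOCK_MONOTONIC and subtracting the offset: ktime_get_real() and ktime_mono_to_real() replace the two ktime_get_monotonic_offset() uses. The selection reduces to a one-liner, sketched here:

/* Sketch: timestamp clock chosen by the client's clock id. */
#include <linux/ktime.h>
#include <linux/time.h>

static ktime_t event_time(clockid_t clkid)
{
	return (clkid == CLOCK_MONOTONIC) ? ktime_get() : ktime_get_real();
}
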
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 0b8d32829166..8c1c7cc373f8 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -225,7 +225,6 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
 	u8 *ptr;
 	int sum;
 	int ret = 0, final_ret;
-	struct timespec ts;
 
 	/*
 	 * We have the shared ec_dev buffer plus we do lots of separate spi_sync
@@ -239,11 +238,9 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
 
 	/* If it's too soon to do another transaction, wait */
 	if (ec_spi->last_transfer_ns) {
-		struct timespec ts;
 		unsigned long delay;	/* The delay completed so far */
 
-		ktime_get_ts(&ts);
-		delay = timespec_to_ns(&ts) - ec_spi->last_transfer_ns;
+		delay = ktime_get_ns() - ec_spi->last_transfer_ns;
 		if (delay < EC_SPI_RECOVERY_TIME_NS)
 			ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
 	}
@@ -280,8 +277,7 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
 	}
 
 	final_ret = spi_sync(ec_spi->spi, &msg);
-	ktime_get_ts(&ts);
-	ec_spi->last_transfer_ns = timespec_to_ns(&ts);
+	ec_spi->last_transfer_ns = ktime_get_ns();
 	if (!ret)
 		ret = final_ret;
 	if (ret < 0) {
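
The EC SPI driver's inter-transfer recovery gap is the same conversion again, but with the nanosecond timestamp persisting across calls. A sketch, where RECOVERY_TIME_NS and its value stand in for the driver's EC_SPI_RECOVERY_TIME_NS:

/* Sketch: enforce a minimum nanosecond gap between transfers. */
#include <linux/delay.h>
#include <linux/ktime.h>

#define RECOVERY_TIME_NS	200	/* illustrative value, not the driver's */

static u64 last_transfer_ns;

static void do_transfer(void (*xfer)(void))
{
	if (last_transfer_ns) {
		u64 gap = ktime_get_ns() - last_transfer_ns;

		if (gap < RECOVERY_TIME_NS)
			ndelay(RECOVERY_TIME_NS - gap);
	}
	xfer();
	last_transfer_ns = ktime_get_ns();
}
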
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 06f6ad29ceff..3336ddca45ac 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -145,7 +145,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
 	union ioc4_int_out int_out;
 	union ioc4_gpcr gpcr;
 	unsigned int state, last_state = 1;
-	struct timespec start_ts, end_ts;
 	uint64_t start, end, period;
 	unsigned int count = 0;
 
@@ -174,10 +173,10 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
 		if (!last_state && state) {
 			count++;
 			if (count == IOC4_CALIBRATE_END) {
-				ktime_get_ts(&end_ts);
+				end = ktime_get_ns();
 				break;
 			} else if (count == IOC4_CALIBRATE_DISCARD)
-				ktime_get_ts(&start_ts);
+				start = ktime_get_ns();
 		}
 		last_state = state;
 	} while (1);
@@ -192,8 +191,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
 	 * by which the IOC4 generates the square wave, to get the
 	 * period of an IOC4 INT_OUT count.
 	 */
-	end = end_ts.tv_sec * NSEC_PER_SEC + end_ts.tv_nsec;
-	start = start_ts.tv_sec * NSEC_PER_SEC + start_ts.tv_nsec;
 	period = (end - start) /
 		(IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1));
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 87d1b018a9c3..67f8f5a1dc86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -548,7 +548,7 @@ static void cmd_work_handler(struct work_struct *work)
 	lay->status_own = CMD_OWNER_HW;
 	set_signature(ent, !cmd->checksum_disabled);
 	dump_command(dev, ent, 1);
-	ktime_get_ts(&ent->ts1);
+	ent->ts1 = ktime_get_ns();
 
 	/* ring doorbell after the descriptor is valid */
 	wmb();
@@ -637,7 +637,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	struct mlx5_cmd_work_ent *ent;
-	ktime_t t1, t2, delta;
 	struct mlx5_cmd_stats *stats;
 	int err = 0;
 	s64 ds;
@@ -668,10 +667,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	if (err == -ETIMEDOUT)
 		goto out;
 
-	t1 = timespec_to_ktime(ent->ts1);
-	t2 = timespec_to_ktime(ent->ts2);
-	delta = ktime_sub(t2, t1);
-	ds = ktime_to_ns(delta);
+	ds = ent->ts2 - ent->ts1;
 	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
 	if (op < ARRAY_SIZE(cmd->stats)) {
 		stats = &cmd->stats[op];
@@ -1135,7 +1131,6 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
 	void *context;
 	int err;
 	int i;
-	ktime_t t1, t2, delta;
 	s64 ds;
 	struct mlx5_cmd_stats *stats;
 	unsigned long flags;
@@ -1149,7 +1144,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
 			sem = &cmd->pages_sem;
 		else
 			sem = &cmd->sem;
-		ktime_get_ts(&ent->ts2);
+		ent->ts2 = ktime_get_ns();
 		memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
 		dump_command(dev, ent, 0);
 		if (!ent->ret) {
@@ -1163,10 +1158,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
 		}
 		free_ent(cmd, ent->idx);
 		if (ent->callback) {
-			t1 = timespec_to_ktime(ent->ts1);
-			t2 = timespec_to_ktime(ent->ts2);
-			delta = ktime_sub(t2, t1);
-			ds = ktime_to_ns(delta);
+			ds = ent->ts2 - ent->ts1;
 			if (ent->op < ARRAY_SIZE(cmd->stats)) {
 				stats = &cmd->stats[ent->op];
 				spin_lock_irqsave(&stats->lock, flags);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2a8ed8375ec0..14b80b1b450c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1734,7 +1734,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 		   struct ath9k_hw_cal_data *caldata, bool fastcc)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
-	struct timespec ts;
 	u32 saveLedState;
 	u32 saveDefAntenna;
 	u32 macStaId1;
@@ -1784,8 +1783,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
 	/* Save TSF before chip reset, a cold reset clears it */
 	tsf = ath9k_hw_gettsf64(ah);
-	getrawmonotonic(&ts);
-	usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
+	usec = ktime_to_us(ktime_get_raw());
 
 	saveLedState = REG_READ(ah, AR_CFG_LED) &
 		       (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
@@ -1818,8 +1816,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	}
 
 	/* Restore TSF */
-	getrawmonotonic(&ts);
-	usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000 - usec;
+	usec = ktime_to_us(ktime_get_raw()) - usec;
 	ath9k_hw_settsf64(ah, tsf + usec);
 
 	if (AR_SREV_9280_20_OR_LATER(ah))
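
ath9k measures how long the chip reset took on the raw monotonic clock, in microseconds, so the saved TSF can be advanced by the elapsed time; ktime_to_us(ktime_get_raw()) replaces the open-coded sec/nsec arithmetic. As a sketch around a hypothetical reset callback:

/* Sketch: elapsed microseconds across a reset, raw monotonic clock. */
#include <linux/ktime.h>

static u64 reset_elapsed_us(void (*do_reset)(void))
{
	u64 usec = ktime_to_us(ktime_get_raw());

	do_reset();	/* the raw clock keeps running meanwhile */
	return ktime_to_us(ktime_get_raw()) - usec;
}

The raw clock is the right choice here because it is never adjusted by NTP, so the measured interval matches what the hardware actually experienced.
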
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 5edfcb0da37d..e3718250d66e 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -702,6 +702,42 @@ void __iomem *of_iomap(struct device_node *np, int index)
 }
 EXPORT_SYMBOL(of_iomap);
 
+/*
+ * of_io_request_and_map - Requests a resource and maps the memory mapped IO
+ *			   for a given device_node
+ * @device:	the device whose io range will be mapped
+ * @index:	index of the io range
+ * @name:	name of the resource
+ *
+ * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
+ * error code on failure. Usage example:
+ *
+ *	base = of_io_request_and_map(node, 0, "foo");
+ *	if (IS_ERR(base))
+ *		return PTR_ERR(base);
+ */
+void __iomem *of_io_request_and_map(struct device_node *np, int index,
+				    char *name)
+{
+	struct resource res;
+	void __iomem *mem;
+
+	if (of_address_to_resource(np, index, &res))
+		return IOMEM_ERR_PTR(-EINVAL);
+
+	if (!request_mem_region(res.start, resource_size(&res), name))
+		return IOMEM_ERR_PTR(-EBUSY);
+
+	mem = ioremap(res.start, resource_size(&res));
+	if (!mem) {
+		release_mem_region(res.start, resource_size(&res));
+		return IOMEM_ERR_PTR(-ENOMEM);
+	}
+
+	return mem;
+}
+EXPORT_SYMBOL(of_io_request_and_map);
+
 /**
  * of_dma_get_range - Get DMA range info
  * @np:		device node to get DMA range info
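
The new helper rolls of_address_to_resource(), request_mem_region() and ioremap() into one call that either returns a mapping or an ERR_PTR(), which is what the new clocksource drivers in this series consume. A hedged usage sketch beyond the kernel-doc example above, with "my-timer" as an arbitrary resource name:

/* Sketch: claim and map a DT register window in one step. */
#include <linux/err.h>
#include <linux/of_address.h>

static void __iomem *map_timer_regs(struct device_node *np)
{
	void __iomem *base = of_io_request_and_map(np, 0, "my-timer");

	if (IS_ERR(base))
		return NULL;	/* busy, absent, or ioremap failure */
	return base;
}
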