author     Daniel Thompson <daniel.thompson@linaro.org>  2015-03-26 15:23:24 -0400
committer  Ingo Molnar <mingo@kernel.org>  2015-03-27 03:33:58 -0400
commit     13dbeb384d2d3aa555ea48d511e8cb110bd172e0
tree       a368f8a4eaeec87369c337b0d677f8d18a065dd6
parent     cf7c9c170787d6870af54684822f58acc00a966c
timers, sched/clock: Remove suspend from clock_read_data()
Currently cd.read_data.suspended is read by the hotpath function
sched_clock(). This variable need not be accessed on the
hotpath. In fact, once it is removed, we can remove the
conditional branches from sched_clock() and install a dummy
read_sched_clock function to suspend the clock.
A new master copy of the function pointer
(actual_read_sched_clock) is introduced and is used for all
reads of the clock hardware except those within sched_clock()
itself.
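
For illustration only, and not part of the patch: below is a minimal
userspace sketch of the pattern being applied, with hypothetical names
and clock_gettime() standing in for the clock hardware. It shows the
reader function being swapped on suspend so the hot path stays
branch-free; the seqcount synchronization, the wrap mask and the
mult/shift scaling of the real code are omitted.

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct clock_read_data {
	uint64_t epoch_ns;	/* ns accumulated up to epoch_cyc */
	uint64_t epoch_cyc;	/* counter value at the last update */
	uint64_t (*read_sched_clock)(void);	/* current reader (or dummy) */
};

/* stand-in "hardware" counter for the sketch */
static uint64_t hw_clock_read(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static struct clock_read_data rd = { .read_sched_clock = hw_clock_read };

/* master copy of the reader, used everywhere except the hot path */
static uint64_t (*actual_read_sched_clock)(void) = hw_clock_read;

/* dummy reader: the clock appears stopped at its last update */
static uint64_t suspended_clock_read(void)
{
	return rd.epoch_cyc;
}

/* hot path: unconditional, no "if (suspended)" branch */
static uint64_t sched_clock_sketch(void)
{
	uint64_t cyc = rd.read_sched_clock() - rd.epoch_cyc;

	return rd.epoch_ns + cyc;
}

static void suspend_sketch(void)
{
	uint64_t cyc = actual_read_sched_clock();

	rd.epoch_ns += cyc - rd.epoch_cyc;	/* fold elapsed time into the epoch */
	rd.epoch_cyc = cyc;
	rd.read_sched_clock = suspended_clock_read;
}

static void resume_sketch(void)
{
	rd.epoch_cyc = actual_read_sched_clock();	/* re-sync; epoch_ns carries over */
	rd.read_sched_clock = actual_read_sched_clock;
}

int main(void)
{
	printf("running:   %llu ns\n", (unsigned long long)sched_clock_sketch());
	suspend_sketch();
	printf("suspended: %llu ns\n", (unsigned long long)sched_clock_sketch());
	resume_sketch();
	printf("resumed:   %llu ns\n", (unsigned long long)sched_clock_sketch());
	return 0;
}

In the real patch the frozen behaviour comes from
suspended_sched_clock_read() returning cd.read_data.epoch_cyc, while
cd.actual_read_sched_clock keeps the master copy of the reader for
everything outside the hot path (see the diff below).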
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1427397806-20889-4-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  kernel/time/sched_clock.c | 40
1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 872e0685d1fb..52ea5d976393 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -28,10 +28,9 @@
  * @read_sched_clock:	Current clock source (or dummy source when suspended)
  * @mult:		Multipler for scaled math conversion
  * @shift:		Shift value for scaled math conversion
- * @suspended:		Flag to indicate if the clock is suspended (stopped)
  *
  * Care must be taken when updating this structure; it is read by
- * some very hot code paths. It occupies <=48 bytes and, when combined
+ * some very hot code paths. It occupies <=40 bytes and, when combined
  * with the seqcount used to synchronize access, comfortably fits into
  * a 64 byte cache line.
  */
@@ -42,7 +41,6 @@ struct clock_read_data {
 	u64 (*read_sched_clock)(void);
 	u32 mult;
 	u32 shift;
-	bool suspended;
 };
 
 /**
@@ -64,6 +62,7 @@ struct clock_data {
 	struct clock_read_data read_data;
 	ktime_t wrap_kt;
 	unsigned long rate;
+	u64 (*actual_read_sched_clock)(void);
 };
 
 static struct hrtimer sched_clock_timer;
@@ -83,6 +82,8 @@ static u64 notrace jiffy_sched_clock_read(void)
 static struct clock_data cd ____cacheline_aligned = {
 	.read_data = { .mult = NSEC_PER_SEC / HZ,
 		       .read_sched_clock = jiffy_sched_clock_read, },
+	.actual_read_sched_clock = jiffy_sched_clock_read,
+
 };
 
 static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
@@ -99,12 +100,9 @@ unsigned long long notrace sched_clock(void)
 	do {
 		seq = raw_read_seqcount_begin(&cd.seq);
 
-		res = rd->epoch_ns;
-		if (!rd->suspended) {
-			cyc = rd->read_sched_clock();
-			cyc = (cyc - rd->epoch_cyc) & rd->sched_clock_mask;
-			res += cyc_to_ns(cyc, rd->mult, rd->shift);
-		}
+		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
+		      rd->sched_clock_mask;
+		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
 	} while (read_seqcount_retry(&cd.seq, seq));
 
 	return res;
@@ -120,7 +118,7 @@ static void notrace update_sched_clock(void)
 	u64 ns;
 	struct clock_read_data *rd = &cd.read_data;
 
-	cyc = rd->read_sched_clock();
+	cyc = cd.actual_read_sched_clock();
 	ns = rd->epoch_ns +
 	     cyc_to_ns((cyc - rd->epoch_cyc) & rd->sched_clock_mask,
 		       rd->mult, rd->shift);
@@ -166,10 +164,11 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 
 	/* update epoch for new counter and update epoch_ns from old counter*/
 	new_epoch = read();
-	cyc = rd->read_sched_clock();
+	cyc = cd.actual_read_sched_clock();
 	ns = rd->epoch_ns +
 	     cyc_to_ns((cyc - rd->epoch_cyc) & rd->sched_clock_mask,
 		       rd->mult, rd->shift);
+	cd.actual_read_sched_clock = read;
 
 	raw_write_seqcount_begin(&cd.seq);
 	rd->read_sched_clock = read;
@@ -209,7 +208,7 @@ void __init sched_clock_postinit(void)
 	 * If no sched_clock function has been provided at that point,
 	 * make it the final one one.
 	 */
-	if (cd.read_data.read_sched_clock == jiffy_sched_clock_read)
+	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
 		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
 
 	update_sched_clock();
@@ -223,13 +222,24 @@ void __init sched_clock_postinit(void)
 	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
 }
 
+/*
+ * Clock read function for use when the clock is suspended.
+ *
+ * This function makes it appear to sched_clock() as if the clock
+ * stopped counting at its last update.
+ */
+static u64 notrace suspended_sched_clock_read(void)
+{
+	return cd.read_data.epoch_cyc;
+}
+
 static int sched_clock_suspend(void)
 {
 	struct clock_read_data *rd = &cd.read_data;
 
 	update_sched_clock();
 	hrtimer_cancel(&sched_clock_timer);
-	rd->suspended = true;
+	rd->read_sched_clock = suspended_sched_clock_read;
 	return 0;
 }
 
@@ -237,9 +247,9 @@ static void sched_clock_resume(void)
 {
 	struct clock_read_data *rd = &cd.read_data;
 
-	rd->epoch_cyc = rd->read_sched_clock();
+	rd->epoch_cyc = cd.actual_read_sched_clock();
 	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
-	rd->suspended = false;
+	rd->read_sched_clock = cd.actual_read_sched_clock;
 }
 
 static struct syscore_ops sched_clock_ops = {