| author | Daniel Thompson <daniel.thompson@linaro.org> | 2015-03-26 15:23:26 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2015-03-27 03:34:00 -0400 |
| commit | 1809bfa44e1019e397fabaa6f2349bb7237e57a4 (patch) | |
| tree | 3d2eba58ee4389f00e87b752599c9703f34b7150 /kernel/time | |
| parent | 9fee69a8c8070b38b558161a3f18bd5e2b664682 (diff) | |
timers, sched/clock: Avoid deadlock during read from NMI
Currently it is possible for an NMI (or FIQ on ARM) to come in
and read sched_clock() whilst update_sched_clock() has locked
the seqcount for writing. This results in the NMI handler
locking up when it calls raw_read_seqcount_begin().
This patch fixes the NMI safety issues by providing banked clock
data. This is a similar approach to the one used in Thomas
Gleixner's 4396e058c52e ("timekeeping: Provide fast and NMI safe
access to CLOCK_MONOTONIC").
Suggested-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1427397806-20889-6-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
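The banked-data scheme described in the message above can be modelled outside the kernel. The sketch below is a minimal user-space illustration of the same odd/even "latch" idea, written with C11 atomics and pthreads rather than the kernel's seqcount primitives; the names (`struct snapshot`, `publish()`, `snapshot_read()`) are illustrative only, and the plain struct copies are a simplification of what `raw_write_seqcount_latch()`/`raw_read_seqcount()` provide. Build with `cc -std=c11 -pthread latch_demo.c`.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the clock data; the real struct clock_read_data has more fields. */
struct snapshot {
	uint64_t epoch_ns;
	uint64_t epoch_cyc;
};

/* Two banked copies, steered by the low bit of a sequence counter. */
static struct snapshot data[2];
static atomic_uint seq;

/*
 * Writer, modelled on update_clock_read_data(): the copy readers are
 * currently steered to is never the one being modified, so a reader that
 * interrupts the writer (the NMI case) never waits for the writer.
 */
static void publish(const struct snapshot *s)
{
	data[1] = *s;                                              /* update the odd copy   */
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);  /* steer readers to odd  */
	data[0] = *s;                                              /* update the even copy  */
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);  /* steer readers to even */
}

/*
 * Reader, modelled on the loop in sched_clock(): pick the copy selected by
 * the low bit of the sequence and retry only if the counter moved meanwhile.
 */
static struct snapshot snapshot_read(void)
{
	struct snapshot s;
	unsigned int start;

	do {
		start = atomic_load_explicit(&seq, memory_order_acquire);
		s = data[start & 1];
	} while (atomic_load_explicit(&seq, memory_order_acquire) != start);

	return s;
}

static void *reader(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		struct snapshot s = snapshot_read();
		/* Both fields must always come from the same publish(). */
		if (s.epoch_ns != s.epoch_cyc) {
			fprintf(stderr, "mismatched snapshot!\n");
			return (void *)1;
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	void *ret;

	pthread_create(&t, NULL, reader, NULL);
	for (uint64_t i = 0; i < 1000000; i++) {
		struct snapshot s = { .epoch_ns = i, .epoch_cyc = i };
		publish(&s);
	}
	pthread_join(t, &ret);
	puts(ret ? "FAIL" : "OK: readers never saw torn data");
	return ret != NULL;
}
```

The writer never blocks readers: a reader that lands in the middle of `publish()` is steered to whichever copy is not being modified, which is why an NMI calling sched_clock() can no longer deadlock against update_sched_clock().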
Diffstat (limited to 'kernel/time')
| -rw-r--r-- | kernel/time/sched_clock.c | 103 |
1 file changed, 68 insertions(+), 35 deletions(-)
```diff
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 8adb9d0c969a..eeea1e950b72 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -47,19 +47,20 @@ struct clock_read_data {
  * struct clock_data - all data needed for sched_clock (including
  *                     registration of a new clock source)
  *
- * @seq:		Sequence counter for protecting updates.
+ * @seq:		Sequence counter for protecting updates. The lowest
+ *			bit is the index for @read_data.
  * @read_data:		Data required to read from sched_clock.
  * @wrap_kt:		Duration for which clock can run before wrapping
  * @rate:		Tick rate of the registered clock
  * @actual_read_sched_clock: Registered clock read function
  *
  * The ordering of this structure has been chosen to optimize cache
- * performance. In particular seq and read_data (combined) should fit
+ * performance. In particular seq and read_data[0] (combined) should fit
  * into a single 64 byte cache line.
  */
 struct clock_data {
 	seqcount_t seq;
-	struct clock_read_data read_data;
+	struct clock_read_data read_data[2];
 	ktime_t wrap_kt;
 	unsigned long rate;
 	u64 (*actual_read_sched_clock)(void);
@@ -80,10 +81,9 @@ static u64 notrace jiffy_sched_clock_read(void)
 }
 
 static struct clock_data cd ____cacheline_aligned = {
-	.read_data = { .mult = NSEC_PER_SEC / HZ,
-		       .read_sched_clock = jiffy_sched_clock_read, },
+	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
+			  .read_sched_clock = jiffy_sched_clock_read, },
 	.actual_read_sched_clock = jiffy_sched_clock_read,
-
 };
 
 static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
@@ -95,10 +95,11 @@ unsigned long long notrace sched_clock(void)
 {
 	u64 cyc, res;
 	unsigned long seq;
-	struct clock_read_data *rd = &cd.read_data;
+	struct clock_read_data *rd;
 
 	do {
-		seq = raw_read_seqcount_begin(&cd.seq);
+		seq = raw_read_seqcount(&cd.seq);
+		rd = cd.read_data + (seq & 1);
 
 		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
 		      rd->sched_clock_mask;
@@ -109,26 +110,50 @@ unsigned long long notrace sched_clock(void)
 }
 
 /*
+ * Updating the data required to read the clock.
+ *
+ * sched_clock will never observe mis-matched data even if called from
+ * an NMI. We do this by maintaining an odd/even copy of the data and
+ * steering sched_clock to one or the other using a sequence counter.
+ * In order to preserve the data cache profile of sched_clock as much
+ * as possible the system reverts back to the even copy when the update
+ * completes; the odd copy is used *only* during an update.
+ */
+static void update_clock_read_data(struct clock_read_data *rd)
+{
+	/* update the backup (odd) copy with the new data */
+	cd.read_data[1] = *rd;
+
+	/* steer readers towards the odd copy */
+	raw_write_seqcount_latch(&cd.seq);
+
+	/* now its safe for us to update the normal (even) copy */
+	cd.read_data[0] = *rd;
+
+	/* switch readers back to the even copy */
+	raw_write_seqcount_latch(&cd.seq);
+}
+
+/*
  * Atomically update the sched_clock epoch.
  */
 static void update_sched_clock(void)
 {
-	unsigned long flags;
 	u64 cyc;
 	u64 ns;
-	struct clock_read_data *rd = &cd.read_data;
+	struct clock_read_data rd;
+
+	rd = cd.read_data[0];
 
 	cyc = cd.actual_read_sched_clock();
-	ns = rd->epoch_ns +
-		cyc_to_ns((cyc - rd->epoch_cyc) & rd->sched_clock_mask,
-			  rd->mult, rd->shift);
+	ns = rd.epoch_ns +
+		cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask,
+			  rd.mult, rd.shift);
+
+	rd.epoch_ns = ns;
+	rd.epoch_cyc = cyc;
 
-	raw_local_irq_save(flags);
-	raw_write_seqcount_begin(&cd.seq);
-	rd->epoch_ns = ns;
-	rd->epoch_cyc = cyc;
-	raw_write_seqcount_end(&cd.seq);
-	raw_local_irq_restore(flags);
+	update_clock_read_data(&rd);
 }
 
 static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
@@ -145,7 +170,7 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	u32 new_mult, new_shift;
 	unsigned long r;
 	char r_unit;
-	struct clock_read_data *rd = &cd.read_data;
+	struct clock_read_data rd;
 
 	if (cd.rate > rate)
 		return;
@@ -162,22 +187,23 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
 	cd.wrap_kt = ns_to_ktime(wrap);
 
+	rd = cd.read_data[0];
+
 	/* update epoch for new counter and update epoch_ns from old counter*/
 	new_epoch = read();
 	cyc = cd.actual_read_sched_clock();
-	ns = rd->epoch_ns +
-		cyc_to_ns((cyc - rd->epoch_cyc) & rd->sched_clock_mask,
-			  rd->mult, rd->shift);
+	ns = rd.epoch_ns +
+		cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask,
+			  rd.mult, rd.shift);
 	cd.actual_read_sched_clock = read;
 
-	raw_write_seqcount_begin(&cd.seq);
-	rd->read_sched_clock = read;
-	rd->sched_clock_mask = new_mask;
-	rd->mult = new_mult;
-	rd->shift = new_shift;
-	rd->epoch_cyc = new_epoch;
-	rd->epoch_ns = ns;
-	raw_write_seqcount_end(&cd.seq);
+	rd.read_sched_clock = read;
+	rd.sched_clock_mask = new_mask;
+	rd.mult = new_mult;
+	rd.shift = new_shift;
+	rd.epoch_cyc = new_epoch;
+	rd.epoch_ns = ns;
+	update_clock_read_data(&rd);
 
 	r = rate;
 	if (r >= 4000000) {
@@ -227,15 +253,22 @@ void __init sched_clock_postinit(void)
  *
  * This function makes it appear to sched_clock() as if the clock
  * stopped counting at its last update.
+ *
+ * This function must only be called from the critical
+ * section in sched_clock(). It relies on the read_seqcount_retry()
+ * at the end of the critical section to be sure we observe the
+ * correct copy of epoch_cyc.
  */
 static u64 notrace suspended_sched_clock_read(void)
 {
-	return cd.read_data.epoch_cyc;
+	unsigned long seq = raw_read_seqcount(&cd.seq);
+
+	return cd.read_data[seq & 1].epoch_cyc;
 }
 
 static int sched_clock_suspend(void)
 {
-	struct clock_read_data *rd = &cd.read_data;
+	struct clock_read_data *rd = &cd.read_data[0];
 
 	update_sched_clock();
 	hrtimer_cancel(&sched_clock_timer);
@@ -245,7 +278,7 @@ static int sched_clock_suspend(void)
 
 static void sched_clock_resume(void)
 {
-	struct clock_read_data *rd = &cd.read_data;
+	struct clock_read_data *rd = &cd.read_data[0];
 
 	rd->epoch_cyc = cd.actual_read_sched_clock();
 	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
```
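A side note on the comment updated in the first hunk, which asks that cd.seq plus read_data[0] still fit in a single 64-byte cache line after read_data becomes a two-element array. Assuming the struct clock_read_data layout in kernel/time/sched_clock.c at the time (three u64 fields, one function pointer, two u32 fields) and a 64-bit build without debug options, that is a 4-byte sequence counter padded to 8 bytes followed by 40 bytes of read data, 48 bytes in total. The stand-alone sketch below re-checks that arithmetic with stand-in types, since the kernel headers are not usable here:

```c
#include <stddef.h>
#include <stdint.h>

/* Stand-ins for seqcount_t and struct clock_read_data; the field layout is
 * assumed from kernel/time/sched_clock.c and sized for a 64-bit build. */
typedef unsigned int seqcount_stub_t;

struct clock_read_data_stub {
	uint64_t epoch_ns;
	uint64_t epoch_cyc;
	uint64_t sched_clock_mask;
	uint64_t (*read_sched_clock)(void);
	uint32_t mult;
	uint32_t shift;
};

struct clock_data_stub {
	seqcount_stub_t seq;			/* 4 bytes, padded to 8 */
	struct clock_read_data_stub read_data[2];
};

/* seq + read_data[0] end at offset 8 + 40 = 48, i.e. inside one 64-byte line. */
_Static_assert(offsetof(struct clock_data_stub, read_data) +
	       sizeof(struct clock_read_data_stub) <= 64,
	       "seq and read_data[0] no longer share a cache line");

int main(void)
{
	return 0;
}
```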
