diff options
 kernel/time/sched_clock.c | 26 +++++++++++-----------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index ca3bc5c7027c..1751e956add9 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -58,23 +58,21 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 
 unsigned long long notrace sched_clock(void)
 {
-	u64 epoch_ns;
-	u64 epoch_cyc;
-	u64 cyc;
+	u64 cyc, res;
 	unsigned long seq;
 
-	if (cd.suspended)
-		return cd.epoch_ns;
-
 	do {
 		seq = raw_read_seqcount_begin(&cd.seq);
-		epoch_cyc = cd.epoch_cyc;
-		epoch_ns = cd.epoch_ns;
+
+		res = cd.epoch_ns;
+		if (!cd.suspended) {
+			cyc = read_sched_clock();
+			cyc = (cyc - cd.epoch_cyc) & sched_clock_mask;
+			res += cyc_to_ns(cyc, cd.mult, cd.shift);
+		}
 	} while (read_seqcount_retry(&cd.seq, seq));
 
-	cyc = read_sched_clock();
-	cyc = (cyc - epoch_cyc) & sched_clock_mask;
-	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
+	return res;
 }
 
 /*
@@ -111,7 +109,6 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 {
 	u64 res, wrap, new_mask, new_epoch, cyc, ns;
 	u32 new_mult, new_shift;
-	ktime_t new_wrap_kt;
 	unsigned long r;
 	char r_unit;
 
@@ -124,10 +121,11 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
 
 	new_mask = CLOCKSOURCE_MASK(bits);
+	cd.rate = rate;
 
 	/* calculate how many nanosecs until we risk wrapping */
 	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
-	new_wrap_kt = ns_to_ktime(wrap);
+	cd.wrap_kt = ns_to_ktime(wrap);
 
 	/* update epoch for new counter and update epoch_ns from old counter*/
 	new_epoch = read();
@@ -138,8 +136,6 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	raw_write_seqcount_begin(&cd.seq);
 	read_sched_clock = read;
 	sched_clock_mask = new_mask;
-	cd.rate = rate;
-	cd.wrap_kt = new_wrap_kt;
 	cd.mult = new_mult;
 	cd.shift = new_shift;
 	cd.epoch_cyc = new_epoch;