Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--	kernel/sched_clock.c	33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 857a1291fd2..074edc98937 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -149,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1,
 u64 sched_clock_cpu(int cpu)
 {
 	struct sched_clock_data *scd = cpu_sdc(cpu);
-	u64 now, clock;
+	u64 now, clock, this_clock, remote_clock;
 
 	if (unlikely(!sched_clock_running))
 		return 0ull;
@@ -158,26 +158,36 @@ u64 sched_clock_cpu(int cpu)
 	now = sched_clock();
 
 	if (cpu != raw_smp_processor_id()) {
-		/*
-		 * in order to update a remote cpu's clock based on our
-		 * unstable raw time rebase it against:
-		 *	tick_raw	(offset between raw counters)
-		 *	tick_gotd	(tick offset between cpus)
-		 */
 		struct sched_clock_data *my_scd = this_scd();
 
 		lock_double_clock(scd, my_scd);
 
-		now += scd->tick_raw - my_scd->tick_raw;
-		now += my_scd->tick_gtod - scd->tick_gtod;
+		this_clock = __update_sched_clock(my_scd, now);
+		remote_clock = scd->clock;
+
+		/*
+		 * Use the opportunity that we have both locks
+		 * taken to couple the two clocks: we take the
+		 * larger time as the latest time for both
+		 * runqueues. (this creates monotonic movement)
+		 */
+		if (likely(remote_clock < this_clock)) {
+			clock = this_clock;
+			scd->clock = clock;
+		} else {
+			/*
+			 * Should be rare, but possible:
+			 */
+			clock = remote_clock;
+			my_scd->clock = remote_clock;
+		}
 
 		__raw_spin_unlock(&my_scd->lock);
 	} else {
 		__raw_spin_lock(&scd->lock);
+		clock = __update_sched_clock(scd, now);
 	}
 
-	clock = __update_sched_clock(scd, now);
-
 	__raw_spin_unlock(&scd->lock);
 
 	return clock;
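
As an aside, the coupling logic added above boils down to: advance the local clock, then take the larger of the local and remote values and write it back to both sides, so neither clock is ever observed moving backwards relative to the other. Below is a minimal user-space sketch of that idea; struct toy_clock and toy_clock_read_remote() are hypothetical names, pthread mutexes stand in for the kernel's raw spinlocks and lock_double_clock(), and the simple max-against-raw step stands in for __update_sched_clock()'s clamping.

/* Toy model of the clock coupling done in sched_clock_cpu() above. */
#include <stdint.h>
#include <pthread.h>

struct toy_clock {
	pthread_mutex_t lock;
	uint64_t clock;			/* last value handed out */
};

/*
 * Read @remote's clock from @local's side, coupling the two: the larger
 * of the two times wins and is propagated to both, mirroring the
 * likely/unlikely branches in the patch. Locks are taken in pointer
 * order to avoid the ABBA deadlock that lock_double_clock() guards
 * against in the kernel.
 */
static uint64_t toy_clock_read_remote(struct toy_clock *local,
				      struct toy_clock *remote,
				      uint64_t local_raw_now)
{
	uint64_t this_clock, remote_clock, clock;
	pthread_mutex_t *first, *second;

	first  = local < remote ? &local->lock : &remote->lock;
	second = local < remote ? &remote->lock : &local->lock;
	pthread_mutex_lock(first);
	pthread_mutex_lock(second);

	/* crude stand-in for __update_sched_clock(my_scd, now) */
	if (local_raw_now > local->clock)
		local->clock = local_raw_now;
	this_clock = local->clock;
	remote_clock = remote->clock;

	if (remote_clock < this_clock) {
		clock = this_clock;
		remote->clock = clock;	/* pull the remote clock forward */
	} else {
		clock = remote_clock;	/* rare: remote was ahead of us */
		local->clock = clock;
	}

	pthread_mutex_unlock(second);
	pthread_mutex_unlock(first);
	return clock;
}
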
@@ -223,7 +233,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 	struct sched_clock_data *scd = this_scd();
-	u64 now = sched_clock();
 
 	/*
 	 * Override the previous timestamp and ignore all
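
A hypothetical smoke test for the sketch above (compile the two together): two threads each read the other's clock through toy_clock_read_remote() with deliberately skewed raw counters, and assert that the sequence each observer sees never moves backwards, i.e. the "monotonic movement" the new comment promises.

#include <assert.h>
#include <stdio.h>

static struct toy_clock clk[2];

static void *reader(void *arg)
{
	int me = (int)(intptr_t)arg;
	uint64_t raw = 0, last = 0, t;
	int i;

	for (i = 0; i < 100000; i++) {
		raw += 1 + me;		/* per-thread skewed raw counter */
		t = toy_clock_read_remote(&clk[me], &clk[!me], raw);
		assert(t >= last);	/* each observer stays monotonic */
		last = t;
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_mutex_init(&clk[0].lock, NULL);
	pthread_mutex_init(&clk[1].lock, NULL);

	pthread_create(&a, NULL, reader, (void *)(intptr_t)0);
	pthread_create(&b, NULL, reader, (void *)(intptr_t)1);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	puts("coupled clocks stayed monotonic");
	return 0;
}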