Diffstat (limited to 'kernel/hrtimer.c')

-rw-r--r--  kernel/hrtimer.c | 51
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c71bcd549241..b675a67c9ac3 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -43,6 +43,8 @@
 #include <linux/seq_file.h>
 #include <linux/err.h>
 #include <linux/debugobjects.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
 
 #include <asm/uaccess.h>
 
@@ -198,8 +200,19 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 {
         struct hrtimer_clock_base *new_base;
         struct hrtimer_cpu_base *new_cpu_base;
+        int cpu, preferred_cpu = -1;
+
+        cpu = smp_processor_id();
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+        if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
+                preferred_cpu = get_nohz_load_balancer();
+                if (preferred_cpu >= 0)
+                        cpu = preferred_cpu;
+        }
+#endif
 
-        new_cpu_base = &__get_cpu_var(hrtimer_bases);
+again:
+        new_cpu_base = &per_cpu(hrtimer_bases, cpu);
         new_base = &new_cpu_base->clock_base[base->index];
 
         if (base != new_base) {
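The hunk above picks which CPU's hrtimer base a newly armed timer should be queued on: if the current CPU is idle, the timer is not pinned, and the timer_migration sysctl is enabled, the timer is handed to the nohz load balancer CPU. A minimal standalone sketch of that selection, using only helpers visible in the hunk (get_sysctl_timer_migration(), get_nohz_load_balancer(), idle_cpu()); the helper name is made up for illustration:

static int pick_timer_target_cpu(int this_cpu, int pinned)
{
#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
        /* An idle CPU defers non-pinned timers to the nohz balancer CPU. */
        if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
                int preferred_cpu = get_nohz_load_balancer();

                if (preferred_cpu >= 0)
                        return preferred_cpu;
        }
#endif
        /* Otherwise the timer stays on the CPU that armed it. */
        return this_cpu;
}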
@@ -219,6 +232,40 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
                 timer->base = NULL;
                 spin_unlock(&base->cpu_base->lock);
                 spin_lock(&new_base->cpu_base->lock);
+
+                /* Optimized away for NOHZ=n SMP=n */
+                if (cpu == preferred_cpu) {
+                        /* Calculate clock monotonic expiry time */
+#ifdef CONFIG_HIGH_RES_TIMERS
+                        ktime_t expires = ktime_sub(hrtimer_get_expires(timer),
+                                                        new_base->offset);
+#else
+                        ktime_t expires = hrtimer_get_expires(timer);
+#endif
+
+                        /*
+                         * Get the next event on target cpu from the
+                         * clock events layer.
+                         * This covers the highres=off nohz=on case as well.
+                         */
+                        ktime_t next = clockevents_get_next_event(cpu);
+
+                        ktime_t delta = ktime_sub(expires, next);
+
+                        /*
+                         * We do not migrate the timer when it is expiring
+                         * before the next event on the target cpu because
+                         * we cannot reprogram the target cpu hardware and
+                         * we would cause it to fire late.
+                         */
+                        if (delta.tv64 < 0) {
+                                cpu = smp_processor_id();
+                                spin_unlock(&new_base->cpu_base->lock);
+                                spin_lock(&base->cpu_base->lock);
+                                timer->base = base;
+                                goto again;
+                        }
+                }
                 timer->base = new_base;
         }
         return new_base;
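The guard added above keeps the timer on the current CPU when it would expire before the target CPU's next programmed clock event, because the remote clock event device cannot be reprogrammed from here and the timer would fire late. The comparison in isolation, assuming the same ktime_t layout with a tv64 member that the hunk itself uses (the function name is illustrative):

static int expiry_after_next_event(ktime_t expires, ktime_t next_event)
{
        /* Negative delta: the timer fires before the target CPU's next event. */
        return ktime_sub(expires, next_event).tv64 >= 0;
}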
@@ -236,7 +283,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
         return base;
 }
 
-# define switch_hrtimer_base(t, b)        (b)
+# define switch_hrtimer_base(t, b, p)        (b)
 
 #endif /* !CONFIG_SMP */
 
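For context, a caller-side sketch of how the new pinned distinction is typically exercised. This is an assumption based on the HRTIMER_MODE_*_PINNED modes introduced elsewhere in this series, not something shown in this diff; my_timer and my_timer_arm are made-up names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;

/*
 * Arm an already initialized timer.  A pinned mode keeps the timer on
 * this CPU and bypasses the idle-CPU migration added above; the plain
 * relative mode allows it to be queued on the nohz load balancer CPU.
 */
static void my_timer_arm(u64 delay_ns, bool keep_local)
{
        hrtimer_start(&my_timer, ns_to_ktime(delay_ns),
                      keep_local ? HRTIMER_MODE_REL_PINNED : HRTIMER_MODE_REL);
}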