author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2018-04-03 17:17:00 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2018-04-07 12:49:54 -0400
commit	a59855cd8c613ba4bb95147f6176360d95f75e60 (patch)
tree	b37aceab76a9bec83e04910ee33c3fbc3f6e88ac
parent	23a8d888107ce4ce444eab2dcebf4cfb3578770b (diff)
time: hrtimer: Introduce hrtimer_next_event_without()
The next set of changes will need to compute the time to the next hrtimer
event over all hrtimers except for the scheduler tick one.

To that end introduce a new helper function, hrtimer_next_event_without(),
for computing the time until the next hrtimer event over all timers except
for one and modify the underlying code in __hrtimer_next_event_base() to
prepare it for being called by that new function.

No intentional changes in functionality.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
-rw-r--r--	include/linux/hrtimer.h	1
-rw-r--r--	kernel/time/hrtimer.c	55
2 files changed, 54 insertions(+), 2 deletions(-)
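For context, the helper introduced below is meant to be called by CPU-idle / NO_HZ tick code that owns the timer being excluded, typically the scheduler tick hrtimer. The following is a minimal sketch of the intended calling pattern; the function name sleep_length_without_tick() and the conversion of the expiry into a sleep length are illustrative assumptions for this example, not part of this patch:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/*
 * Illustrative sketch only (not part of this patch): ask the hrtimer core
 * for the next expiry on this CPU while ignoring the caller's own tick
 * timer, and turn the absolute expiry into a "how long can we sleep" delta.
 */
static ktime_t sleep_length_without_tick(struct hrtimer *tick_timer)
{
	u64 next_event = hrtimer_next_event_without(tick_timer);

	/* KTIME_MAX means no other hrtimer is queued on this CPU. */
	if (next_event == KTIME_MAX)
		return KTIME_MAX;

	/* next_event is an absolute CLOCK_MONOTONIC expiry in nanoseconds. */
	return ktime_sub(ns_to_ktime(next_event), ktime_get());
}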
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index c7902ca7c9f4..3892e9c8b2de 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -426,6 +426,7 @@ static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 }
 
 extern u64 hrtimer_get_next_event(void);
+extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
 
 extern bool hrtimer_active(const struct hrtimer *timer);
 
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 23788100e214..6d387dbd7304 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -490,6 +490,7 @@ __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
 	while ((base = __next_base((cpu_base), &(active))))
 
 static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
+					 const struct hrtimer *exclude,
 					 unsigned int active,
 					 ktime_t expires_next)
 {
@@ -502,9 +503,24 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
 
 		next = timerqueue_getnext(&base->active);
 		timer = container_of(next, struct hrtimer, node);
+		if (timer == exclude) {
+			/* Get to the next timer in the queue. */
+			struct rb_node *rbn = rb_next(&next->node);
+
+			next = rb_entry_safe(rbn, struct timerqueue_node, node);
+			if (!next)
+				continue;
+
+			timer = container_of(next, struct hrtimer, node);
+		}
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 		if (expires < expires_next) {
 			expires_next = expires;
+
+			/* Skip cpu_base update if a timer is being excluded. */
+			if (exclude)
+				continue;
+
 			if (timer->is_soft)
 				cpu_base->softirq_next_timer = timer;
 			else
@@ -548,7 +564,8 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
 	if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
 		active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
 		cpu_base->softirq_next_timer = NULL;
-		expires_next = __hrtimer_next_event_base(cpu_base, active, KTIME_MAX);
+		expires_next = __hrtimer_next_event_base(cpu_base, NULL,
+							 active, KTIME_MAX);
 
 		next_timer = cpu_base->softirq_next_timer;
 	}
@@ -556,7 +573,8 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
 	if (active_mask & HRTIMER_ACTIVE_HARD) {
 		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
 		cpu_base->next_timer = next_timer;
-		expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+		expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
+							 expires_next);
 	}
 
 	return expires_next;
@@ -1202,6 +1220,39 @@ u64 hrtimer_get_next_event(void)
 
 	return expires;
 }
+
+/**
+ * hrtimer_next_event_without - time until next expiry event w/o one timer
+ * @exclude: timer to exclude
+ *
+ * Returns the next expiry time over all timers except for the @exclude one or
+ * KTIME_MAX if none of them is pending.
+ */
+u64 hrtimer_next_event_without(const struct hrtimer *exclude)
+{
+	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+	u64 expires = KTIME_MAX;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+	if (__hrtimer_hres_active(cpu_base)) {
+		unsigned int active;
+
+		if (!cpu_base->softirq_activated) {
+			active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
+			expires = __hrtimer_next_event_base(cpu_base, exclude,
+							    active, KTIME_MAX);
+		}
+		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
+		expires = __hrtimer_next_event_base(cpu_base, exclude, active,
+						    expires);
+	}
+
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
+	return expires;
+}
 #endif
 
 static inline int hrtimer_clockid_to_base(clockid_t clock_id)