aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAnna-Maria Gleixner <anna-maria@linutronix.de>2017-12-21 05:41:56 -0500
committerIngo Molnar <mingo@kernel.org>2018-01-15 21:01:20 -0500
commitc458b1d102036eaa2c70e03000c959bd491c2037 (patch)
tree60311825bdc27386a846492cf9ef198a5a29a5af
parent98ecadd4305d8677ba77162152485798d47dcc85 (diff)
hrtimer: Prepare handling of hard and softirq based hrtimers
The softirq based hrtimer can utilize most of the existing hrtimers functions, but needs to operate on a different data set. Add an 'active_mask' parameter to various functions so the hard and soft bases can be selected. Fixup the existing callers and hand in the ACTIVE_HARD mask. Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> Cc: Christoph Hellwig <hch@lst.de> Cc: John Stultz <john.stultz@linaro.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: keescook@chromium.org Link: http://lkml.kernel.org/r/20171221104205.7269-28-anna-maria@linutronix.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/time/hrtimer.c38
1 file changed, 29 insertions, 9 deletions
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e2353f5cdf51..ba4674e9adc2 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -60,6 +60,15 @@
60#include "tick-internal.h" 60#include "tick-internal.h"
61 61
62/* 62/*
63 * Masks for selecting the soft and hard context timers from
64 * cpu_base->active
65 */
66#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
67#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
68#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
69#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
70
71/*
63 * The timer bases: 72 * The timer bases:
64 * 73 *
65 * There are more clockids than hrtimer bases. Thus, we index 74 * There are more clockids than hrtimer bases. Thus, we index
@@ -507,13 +516,24 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
507 return expires_next; 516 return expires_next;
508} 517}
509 518
510static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) 519/*
520 * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
521 * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
522 *
523 * @active_mask must be one of:
524 * - HRTIMER_ACTIVE_ALL,
525 * - HRTIMER_ACTIVE_SOFT, or
526 * - HRTIMER_ACTIVE_HARD.
527 */
528static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base,
529 unsigned int active_mask)
511{ 530{
512 unsigned int active = cpu_base->active_bases; 531 unsigned int active;
513 ktime_t expires_next = KTIME_MAX; 532 ktime_t expires_next = KTIME_MAX;
514 533
515 cpu_base->next_timer = NULL; 534 cpu_base->next_timer = NULL;
516 535
536 active = cpu_base->active_bases & active_mask;
517 expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next); 537 expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
518 538
519 return expires_next; 539 return expires_next;
@@ -553,7 +573,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
553{ 573{
554 ktime_t expires_next; 574 ktime_t expires_next;
555 575
556 expires_next = __hrtimer_get_next_event(cpu_base); 576 expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
557 577
558 if (skip_equal && expires_next == cpu_base->expires_next) 578 if (skip_equal && expires_next == cpu_base->expires_next)
559 return; 579 return;
@@ -1074,7 +1094,7 @@ u64 hrtimer_get_next_event(void)
1074 raw_spin_lock_irqsave(&cpu_base->lock, flags); 1094 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1075 1095
1076 if (!__hrtimer_hres_active(cpu_base)) 1096 if (!__hrtimer_hres_active(cpu_base))
1077 expires = __hrtimer_get_next_event(cpu_base); 1097 expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
1078 1098
1079 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 1099 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1080 1100
@@ -1248,10 +1268,10 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
1248} 1268}
1249 1269
1250static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, 1270static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
1251 unsigned long flags) 1271 unsigned long flags, unsigned int active_mask)
1252{ 1272{
1253 struct hrtimer_clock_base *base; 1273 struct hrtimer_clock_base *base;
1254 unsigned int active = cpu_base->active_bases; 1274 unsigned int active = cpu_base->active_bases & active_mask;
1255 1275
1256 for_each_active_base(base, cpu_base, active) { 1276 for_each_active_base(base, cpu_base, active) {
1257 struct timerqueue_node *node; 1277 struct timerqueue_node *node;
@@ -1314,10 +1334,10 @@ retry:
1314 */ 1334 */
1315 cpu_base->expires_next = KTIME_MAX; 1335 cpu_base->expires_next = KTIME_MAX;
1316 1336
1317 __hrtimer_run_queues(cpu_base, now, flags); 1337 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1318 1338
1319 /* Reevaluate the clock bases for the next expiry */ 1339 /* Reevaluate the clock bases for the next expiry */
1320 expires_next = __hrtimer_get_next_event(cpu_base); 1340 expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
1321 /* 1341 /*
1322 * Store the new expiry value so the migration code can verify 1342 * Store the new expiry value so the migration code can verify
1323 * against it. 1343 * against it.
@@ -1421,7 +1441,7 @@ void hrtimer_run_queues(void)
1421 1441
1422 raw_spin_lock_irqsave(&cpu_base->lock, flags); 1442 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1423 now = hrtimer_update_base(cpu_base); 1443 now = hrtimer_update_base(cpu_base);
1424 __hrtimer_run_queues(cpu_base, now, flags); 1444 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1425 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 1445 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1426} 1446}
1427 1447