Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r--  kernel/time/tick-sched.c  22
1 file changed, 11 insertions(+), 11 deletions(-)
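
Every hunk below is the same mechanical conversion: ktime_t is no longer a union wrapping a tv64 member but a plain signed 64-bit nanosecond count, so the .tv64 dereferences become direct scalar comparisons and assignments. A minimal standalone sketch of the before/after type shape (simplified stand-ins, not the actual kernel headers) might look like this:

#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;			/* stand-in for the kernel's s64 */

/* Old shape: ktime_t was a union, so the raw value lived behind .tv64. */
typedef union { s64 tv64; } old_ktime_t;

/* New shape: ktime_t is the 64-bit nanosecond count itself. */
typedef s64 new_ktime_t;

int main(void)
{
	old_ktime_t delta  = { .tv64 = 1000000 };
	old_ktime_t period = { .tv64 = 4000000 };
	new_ktime_t d = 1000000, p = 4000000;

	/* Old style: every comparison reached through the union member. */
	printf("old: %d\n", delta.tv64 < period.tv64);

	/* New style: plain scalar compare, as in the hunks below. */
	printf("new: %d\n", d < p);
	return 0;
}
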
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 71496a20e670..2c115fdab397 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -58,21 +58,21 @@ static void tick_do_update_jiffies64(ktime_t now)
 	 * Do a quick check without holding jiffies_lock:
 	 */
 	delta = ktime_sub(now, last_jiffies_update);
-	if (delta.tv64 < tick_period.tv64)
+	if (delta < tick_period)
 		return;
 
 	/* Reevaluate with jiffies_lock held */
 	write_seqlock(&jiffies_lock);
 
 	delta = ktime_sub(now, last_jiffies_update);
-	if (delta.tv64 >= tick_period.tv64) {
+	if (delta >= tick_period) {
 
 		delta = ktime_sub(delta, tick_period);
 		last_jiffies_update = ktime_add(last_jiffies_update,
 						tick_period);
 
 		/* Slow path for long timeouts */
-		if (unlikely(delta.tv64 >= tick_period.tv64)) {
+		if (unlikely(delta >= tick_period)) {
 			s64 incr = ktime_to_ns(tick_period);
 
 			ticks = ktime_divns(delta, incr);
@@ -101,7 +101,7 @@ static ktime_t tick_init_jiffy_update(void)
 
 	write_seqlock(&jiffies_lock);
 	/* Did we start the jiffies update yet ? */
-	if (last_jiffies_update.tv64 == 0)
+	if (last_jiffies_update == 0)
 		last_jiffies_update = tick_next_period;
 	period = last_jiffies_update;
 	write_sequnlock(&jiffies_lock);
@@ -669,7 +669,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&jiffies_lock);
-		basemono = last_jiffies_update.tv64;
+		basemono = last_jiffies_update;
 		basejiff = jiffies;
 	} while (read_seqretry(&jiffies_lock, seq));
 	ts->last_jiffies = basejiff;
@@ -697,7 +697,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	 */
 	delta = next_tick - basemono;
 	if (delta <= (u64)TICK_NSEC) {
-		tick.tv64 = 0;
+		tick = 0;
 
 		/*
 		 * Tell the timer code that the base is not idle, i.e. undo
@@ -764,10 +764,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		expires = KTIME_MAX;
 
 	expires = min_t(u64, expires, next_tick);
-	tick.tv64 = expires;
+	tick = expires;
 
 	/* Skip reprogram of event if its not changed */
-	if (ts->tick_stopped && (expires == dev->next_event.tv64))
+	if (ts->tick_stopped && (expires == dev->next_event))
 		goto out;
 
 	/*
@@ -864,7 +864,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 	}
 
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
-		ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
+		ts->sleep_length = NSEC_PER_SEC / HZ;
 		return false;
 	}
 
@@ -914,7 +914,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
 	ts->idle_calls++;
 
 	expires = tick_nohz_stop_sched_tick(ts, now, cpu);
-	if (expires.tv64 > 0LL) {
+	if (expires > 0LL) {
 		ts->idle_sleeps++;
 		ts->idle_expires = expires;
 	}
@@ -1051,7 +1051,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
 
-	dev->next_event.tv64 = KTIME_MAX;
+	dev->next_event = KTIME_MAX;
 
 	tick_sched_do_timer(now);
 	tick_sched_handle(ts, regs);