Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/jiffies.c	6
-rw-r--r--	kernel/time/sched_clock.c	52
-rw-r--r--	kernel/time/tick-broadcast.c	7
-rw-r--r--	kernel/time/tick-common.c	1
-rw-r--r--	kernel/time/tick-internal.h	5
-rw-r--r--	kernel/time/tick-sched.c	61
-rw-r--r--	kernel/time/timekeeping.c	53
7 files changed, 100 insertions, 85 deletions
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 7a925ba456fb..a6a5bf53e86d 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -51,7 +51,13 @@
  * HZ shrinks, so values greater than 8 overflow 32bits when
  * HZ=100.
  */
+#if HZ < 34
+#define JIFFIES_SHIFT	6
+#elif HZ < 67
+#define JIFFIES_SHIFT	7
+#else
 #define JIFFIES_SHIFT	8
+#endif
 
 static cycle_t jiffies_read(struct clocksource *cs)
 {
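
The jiffies clocksource feeds NSEC_PER_JIFFY << JIFFIES_SHIFT into its 32-bit mult field, so as HZ shrinks and NSEC_PER_JIFFY grows, the shift has to drop. The 34/67 cutoffs above line up with the roughly 11% adjustment headroom that clocksource_max_adjustment() reserves on top of mult; a minimal user-space sketch under that assumption (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static unsigned int max_safe_shift(unsigned long hz)
{
	uint64_t nsec_per_jiffy = NSEC_PER_SEC / hz;
	unsigned int shift;

	for (shift = 8; shift > 0; shift--) {
		uint64_t mult = nsec_per_jiffy << shift;

		/* leave ~11% headroom, mirroring clocksource_max_adjustment() */
		if (mult + mult * 11 / 100 <= UINT32_MAX)
			break;
	}
	return shift;
}

int main(void)
{
	const unsigned long hz[] = { 24, 33, 34, 66, 67, 100, 250, 1000 };
	size_t i;

	for (i = 0; i < sizeof(hz) / sizeof(hz[0]); i++)
		printf("HZ=%-4lu -> JIFFIES_SHIFT %u\n", hz[i], max_safe_shift(hz[i]));
	return 0;
}

Running it reproduces the boundaries in the hunk: shift 6 below HZ=34, shift 7 below HZ=67, shift 8 above.
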
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 68b799375981..4d23dc4d8139 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -74,7 +74,7 @@ unsigned long long notrace sched_clock(void)
 		return cd.epoch_ns;
 
 	do {
-		seq = read_seqcount_begin(&cd.seq);
+		seq = raw_read_seqcount_begin(&cd.seq);
 		epoch_cyc = cd.epoch_cyc;
 		epoch_ns = cd.epoch_ns;
 	} while (read_seqcount_retry(&cd.seq, seq));
@@ -99,10 +99,10 @@ static void notrace update_sched_clock(void)
 			  cd.mult, cd.shift);
 
 	raw_local_irq_save(flags);
-	write_seqcount_begin(&cd.seq);
+	raw_write_seqcount_begin(&cd.seq);
 	cd.epoch_ns = ns;
 	cd.epoch_cyc = cyc;
-	write_seqcount_end(&cd.seq);
+	raw_write_seqcount_end(&cd.seq);
 	raw_local_irq_restore(flags);
 }
 
@@ -116,20 +116,42 @@ static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
 void __init sched_clock_register(u64 (*read)(void), int bits,
 				 unsigned long rate)
 {
+	u64 res, wrap, new_mask, new_epoch, cyc, ns;
+	u32 new_mult, new_shift;
+	ktime_t new_wrap_kt;
 	unsigned long r;
-	u64 res, wrap;
 	char r_unit;
 
 	if (cd.rate > rate)
 		return;
 
 	WARN_ON(!irqs_disabled());
-	read_sched_clock = read;
-	sched_clock_mask = CLOCKSOURCE_MASK(bits);
-	cd.rate = rate;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
-	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);
+	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
+
+	new_mask = CLOCKSOURCE_MASK(bits);
+
+	/* calculate how many ns until we wrap */
+	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
+	new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
+
+	/* update epoch for new counter and update epoch_ns from old counter*/
+	new_epoch = read();
+	cyc = read_sched_clock();
+	ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+			  cd.mult, cd.shift);
+
+	raw_write_seqcount_begin(&cd.seq);
+	read_sched_clock = read;
+	sched_clock_mask = new_mask;
+	cd.rate = rate;
+	cd.wrap_kt = new_wrap_kt;
+	cd.mult = new_mult;
+	cd.shift = new_shift;
+	cd.epoch_cyc = new_epoch;
+	cd.epoch_ns = ns;
+	raw_write_seqcount_end(&cd.seq);
 
 	r = rate;
 	if (r >= 4000000) {
@@ -141,22 +163,12 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	} else
 		r_unit = ' ';
 
-	/* calculate how many ns until we wrap */
-	wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
-	cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
-
 	/* calculate the ns resolution of this counter */
-	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
+	res = cyc_to_ns(1ULL, new_mult, new_shift);
+
 	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
 		bits, r, r_unit, res, wrap);
 
-	update_sched_clock();
-
-	/*
-	 * Ensure that sched_clock() starts off at 0ns
-	 */
-	cd.epoch_ns = 0;
-
 	/* Enable IRQ time accounting if we have a fast enough sched_clock */
 	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
 		enable_sched_clock_irqtime();
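
The sched_clock_register() rework computes the new mult/shift, mask, wrap time, and epoch up front, then publishes all of them inside a single raw seqcount write section, so a concurrent sched_clock() reader sees either the complete old state or the complete new state. The raw_* seqcount variants also keep lockdep instrumentation out of a path that tracing and lockdep themselves call into. A user-space analogue of that publish/retry protocol, sketched with C11 atomics (illustrative only, not the kernel's seqcount_t):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct clock_data {
	atomic_uint seq;		/* even = stable, odd = update in flight */
	_Atomic uint64_t epoch_ns;
	_Atomic uint64_t epoch_cyc;
};

/* Writer: make the pair of epoch fields appear to change atomically. */
static void publish(struct clock_data *cd, uint64_t ns, uint64_t cyc)
{
	unsigned int seq = atomic_load_explicit(&cd->seq, memory_order_relaxed);

	atomic_store_explicit(&cd->seq, seq + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* odd seq visible before data */
	atomic_store_explicit(&cd->epoch_ns, ns, memory_order_relaxed);
	atomic_store_explicit(&cd->epoch_cyc, cyc, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* data visible before even seq */
	atomic_store_explicit(&cd->seq, seq + 2, memory_order_relaxed);
}

/* Reader: retry while a writer is (or was) in the middle of an update. */
static void snapshot(struct clock_data *cd, uint64_t *ns, uint64_t *cyc)
{
	unsigned int s1, s2;

	do {
		s1 = atomic_load_explicit(&cd->seq, memory_order_acquire);
		*ns = atomic_load_explicit(&cd->epoch_ns, memory_order_relaxed);
		*cyc = atomic_load_explicit(&cd->epoch_cyc, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);	/* data read before re-check */
		s2 = atomic_load_explicit(&cd->seq, memory_order_relaxed);
	} while ((s1 & 1) || s1 != s2);
}

int main(void)
{
	struct clock_data cd = { 0 };
	uint64_t ns, cyc;

	publish(&cd, 123456789, 42);
	snapshot(&cd, &ns, &cyc);
	printf("epoch_ns=%llu epoch_cyc=%llu\n",
	       (unsigned long long)ns, (unsigned long long)cyc);
	return 0;
}

Deriving the new epoch_ns from the old counter before the switch (the cyc_to_ns() line in the hunk) is what removes the old "reset epoch_ns to 0" step: sched_clock() stays continuous across the clock swap instead of jumping back to zero.
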
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 9532690daaa9..98977a57ac72 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -538,10 +538,10 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
  * Called from irq_enter() when idle was interrupted to reenable the
  * per cpu device.
  */
-void tick_check_oneshot_broadcast(int cpu)
+void tick_check_oneshot_broadcast_this_cpu(void)
 {
-	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
-		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
+		struct tick_device *td = &__get_cpu_var(tick_cpu_device);
 
 		/*
 		 * We might be in the middle of switching over from
@@ -756,6 +756,7 @@ out:
 static void tick_broadcast_clear_oneshot(int cpu)
 {
 	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
+	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
 }
 
 static void tick_broadcast_init_next_event(struct cpumask *mask,
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 162b03ab0ad2..20b2fe37d105 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -85,6 +85,7 @@ static void tick_periodic(int cpu)
 
 		do_timer(1);
 		write_sequnlock(&jiffies_lock);
+		update_wall_time();
 	}
 
 	update_process_times(user_mode(get_irq_regs()));
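
Together with the do_timer() and xtime_update() hunks in timekeeping.c below, this moves update_wall_time() out from under jiffies_lock: the tick path advances jiffies inside the seqlock and runs the comparatively heavy timekeeping accumulation only after dropping it, serialized by timekeeper_lock/timekeeper_seq instead. A toy pthreads analogue of the resulting lock scoping (illustrative names, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t jiffies_mtx = PTHREAD_MUTEX_INITIALIZER;	/* hot, reader-heavy */
static pthread_mutex_t timekeeper_mtx = PTHREAD_MUTEX_INITIALIZER;	/* timekeeping's own */
static unsigned long long jiffies64;
static unsigned long long wall_ns;

static void update_wall_time_analogue(void)
{
	pthread_mutex_lock(&timekeeper_mtx);
	wall_ns += 1000000;		/* stand-in for clocksource accumulation */
	pthread_mutex_unlock(&timekeeper_mtx);
}

static void tick_periodic_analogue(void)
{
	pthread_mutex_lock(&jiffies_mtx);
	jiffies64++;			/* keep the hot critical section minimal */
	pthread_mutex_unlock(&jiffies_mtx);

	update_wall_time_analogue();	/* heavier work, now outside jiffies_mtx */
}

int main(void)
{
	tick_periodic_analogue();
	printf("jiffies64=%llu wall_ns=%llu\n", jiffies64, wall_ns);
	return 0;
}
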
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 18e71f7fbc2a..8329669b51ec 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -51,7 +51,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
-extern void tick_check_oneshot_broadcast(int cpu);
+extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
@@ -62,7 +62,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
-static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
 static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 
@@ -155,3 +155,4 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 #endif
 
 extern void do_timer(unsigned long ticks);
+extern void update_wall_time(void);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index ea20f7d1ac2c..9f8af69c67ec 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -86,6 +86,7 @@ static void tick_do_update_jiffies64(ktime_t now)
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
 	write_sequnlock(&jiffies_lock);
+	update_wall_time();
 }
 
 /*
@@ -177,7 +178,7 @@ static bool can_stop_full_tick(void)
 	 * TODO: kick full dynticks CPUs when
 	 * sched_clock_stable is set.
 	 */
-	if (!sched_clock_stable) {
+	if (!sched_clock_stable()) {
 		trace_tick_stop(0, "unstable sched clock\n");
 		/*
 		 * Don't allow the user to think they can get
@@ -391,11 +392,9 @@ __setup("nohz=", setup_tick_nohz);
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
-	int cpu = smp_processor_id();
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	unsigned long flags;
 
-	ts->idle_waketime = now;
+	__this_cpu_write(tick_cpu_sched.idle_waketime, now);
 
 	local_irq_save(flags);
 	tick_do_update_jiffies64(now);
@@ -426,17 +425,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
 
 }
 
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 {
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-
-	update_ts_time_stats(cpu, ts, now, NULL);
+	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 {
 	ktime_t now = ktime_get();
 
@@ -536,12 +533,13 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
 
+	time_delta = timekeeping_max_deferment();
+
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&jiffies_lock);
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
-		time_delta = timekeeping_max_deferment();
 	} while (read_seqretry(&jiffies_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
@@ -681,18 +679,18 @@ out:
 static void tick_nohz_full_stop_tick(struct tick_sched *ts)
 {
 #ifdef CONFIG_NO_HZ_FULL
-        int cpu = smp_processor_id();
+	int cpu = smp_processor_id();
 
-        if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
-                return;
+	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
+		return;
 
-        if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
-                return;
+	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
+		return;
 
-        if (!can_stop_full_tick())
-                return;
+	if (!can_stop_full_tick())
+		return;
 
-        tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
 #endif
 }
 
@@ -754,7 +752,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
 	ktime_t now, expires;
 	int cpu = smp_processor_id();
 
-	now = tick_nohz_start_idle(cpu, ts);
+	now = tick_nohz_start_idle(ts);
 
 	if (can_stop_idle_tick(cpu, ts)) {
 		int was_stopped = ts->tick_stopped;
@@ -911,8 +909,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-	int cpu = smp_processor_id();
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now;
 
 	local_irq_disable();
@@ -925,7 +922,7 @@ void tick_nohz_idle_exit(void)
 	now = ktime_get();
 
 	if (ts->idle_active)
-		tick_nohz_stop_idle(cpu, now);
+		tick_nohz_stop_idle(ts, now);
 
 	if (ts->tick_stopped) {
 		tick_nohz_restart_sched_tick(ts, now);
@@ -1009,12 +1006,10 @@ static void tick_nohz_switch_to_nohz(void)
  * timer and do not touch the other magic bits which need to be done
  * when idle is left.
  */
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 {
 #if 0
 	/* Switch back to 2.6.27 behaviour */
-
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t delta;
 
 	/*
@@ -1029,36 +1024,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
 #endif
 }
 
-static inline void tick_check_nohz(int cpu)
+static inline void tick_nohz_irq_enter(void)
 {
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now;
 
 	if (!ts->idle_active && !ts->tick_stopped)
 		return;
 	now = ktime_get();
 	if (ts->idle_active)
-		tick_nohz_stop_idle(cpu, now);
+		tick_nohz_stop_idle(ts, now);
 	if (ts->tick_stopped) {
 		tick_nohz_update_jiffies(now);
-		tick_nohz_kick_tick(cpu, now);
+		tick_nohz_kick_tick(ts, now);
 	}
 }
 
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
+static inline void tick_nohz_irq_enter(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
  */
-void tick_check_idle(int cpu)
+void tick_irq_enter(void)
 {
-	tick_check_oneshot_broadcast(cpu);
-	tick_check_nohz(cpu);
+	tick_check_oneshot_broadcast_this_cpu();
+	tick_nohz_irq_enter();
 }
 
 /*
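
Most of the tick-sched churn above is one refactor: the entry points (tick_irq_enter(), tick_nohz_idle_exit()) resolve this CPU's tick_sched once, using the cheaper this-cpu accessors, and hand the pointer down, instead of passing a cpu number that every callee re-resolves through per_cpu(). A toy user-space analogue of the resulting shape (illustrative, not the kernel's per-cpu API):

#include <stdio.h>

struct tick_state {
	int idle_active;
	long idle_waketime;
};

static struct tick_state tick_state[4];	/* toy stand-in for per-cpu data */

/* After the refactor: helpers take the already-resolved state pointer,
 * rather than an int cpu they would each have to look up again. */
static void stop_idle(struct tick_state *ts, long now)
{
	ts->idle_active = 0;
	ts->idle_waketime = now;
}

static void irq_enter_analogue(int cpu, long now)
{
	struct tick_state *ts = &tick_state[cpu];	/* one lookup at the entry point */

	if (ts->idle_active)
		stop_idle(ts, now);
}

int main(void)
{
	tick_state[1].idle_active = 1;
	irq_enter_analogue(1, 42);
	printf("cpu1 idle_active=%d waketime=%ld\n",
	       tick_state[1].idle_active, tick_state[1].idle_waketime);
	return 0;
}

The tick_nohz_stop_sched_tick() hunk is a related cleanup: timekeeping_max_deferment() takes locks of its own, so it is hoisted out of the jiffies_lock read-retry loop rather than being redone on every retry.
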
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 87b4f00284c9..0aa4ce81bc16 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -77,7 +77,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
 	tk->wall_to_monotonic = wtm;
 	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
 	tk->offs_real = timespec_to_ktime(tmp);
-	tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
+	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
 }
 
 static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
@@ -90,8 +90,9 @@ static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
 }
 
 /**
- * timekeeper_setup_internals - Set up internals to use clocksource clock.
+ * tk_setup_internals - Set up internals to use clocksource clock.
  *
+ * @tk:		The target timekeeper to setup.
  * @clock:		Pointer to clocksource.
  *
  * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
@@ -595,7 +596,7 @@ s32 timekeeping_get_tai_offset(void)
 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
 {
 	tk->tai_offset = tai_offset;
-	tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
+	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
 }
 
 /**
@@ -610,6 +611,7 @@ void timekeeping_set_tai_offset(s32 tai_offset)
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&timekeeper_seq);
 	__timekeeping_set_tai_offset(tk, tai_offset);
+	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 	clock_was_set();
@@ -1023,6 +1025,8 @@ static int timekeeping_suspend(void)
 		timekeeping_suspend_time =
 			timespec_add(timekeeping_suspend_time, delta_delta);
 	}
+
+	timekeeping_update(tk, TK_MIRROR);
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
@@ -1130,16 +1134,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 	 * we can adjust by 1.
 	 */
 	error >>= 2;
-	/*
-	 * XXX - In update_wall_time, we round up to the next
-	 * nanosecond, and store the amount rounded up into
-	 * the error. This causes the likely below to be unlikely.
-	 *
-	 * The proper fix is to avoid rounding up by using
-	 * the high precision tk->xtime_nsec instead of
-	 * xtime.tv_nsec everywhere. Fixing this will take some
-	 * time.
-	 */
 	if (likely(error <= interval))
 		adj = 1;
 	else
@@ -1255,7 +1249,7 @@ out_adjust:
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
 	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
-	unsigned int action = 0;
+	unsigned int clock_set = 0;
 
 	while (tk->xtime_nsec >= nsecps) {
 		int leap;
@@ -1277,11 +1271,10 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 
 			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
 
-			clock_was_set_delayed();
-			action = TK_CLOCK_WAS_SET;
+			clock_set = TK_CLOCK_WAS_SET;
 		}
 	}
-	return action;
+	return clock_set;
 }
 
 /**
@@ -1294,7 +1287,8 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
  * Returns the unconsumed cycles.
  */
 static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
-						u32 shift)
+						u32 shift,
+						unsigned int *clock_set)
 {
 	cycle_t interval = tk->cycle_interval << shift;
 	u64 raw_nsecs;
@@ -1308,7 +1302,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 	tk->cycle_last += interval;
 
 	tk->xtime_nsec += tk->xtime_interval << shift;
-	accumulate_nsecs_to_secs(tk);
+	*clock_set |= accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
 	raw_nsecs = (u64)tk->raw_interval << shift;
@@ -1359,14 +1353,14 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
  */
-static void update_wall_time(void)
+void update_wall_time(void)
 {
 	struct clocksource *clock;
 	struct timekeeper *real_tk = &timekeeper;
 	struct timekeeper *tk = &shadow_timekeeper;
 	cycle_t offset;
 	int shift = 0, maxshift;
-	unsigned int action;
+	unsigned int clock_set = 0;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
@@ -1401,7 +1395,8 @@ static void update_wall_time(void)
 	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
 	while (offset >= tk->cycle_interval) {
-		offset = logarithmic_accumulation(tk, offset, shift);
+		offset = logarithmic_accumulation(tk, offset, shift,
+							&clock_set);
 		if (offset < tk->cycle_interval<<shift)
 			shift--;
 	}
@@ -1419,7 +1414,7 @@ static void update_wall_time(void)
 	 * Finally, make sure that after the rounding
 	 * xtime_nsec isn't larger than NSEC_PER_SEC
 	 */
-	action = accumulate_nsecs_to_secs(tk);
+	clock_set |= accumulate_nsecs_to_secs(tk);
 
 	write_seqcount_begin(&timekeeper_seq);
 	/* Update clock->cycle_last with the new value */
@@ -1435,10 +1430,12 @@ static void update_wall_time(void)
 	 * updating.
 	 */
 	memcpy(real_tk, tk, sizeof(*tk));
-	timekeeping_update(real_tk, action);
+	timekeeping_update(real_tk, clock_set);
 	write_seqcount_end(&timekeeper_seq);
 out:
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	if (clock_set)
+		clock_was_set();
 }
 
 /**
@@ -1583,7 +1580,6 @@ struct timespec get_monotonic_coarse(void)
 void do_timer(unsigned long ticks)
 {
 	jiffies_64 += ticks;
-	update_wall_time();
 	calc_global_load(ticks);
 }
 
@@ -1698,12 +1694,14 @@ int do_adjtimex(struct timex *txc)
 
 	if (tai != orig_tai) {
 		__timekeeping_set_tai_offset(tk, tai);
-		update_pvclock_gtod(tk, true);
-		clock_was_set_delayed();
+		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 	}
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+	if (tai != orig_tai)
+		clock_was_set();
+
 	ntp_notify_cmos_timer();
 
 	return ret;
@@ -1739,4 +1737,5 @@ void xtime_update(unsigned long ticks)
 	write_seqlock(&jiffies_lock);
 	do_timer(ticks);
 	write_sequnlock(&jiffies_lock);
+	update_wall_time();
 }
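
Two fixes in the timekeeping.c hunks are worth calling out. First, the offs_tai changes correct a sign error: TAI runs ahead of UTC by tai_offset seconds, and offs_real is the monotonic-to-realtime offset, so the monotonic-to-TAI offset must be offs_real + tai_offset, hence ktime_add(). Second, update_wall_time() now accumulates TK_CLOCK_WAS_SET into a local clock_set and calls clock_was_set() only after timekeeper_lock is dropped, replacing the clock_was_set_delayed() deferral; do_adjtimex() gets the same treatment. The arithmetic behind the first fix, checked in plain C (a user-space sketch; 35s is the UTC-to-TAI leap-second offset in effect around this release):

#include <stdio.h>

int main(void)
{
	long long mono_ns      = 5000000000LL;			/* arbitrary monotonic time */
	long long offs_real_ns = 1700000000LL * 1000000000LL;	/* monotonic -> realtime (UTC) */
	long long tai_offset_s = 35;				/* UTC -> TAI */

	/* the patched formula: offs_tai = offs_real + tai_offset */
	long long offs_tai_ns  = offs_real_ns + tai_offset_s * 1000000000LL;

	long long realtime = mono_ns + offs_real_ns;
	long long tai      = mono_ns + offs_tai_ns;

	/* TAI must lead UTC by exactly the leap-second offset */
	printf("TAI - UTC = %llds\n", (tai - realtime) / 1000000000LL);
	return 0;
}

With the old ktime_sub() the derived CLOCK_TAI would have trailed UTC by tai_offset instead of leading it, i.e. an error of twice the leap-second offset.
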