author     Jiri Kosina <jkosina@suse.cz>  2014-11-20 08:42:02 -0500
committer  Jiri Kosina <jkosina@suse.cz>  2014-11-20 08:42:02 -0500
commit     a02001086bbfb4da35d1228bebc2f1b442db455f (patch)
tree       62ab47936cef06fd08657ca5b6cd1df98c19be57 /kernel/time
parent     eff264efeeb0898408e8c9df72d8a32621035bed (diff)
parent     fc14f9c1272f62c3e8d01300f52467c0d9af50f9 (diff)
Merge Linus' tree to be able to apply submitted patches to newer code than
the current trivial.git base
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/alarmtimer.c        34
-rw-r--r--  kernel/time/clockevents.c        2
-rw-r--r--  kernel/time/hrtimer.c           23
-rw-r--r--  kernel/time/posix-cpu-timers.c  14
-rw-r--r--  kernel/time/posix-timers.c       1
-rw-r--r--  kernel/time/tick-broadcast.c     2
-rw-r--r--  kernel/time/tick-common.c        7
-rw-r--r--  kernel/time/tick-internal.h      7
-rw-r--r--  kernel/time/tick-oneshot.c       2
-rw-r--r--  kernel/time/tick-sched.c       100
-rw-r--r--  kernel/time/time.c              56
-rw-r--r--  kernel/time/timekeeping.c       10
-rw-r--r--  kernel/time/timer.c              4
13 files changed, 156 insertions(+), 106 deletions(-)
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 4aec4a457431..a7077d3ae52f 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
 static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
 							ktime_t now)
 {
+	unsigned long flags;
 	struct k_itimer *ptr = container_of(alarm, struct k_itimer,
 						it.alarm.alarmtimer);
-	if (posix_timer_event(ptr, 0) != 0)
-		ptr->it_overrun++;
+	enum alarmtimer_restart result = ALARMTIMER_NORESTART;
+
+	spin_lock_irqsave(&ptr->it_lock, flags);
+	if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
+		if (posix_timer_event(ptr, 0) != 0)
+			ptr->it_overrun++;
+	}
 
 	/* Re-add periodic timers */
 	if (ptr->it.alarm.interval.tv64) {
 		ptr->it_overrun += alarm_forward(alarm, now,
 						ptr->it.alarm.interval);
-		return ALARMTIMER_RESTART;
+		result = ALARMTIMER_RESTART;
 	}
-	return ALARMTIMER_NORESTART;
+	spin_unlock_irqrestore(&ptr->it_lock, flags);
+
+	return result;
 }
 
 /**
@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
  * @new_timer: k_itimer pointer
  * @cur_setting: itimerspec data to fill
  *
- * Copies the itimerspec data out from the k_itimer
+ * Copies out the current itimerspec data
  */
 static void alarm_timer_get(struct k_itimer *timr,
 				struct itimerspec *cur_setting)
 {
-	memset(cur_setting, 0, sizeof(struct itimerspec));
+	ktime_t relative_expiry_time =
+		alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
+
+	if (ktime_to_ns(relative_expiry_time) > 0) {
+		cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
+	} else {
+		cur_setting->it_value.tv_sec = 0;
+		cur_setting->it_value.tv_nsec = 0;
+	}
 
-	cur_setting->it_interval =
-		ktime_to_timespec(timr->it.alarm.interval);
-	cur_setting->it_value =
-		ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
-	return;
+	cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
 }
 
 /**
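The alarm_timer_get() rewrite above changes the reported it_value from the absolute expiry time to the time remaining until expiry, which is what timer_gettime(2) promises for the other posix-timer clocks. A minimal userspace sketch of that contract (using plain CLOCK_REALTIME, since the CLOCK_*_ALARM clocks need CAP_WAKE_ALARM; link with -lrt on older glibc):

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            /* SIGEV_NONE: no signal on expiry; the timer state is only
             * observable via timer_gettime()/timer_getoverrun(), the
             * case the alarm_handle_timer() guard above also covers. */
            struct sigevent sev = { .sigev_notify = SIGEV_NONE };
            struct itimerspec its = { .it_value = { .tv_sec = 5 } };
            struct itimerspec cur;
            timer_t tid;

            if (timer_create(CLOCK_REALTIME, &sev, &tid) != 0) {
                    perror("timer_create");
                    return 1;
            }
            timer_settime(tid, 0, &its, NULL);

            /* it_value counts down: expect slightly less than 5 s. */
            timer_gettime(tid, &cur);
            printf("remaining: %lld.%09ld s\n",
                   (long long)cur.it_value.tv_sec, cur.it_value.tv_nsec);

            timer_delete(tid);
            return 0;
    }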
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 9c94c19f1305..55449909f114 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,7 +72,7 @@ static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
 	 * Also omit the add if it would overflow the u64 boundary.
 	 */
 	if ((~0ULL - clc > rnd) &&
-	    (!ismax || evt->mult <= (1U << evt->shift)))
+	    (!ismax || evt->mult <= (1ULL << evt->shift)))
 		clc += rnd;
 
 	do_div(clc, evt->mult);
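The one-character clockevents fix works because promoting the constant to unsigned long long forces the shift to be evaluated in 64-bit arithmetic; a 32-bit `1U << shift` is undefined once the shift count reaches 32. A self-contained illustration of the pitfall (values chosen for the demo, not taken from the kernel):

    #include <stdio.h>

    int main(void)
    {
            unsigned shift = 32;

            /* 1U << 32 is undefined behavior: the shift happens in
             * 32-bit arithmetic (on x86 it typically wraps to 1). */

            /* Promote first, then shift: well defined up to 63. */
            printf("%llu\n", 1ULL << shift);        /* 4294967296 */
            return 0;
    }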
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 1c2fe7de2842..37e50aadd471 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -558,7 +558,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 static int hrtimer_reprogram(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 	int res;
 
@@ -629,7 +629,7 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
  */
 static void retrigger_next_event(void *arg)
 {
-	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
 	if (!hrtimer_hres_active())
 		return;
@@ -903,7 +903,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 	 */
 	debug_deactivate(timer);
 	timer_stats_hrtimer_clear_start_info(timer);
-	reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
+	reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
 	/*
 	 * We must preserve the CALLBACK state flag here,
 	 * otherwise we could move the timer base in
@@ -963,7 +963,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 		 * on dynticks target.
 		 */
 		wake_up_nohz_cpu(new_base->cpu_base->cpu);
-	} else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) &&
+	} else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
 			hrtimer_reprogram(timer, new_base)) {
 		/*
 		 * Only allow reprogramming if the new base is on this CPU.
@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
  */
 ktime_t hrtimer_get_next_event(void)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	struct hrtimer_clock_base *base = cpu_base->clock_base;
 	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
 	unsigned long flags;
@@ -1144,7 +1144,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 
 	memset(timer, 0, sizeof(struct hrtimer));
 
-	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+	cpu_base = raw_cpu_ptr(&hrtimer_bases);
 
 	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
 		clock_id = CLOCK_MONOTONIC;
@@ -1187,7 +1187,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 	struct hrtimer_cpu_base *cpu_base;
 	int base = hrtimer_clockid_to_base(which_clock);
 
-	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+	cpu_base = raw_cpu_ptr(&hrtimer_bases);
 	*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
 
 	return 0;
@@ -1242,7 +1242,7 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
  */
 void hrtimer_interrupt(struct clock_event_device *dev)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t expires_next, now, entry_time, delta;
 	int i, retries = 0;
 
@@ -1376,7 +1376,7 @@ static void __hrtimer_peek_ahead_timers(void)
 	if (!hrtimer_hres_active())
 		return;
 
-	td = &__get_cpu_var(tick_cpu_device);
+	td = this_cpu_ptr(&tick_cpu_device);
 	if (td && td->evtdev)
 		hrtimer_interrupt(td->evtdev);
 }
@@ -1440,7 +1440,7 @@ void hrtimer_run_pending(void)
 void hrtimer_run_queues(void)
 {
 	struct timerqueue_node *node;
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	int index, gettime = 1;
 
@@ -1679,7 +1679,7 @@ static void migrate_hrtimers(int scpu)
 
 	local_irq_disable();
 	old_base = &per_cpu(hrtimer_bases, scpu);
-	new_base = &__get_cpu_var(hrtimer_bases);
+	new_base = this_cpu_ptr(&hrtimer_bases);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
@@ -1776,7 +1776,6 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
 	 */
 	if (!expires) {
 		schedule();
-		__set_current_state(TASK_RUNNING);
 		return -EINTR;
 	}
 
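Every hrtimer hunk above (and the matching ones in the tick code below) is the same mechanical substitution: &__get_cpu_var(var) becomes this_cpu_ptr(&var), and &__raw_get_cpu_var(var) becomes raw_cpu_ptr(&var), expressing "address of this CPU's instance" through the pointer-based per-CPU accessors. As a loose userspace analogue only, assuming C11 thread-local storage in place of kernel per-CPU data:

    #include <pthread.h>
    #include <stdio.h>

    /* One instance per thread, loosely like DEFINE_PER_CPU(). */
    static _Thread_local int counter;

    /* Loosely like this_cpu_ptr(&counter): a pointer to the calling
     * context's own instance. */
    static int *this_context_ptr(void)
    {
            return &counter;
    }

    static void *worker(void *arg)
    {
            (void)arg;
            *this_context_ptr() += 1;       /* touches only our copy */
            printf("copy at %p = %d\n",
                   (void *)this_context_ptr(), counter);
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;
            pthread_create(&a, NULL, worker, NULL);
            pthread_create(&b, NULL, worker, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            return 0;
    }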
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 3b8946416a5f..492b986195d5 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -272,22 +272,8 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
 		if (same_thread_group(tsk, current))
 			err = cpu_clock_sample(which_clock, tsk, &rtn);
 	} else {
-		unsigned long flags;
-		struct sighand_struct *sighand;
-
-		/*
-		 * while_each_thread() is not yet entirely RCU safe,
-		 * keep locking the group while sampling process
-		 * clock for now.
-		 */
-		sighand = lock_task_sighand(tsk, &flags);
-		if (!sighand)
-			return err;
-
 		if (tsk == current || thread_group_leader(tsk))
 			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
-
-		unlock_task_sighand(tsk, &flags);
 	}
 
 	if (!err)
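The function simplified above ultimately services clock_gettime(2) for the CPU-time clocks; the sighand locking could go once the thread-group iteration became RCU safe. From userspace the same path is exercised with a few lines (a sketch):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec proc_ts, thread_ts;

            /* CPU time burned by the whole process vs. this thread;
             * both reads end up in the posix-cpu-timers code. */
            clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &proc_ts);
            clock_gettime(CLOCK_THREAD_CPUTIME_ID, &thread_ts);

            printf("process: %lld.%09ld s, thread: %lld.%09ld s\n",
                   (long long)proc_ts.tv_sec, proc_ts.tv_nsec,
                   (long long)thread_ts.tv_sec, thread_ts.tv_nsec);
            return 0;
    }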
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 42b463ad90f2..31ea01f42e1f 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -636,6 +636,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
 			goto out;
 		}
 	} else {
+		memset(&event.sigev_value, 0, sizeof(event.sigev_value));
 		event.sigev_notify = SIGEV_SIGNAL;
 		event.sigev_signo = SIGALRM;
 		event.sigev_value.sival_int = new_timer->it_id;
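The added memset closes an information leak: union sigval is pointer-sized, so on 64-bit kernels assigning only the 4-byte sival_int leaves the upper half of the union holding whatever was on the stack, and those bytes could later reach userspace in the delivered signal's si_value. The size mismatch is easy to see:

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
            union sigval v;

            /* On LP64: the union is 8 bytes, sival_int covers only 4,
             * so setting sival_int alone leaves 4 bytes indeterminate. */
            printf("sizeof(union sigval) = %zu, sizeof(sival_int) = %zu\n",
                   sizeof(v), sizeof(v.sival_int));
            return 0;
    }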
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 64c5990fd500..066f0ec05e48 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -554,7 +554,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 void tick_check_oneshot_broadcast_this_cpu(void)
 {
 	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
-		struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 
 		/*
 		 * We might be in the middle of switching over from
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 0a0608edeb26..7efeedf53ebd 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -224,7 +224,7 @@ static void tick_setup_device(struct tick_device *td,
 
 void tick_install_replacement(struct clock_event_device *newdev)
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 	int cpu = smp_processor_id();
 
 	clockevents_exchange_device(td->evtdev, newdev);
@@ -374,14 +374,14 @@ void tick_shutdown(unsigned int *cpup)
 
 void tick_suspend(void)
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 
 	clockevents_shutdown(td->evtdev);
 }
 
 void tick_resume(void)
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 	int broadcast = tick_resume_broadcast();
 
 	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
@@ -400,4 +400,5 @@ void tick_resume(void)
 void __init tick_init(void)
 {
 	tick_broadcast_init();
+	tick_nohz_init();
 }
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index c19c1d84b6f3..366aeb4f2c66 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -99,6 +99,13 @@ static inline int tick_broadcast_oneshot_active(void) { return 0; }
 static inline bool tick_broadcast_oneshot_available(void) { return false; }
 #endif /* !TICK_ONESHOT */
 
+/* NO_HZ_FULL internal */
+#ifdef CONFIG_NO_HZ_FULL
+extern void tick_nohz_init(void);
+# else
+static inline void tick_nohz_init(void) { }
+#endif
+
 /*
  * Broadcasting support
  */
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 824109060a33..7ce740e78e1b 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -59,7 +59,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
  */
 int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 	struct clock_event_device *dev = td->evtdev;
 
 	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 99aa6ee3908f..7b5741fc4110 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -205,7 +205,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
  */
 void __tick_nohz_full_check(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (tick_nohz_full_cpu(smp_processor_id())) {
 		if (ts->tick_stopped && !is_idle_task(current)) {
@@ -225,6 +225,20 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 };
 
 /*
+ * Kick this CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
+ * is NMI safe.
+ */
+void tick_nohz_full_kick(void)
+{
+	if (!tick_nohz_full_cpu(smp_processor_id()))
+		return;
+
+	irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
+/*
  * Kick the CPU if it's full dynticks in order to force it to
  * re-evaluate its dependency on the tick and restart it if necessary.
  */
@@ -281,22 +295,12 @@ out:
 /* Parse the boot-time nohz CPU list from the kernel parameters. */
 static int __init tick_nohz_full_setup(char *str)
 {
-	int cpu;
-
 	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
-	alloc_bootmem_cpumask_var(&housekeeping_mask);
 	if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
 		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
+		free_bootmem_cpumask_var(tick_nohz_full_mask);
 		return 1;
 	}
-
-	cpu = smp_processor_id();
-	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
-		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
-		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
-	}
-	cpumask_andnot(housekeeping_mask,
-		       cpu_possible_mask, tick_nohz_full_mask);
 	tick_nohz_full_running = true;
 
 	return 1;
@@ -335,18 +339,11 @@ static int tick_nohz_init_all(void)
 
 #ifdef CONFIG_NO_HZ_FULL_ALL
 	if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
-		pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
-		return err;
-	}
-	if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
-		pr_err("NO_HZ: Can't allocate not-full dynticks cpumask\n");
+		WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n");
 		return err;
 	}
 	err = 0;
 	cpumask_setall(tick_nohz_full_mask);
-	cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
-	cpumask_clear(housekeeping_mask);
-	cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
 	tick_nohz_full_running = true;
 #endif
 	return err;
@@ -361,6 +358,37 @@ void __init tick_nohz_init(void)
 		return;
 	}
 
+	if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
+		WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n");
+		cpumask_clear(tick_nohz_full_mask);
+		tick_nohz_full_running = false;
+		return;
+	}
+
+	/*
+	 * Full dynticks uses irq work to drive the tick rescheduling on safe
+	 * locking contexts. But then we need irq work to raise its own
+	 * interrupts to avoid circular dependency on the tick
+	 */
+	if (!arch_irq_work_has_interrupt()) {
+		pr_warning("NO_HZ: Can't run full dynticks because arch doesn't "
+			   "support irq work self-IPIs\n");
+		cpumask_clear(tick_nohz_full_mask);
+		cpumask_copy(housekeeping_mask, cpu_possible_mask);
+		tick_nohz_full_running = false;
+		return;
+	}
+
+	cpu = smp_processor_id();
+
+	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
+		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
+		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+	}
+
+	cpumask_andnot(housekeeping_mask,
+		       cpu_possible_mask, tick_nohz_full_mask);
+
 	for_each_cpu(cpu, tick_nohz_full_mask)
 		context_tracking_cpu_set(cpu);
 
@@ -545,7 +573,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
 	ktime_t last_update, expires, ret = { .tv64 = 0 };
 	unsigned long rcu_delta_jiffies;
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 	u64 time_delta;
 
 	time_delta = timekeeping_max_deferment();
@@ -558,7 +586,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	} while (read_seqretry(&jiffies_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
-	    arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
+	    arch_needs_cpu() || irq_work_needs_cpu()) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
 	} else {
@@ -813,7 +841,7 @@ void tick_nohz_idle_enter(void)
 
 	local_irq_disable();
 
-	ts = &__get_cpu_var(tick_cpu_sched);
+	ts = this_cpu_ptr(&tick_cpu_sched);
 	ts->inidle = 1;
 	__tick_nohz_idle_enter(ts);
 
@@ -831,7 +859,7 @@ EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
  */
 void tick_nohz_irq_exit(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (ts->inidle)
 		__tick_nohz_idle_enter(ts);
@@ -846,7 +874,7 @@ void tick_nohz_irq_exit(void)
  */
 ktime_t tick_nohz_get_sleep_length(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	return ts->sleep_length;
 }
@@ -924,7 +952,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now;
 
 	local_irq_disable();
@@ -959,7 +987,7 @@ static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
  */
 static void tick_nohz_handler(struct clock_event_device *dev)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
 
@@ -968,6 +996,10 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	tick_sched_do_timer(now);
 	tick_sched_handle(ts, regs);
 
+	/* No need to reprogram if we are running tickless */
+	if (unlikely(ts->tick_stopped))
+		return;
+
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
 		tick_do_update_jiffies64(now);
@@ -979,7 +1011,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
  */
 static void tick_nohz_switch_to_nohz(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t next;
 
 	if (!tick_nohz_enabled)
@@ -1041,7 +1073,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 
 static inline void tick_nohz_irq_enter(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now;
 
 	if (!ts->idle_active && !ts->tick_stopped)
@@ -1095,6 +1127,10 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	if (regs)
 		tick_sched_handle(ts, regs);
 
+	/* No need to reprogram if we are in idle or full dynticks mode */
+	if (unlikely(ts->tick_stopped))
+		return HRTIMER_NORESTART;
+
 	hrtimer_forward(timer, now, tick_period);
 
 	return HRTIMER_RESTART;
@@ -1115,7 +1151,7 @@ early_param("skew_tick", skew_tick);
  */
 void tick_setup_sched_timer(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now = ktime_get();
 
 	/*
@@ -1184,7 +1220,7 @@ void tick_clock_notify(void)
  */
 void tick_oneshot_notify(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	set_bit(0, &ts->check_clocks);
 }
@@ -1199,7 +1235,7 @@ void tick_oneshot_notify(void)
  */
 int tick_check_oneshot_change(int allow_nohz)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (!test_and_clear_bit(0, &ts->check_clocks))
 		return 0;
diff --git a/kernel/time/time.c b/kernel/time/time.c
index f0294ba14634..a9ae20fb0b11 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -559,17 +559,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
  * that a remainder subtract here would not do the right thing as the
  * resolution values don't fall on second boundries. I.e. the line:
  * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ * Note that due to the small error in the multiplier here, this
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+ * OK.
  *
  * Rather, we just shift the bits off the right.
  *
  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
  * value to a scaled second value.
  */
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
 {
-	unsigned long sec = value->tv_sec;
-	long nsec = value->tv_nsec + TICK_NSEC - 1;
+	nsec = nsec + TICK_NSEC - 1;
 
 	if (sec >= MAX_SEC_IN_JIFFIES){
 		sec = MAX_SEC_IN_JIFFIES;
@@ -580,6 +583,13 @@ timespec_to_jiffies(const struct timespec *value)
 		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 
 }
+
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+	return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+
 EXPORT_SYMBOL(timespec_to_jiffies);
 
 void
@@ -596,31 +606,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 }
 EXPORT_SYMBOL(jiffies_to_timespec);
 
-/* Same for "timeval"
- *
- * Well, almost. The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
+/*
+ * We could use a similar algorithm to timespec_to_jiffies (with a
+ * different multiplier for usec instead of nsec). But this has a
+ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
+ * usec value, since it's not necessarily integral.
+ *
+ * We could instead round in the intermediate scaled representation
+ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
+ * perilous: the scaling introduces a small positive error, which
+ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
+ * units to the intermediate before shifting) leads to accidental
+ * overflow and overestimates.
+ *
+ * At the cost of one additional multiplication by a constant, just
+ * use the timespec implementation.
  */
 unsigned long
 timeval_to_jiffies(const struct timeval *value)
 {
-	unsigned long sec = value->tv_sec;
-	long usec = value->tv_usec;
-
-	if (sec >= MAX_SEC_IN_JIFFIES){
-		sec = MAX_SEC_IN_JIFFIES;
-		usec = 0;
-	}
-	return (((u64)sec * SEC_CONVERSION) +
-		(((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
-		 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+	return __timespec_to_jiffies(value->tv_sec,
+				     value->tv_usec * NSEC_PER_USEC);
 }
 EXPORT_SYMBOL(timeval_to_jiffies);
 
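The time.c refactor funnels both conversions through one helper that rounds the nanosecond part up (nsec + TICK_NSEC - 1), so a requested timeout can never come out shorter than asked for, and timeval_to_jiffies() now simply rescales microseconds to nanoseconds first. The round-up arithmetic in isolation, with a plain division standing in for the kernel's scaled multiply and an assumed HZ of 250:

    #include <stdio.h>

    int main(void)
    {
            const long long NSEC_PER_SEC = 1000000000LL;
            const long long HZ = 250;                      /* assumed */
            const long long TICK_NSEC = NSEC_PER_SEC / HZ; /* 4000000 */

            /* One nanosecond past a tick boundary still costs a whole
             * extra tick; rounding down would let sleeps end early. */
            long long nsec = 4000001;
            long long jiffies = (nsec + TICK_NSEC - 1) / TICK_NSEC;

            printf("%lld ns -> %lld jiffies\n", nsec, jiffies); /* 2 */
            return 0;
    }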
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f36b02838a47..ec1791fae965 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -338,10 +338,11 @@ EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
 
 static inline void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec xt;
+	struct timespec xt, wm;
 
 	xt = timespec64_to_timespec(tk_xtime(tk));
-	update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock, tk->tkr.mult,
+	wm = timespec64_to_timespec(tk->wall_to_monotonic);
+	update_vsyscall_old(&xt, &wm, tk->tkr.clock, tk->tkr.mult,
 			    tk->tkr.cycle_last);
 }
 
@@ -441,11 +442,12 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 		tk->ntp_error = 0;
 		ntp_clear();
 	}
-	update_vsyscall(tk);
-	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
 
 	tk_update_ktime_data(tk);
 
+	update_vsyscall(tk);
+	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
+
 	if (action & TK_MIRROR)
 		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
 		       sizeof(tk_core.timekeeper));
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index aca5dfe2fa3d..3260ffdb368f 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -655,7 +655,7 @@ static inline void debug_assert_init(struct timer_list *timer)
 static void do_init_timer(struct timer_list *timer, unsigned int flags,
 			  const char *name, struct lock_class_key *key)
 {
-	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
+	struct tvec_base *base = raw_cpu_read(tvec_bases);
 
 	timer->entry.next = NULL;
 	timer->base = (void *)((unsigned long)base | flags);
@@ -1385,7 +1385,7 @@ void update_process_times(int user_tick)
 	rcu_check_callbacks(cpu, user_tick);
 #ifdef CONFIG_IRQ_WORK
 	if (in_irq())
-		irq_work_run();
+		irq_work_tick();
 #endif
 	scheduler_tick();
 	run_posix_cpu_timers(p);