author	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-20 14:34:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-20 14:34:26 -0500
commit	6c6461435611e1d4843516f2d55e8316c009112e (patch)
tree	2285f7ef3257dcb30342f931430ad755fc5d299b /kernel
parent	a0fa1dd3cdbccec9597fe53b6177a9aa6e20f2f8 (diff)
parent	00e2bcd6d35f59fce7fa0e76e24d08f74c6a8506 (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer changes from Ingo Molnar:
- ARM clocksource/clockevent improvements and fixes
- generic timekeeping updates: TAI fixes/improvements, cleanups
- Posix cpu timer cleanups and improvements
- dynticks updates: full dynticks bugfixes, optimizations and cleanups
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
clocksource: Timer-sun5i: Switch to sched_clock_register()
timekeeping: Remove comment that's mostly out of date
rtc-cmos: Add an alarm disable quirk
timekeeper: fix comment typo for tk_setup_internals()
timekeeping: Fix missing timekeeping_update in suspend path
timekeeping: Fix CLOCK_TAI timer/nanosleep delays
tick/timekeeping: Call update_wall_time outside the jiffies lock
timekeeping: Avoid possible deadlock from clock_was_set_delayed
timekeeping: Fix potential lost pv notification of time change
timekeeping: Fix lost updates to tai adjustment
clocksource: sh_cmt: Add clk_prepare/unprepare support
clocksource: bcm_kona_timer: Remove unused bcm_timer_ids
clocksource: vt8500: Remove deprecated IRQF_DISABLED
clocksource: tegra: Remove deprecated IRQF_DISABLED
clocksource: misc drivers: Remove deprecated IRQF_DISABLED
clocksource: sh_mtu2: Remove unnecessary platform_set_drvdata()
clocksource: sh_tmu: Remove unnecessary platform_set_drvdata()
clocksource: armada-370-xp: Enable timer divider only when needed
clocksource: clksrc-of: Warn if no clock sources are found
clocksource: orion: Switch to sched_clock_register()
...
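
Two of the commits above (Timer-sun5i and orion) convert drivers to sched_clock_register(), the 64-bit replacement for the older setup_sched_clock() interface. A rough illustration of the registration pattern those conversions follow (the driver name, register offset, and mapping below are hypothetical, not taken from this merge):

	#include <linux/sched_clock.h>
	#include <linux/io.h>

	static void __iomem *example_timer_base;	/* hypothetical mapping */

	static u64 notrace example_sched_clock_read(void)
	{
		/* down-counting hardware timer: invert so the value increases */
		return ~readl_relaxed(example_timer_base + 0x08);
	}

	static void __init example_sched_clock_init(unsigned long rate)
	{
		/* register a 32-bit wide counter running at 'rate' Hz */
		sched_clock_register(example_sched_clock_read, 32, rate);
	}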
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/context_tracking.c	  8
-rw-r--r--	kernel/posix-cpu-timers.c	327
-rw-r--r--	kernel/softirq.c	  4
-rw-r--r--	kernel/time/tick-broadcast.c	  6
-rw-r--r--	kernel/time/tick-common.c	  1
-rw-r--r--	kernel/time/tick-internal.h	  5
-rw-r--r--	kernel/time/tick-sched.c	 40
-rw-r--r--	kernel/time/timekeeping.c	 53
8 files changed, 190 insertions, 254 deletions
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index e5f3917aa05b..6cb20d2e7ee0 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -53,10 +53,10 @@ void context_tracking_user_enter(void)
 	/*
 	 * Repeat the user_enter() check here because some archs may be calling
 	 * this from asm and if no CPU needs context tracking, they shouldn't
-	 * go further. Repeat the check here until they support the static key
-	 * check.
+	 * go further. Repeat the check here until they support the inline static
+	 * key check.
 	 */
-	if (!static_key_false(&context_tracking_enabled))
+	if (!context_tracking_is_enabled())
 		return;
 
 	/*
@@ -160,7 +160,7 @@ void context_tracking_user_exit(void)
 {
 	unsigned long flags;
 
-	if (!static_key_false(&context_tracking_enabled))
+	if (!context_tracking_is_enabled())
 		return;
 
 	if (in_interrupt())
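
The two hunks above replace the open-coded static-key test with a named helper. For reference, context_tracking_is_enabled() as introduced by this series is essentially a thin wrapper of this shape (a sketch of the header-side definition, assuming the include/linux/context_tracking_state.h layout of this era):

	static inline bool context_tracking_is_enabled(void)
	{
		return static_key_false(&context_tracking_enabled);
	}

The call sites keep the same fast-path cost; the helper just gives the check one name so asm and C callers stay in sync.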
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c7f31aa272f7..3b8946416a5f 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -233,7 +233,8 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 
 /*
  * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
@@ -260,30 +261,53 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 	return 0;
 }
 
+static int posix_cpu_clock_get_task(struct task_struct *tsk,
+				    const clockid_t which_clock,
+				    struct timespec *tp)
+{
+	int err = -EINVAL;
+	unsigned long long rtn;
+
+	if (CPUCLOCK_PERTHREAD(which_clock)) {
+		if (same_thread_group(tsk, current))
+			err = cpu_clock_sample(which_clock, tsk, &rtn);
+	} else {
+		unsigned long flags;
+		struct sighand_struct *sighand;
+
+		/*
+		 * while_each_thread() is not yet entirely RCU safe,
+		 * keep locking the group while sampling process
+		 * clock for now.
+		 */
+		sighand = lock_task_sighand(tsk, &flags);
+		if (!sighand)
+			return err;
+
+		if (tsk == current || thread_group_leader(tsk))
+			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
+
+		unlock_task_sighand(tsk, &flags);
+	}
+
+	if (!err)
+		sample_to_timespec(which_clock, rtn, tp);
+
+	return err;
+}
+
 
 static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 {
 	const pid_t pid = CPUCLOCK_PID(which_clock);
-	int error = -EINVAL;
-	unsigned long long rtn;
+	int err = -EINVAL;
 
 	if (pid == 0) {
 		/*
 		 * Special case constant value for our own clocks.
 		 * We don't have to do any lookup to find ourselves.
 		 */
-		if (CPUCLOCK_PERTHREAD(which_clock)) {
-			/*
-			 * Sampling just ourselves we can do with no locking.
-			 */
-			error = cpu_clock_sample(which_clock,
-						 current, &rtn);
-		} else {
-			read_lock(&tasklist_lock);
-			error = cpu_clock_sample_group(which_clock,
-						       current, &rtn);
-			read_unlock(&tasklist_lock);
-		}
+		err = posix_cpu_clock_get_task(current, which_clock, tp);
 	} else {
 		/*
 		 * Find the given PID, and validate that the caller
@@ -292,29 +316,12 @@ static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 		struct task_struct *p;
 		rcu_read_lock();
 		p = find_task_by_vpid(pid);
-		if (p) {
-			if (CPUCLOCK_PERTHREAD(which_clock)) {
-				if (same_thread_group(p, current)) {
-					error = cpu_clock_sample(which_clock,
-								 p, &rtn);
-				}
-			} else {
-				read_lock(&tasklist_lock);
-				if (thread_group_leader(p) && p->sighand) {
-					error =
-					    cpu_clock_sample_group(which_clock,
-								   p, &rtn);
-				}
-				read_unlock(&tasklist_lock);
-			}
-		}
+		if (p)
+			err = posix_cpu_clock_get_task(p, which_clock, tp);
 		rcu_read_unlock();
 	}
 
-	if (error)
-		return error;
-	sample_to_timespec(which_clock, rtn, tp);
-	return 0;
+	return err;
 }
 
 
@@ -371,36 +378,40 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
  */
 static int posix_cpu_timer_del(struct k_itimer *timer)
 {
-	struct task_struct *p = timer->it.cpu.task;
 	int ret = 0;
+	unsigned long flags;
+	struct sighand_struct *sighand;
+	struct task_struct *p = timer->it.cpu.task;
 
-	if (likely(p != NULL)) {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
-			/*
-			 * We raced with the reaping of the task.
-			 * The deletion should have cleared us off the list.
-			 */
-			BUG_ON(!list_empty(&timer->it.cpu.entry));
-		} else {
-			spin_lock(&p->sighand->siglock);
-			if (timer->it.cpu.firing)
-				ret = TIMER_RETRY;
-			else
-				list_del(&timer->it.cpu.entry);
-			spin_unlock(&p->sighand->siglock);
-		}
-		read_unlock(&tasklist_lock);
+	WARN_ON_ONCE(p == NULL);
 
-		if (!ret)
-			put_task_struct(p);
+	/*
+	 * Protect against sighand release/switch in exit/exec and process/
+	 * thread timer list entry concurrent read/writes.
+	 */
+	sighand = lock_task_sighand(p, &flags);
+	if (unlikely(sighand == NULL)) {
+		/*
+		 * We raced with the reaping of the task.
+		 * The deletion should have cleared us off the list.
+		 */
+		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
+	} else {
+		if (timer->it.cpu.firing)
+			ret = TIMER_RETRY;
+		else
+			list_del(&timer->it.cpu.entry);
+
+		unlock_task_sighand(p, &flags);
 	}
 
+	if (!ret)
+		put_task_struct(p);
+
 	return ret;
 }
 
-static void cleanup_timers_list(struct list_head *head,
-				unsigned long long curr)
+static void cleanup_timers_list(struct list_head *head)
 {
 	struct cpu_timer_list *timer, *next;
 
@@ -414,16 +425,11 @@ static void cleanup_timers_list(struct list_head *head,
  * time for later timer_gettime calls to return.
  * This must be called with the siglock held.
  */
-static void cleanup_timers(struct list_head *head,
-			   cputime_t utime, cputime_t stime,
-			   unsigned long long sum_exec_runtime)
+static void cleanup_timers(struct list_head *head)
 {
-
-	cputime_t ptime = utime + stime;
-
-	cleanup_timers_list(head, cputime_to_expires(ptime));
-	cleanup_timers_list(++head, cputime_to_expires(utime));
-	cleanup_timers_list(++head, sum_exec_runtime);
+	cleanup_timers_list(head);
+	cleanup_timers_list(++head);
+	cleanup_timers_list(++head);
 }
 
 /*
@@ -433,41 +439,14 @@ static void cleanup_timers(struct list_head *head,
  */
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
-	cputime_t utime, stime;
-
 	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
 						sizeof(unsigned long long));
-	task_cputime(tsk, &utime, &stime);
-	cleanup_timers(tsk->cpu_timers,
-		       utime, stime, tsk->se.sum_exec_runtime);
+	cleanup_timers(tsk->cpu_timers);
 
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-	struct signal_struct *const sig = tsk->signal;
-	cputime_t utime, stime;
-
-	task_cputime(tsk, &utime, &stime);
-	cleanup_timers(tsk->signal->cpu_timers,
-		       utime + sig->utime, stime + sig->stime,
-		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
-}
-
-static void clear_dead_task(struct k_itimer *itimer, unsigned long long now)
-{
-	struct cpu_timer_list *timer = &itimer->it.cpu;
-
-	/*
-	 * That's all for this thread or process.
-	 * We leave our residual in expires to be reported.
-	 */
-	put_task_struct(timer->task);
-	timer->task = NULL;
-	if (timer->expires < now) {
-		timer->expires = 0;
-	} else {
-		timer->expires -= now;
-	}
+	cleanup_timers(tsk->signal->cpu_timers);
 }
 
 static inline int expires_gt(cputime_t expires, cputime_t new_exp)
@@ -477,8 +456,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 
 /*
  * Insert the timer on the appropriate list before any timers that
- * expire later. This must be called with the tasklist_lock held
- * for reading, interrupts disabled and p->sighand->siglock taken.
+ * expire later. This must be called with the sighand lock held.
  */
 static void arm_timer(struct k_itimer *timer)
 {
@@ -569,7 +547,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
 
 /*
  * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
@@ -608,7 +587,8 @@ static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
  */
 static void posix_cpu_timer_kick_nohz(void)
 {
-	schedule_work(&nohz_kick_work);
+	if (context_tracking_is_enabled())
+		schedule_work(&nohz_kick_work);
 }
 
 bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
@@ -631,43 +611,39 @@ static inline void posix_cpu_timer_kick_nohz(void) { }
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
  * and try again. (This happens when the timer is in the middle of firing.)
  */
-static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 			       struct itimerspec *new, struct itimerspec *old)
 {
+	unsigned long flags;
+	struct sighand_struct *sighand;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long old_expires, new_expires, old_incr, val;
 	int ret;
 
-	if (unlikely(p == NULL)) {
-		/*
-		 * Timer refers to a dead task's clock.
-		 */
-		return -ESRCH;
-	}
+	WARN_ON_ONCE(p == NULL);
 
 	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
 
-	read_lock(&tasklist_lock);
 	/*
-	 * We need the tasklist_lock to protect against reaping that
-	 * clears p->sighand. If p has just been reaped, we can no
+	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
+	 * and p->signal->cpu_timers read/write in arm_timer()
+	 */
+	sighand = lock_task_sighand(p, &flags);
+	/*
+	 * If p has just been reaped, we can no
 	 * longer get any information about it at all.
 	 */
-	if (unlikely(p->sighand == NULL)) {
-		read_unlock(&tasklist_lock);
-		put_task_struct(p);
-		timer->it.cpu.task = NULL;
+	if (unlikely(sighand == NULL)) {
 		return -ESRCH;
 	}
 
 	/*
 	 * Disarm any old timer after extracting its expiry time.
 	 */
-	BUG_ON(!irqs_disabled());
+	WARN_ON_ONCE(!irqs_disabled());
 
 	ret = 0;
 	old_incr = timer->it.cpu.incr;
-	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
 	if (unlikely(timer->it.cpu.firing)) {
 		timer->it.cpu.firing = -1;
@@ -724,12 +700,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * disable this firing since we are already reporting
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
-		spin_unlock(&p->sighand->siglock);
-		read_unlock(&tasklist_lock);
+		unlock_task_sighand(p, &flags);
 		goto out;
 	}
 
-	if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
+	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
 		new_expires += val;
 	}
 
@@ -743,9 +718,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		arm_timer(timer);
 	}
 
-	spin_unlock(&p->sighand->siglock);
-	read_unlock(&tasklist_lock);
-
+	unlock_task_sighand(p, &flags);
 	/*
 	 * Install the new reload setting, and
 	 * set up the signal and overrun bookkeeping.
@@ -787,7 +760,8 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 {
 	unsigned long long now;
 	struct task_struct *p = timer->it.cpu.task;
-	int clear_dead;
+
+	WARN_ON_ONCE(p == NULL);
 
 	/*
 	 * Easy part: convert the reload time.
@@ -800,52 +774,34 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		return;
 	}
 
-	if (unlikely(p == NULL)) {
-		/*
-		 * This task already died and the timer will never fire.
-		 * In this case, expires is actually the dead value.
-		 */
- dead:
-		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
-				   &itp->it_value);
-		return;
-	}
-
 	/*
 	 * Sample the clock to take the difference with the expiry time.
 	 */
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &now);
-		clear_dead = p->exit_state;
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		struct sighand_struct *sighand;
+		unsigned long flags;
+
+		/*
+		 * Protect against sighand release/switch in exit/exec and
+		 * also make timer sampling safe if it ends up calling
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
 			 * Call the timer disarmed, nothing else to do.
 			 */
-			put_task_struct(p);
-			timer->it.cpu.task = NULL;
 			timer->it.cpu.expires = 0;
-			read_unlock(&tasklist_lock);
-			goto dead;
+			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
+					   &itp->it_value);
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
-			clear_dead = (unlikely(p->exit_state) &&
-				      thread_group_empty(p));
+			unlock_task_sighand(p, &flags);
 		}
-		read_unlock(&tasklist_lock);
-	}
-
-	if (unlikely(clear_dead)) {
-		/*
-		 * We've noticed that the thread is dead, but
-		 * not yet reaped. Take this opportunity to
-		 * drop our task ref.
-		 */
-		clear_dead_task(timer, now);
-		goto dead;
 	}
 
 	if (now < timer->it.cpu.expires) {
@@ -1059,14 +1015,12 @@ static void check_process_timers(struct task_struct *tsk,
  */
 void posix_cpu_timer_schedule(struct k_itimer *timer)
 {
+	struct sighand_struct *sighand;
+	unsigned long flags;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long now;
 
-	if (unlikely(p == NULL))
-		/*
-		 * The task was cleaned up already, no future firings.
-		 */
-		goto out;
+	WARN_ON_ONCE(p == NULL);
 
 	/*
 	 * Fetch the current sample and update the timer's expiry time.
@@ -1074,49 +1028,45 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
-		if (unlikely(p->exit_state)) {
-			clear_dead_task(timer, now);
+		if (unlikely(p->exit_state))
+			goto out;
+
+		/* Protect timer list r/w in arm_timer() */
+		sighand = lock_task_sighand(p, &flags);
+		if (!sighand)
 			goto out;
-		}
-		read_lock(&tasklist_lock); /* arm_timer needs it. */
-		spin_lock(&p->sighand->siglock);
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		/*
+		 * Protect arm_timer() and timer sampling in case of call to
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
 			 */
-			put_task_struct(p);
-			timer->it.cpu.task = p = NULL;
 			timer->it.cpu.expires = 0;
-			goto out_unlock;
+			goto out;
 		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-			/*
-			 * We've noticed that the thread is dead, but
-			 * not yet reaped. Take this opportunity to
-			 * drop our task ref.
-			 */
-			cpu_timer_sample_group(timer->it_clock, p, &now);
-			clear_dead_task(timer, now);
-			goto out_unlock;
+			unlock_task_sighand(p, &flags);
+			/* Optimizations: if the process is dying, no need to rearm */
+			goto out;
 		}
-		spin_lock(&p->sighand->siglock);
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
-		/* Leave the tasklist_lock locked for the call below. */
+		/* Leave the sighand locked for the call below. */
 	}
 
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
-	BUG_ON(!irqs_disabled());
+	WARN_ON_ONCE(!irqs_disabled());
 	arm_timer(timer);
-	spin_unlock(&p->sighand->siglock);
-
-out_unlock:
-	read_unlock(&tasklist_lock);
+	unlock_task_sighand(p, &flags);
 
+	/* Kick full dynticks CPUs in case they need to tick on the new timer */
+	posix_cpu_timer_kick_nohz();
 out:
 	timer->it_overrun_last = timer->it_overrun;
 	timer->it_overrun = -1;
@@ -1200,7 +1150,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	struct k_itimer *timer, *next;
 	unsigned long flags;
 
-	BUG_ON(!irqs_disabled());
+	WARN_ON_ONCE(!irqs_disabled());
 
 	/*
 	 * The fast path checks that there are no expired thread or thread
@@ -1256,13 +1206,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 		cpu_timer_fire(timer);
 		spin_unlock(&timer->it_lock);
 	}
-
-	/*
-	 * In case some timers were rescheduled after the queue got emptied,
-	 * wake up full dynticks CPUs.
-	 */
-	if (tsk->signal->cputimer.running)
-		posix_cpu_timer_kick_nohz();
 }
 
 /*
@@ -1274,7 +1217,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 {
 	unsigned long long now;
 
-	BUG_ON(clock_idx == CPUCLOCK_SCHED);
+	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
 
 	if (oldval) {
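
Throughout this file the old pattern of read_lock(&tasklist_lock) plus a p->sighand NULL check is replaced by lock_task_sighand(). The recurring shape, sketched in isolation (example_op() and its error value are illustrative, not functions from the patch):

	static int example_op(struct task_struct *p)
	{
		struct sighand_struct *sighand;
		unsigned long flags;

		sighand = lock_task_sighand(p, &flags);	/* pins sighand, takes siglock */
		if (unlikely(sighand == NULL))
			return -ESRCH;	/* task was reaped; nothing left to touch */

		/* ... safely read/write p's cpu timer lists here ... */

		unlock_task_sighand(p, &flags);
		return 0;
	}

A NULL return is then the only "task was reaped" case left to handle, which is what lets the BUG_ON() checks above relax into WARN_ON_ONCE().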
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8b93b3770f85..8a1e6e104892 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -319,8 +319,6 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-	int cpu = smp_processor_id();
-
 	rcu_irq_enter();
 	if (is_idle_task(current) && !in_interrupt()) {
 		/*
@@ -328,7 +326,7 @@ void irq_enter(void)
 		 * here, as softirq will be serviced on return from interrupt.
 		 */
 		local_bh_disable();
-		tick_check_idle(cpu);
+		tick_check_idle();
 		_local_bh_enable();
 	}
 
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 9532690daaa9..43780ab5e279 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -538,10 +538,10 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
  * Called from irq_enter() when idle was interrupted to reenable the
  * per cpu device.
  */
-void tick_check_oneshot_broadcast(int cpu)
+void tick_check_oneshot_broadcast_this_cpu(void)
 {
-	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
-		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
+		struct tick_device *td = &__get_cpu_var(tick_cpu_device);
 
 		/*
 		 * We might be in the middle of switching over from
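
The switch from per_cpu(..., cpu) to __get_cpu_var() is valid here because the function now runs strictly on the local CPU (it is only called from irq_enter()). For comparison, the same access spelled three ways (illustrative only; __get_cpu_var() was the idiom of this era and was later superseded by this_cpu_ptr()):

	struct tick_device *td;

	td = &per_cpu(tick_cpu_device, cpu);	/* any CPU's instance, cpu known */
	td = &__get_cpu_var(tick_cpu_device);	/* the running CPU's instance */
	td = this_cpu_ptr(&tick_cpu_device);	/* same thing, newer idiom */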
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 162b03ab0ad2..20b2fe37d105 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -85,6 +85,7 @@ static void tick_periodic(int cpu)
 
 		do_timer(1);
 		write_sequnlock(&jiffies_lock);
+		update_wall_time();
 	}
 
 	update_process_times(user_mode(get_irq_regs()));
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 18e71f7fbc2a..8329669b51ec 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -51,7 +51,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
-extern void tick_check_oneshot_broadcast(int cpu);
+extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
@@ -62,7 +62,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
-static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
 static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 
@@ -155,3 +155,4 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 #endif
 
 extern void do_timer(unsigned long ticks);
+extern void update_wall_time(void);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c833249ab0fb..08cb0c3b8ccb 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -86,6 +86,7 @@ static void tick_do_update_jiffies64(ktime_t now)
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
 	write_sequnlock(&jiffies_lock);
+	update_wall_time();
 }
 
 /*
@@ -391,11 +392,9 @@ __setup("nohz=", setup_tick_nohz);
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
-	int cpu = smp_processor_id();
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	unsigned long flags;
 
-	ts->idle_waketime = now;
+	__this_cpu_write(tick_cpu_sched.idle_waketime, now);
 
 	local_irq_save(flags);
 	tick_do_update_jiffies64(now);
@@ -426,17 +425,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 
 }
 
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 {
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-
-	update_ts_time_stats(cpu, ts, now, NULL);
+	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 {
 	ktime_t now = ktime_get();
 
@@ -754,7 +751,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
 	ktime_t now, expires;
 	int cpu = smp_processor_id();
 
-	now = tick_nohz_start_idle(cpu, ts);
+	now = tick_nohz_start_idle(ts);
 
 	if (can_stop_idle_tick(cpu, ts)) {
 		int was_stopped = ts->tick_stopped;
@@ -911,8 +908,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-	int cpu = smp_processor_id();
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now;
 
 	local_irq_disable();
@@ -925,7 +921,7 @@ void tick_nohz_idle_exit(void)
 	now = ktime_get();
 
 	if (ts->idle_active)
-		tick_nohz_stop_idle(cpu, now);
+		tick_nohz_stop_idle(ts, now);
 
 	if (ts->tick_stopped) {
 		tick_nohz_restart_sched_tick(ts, now);
@@ -1009,12 +1005,10 @@ static void tick_nohz_switch_to_nohz(void)
  * timer and do not touch the other magic bits which need to be done
  * when idle is left.
  */
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 {
 #if 0
 	/* Switch back to 2.6.27 behaviour */
-
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t delta;
 
 	/*
@@ -1029,36 +1023,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
 #endif
 }
 
-static inline void tick_check_nohz(int cpu)
+static inline void tick_check_nohz_this_cpu(void)
 {
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now;
 
 	if (!ts->idle_active && !ts->tick_stopped)
 		return;
 	now = ktime_get();
 	if (ts->idle_active)
-		tick_nohz_stop_idle(cpu, now);
+		tick_nohz_stop_idle(ts, now);
 	if (ts->tick_stopped) {
 		tick_nohz_update_jiffies(now);
-		tick_nohz_kick_tick(cpu, now);
+		tick_nohz_kick_tick(ts, now);
 	}
 }
 
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
+static inline void tick_check_nohz_this_cpu(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
  */
-void tick_check_idle(int cpu)
+void tick_check_idle(void)
 {
-	tick_check_oneshot_broadcast(cpu);
-	tick_check_nohz(cpu);
+	tick_check_oneshot_broadcast_this_cpu();
+	tick_check_nohz_this_cpu();
 }
 
 /*
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 87b4f00284c9..0aa4ce81bc16 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -77,7 +77,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
 	tk->wall_to_monotonic = wtm;
 	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
 	tk->offs_real = timespec_to_ktime(tmp);
-	tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
+	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
 }
 
 static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
@@ -90,8 +90,9 @@ static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
 }
 
 /**
- * timekeeper_setup_internals - Set up internals to use clocksource clock.
+ * tk_setup_internals - Set up internals to use clocksource clock.
  *
+ * @tk:		The target timekeeper to setup.
  * @clock:		Pointer to clocksource.
  *
  * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
@@ -595,7 +596,7 @@ s32 timekeeping_get_tai_offset(void)
 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
 {
 	tk->tai_offset = tai_offset;
-	tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
+	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
 }
 
 /**
@@ -610,6 +611,7 @@ void timekeeping_set_tai_offset(s32 tai_offset)
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&timekeeper_seq);
 	__timekeeping_set_tai_offset(tk, tai_offset);
+	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 	clock_was_set();
@@ -1023,6 +1025,8 @@ static int timekeeping_suspend(void)
 		timekeeping_suspend_time =
 			timespec_add(timekeeping_suspend_time, delta_delta);
 	}
+
+	timekeeping_update(tk, TK_MIRROR);
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
@@ -1130,16 +1134,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 	 * we can adjust by 1.
 	 */
 	error >>= 2;
-	/*
-	 * XXX - In update_wall_time, we round up to the next
-	 * nanosecond, and store the amount rounded up into
-	 * the error. This causes the likely below to be unlikely.
-	 *
-	 * The proper fix is to avoid rounding up by using
-	 * the high precision tk->xtime_nsec instead of
-	 * xtime.tv_nsec everywhere. Fixing this will take some
-	 * time.
-	 */
 	if (likely(error <= interval))
 		adj = 1;
 	else
@@ -1255,7 +1249,7 @@ out_adjust:
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
 	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
-	unsigned int action = 0;
+	unsigned int clock_set = 0;
 
 	while (tk->xtime_nsec >= nsecps) {
 		int leap;
@@ -1277,11 +1271,10 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 
 			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
 
-			clock_was_set_delayed();
-			action = TK_CLOCK_WAS_SET;
+			clock_set = TK_CLOCK_WAS_SET;
 		}
 	}
-	return action;
+	return clock_set;
 }
 
 /**
@@ -1294,7 +1287,8 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
  * Returns the unconsumed cycles.
  */
 static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
-						u32 shift)
+						u32 shift,
+						unsigned int *clock_set)
 {
 	cycle_t interval = tk->cycle_interval << shift;
 	u64 raw_nsecs;
@@ -1308,7 +1302,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 	tk->cycle_last += interval;
 
 	tk->xtime_nsec += tk->xtime_interval << shift;
-	accumulate_nsecs_to_secs(tk);
+	*clock_set |= accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
 	raw_nsecs = (u64)tk->raw_interval << shift;
@@ -1359,14 +1353,14 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
  */
-static void update_wall_time(void)
+void update_wall_time(void)
 {
 	struct clocksource *clock;
 	struct timekeeper *real_tk = &timekeeper;
 	struct timekeeper *tk = &shadow_timekeeper;
 	cycle_t offset;
 	int shift = 0, maxshift;
-	unsigned int action;
+	unsigned int clock_set = 0;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
@@ -1401,7 +1395,8 @@ static void update_wall_time(void)
 	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
 	while (offset >= tk->cycle_interval) {
-		offset = logarithmic_accumulation(tk, offset, shift);
+		offset = logarithmic_accumulation(tk, offset, shift,
+							&clock_set);
 		if (offset < tk->cycle_interval<<shift)
 			shift--;
 	}
@@ -1419,7 +1414,7 @@ static void update_wall_time(void)
 	 * Finally, make sure that after the rounding
 	 * xtime_nsec isn't larger than NSEC_PER_SEC
 	 */
-	action = accumulate_nsecs_to_secs(tk);
+	clock_set |= accumulate_nsecs_to_secs(tk);
 
 	write_seqcount_begin(&timekeeper_seq);
 	/* Update clock->cycle_last with the new value */
@@ -1435,10 +1430,12 @@ static void update_wall_time(void)
 	 * updating.
 	 */
 	memcpy(real_tk, tk, sizeof(*tk));
-	timekeeping_update(real_tk, action);
+	timekeeping_update(real_tk, clock_set);
 	write_seqcount_end(&timekeeper_seq);
 out:
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+	if (clock_set)
+		clock_was_set();
 }
 
 /**
@@ -1583,7 +1580,6 @@ struct timespec get_monotonic_coarse(void)
 void do_timer(unsigned long ticks)
 {
 	jiffies_64 += ticks;
-	update_wall_time();
 	calc_global_load(ticks);
 }
 
@@ -1698,12 +1694,14 @@ int do_adjtimex(struct timex *txc)
 
 	if (tai != orig_tai) {
 		__timekeeping_set_tai_offset(tk, tai);
-		update_pvclock_gtod(tk, true);
-		clock_was_set_delayed();
+		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 	}
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+	if (tai != orig_tai)
+		clock_was_set();
+
 	ntp_notify_cmos_timer();
 
 	return ret;
@@ -1739,4 +1737,5 @@ void xtime_update(unsigned long ticks)
 	write_seqlock(&jiffies_lock);
 	do_timer(ticks);
 	write_sequnlock(&jiffies_lock);
+	update_wall_time();
 }
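
Taken together, the timekeeping.c changes move update_wall_time() out from under jiffies_lock and defer clock_was_set() until every timekeeper lock is dropped. The resulting shape of update_wall_time(), heavily simplified (a sketch of the flow above, not verbatim kernel code; seqcount handling elided):

	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	/* ... accumulate cycles; each step may return TK_CLOCK_WAS_SET ... */
	clock_set |= accumulate_nsecs_to_secs(tk);
	timekeeping_update(real_tk, clock_set);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (clock_set)
		clock_was_set();	/* safe: no timekeeper locks held here */

Deferring the notification this way removes the clock_was_set_delayed() deadlock this branch fixes, and calling update_wall_time() after write_sequnlock(&jiffies_lock) keeps the jiffies seqlock writer section minimal.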