Diffstat (limited to 'kernel/sched/cputime.c')
-rw-r--r--	kernel/sched/cputime.c	74
1 file changed, 28 insertions, 46 deletions
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a7959e05a9d5..99947919e30b 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -121,7 +121,7 @@ static inline void task_group_account_field(struct task_struct *p, int index,
 	 * is the only cgroup, then nothing else should be necessary.
 	 *
 	 */
-	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
+	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);
 
 	cpuacct_account_field(p, index, tmp);
 }
@@ -378,11 +378,8 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 
 #ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_task_switch(struct task_struct *prev)
+void vtime_common_task_switch(struct task_struct *prev)
 {
-	if (!vtime_accounting_enabled())
-		return;
-
 	if (is_idle_task(prev))
 		vtime_account_idle(prev);
 	else
@@ -404,11 +401,8 @@ void vtime_task_switch(struct task_struct *prev)
  * vtime_account().
  */
 #ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account_irq_enter(struct task_struct *tsk)
+void vtime_common_account_irq_enter(struct task_struct *tsk)
 {
-	if (!vtime_accounting_enabled())
-		return;
-
 	if (!in_interrupt()) {
 		/*
 		 * If we interrupted user, context_tracking_in_user()
@@ -428,7 +422,7 @@ void vtime_account_irq_enter(struct task_struct *tsk)
 	}
 	vtime_account_system(tsk);
 }
-EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
+EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 
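The vtime_accounting_enabled() early returns disappear from these helpers and the out-of-line bodies take common_/gen_ names. The matching callers are not part of this diff; a plausible shape for them, sketched here purely as an assumption, is a static inline header wrapper that keeps the cheap enabled check at the call site and only then branches into the renamed helper:

/* Sketch under assumptions: the header-side wrapper is not shown in this
 * diff; the declarations below are stubs so the fragment stands on its own. */
#include <stdbool.h>

struct task_struct;					/* opaque stub */
extern bool vtime_accounting_enabled(void);		/* assumed predicate */
extern void vtime_common_task_switch(struct task_struct *prev);

static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_enabled())
		vtime_common_task_switch(prev);
}

The same shape would cover vtime_common_account_irq_enter() as well.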
@@ -557,16 +551,7 @@ static void cputime_adjust(struct task_cputime *curr,
 			   struct cputime *prev,
 			   cputime_t *ut, cputime_t *st)
 {
-	cputime_t rtime, stime, utime, total;
-
-	if (vtime_accounting_enabled()) {
-		*ut = curr->utime;
-		*st = curr->stime;
-		return;
-	}
-
-	stime = curr->stime;
-	total = stime + curr->utime;
+	cputime_t rtime, stime, utime;
 
 	/*
 	 * Tick based cputime accounting depend on random scheduling
@@ -588,13 +573,19 @@ static void cputime_adjust(struct task_cputime *curr,
 	if (prev->stime + prev->utime >= rtime)
 		goto out;
 
-	if (total) {
+	stime = curr->stime;
+	utime = curr->utime;
+
+	if (utime == 0) {
+		stime = rtime;
+	} else if (stime == 0) {
+		utime = rtime;
+	} else {
+		cputime_t total = stime + utime;
+
 		stime = scale_stime((__force u64)stime,
 				    (__force u64)rtime, (__force u64)total);
 		utime = rtime - stime;
-	} else {
-		stime = rtime;
-		utime = 0;
 	}
 
 	/*
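With this hunk, cputime_adjust() only performs the proportional split when both tick-based samples are non-zero; if either utime or stime is zero, all of rtime goes to the other side and the division is skipped. A self-contained userspace sketch of just that splitting step (u64 arithmetic stands in for cputime_t, the kernel's overflow-aware scale_stime() is reduced to a plain multiply/divide, and the surrounding monotonicity checks against prev are omitted):

#include <stdio.h>
#include <stdint.h>

/* Simplified proportional split; the real scale_stime() also guards
 * against 64x64-bit multiplication overflow, which is ignored here. */
static uint64_t scale_stime(uint64_t stime, uint64_t rtime, uint64_t total)
{
	return stime * rtime / total;
}

/* Mirrors the splitting logic added above: divide rtime between stime and
 * utime in proportion to the tick samples, unless one sample is zero. */
static void cputime_adjust_sketch(uint64_t sample_utime, uint64_t sample_stime,
				  uint64_t rtime, uint64_t *ut, uint64_t *st)
{
	uint64_t stime = sample_stime;
	uint64_t utime = sample_utime;

	if (utime == 0) {			/* every tick hit kernel mode */
		stime = rtime;
	} else if (stime == 0) {		/* every tick hit user mode */
		utime = rtime;
	} else {				/* split rtime proportionally */
		uint64_t total = stime + utime;

		stime = scale_stime(stime, rtime, total);
		utime = rtime - stime;
	}

	*ut = utime;
	*st = stime;
}

int main(void)
{
	uint64_t ut, st;

	/* 2 system ticks vs. 6 user ticks over 1200 units of runtime:
	 * the split comes out as stime=300, utime=900 rather than the
	 * raw tick counts. */
	cputime_adjust_sketch(6, 2, 1200, &ut, &st);
	printf("utime=%llu stime=%llu\n",
	       (unsigned long long)ut, (unsigned long long)st);
	return 0;
}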
@@ -664,23 +655,17 @@ static void __vtime_account_system(struct task_struct *tsk)
 
 void vtime_account_system(struct task_struct *tsk)
 {
-	if (!vtime_accounting_enabled())
-		return;
-
 	write_seqlock(&tsk->vtime_seqlock);
 	__vtime_account_system(tsk);
 	write_sequnlock(&tsk->vtime_seqlock);
 }
 
-void vtime_account_irq_exit(struct task_struct *tsk)
+void vtime_gen_account_irq_exit(struct task_struct *tsk)
 {
-	if (!vtime_accounting_enabled())
-		return;
-
 	write_seqlock(&tsk->vtime_seqlock);
+	__vtime_account_system(tsk);
 	if (context_tracking_in_user())
 		tsk->vtime_snap_whence = VTIME_USER;
-	__vtime_account_system(tsk);
 	write_sequnlock(&tsk->vtime_seqlock);
 }
 
@@ -688,12 +673,8 @@ void vtime_account_user(struct task_struct *tsk)
 {
 	cputime_t delta_cpu;
 
-	if (!vtime_accounting_enabled())
-		return;
-
-	delta_cpu = get_vtime_delta(tsk);
-
 	write_seqlock(&tsk->vtime_seqlock);
+	delta_cpu = get_vtime_delta(tsk);
 	tsk->vtime_snap_whence = VTIME_SYS;
 	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
 	write_sequnlock(&tsk->vtime_seqlock);
@@ -701,22 +682,27 @@ void vtime_account_user(struct task_struct *tsk)
 
 void vtime_user_enter(struct task_struct *tsk)
 {
-	if (!vtime_accounting_enabled())
-		return;
-
 	write_seqlock(&tsk->vtime_seqlock);
-	tsk->vtime_snap_whence = VTIME_USER;
 	__vtime_account_system(tsk);
+	tsk->vtime_snap_whence = VTIME_USER;
 	write_sequnlock(&tsk->vtime_seqlock);
 }
 
 void vtime_guest_enter(struct task_struct *tsk)
 {
+	/*
+	 * The flags must be updated under the lock with
+	 * the vtime_snap flush and update.
+	 * That enforces a right ordering and update sequence
+	 * synchronization against the reader (task_gtime())
+	 * that can thus safely catch up with a tickless delta.
+	 */
 	write_seqlock(&tsk->vtime_seqlock);
 	__vtime_account_system(tsk);
 	current->flags |= PF_VCPU;
 	write_sequnlock(&tsk->vtime_seqlock);
 }
+EXPORT_SYMBOL_GPL(vtime_guest_enter);
 
 void vtime_guest_exit(struct task_struct *tsk)
 {
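The new comment in vtime_guest_enter() is about publication order against the vtime_seqlock reader: the snapshot flush and the PF_VCPU flag flip sit inside one write-side section, so a reader in the style of task_gtime() either observes both or retries. A rough, self-contained userspace sketch of that retry pattern, assuming plain C11 atomics with everything sequentially consistent for simplicity (the kernel's seqlock primitives and task_gtime() are only mimicked here):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for tsk->vtime_seqlock, tsk->vtime_snap and the PF_VCPU flag. */
struct vtime_sketch {
	atomic_uint seq;	/* even: stable, odd: writer inside section */
	atomic_ullong snap;	/* last accounting snapshot */
	atomic_int guest;	/* "PF_VCPU" */
};

/* Writer, vtime_guest_enter()-like: snapshot and flag move together. */
static void guest_enter(struct vtime_sketch *v, unsigned long long now)
{
	atomic_fetch_add(&v->seq, 1);	/* write_seqlock() */
	atomic_store(&v->snap, now);	/* __vtime_account_system() flush */
	atomic_store(&v->guest, 1);	/* current->flags |= PF_VCPU */
	atomic_fetch_add(&v->seq, 1);	/* write_sequnlock() */
}

/* Reader, task_gtime()-like: retry until a consistent pair is seen, then
 * add the pending tickless delta only if the guest flag is set. */
static unsigned long long read_gtime(struct vtime_sketch *v,
				     unsigned long long now)
{
	unsigned int begin;
	unsigned long long gtime;

	do {
		begin = atomic_load(&v->seq);
		if (begin & 1)
			continue;	/* writer active, try again */
		gtime = atomic_load(&v->guest) ? now - atomic_load(&v->snap) : 0;
	} while (atomic_load(&v->seq) != begin);

	return gtime;
}

int main(void)
{
	struct vtime_sketch v = { 0 };

	guest_enter(&v, 1000);
	printf("pending guest delta: %llu\n", read_gtime(&v, 1420)); /* 420 */
	return 0;
}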
@@ -725,6 +711,7 @@ void vtime_guest_exit(struct task_struct *tsk)
 	current->flags &= ~PF_VCPU;
 	write_sequnlock(&tsk->vtime_seqlock);
 }
+EXPORT_SYMBOL_GPL(vtime_guest_exit);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
@@ -733,11 +720,6 @@ void vtime_account_idle(struct task_struct *tsk)
 	account_idle_time(delta_cpu);
 }
 
-bool vtime_accounting_enabled(void)
-{
-	return context_tracking_active();
-}
-
 void arch_vtime_task_switch(struct task_struct *prev)
 {
 	write_seqlock(&prev->vtime_seqlock);