author	Wanpeng Li <wanpeng.li@hotmail.com>	2017-07-09 03:40:28 -0400
committer	Ingo Molnar <mingo@kernel.org>	2017-07-14 04:27:15 -0400
commit	0e4097c3354e2f5a5ad8affd9dc7f7f7d00bb6b9 (patch)
tree	7806ceddfe0be9927e01527f5db610310bdbd71a /kernel
parent	242fc35290bd8cf0effc6e3474e3a417985de2f3 (diff)
sched/cputime: Don't use smp_processor_id() in preemptible context
Recent kernels trigger this warning:

 BUG: using smp_processor_id() in preemptible [00000000] code: 99-trinity/181
 caller is debug_smp_processor_id+0x17/0x19
 CPU: 0 PID: 181 Comm: 99-trinity Not tainted 4.12.0-01059-g2a42eb9 #1
 Call Trace:
  dump_stack+0x82/0xb8
  check_preemption_disabled()
  debug_smp_processor_id()
  vtime_delta()
  task_cputime()
  thread_group_cputime()
  thread_group_cputime_adjusted()
  wait_consider_task()
  do_wait()
  SYSC_wait4()
  do_syscall_64()
  entry_SYSCALL64_slow_path()

As Frederic pointed out:

| Although those sched_clock_cpu() things seem to only matter when the
| sched_clock() is unstable. And that stability is a condition for nohz_full
| to work anyway. So probably sched_clock() alone would be enough.

This patch fixes it by replacing sched_clock_cpu() with sched_clock(), which
avoids calling smp_processor_id() in a preemptible context.

Reported-by: Xiaolong Ye <xiaolong.ye@intel.com>
Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1499586028-7402-1-git-send-email-wanpeng.li@hotmail.com
[ Prettified the changelog. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
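The rule the warning enforces is general: smp_processor_id() is only meaningful while the caller cannot migrate, i.e. with preemption disabled. The sketch below is illustrative only and not part of the patch; the helper names read_clock_pinned() and read_clock_simple() are made up for this example. It contrasts the two usual remedies: pinning the task around the per-CPU read, or dropping the CPU id entirely as this patch does.

#include <linux/types.h>
#include <linux/smp.h>
#include <linux/sched/clock.h>

/*
 * Illustrative only: pin the task to the current CPU for the duration of
 * the per-CPU clock read. get_cpu() disables preemption and returns the
 * current CPU id; put_cpu() re-enables preemption.
 */
static u64 read_clock_pinned(void)
{
	int cpu = get_cpu();
	u64 clock = sched_clock_cpu(cpu);

	put_cpu();
	return clock;
}

/*
 * Illustrative only: the approach this patch takes. sched_clock() needs
 * no CPU id at all, so it is safe to call from preemptible context.
 */
static u64 read_clock_simple(void)
{
	return sched_clock();
}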
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/cputime.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 6e3ea4ac1bda..14d2dbf97c53 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -683,7 +683,7 @@ static u64 vtime_delta(struct vtime *vtime)
 {
 	unsigned long long clock;
 
-	clock = sched_clock_cpu(smp_processor_id());
+	clock = sched_clock();
 	if (clock < vtime->starttime)
 		return 0;
 
@@ -814,7 +814,7 @@ void arch_vtime_task_switch(struct task_struct *prev)
 
 	write_seqcount_begin(&vtime->seqcount);
 	vtime->state = VTIME_SYS;
-	vtime->starttime = sched_clock_cpu(smp_processor_id());
+	vtime->starttime = sched_clock();
 	write_seqcount_end(&vtime->seqcount);
 }
 
@@ -826,7 +826,7 @@ void vtime_init_idle(struct task_struct *t, int cpu)
 	local_irq_save(flags);
 	write_seqcount_begin(&vtime->seqcount);
 	vtime->state = VTIME_SYS;
-	vtime->starttime = sched_clock_cpu(cpu);
+	vtime->starttime = sched_clock();
 	write_seqcount_end(&vtime->seqcount);
 	local_irq_restore(flags);
 }
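For reference, a hedged reconstruction of how the first hunk's function reads after the patch. The closing return statement is not visible in the hunk above and is filled in from the surrounding kernel/sched/cputime.c of that era, so treat it as approximate:

static u64 vtime_delta(struct vtime *vtime)
{
	unsigned long long clock;

	/* sched_clock() instead of sched_clock_cpu(smp_processor_id()),
	 * so the function is safe to call from preemptible context.
	 */
	clock = sched_clock();
	if (clock < vtime->starttime)
		return 0;

	return clock - vtime->starttime;
}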