author    Frederic Weisbecker <fweisbec@gmail.com>  2016-09-25 20:29:20 -0400
committer Ingo Molnar <mingo@kernel.org>            2016-09-30 05:46:40 -0400
commit    19d23dbfeb10724675152915e76e03d771f23d9d (patch)
tree      d67760ee107a72bfb3fd4ca7f584e63aa689bb07
parent    68107df5f2cb5dc3785be40162bfe2f19a178bbb (diff)
sched/irqtime: Consolidate accounting synchronization with u64_stats API
The irqtime accounting currently implements its own ad hoc version of the
u64_stats API. Let's consolidate it with the proper library instead.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1474849761-12678-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  kernel/sched/cputime.c  31
-rw-r--r--  kernel/sched/sched.h    53
2 files changed, 29 insertions(+), 55 deletions(-)
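For context, here is a minimal sketch of the u64_stats_sync usage pattern the patch adopts (illustrative only, not part of the commit; struct my_stats, my_stats_add() and my_stats_read() are made-up names):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical 64-bit counter protected by u64_stats_sync. */
struct my_stats {
	u64			packets;
	struct u64_stats_sync	sync;	/* empty on 64-bit kernels */
};

/*
 * Writer side, e.g. a per-CPU hot path. Writers must already be
 * serialized against each other; the sync object only protects readers
 * from torn 64-bit reads on 32-bit kernels.
 */
static void my_stats_add(struct my_stats *s, u64 delta)
{
	u64_stats_update_begin(&s->sync);
	s->packets += delta;
	u64_stats_update_end(&s->sync);
}

/* Reader side: retry until a consistent snapshot is observed. */
static u64 my_stats_read(struct my_stats *s)
{
	unsigned int seq;
	u64 val;

	do {
		seq = u64_stats_fetch_begin(&s->sync);
		val = s->packets;
	} while (u64_stats_fetch_retry(&s->sync, seq));

	return val;
}

The sync member is set up with u64_stats_init() before first use. Note that the patch's irq_time_read() below uses the double-underscore fetch variants, which skip the extra preemption handling the plain helpers applied on 32-bit UP kernels of that era.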
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 94b1a72879ec..1cea2f100798 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -23,10 +23,8 @@
  * task when irq is in progress while we read rq->clock. That is a worthy
  * compromise in place of having locks on each irq in account_system_time.
  */
-DEFINE_PER_CPU(u64, cpu_hardirq_time);
-DEFINE_PER_CPU(u64, cpu_softirq_time);
+DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
 
-static DEFINE_PER_CPU(u64, irq_start_time);
 static int sched_clock_irqtime;
 
 void enable_sched_clock_irqtime(void)
@@ -39,16 +37,13 @@ void disable_sched_clock_irqtime(void)
 	sched_clock_irqtime = 0;
 }
 
-#ifndef CONFIG_64BIT
-DEFINE_PER_CPU(seqcount_t, irq_time_seq);
-#endif /* CONFIG_64BIT */
-
 /*
  * Called before incrementing preempt_count on {soft,}irq_enter
  * and before decrementing preempt_count on {soft,}irq_exit.
  */
 void irqtime_account_irq(struct task_struct *curr)
 {
+	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
 	s64 delta;
 	int cpu;
 
@@ -56,10 +51,10 @@ void irqtime_account_irq(struct task_struct *curr)
 		return;
 
 	cpu = smp_processor_id();
-	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
-	__this_cpu_add(irq_start_time, delta);
+	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
+	irqtime->irq_start_time += delta;
 
-	irq_time_write_begin();
+	u64_stats_update_begin(&irqtime->sync);
 	/*
 	 * We do not account for softirq time from ksoftirqd here.
 	 * We want to continue accounting softirq time to ksoftirqd thread
@@ -67,11 +62,11 @@ void irqtime_account_irq(struct task_struct *curr)
 	 * that do not consume any time, but still wants to run.
 	 */
 	if (hardirq_count())
-		__this_cpu_add(cpu_hardirq_time, delta);
+		irqtime->hardirq_time += delta;
 	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
-		__this_cpu_add(cpu_softirq_time, delta);
+		irqtime->softirq_time += delta;
 
-	irq_time_write_end();
+	u64_stats_update_end(&irqtime->sync);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
 
@@ -79,9 +74,10 @@ static cputime_t irqtime_account_hi_update(cputime_t maxtime)
 {
 	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	cputime_t irq_cputime;
+	u64 nsecs;
 
-	irq_cputime = nsecs_to_cputime64(__this_cpu_read(cpu_hardirq_time)) -
-		      cpustat[CPUTIME_IRQ];
+	nsecs = __this_cpu_read(cpu_irqtime.hardirq_time);
+	irq_cputime = nsecs_to_cputime64(nsecs) - cpustat[CPUTIME_IRQ];
 	irq_cputime = min(irq_cputime, maxtime);
 	cpustat[CPUTIME_IRQ] += irq_cputime;
 
@@ -92,9 +88,10 @@ static cputime_t irqtime_account_si_update(cputime_t maxtime)
 {
 	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	cputime_t softirq_cputime;
+	u64 nsecs;
 
-	softirq_cputime = nsecs_to_cputime64(__this_cpu_read(cpu_softirq_time)) -
-			  cpustat[CPUTIME_SOFTIRQ];
+	nsecs = __this_cpu_read(cpu_irqtime.softirq_time);
+	softirq_cputime = nsecs_to_cputime64(nsecs) - cpustat[CPUTIME_SOFTIRQ];
 	softirq_cputime = min(softirq_cputime, maxtime);
 	cpustat[CPUTIME_SOFTIRQ] += softirq_cputime;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5489d07a4643..19b99869809d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/u64_stats_sync.h>
 #include <linux/sched/deadline.h>
 #include <linux/binfmts.h>
 #include <linux/mutex.h>
@@ -1735,52 +1736,28 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+struct irqtime {
+	u64			hardirq_time;
+	u64			softirq_time;
+	u64			irq_start_time;
+	struct u64_stats_sync	sync;
+};
 
-DECLARE_PER_CPU(u64, cpu_hardirq_time);
-DECLARE_PER_CPU(u64, cpu_softirq_time);
-
-#ifndef CONFIG_64BIT
-DECLARE_PER_CPU(seqcount_t, irq_time_seq);
-
-static inline void irq_time_write_begin(void)
-{
-	__this_cpu_inc(irq_time_seq.sequence);
-	smp_wmb();
-}
-
-static inline void irq_time_write_end(void)
-{
-	smp_wmb();
-	__this_cpu_inc(irq_time_seq.sequence);
-}
+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
 
 static inline u64 irq_time_read(int cpu)
 {
-	u64 irq_time;
-	unsigned seq;
+	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
+	unsigned int seq;
+	u64 total;
 
 	do {
-		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
-		irq_time = per_cpu(cpu_softirq_time, cpu) +
-			   per_cpu(cpu_hardirq_time, cpu);
-	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
-
-	return irq_time;
-}
-#else /* CONFIG_64BIT */
-static inline void irq_time_write_begin(void)
-{
-}
+		seq = __u64_stats_fetch_begin(&irqtime->sync);
+		total = irqtime->softirq_time + irqtime->hardirq_time;
+	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
 
-static inline void irq_time_write_end(void)
-{
-}
-
-static inline u64 irq_time_read(int cpu)
-{
-	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
-}
-#endif /* CONFIG_64BIT */
+	return total;
+}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #ifdef CONFIG_CPU_FREQ
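Why dropping the CONFIG_64BIT split above is safe: the u64_stats library performs the same specialization internally. On 64-bit kernels the sync object and its begin/end/fetch helpers compile to nothing, so the consolidated irq_time_read() keeps the old lockless fast path. A simplified sketch of the definition in <linux/u64_stats_sync.h> of this era:

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;	/* only 32-bit SMP pays for a seqcount */
#endif
};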