diff options
author | Glauber Costa <glommer@parallels.com> | 2011-11-28 11:45:17 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-12-06 03:06:38 -0500 |
commit | 3292beb340c76884427faa1f5d6085719477d889 (patch) | |
tree | cb7e431b2a15fa66ef5278d485131bac7a125fbd | |
parent | 786d6dc7aeb2bfbfe417507b7beb83919f319db3 (diff) |
sched/accounting: Change cpustat fields to an array
This patch changes fields in cpustat from a structure, to a
u64 array. Math gets easier, and the code is more flexible.
Signed-off-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1322498719-2255-2-git-send-email-glommer@parallels.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | arch/s390/appldata/appldata_os.c | 16 | ||||
-rw-r--r-- | arch/x86/include/asm/i387.h | 2 | ||||
-rw-r--r-- | drivers/cpufreq/cpufreq_conservative.c | 38 | ||||
-rw-r--r-- | drivers/cpufreq/cpufreq_ondemand.c | 38 | ||||
-rw-r--r-- | drivers/macintosh/rack-meter.c | 8 | ||||
-rw-r--r-- | fs/proc/stat.c | 63 | ||||
-rw-r--r-- | fs/proc/uptime.c | 4 | ||||
-rw-r--r-- | include/linux/kernel_stat.h | 36 | ||||
-rw-r--r-- | kernel/sched/core.c | 78 |
9 files changed, 142 insertions(+), 141 deletions(-)
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 92f1cb745d69..4de031d6b76c 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c | |||
@@ -115,21 +115,21 @@ static void appldata_get_os_data(void *data) | |||
115 | j = 0; | 115 | j = 0; |
116 | for_each_online_cpu(i) { | 116 | for_each_online_cpu(i) { |
117 | os_data->os_cpu[j].per_cpu_user = | 117 | os_data->os_cpu[j].per_cpu_user = |
118 | cputime_to_jiffies(kstat_cpu(i).cpustat.user); | 118 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]); |
119 | os_data->os_cpu[j].per_cpu_nice = | 119 | os_data->os_cpu[j].per_cpu_nice = |
120 | cputime_to_jiffies(kstat_cpu(i).cpustat.nice); | 120 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]); |
121 | os_data->os_cpu[j].per_cpu_system = | 121 | os_data->os_cpu[j].per_cpu_system = |
122 | cputime_to_jiffies(kstat_cpu(i).cpustat.system); | 122 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]); |
123 | os_data->os_cpu[j].per_cpu_idle = | 123 | os_data->os_cpu[j].per_cpu_idle = |
124 | cputime_to_jiffies(kstat_cpu(i).cpustat.idle); | 124 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]); |
125 | os_data->os_cpu[j].per_cpu_irq = | 125 | os_data->os_cpu[j].per_cpu_irq = |
126 | cputime_to_jiffies(kstat_cpu(i).cpustat.irq); | 126 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]); |
127 | os_data->os_cpu[j].per_cpu_softirq = | 127 | os_data->os_cpu[j].per_cpu_softirq = |
128 | cputime_to_jiffies(kstat_cpu(i).cpustat.softirq); | 128 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]); |
129 | os_data->os_cpu[j].per_cpu_iowait = | 129 | os_data->os_cpu[j].per_cpu_iowait = |
130 | cputime_to_jiffies(kstat_cpu(i).cpustat.iowait); | 130 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]); |
131 | os_data->os_cpu[j].per_cpu_steal = | 131 | os_data->os_cpu[j].per_cpu_steal = |
132 | cputime_to_jiffies(kstat_cpu(i).cpustat.steal); | 132 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]); |
133 | os_data->os_cpu[j].cpu_id = i; | 133 | os_data->os_cpu[j].cpu_id = i; |
134 | j++; | 134 | j++; |
135 | } | 135 | } |
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index c9e09ea05644..6919e936345b 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h | |||
@@ -218,7 +218,7 @@ static inline void fpu_fxsave(struct fpu *fpu) | |||
218 | #ifdef CONFIG_SMP | 218 | #ifdef CONFIG_SMP |
219 | #define safe_address (__per_cpu_offset[0]) | 219 | #define safe_address (__per_cpu_offset[0]) |
220 | #else | 220 | #else |
221 | #define safe_address (kstat_cpu(0).cpustat.user) | 221 | #define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER]) |
222 | #endif | 222 | #endif |
223 | 223 | ||
224 | /* | 224 | /* |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index c97b468ee9f7..118bff73fed3 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -95,27 +95,26 @@ static struct dbs_tuners { | |||
95 | .freq_step = 5, | 95 | .freq_step = 5, |
96 | }; | 96 | }; |
97 | 97 | ||
98 | static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, | 98 | static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) |
99 | cputime64_t *wall) | ||
100 | { | 99 | { |
101 | cputime64_t idle_time; | 100 | u64 idle_time; |
102 | cputime64_t cur_wall_time; | 101 | cputime64_t cur_wall_time; |
103 | cputime64_t busy_time; | 102 | u64 busy_time; |
104 | 103 | ||
105 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); | 104 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); |
106 | busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, | 105 | busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER] + |
107 | kstat_cpu(cpu).cpustat.system); | 106 | kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; |
108 | 107 | ||
109 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); | 108 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; |
110 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); | 109 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; |
111 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); | 110 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; |
112 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); | 111 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; |
113 | 112 | ||
114 | idle_time = cputime64_sub(cur_wall_time, busy_time); | 113 | idle_time = cputime64_sub(cur_wall_time, busy_time); |
115 | if (wall) | 114 | if (wall) |
116 | *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); | 115 | *wall = jiffies_to_usecs(cur_wall_time); |
117 | 116 | ||
118 | return (cputime64_t)jiffies_to_usecs(idle_time); | 117 | return jiffies_to_usecs(idle_time); |
119 | } | 118 | } |
120 | 119 | ||
121 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) | 120 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) |
@@ -272,7 +271,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, | |||
272 | dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | 271 | dbs_info->prev_cpu_idle = get_cpu_idle_time(j, |
273 | &dbs_info->prev_cpu_wall); | 272 | &dbs_info->prev_cpu_wall); |
274 | if (dbs_tuners_ins.ignore_nice) | 273 | if (dbs_tuners_ins.ignore_nice) |
275 | dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; | 274 | dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
276 | } | 275 | } |
277 | return count; | 276 | return count; |
278 | } | 277 | } |
@@ -362,11 +361,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
362 | j_dbs_info->prev_cpu_idle = cur_idle_time; | 361 | j_dbs_info->prev_cpu_idle = cur_idle_time; |
363 | 362 | ||
364 | if (dbs_tuners_ins.ignore_nice) { | 363 | if (dbs_tuners_ins.ignore_nice) { |
365 | cputime64_t cur_nice; | 364 | u64 cur_nice; |
366 | unsigned long cur_nice_jiffies; | 365 | unsigned long cur_nice_jiffies; |
367 | 366 | ||
368 | cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, | 367 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - |
369 | j_dbs_info->prev_cpu_nice); | 368 | j_dbs_info->prev_cpu_nice; |
370 | /* | 369 | /* |
371 | * Assumption: nice time between sampling periods will | 370 | * Assumption: nice time between sampling periods will |
372 | * be less than 2^32 jiffies for 32 bit sys | 371 | * be less than 2^32 jiffies for 32 bit sys |
@@ -374,7 +373,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
374 | cur_nice_jiffies = (unsigned long) | 373 | cur_nice_jiffies = (unsigned long) |
375 | cputime64_to_jiffies64(cur_nice); | 374 | cputime64_to_jiffies64(cur_nice); |
376 | 375 | ||
377 | j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; | 376 | j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
378 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | 377 | idle_time += jiffies_to_usecs(cur_nice_jiffies); |
379 | } | 378 | } |
380 | 379 | ||
@@ -501,10 +500,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
501 | 500 | ||
502 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | 501 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, |
503 | &j_dbs_info->prev_cpu_wall); | 502 | &j_dbs_info->prev_cpu_wall); |
504 | if (dbs_tuners_ins.ignore_nice) { | 503 | if (dbs_tuners_ins.ignore_nice) |
505 | j_dbs_info->prev_cpu_nice = | 504 | j_dbs_info->prev_cpu_nice = |
506 | kstat_cpu(j).cpustat.nice; | 505 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
507 | } | ||
508 | } | 506 | } |
509 | this_dbs_info->down_skip = 0; | 507 | this_dbs_info->down_skip = 0; |
510 | this_dbs_info->requested_freq = policy->cur; | 508 | this_dbs_info->requested_freq = policy->cur; |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index fa8af4ebb1d6..f3d327cee43f 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -119,27 +119,26 @@ static struct dbs_tuners { | |||
119 | .powersave_bias = 0, | 119 | .powersave_bias = 0, |
120 | }; | 120 | }; |
121 | 121 | ||
122 | static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, | 122 | static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) |
123 | cputime64_t *wall) | ||
124 | { | 123 | { |
125 | cputime64_t idle_time; | 124 | u64 idle_time; |
126 | cputime64_t cur_wall_time; | 125 | cputime64_t cur_wall_time; |
127 | cputime64_t busy_time; | 126 | u64 busy_time; |
128 | 127 | ||
129 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); | 128 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); |
130 | busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, | 129 | busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER] + |
131 | kstat_cpu(cpu).cpustat.system); | 130 | kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; |
132 | 131 | ||
133 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); | 132 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; |
134 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); | 133 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; |
135 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); | 134 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; |
136 | busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); | 135 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; |
137 | 136 | ||
138 | idle_time = cputime64_sub(cur_wall_time, busy_time); | 137 | idle_time = cputime64_sub(cur_wall_time, busy_time); |
139 | if (wall) | 138 | if (wall) |
140 | *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); | 139 | *wall = jiffies_to_usecs(cur_wall_time); |
141 | 140 | ||
142 | return (cputime64_t)jiffies_to_usecs(idle_time); | 141 | return jiffies_to_usecs(idle_time); |
143 | } | 142 | } |
144 | 143 | ||
145 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) | 144 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) |
@@ -345,7 +344,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, | |||
345 | dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | 344 | dbs_info->prev_cpu_idle = get_cpu_idle_time(j, |
346 | &dbs_info->prev_cpu_wall); | 345 | &dbs_info->prev_cpu_wall); |
347 | if (dbs_tuners_ins.ignore_nice) | 346 | if (dbs_tuners_ins.ignore_nice) |
348 | dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; | 347 | dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
349 | 348 | ||
350 | } | 349 | } |
351 | return count; | 350 | return count; |
@@ -455,11 +454,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
455 | j_dbs_info->prev_cpu_iowait = cur_iowait_time; | 454 | j_dbs_info->prev_cpu_iowait = cur_iowait_time; |
456 | 455 | ||
457 | if (dbs_tuners_ins.ignore_nice) { | 456 | if (dbs_tuners_ins.ignore_nice) { |
458 | cputime64_t cur_nice; | 457 | u64 cur_nice; |
459 | unsigned long cur_nice_jiffies; | 458 | unsigned long cur_nice_jiffies; |
460 | 459 | ||
461 | cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, | 460 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - |
462 | j_dbs_info->prev_cpu_nice); | 461 | j_dbs_info->prev_cpu_nice; |
463 | /* | 462 | /* |
464 | * Assumption: nice time between sampling periods will | 463 | * Assumption: nice time between sampling periods will |
465 | * be less than 2^32 jiffies for 32 bit sys | 464 | * be less than 2^32 jiffies for 32 bit sys |
@@ -467,7 +466,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
467 | cur_nice_jiffies = (unsigned long) | 466 | cur_nice_jiffies = (unsigned long) |
468 | cputime64_to_jiffies64(cur_nice); | 467 | cputime64_to_jiffies64(cur_nice); |
469 | 468 | ||
470 | j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; | 469 | j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
471 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | 470 | idle_time += jiffies_to_usecs(cur_nice_jiffies); |
472 | } | 471 | } |
473 | 472 | ||
@@ -646,10 +645,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
646 | 645 | ||
647 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | 646 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, |
648 | &j_dbs_info->prev_cpu_wall); | 647 | &j_dbs_info->prev_cpu_wall); |
649 | if (dbs_tuners_ins.ignore_nice) { | 648 | if (dbs_tuners_ins.ignore_nice) |
650 | j_dbs_info->prev_cpu_nice = | 649 | j_dbs_info->prev_cpu_nice = |
651 | kstat_cpu(j).cpustat.nice; | 650 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
652 | } | ||
653 | } | 651 | } |
654 | this_dbs_info->cpu = cpu; | 652 | this_dbs_info->cpu = cpu; |
655 | this_dbs_info->rate_mult = 1; | 653 | this_dbs_info->rate_mult = 1; |
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 2637c139777b..66d7f1c7baa1 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c | |||
@@ -81,13 +81,13 @@ static int rackmeter_ignore_nice; | |||
81 | */ | 81 | */ |
82 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu) | 82 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu) |
83 | { | 83 | { |
84 | cputime64_t retval; | 84 | u64 retval; |
85 | 85 | ||
86 | retval = cputime64_add(kstat_cpu(cpu).cpustat.idle, | 86 | retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] + |
87 | kstat_cpu(cpu).cpustat.iowait); | 87 | kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; |
88 | 88 | ||
89 | if (rackmeter_ignore_nice) | 89 | if (rackmeter_ignore_nice) |
90 | retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice); | 90 | retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; |
91 | 91 | ||
92 | return retval; | 92 | return retval; |
93 | } | 93 | } |
diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 42b274da92c3..8a6ab666e9f8 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c | |||
@@ -22,29 +22,27 @@ | |||
22 | #define arch_idle_time(cpu) 0 | 22 | #define arch_idle_time(cpu) 0 |
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | static cputime64_t get_idle_time(int cpu) | 25 | static u64 get_idle_time(int cpu) |
26 | { | 26 | { |
27 | u64 idle_time = get_cpu_idle_time_us(cpu, NULL); | 27 | u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL); |
28 | cputime64_t idle; | ||
29 | 28 | ||
30 | if (idle_time == -1ULL) { | 29 | if (idle_time == -1ULL) { |
31 | /* !NO_HZ so we can rely on cpustat.idle */ | 30 | /* !NO_HZ so we can rely on cpustat.idle */ |
32 | idle = kstat_cpu(cpu).cpustat.idle; | 31 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; |
33 | idle = cputime64_add(idle, arch_idle_time(cpu)); | 32 | idle += arch_idle_time(cpu); |
34 | } else | 33 | } else |
35 | idle = usecs_to_cputime(idle_time); | 34 | idle = usecs_to_cputime(idle_time); |
36 | 35 | ||
37 | return idle; | 36 | return idle; |
38 | } | 37 | } |
39 | 38 | ||
40 | static cputime64_t get_iowait_time(int cpu) | 39 | static u64 get_iowait_time(int cpu) |
41 | { | 40 | { |
42 | u64 iowait_time = get_cpu_iowait_time_us(cpu, NULL); | 41 | u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL); |
43 | cputime64_t iowait; | ||
44 | 42 | ||
45 | if (iowait_time == -1ULL) | 43 | if (iowait_time == -1ULL) |
46 | /* !NO_HZ so we can rely on cpustat.iowait */ | 44 | /* !NO_HZ so we can rely on cpustat.iowait */ |
47 | iowait = kstat_cpu(cpu).cpustat.iowait; | 45 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; |
48 | else | 46 | else |
49 | iowait = usecs_to_cputime(iowait_time); | 47 | iowait = usecs_to_cputime(iowait_time); |
50 | 48 | ||
@@ -55,33 +53,30 @@ static int show_stat(struct seq_file *p, void *v) | |||
55 | { | 53 | { |
56 | int i, j; | 54 | int i, j; |
57 | unsigned long jif; | 55 | unsigned long jif; |
58 | cputime64_t user, nice, system, idle, iowait, irq, softirq, steal; | 56 | u64 user, nice, system, idle, iowait, irq, softirq, steal; |
59 | cputime64_t guest, guest_nice; | 57 | u64 guest, guest_nice; |
60 | u64 sum = 0; | 58 | u64 sum = 0; |
61 | u64 sum_softirq = 0; | 59 | u64 sum_softirq = 0; |
62 | unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; | 60 | unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; |
63 | struct timespec boottime; | 61 | struct timespec boottime; |
64 | 62 | ||
65 | user = nice = system = idle = iowait = | 63 | user = nice = system = idle = iowait = |
66 | irq = softirq = steal = cputime64_zero; | 64 | irq = softirq = steal = 0; |
67 | guest = guest_nice = cputime64_zero; | 65 | guest = guest_nice = 0; |
68 | getboottime(&boottime); | 66 | getboottime(&boottime); |
69 | jif = boottime.tv_sec; | 67 | jif = boottime.tv_sec; |
70 | 68 | ||
71 | for_each_possible_cpu(i) { | 69 | for_each_possible_cpu(i) { |
72 | user = cputime64_add(user, kstat_cpu(i).cpustat.user); | 70 | user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; |
73 | nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice); | 71 | nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; |
74 | system = cputime64_add(system, kstat_cpu(i).cpustat.system); | 72 | system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; |
75 | idle = cputime64_add(idle, get_idle_time(i)); | 73 | idle += get_idle_time(i); |
76 | iowait = cputime64_add(iowait, get_iowait_time(i)); | 74 | iowait += get_iowait_time(i); |
77 | irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); | 75 | irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; |
78 | softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); | 76 | softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; |
79 | steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); | 77 | steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; |
80 | guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); | 78 | guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; |
81 | guest_nice = cputime64_add(guest_nice, | 79 | guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; |
82 | kstat_cpu(i).cpustat.guest_nice); | ||
83 | sum += kstat_cpu_irqs_sum(i); | ||
84 | sum += arch_irq_stat_cpu(i); | ||
85 | 80 | ||
86 | for (j = 0; j < NR_SOFTIRQS; j++) { | 81 | for (j = 0; j < NR_SOFTIRQS; j++) { |
87 | unsigned int softirq_stat = kstat_softirqs_cpu(j, i); | 82 | unsigned int softirq_stat = kstat_softirqs_cpu(j, i); |
@@ -106,16 +101,16 @@ static int show_stat(struct seq_file *p, void *v) | |||
106 | (unsigned long long)cputime64_to_clock_t(guest_nice)); | 101 | (unsigned long long)cputime64_to_clock_t(guest_nice)); |
107 | for_each_online_cpu(i) { | 102 | for_each_online_cpu(i) { |
108 | /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ | 103 | /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ |
109 | user = kstat_cpu(i).cpustat.user; | 104 | user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; |
110 | nice = kstat_cpu(i).cpustat.nice; | 105 | nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE]; |
111 | system = kstat_cpu(i).cpustat.system; | 106 | system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; |
112 | idle = get_idle_time(i); | 107 | idle = get_idle_time(i); |
113 | iowait = get_iowait_time(i); | 108 | iowait = get_iowait_time(i); |
114 | irq = kstat_cpu(i).cpustat.irq; | 109 | irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; |
115 | softirq = kstat_cpu(i).cpustat.softirq; | 110 | softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; |
116 | steal = kstat_cpu(i).cpustat.steal; | 111 | steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; |
117 | guest = kstat_cpu(i).cpustat.guest; | 112 | guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; |
118 | guest_nice = kstat_cpu(i).cpustat.guest_nice; | 113 | guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; |
119 | seq_printf(p, | 114 | seq_printf(p, |
120 | "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu " | 115 | "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu " |
121 | "%llu\n", | 116 | "%llu\n", |
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index 766b1d456050..0fb22e464e72 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c | |||
@@ -12,10 +12,10 @@ static int uptime_proc_show(struct seq_file *m, void *v) | |||
12 | struct timespec uptime; | 12 | struct timespec uptime; |
13 | struct timespec idle; | 13 | struct timespec idle; |
14 | int i; | 14 | int i; |
15 | cputime_t idletime = cputime_zero; | 15 | u64 idletime = 0; |
16 | 16 | ||
17 | for_each_possible_cpu(i) | 17 | for_each_possible_cpu(i) |
18 | idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle); | 18 | idletime += kcpustat_cpu(i).cpustat[CPUTIME_IDLE]; |
19 | 19 | ||
20 | do_posix_clock_monotonic_gettime(&uptime); | 20 | do_posix_clock_monotonic_gettime(&uptime); |
21 | monotonic_to_bootbased(&uptime); | 21 | monotonic_to_bootbased(&uptime); |
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 0cce2db580c3..2fbd9053c2df 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/percpu.h> | 6 | #include <linux/percpu.h> |
7 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
8 | #include <linux/interrupt.h> | 8 | #include <linux/interrupt.h> |
9 | #include <linux/sched.h> | ||
9 | #include <asm/irq.h> | 10 | #include <asm/irq.h> |
10 | #include <asm/cputime.h> | 11 | #include <asm/cputime.h> |
11 | 12 | ||
@@ -15,21 +16,25 @@ | |||
15 | * used by rstatd/perfmeter | 16 | * used by rstatd/perfmeter |
16 | */ | 17 | */ |
17 | 18 | ||
18 | struct cpu_usage_stat { | 19 | enum cpu_usage_stat { |
19 | cputime64_t user; | 20 | CPUTIME_USER, |
20 | cputime64_t nice; | 21 | CPUTIME_NICE, |
21 | cputime64_t system; | 22 | CPUTIME_SYSTEM, |
22 | cputime64_t softirq; | 23 | CPUTIME_SOFTIRQ, |
23 | cputime64_t irq; | 24 | CPUTIME_IRQ, |
24 | cputime64_t idle; | 25 | CPUTIME_IDLE, |
25 | cputime64_t iowait; | 26 | CPUTIME_IOWAIT, |
26 | cputime64_t steal; | 27 | CPUTIME_STEAL, |
27 | cputime64_t guest; | 28 | CPUTIME_GUEST, |
28 | cputime64_t guest_nice; | 29 | CPUTIME_GUEST_NICE, |
30 | NR_STATS, | ||
31 | }; | ||
32 | |||
33 | struct kernel_cpustat { | ||
34 | u64 cpustat[NR_STATS]; | ||
29 | }; | 35 | }; |
30 | 36 | ||
31 | struct kernel_stat { | 37 | struct kernel_stat { |
32 | struct cpu_usage_stat cpustat; | ||
33 | #ifndef CONFIG_GENERIC_HARDIRQS | 38 | #ifndef CONFIG_GENERIC_HARDIRQS |
34 | unsigned int irqs[NR_IRQS]; | 39 | unsigned int irqs[NR_IRQS]; |
35 | #endif | 40 | #endif |
@@ -38,10 +43,13 @@ struct kernel_stat { | |||
38 | }; | 43 | }; |
39 | 44 | ||
40 | DECLARE_PER_CPU(struct kernel_stat, kstat); | 45 | DECLARE_PER_CPU(struct kernel_stat, kstat); |
46 | DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); | ||
41 | 47 | ||
42 | #define kstat_cpu(cpu) per_cpu(kstat, cpu) | ||
43 | /* Must have preemption disabled for this to be meaningful. */ | 48 | /* Must have preemption disabled for this to be meaningful. */ |
44 | #define kstat_this_cpu __get_cpu_var(kstat) | 49 | #define kstat_this_cpu (&__get_cpu_var(kstat)) |
50 | #define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat)) | ||
51 | #define kstat_cpu(cpu) per_cpu(kstat, cpu) | ||
52 | #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) | ||
45 | 53 | ||
46 | extern unsigned long long nr_context_switches(void); | 54 | extern unsigned long long nr_context_switches(void); |
47 | 55 | ||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 699ff1499a8a..dbbe35ff93fc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -896,14 +896,14 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) | |||
896 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | 896 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
897 | static int irqtime_account_hi_update(void) | 897 | static int irqtime_account_hi_update(void) |
898 | { | 898 | { |
899 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 899 | u64 *cpustat = kcpustat_this_cpu->cpustat; |
900 | unsigned long flags; | 900 | unsigned long flags; |
901 | u64 latest_ns; | 901 | u64 latest_ns; |
902 | int ret = 0; | 902 | int ret = 0; |
903 | 903 | ||
904 | local_irq_save(flags); | 904 | local_irq_save(flags); |
905 | latest_ns = this_cpu_read(cpu_hardirq_time); | 905 | latest_ns = this_cpu_read(cpu_hardirq_time); |
906 | if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq)) | 906 | if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_IRQ])) |
907 | ret = 1; | 907 | ret = 1; |
908 | local_irq_restore(flags); | 908 | local_irq_restore(flags); |
909 | return ret; | 909 | return ret; |
@@ -911,14 +911,14 @@ static int irqtime_account_hi_update(void) | |||
911 | 911 | ||
912 | static int irqtime_account_si_update(void) | 912 | static int irqtime_account_si_update(void) |
913 | { | 913 | { |
914 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 914 | u64 *cpustat = kcpustat_this_cpu->cpustat; |
915 | unsigned long flags; | 915 | unsigned long flags; |
916 | u64 latest_ns; | 916 | u64 latest_ns; |
917 | int ret = 0; | 917 | int ret = 0; |
918 | 918 | ||
919 | local_irq_save(flags); | 919 | local_irq_save(flags); |
920 | latest_ns = this_cpu_read(cpu_softirq_time); | 920 | latest_ns = this_cpu_read(cpu_softirq_time); |
921 | if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq)) | 921 | if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_SOFTIRQ])) |
922 | ret = 1; | 922 | ret = 1; |
923 | local_irq_restore(flags); | 923 | local_irq_restore(flags); |
924 | return ret; | 924 | return ret; |
@@ -2500,8 +2500,10 @@ unlock: | |||
2500 | #endif | 2500 | #endif |
2501 | 2501 | ||
2502 | DEFINE_PER_CPU(struct kernel_stat, kstat); | 2502 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
2503 | DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); | ||
2503 | 2504 | ||
2504 | EXPORT_PER_CPU_SYMBOL(kstat); | 2505 | EXPORT_PER_CPU_SYMBOL(kstat); |
2506 | EXPORT_PER_CPU_SYMBOL(kernel_cpustat); | ||
2505 | 2507 | ||
2506 | /* | 2508 | /* |
2507 | * Return any ns on the sched_clock that have not yet been accounted in | 2509 | * Return any ns on the sched_clock that have not yet been accounted in |
@@ -2563,8 +2565,9 @@ unsigned long long task_sched_runtime(struct task_struct *p) | |||
2563 | void account_user_time(struct task_struct *p, cputime_t cputime, | 2565 | void account_user_time(struct task_struct *p, cputime_t cputime, |
2564 | cputime_t cputime_scaled) | 2566 | cputime_t cputime_scaled) |
2565 | { | 2567 | { |
2566 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 2568 | u64 *cpustat = kcpustat_this_cpu->cpustat; |
2567 | cputime64_t tmp; | 2569 | u64 tmp; |
2570 | int index; | ||
2568 | 2571 | ||
2569 | /* Add user time to process. */ | 2572 | /* Add user time to process. */ |
2570 | p->utime = cputime_add(p->utime, cputime); | 2573 | p->utime = cputime_add(p->utime, cputime); |
@@ -2573,10 +2576,9 @@ void account_user_time(struct task_struct *p, cputime_t cputime, | |||
2573 | 2576 | ||
2574 | /* Add user time to cpustat. */ | 2577 | /* Add user time to cpustat. */ |
2575 | tmp = cputime_to_cputime64(cputime); | 2578 | tmp = cputime_to_cputime64(cputime); |
2576 | if (TASK_NICE(p) > 0) | 2579 | |
2577 | cpustat->nice = cputime64_add(cpustat->nice, tmp); | 2580 | index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; |
2578 | else | 2581 | cpustat[index] += tmp; |
2579 | cpustat->user = cputime64_add(cpustat->user, tmp); | ||
2580 | 2582 | ||
2581 | cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); | 2583 | cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); |
2582 | /* Account for user time used */ | 2584 | /* Account for user time used */ |
@@ -2592,8 +2594,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime, | |||
2592 | static void account_guest_time(struct task_struct *p, cputime_t cputime, | 2594 | static void account_guest_time(struct task_struct *p, cputime_t cputime, |
2593 | cputime_t cputime_scaled) | 2595 | cputime_t cputime_scaled) |
2594 | { | 2596 | { |
2595 | cputime64_t tmp; | 2597 | u64 tmp; |
2596 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 2598 | u64 *cpustat = kcpustat_this_cpu->cpustat; |
2597 | 2599 | ||
2598 | tmp = cputime_to_cputime64(cputime); | 2600 | tmp = cputime_to_cputime64(cputime); |
2599 | 2601 | ||
@@ -2605,11 +2607,11 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime, | |||
2605 | 2607 | ||
2606 | /* Add guest time to cpustat. */ | 2608 | /* Add guest time to cpustat. */ |
2607 | if (TASK_NICE(p) > 0) { | 2609 | if (TASK_NICE(p) > 0) { |
2608 | cpustat->nice = cputime64_add(cpustat->nice, tmp); | 2610 | cpustat[CPUTIME_NICE] += tmp; |
2609 | cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); | 2611 | cpustat[CPUTIME_GUEST_NICE] += tmp; |
2610 | } else { | 2612 | } else { |
2611 | cpustat->user = cputime64_add(cpustat->user, tmp); | 2613 | cpustat[CPUTIME_USER] += tmp; |
2612 | cpustat->guest = cputime64_add(cpustat->guest, tmp); | 2614 | cpustat[CPUTIME_GUEST] += tmp; |
2613 | } | 2615 | } |
2614 | } | 2616 | } |
2615 | 2617 | ||
@@ -2622,9 +2624,10 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime, | |||
2622 | */ | 2624 | */ |
2623 | static inline | 2625 | static inline |
2624 | void __account_system_time(struct task_struct *p, cputime_t cputime, | 2626 | void __account_system_time(struct task_struct *p, cputime_t cputime, |
2625 | cputime_t cputime_scaled, cputime64_t *target_cputime64) | 2627 | cputime_t cputime_scaled, int index) |
2626 | { | 2628 | { |
2627 | cputime64_t tmp = cputime_to_cputime64(cputime); | 2629 | u64 tmp = cputime_to_cputime64(cputime); |
2630 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
2628 | 2631 | ||
2629 | /* Add system time to process. */ | 2632 | /* Add system time to process. */ |
2630 | p->stime = cputime_add(p->stime, cputime); | 2633 | p->stime = cputime_add(p->stime, cputime); |
@@ -2632,7 +2635,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime, | |||
2632 | account_group_system_time(p, cputime); | 2635 | account_group_system_time(p, cputime); |
2633 | 2636 | ||
2634 | /* Add system time to cpustat. */ | 2637 | /* Add system time to cpustat. */ |
2635 | *target_cputime64 = cputime64_add(*target_cputime64, tmp); | 2638 | cpustat[index] += tmp; |
2636 | cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); | 2639 | cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); |
2637 | 2640 | ||
2638 | /* Account for system time used */ | 2641 | /* Account for system time used */ |
@@ -2649,8 +2652,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime, | |||
2649 | void account_system_time(struct task_struct *p, int hardirq_offset, | 2652 | void account_system_time(struct task_struct *p, int hardirq_offset, |
2650 | cputime_t cputime, cputime_t cputime_scaled) | 2653 | cputime_t cputime, cputime_t cputime_scaled) |
2651 | { | 2654 | { |
2652 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 2655 | int index; |
2653 | cputime64_t *target_cputime64; | ||
2654 | 2656 | ||
2655 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { | 2657 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { |
2656 | account_guest_time(p, cputime, cputime_scaled); | 2658 | account_guest_time(p, cputime, cputime_scaled); |
@@ -2658,13 +2660,13 @@ void account_system_time(struct task_struct *p, int hardirq_offset, | |||
2658 | } | 2660 | } |
2659 | 2661 | ||
2660 | if (hardirq_count() - hardirq_offset) | 2662 | if (hardirq_count() - hardirq_offset) |
2661 | target_cputime64 = &cpustat->irq; | 2663 | index = CPUTIME_IRQ; |
2662 | else if (in_serving_softirq()) | 2664 | else if (in_serving_softirq()) |
2663 | target_cputime64 = &cpustat->softirq; | 2665 | index = CPUTIME_SOFTIRQ; |
2664 | else | 2666 | else |
2665 | target_cputime64 = &cpustat->system; | 2667 | index = CPUTIME_SYSTEM; |
2666 | 2668 | ||
2667 | __account_system_time(p, cputime, cputime_scaled, target_cputime64); | 2669 | __account_system_time(p, cputime, cputime_scaled, index); |
2668 | } | 2670 | } |
2669 | 2671 | ||
2670 | /* | 2672 | /* |
@@ -2673,10 +2675,10 @@ void account_system_time(struct task_struct *p, int hardirq_offset, | |||
2673 | */ | 2675 | */ |
2674 | void account_steal_time(cputime_t cputime) | 2676 | void account_steal_time(cputime_t cputime) |
2675 | { | 2677 | { |
2676 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 2678 | u64 *cpustat = kcpustat_this_cpu->cpustat; |
2677 | cputime64_t cputime64 = cputime_to_cputime64(cputime); | 2679 | u64 cputime64 = cputime_to_cputime64(cputime); |
2678 | 2680 | ||
2679 | cpustat->steal = cputime64_add(cpustat->steal, cputime64); | 2681 | cpustat[CPUTIME_STEAL] += cputime64; |
2680 | } | 2682 | } |
2681 | 2683 | ||
2682 | /* | 2684 | /* |
@@ -2685,14 +2687,14 @@ void account_steal_time(cputime_t cputime) | |||
2685 | */ | 2687 | */ |
2686 | void account_idle_time(cputime_t cputime) | 2688 | void account_idle_time(cputime_t cputime) |
2687 | { | 2689 | { |
2688 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 2690 | u64 *cpustat = kcpustat_this_cpu->cpustat; |
2689 | cputime64_t cputime64 = cputime_to_cputime64(cputime); | 2691 | u64 cputime64 = cputime_to_cputime64(cputime); |
2690 | struct rq *rq = this_rq(); | 2692 | struct rq *rq = this_rq(); |
2691 | 2693 | ||
2692 | if (atomic_read(&rq->nr_iowait) > 0) | 2694 | if (atomic_read(&rq->nr_iowait) > 0) |
2693 | cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); | 2695 | cpustat[CPUTIME_IOWAIT] += cputime64; |
2694 | else | 2696 | else |
2695 | cpustat->idle = cputime64_add(cpustat->idle, cputime64); | 2697 | cpustat[CPUTIME_IDLE] += cputime64; |
2696 | } | 2698 | } |
2697 | 2699 | ||
2698 | static __always_inline bool steal_account_process_tick(void) | 2700 | static __always_inline bool steal_account_process_tick(void) |
@@ -2742,16 +2744,16 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, | |||
2742 | struct rq *rq) | 2744 | struct rq *rq) |
2743 | { | 2745 | { |
2744 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); | 2746 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); |
2745 | cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy); | 2747 | u64 tmp = cputime_to_cputime64(cputime_one_jiffy); |
2746 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 2748 | u64 *cpustat = kcpustat_this_cpu->cpustat; |
2747 | 2749 | ||
2748 | if (steal_account_process_tick()) | 2750 | if (steal_account_process_tick()) |
2749 | return; | 2751 | return; |
2750 | 2752 | ||
2751 | if (irqtime_account_hi_update()) { | 2753 | if (irqtime_account_hi_update()) { |
2752 | cpustat->irq = cputime64_add(cpustat->irq, tmp); | 2754 | cpustat[CPUTIME_IRQ] += tmp; |
2753 | } else if (irqtime_account_si_update()) { | 2755 | } else if (irqtime_account_si_update()) { |
2754 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); | 2756 | cpustat[CPUTIME_SOFTIRQ] += tmp; |
2755 | } else if (this_cpu_ksoftirqd() == p) { | 2757 | } else if (this_cpu_ksoftirqd() == p) { |
2756 | /* | 2758 | /* |
2757 | * ksoftirqd time do not get accounted in cpu_softirq_time. | 2759 | * ksoftirqd time do not get accounted in cpu_softirq_time. |
@@ -2759,7 +2761,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, | |||
2759 | * Also, p->stime needs to be updated for ksoftirqd. | 2761 | * Also, p->stime needs to be updated for ksoftirqd. |
2760 | */ | 2762 | */ |
2761 | __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, | 2763 | __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, |
2762 | &cpustat->softirq); | 2764 | CPUTIME_SOFTIRQ); |
2763 | } else if (user_tick) { | 2765 | } else if (user_tick) { |
2764 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); | 2766 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); |
2765 | } else if (p == rq->idle) { | 2767 | } else if (p == rq->idle) { |
@@ -2768,7 +2770,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, | |||
2768 | account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); | 2770 | account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); |
2769 | } else { | 2771 | } else { |
2770 | __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, | 2772 | __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, |
2771 | &cpustat->system); | 2773 | CPUTIME_SYSTEM); |
2772 | } | 2774 | } |
2773 | } | 2775 | } |
2774 | 2776 | ||