author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2011-12-15 08:56:09 -0500
---|---|---
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2011-12-15 08:56:19 -0500
commit | 648616343cdbe904c585a6c12e323d3b3c72e46f (patch) |
tree | 514bce1b52663db4ab5662b637c764cf3c2ed1eb /drivers/cpufreq/cpufreq_conservative.c |
parent | 55b02d2f4445ad625213817a1736bf2884d32547 (diff) |
[S390] cputime: add sparse checking and cleanup
Make cputime_t and cputime64_t nocast to enable sparse checking to
detect incorrect use of cputime. Drop the cputime macros for simple
scalar operations. The conversion macros are still needed.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
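The sparse checking comes from marking the cputime typedefs __nocast in the arch cputime headers, which is not part of the hunks shown below. A minimal standalone sketch of the idea, assuming the kernel's usual __CHECKER__ handling of __nocast and using an illustrative helper name that is not a kernel function, looks roughly like this:

/* Minimal sketch only; the real definitions live in the compiler and
 * per-arch cputime headers, not here. */
#ifdef __CHECKER__			/* defined when sparse runs */
# define __nocast __attribute__((nocast))
#else
# define __nocast
#endif

typedef unsigned long long u64;
typedef u64 __nocast cputime64_t;	/* sparse-checked time type */

/* Illustrative helper (not a kernel function): plain arithmetic between
 * two cputime64_t values needs no cputime64_add() macro, because
 * operations on the same nocast type stay clean under sparse; what gets
 * flagged is implicitly mixing in a bare scalar. */
cputime64_t busy_sum(cputime64_t user, cputime64_t system)
{
	return user + system;
	/* return 42;  -- sparse would warn about an implicit cast here */
}

This is why the diff below can simply drop cputime64_add()/cputime64_sub() in favor of + and -, while conversion helpers such as jiffies_to_usecs() are kept.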
Diffstat (limited to 'drivers/cpufreq/cpufreq_conservative.c')
-rw-r--r-- | drivers/cpufreq/cpufreq_conservative.c | 29
1 files changed, 14 insertions, 15 deletions
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c97b468ee9f7..7f31a031c0b5 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -103,15 +103,14 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 	cputime64_t busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
-
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
-
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	busy_time = kstat_cpu(cpu).cpustat.user;
+	busy_time += kstat_cpu(cpu).cpustat.system;
+	busy_time += kstat_cpu(cpu).cpustat.irq;
+	busy_time += kstat_cpu(cpu).cpustat.softirq;
+	busy_time += kstat_cpu(cpu).cpustat.steal;
+	busy_time += kstat_cpu(cpu).cpustat.nice;
+
+	idle_time = cur_wall_time - busy_time;
 	if (wall)
 		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
@@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
 			cputime64_t cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					j_dbs_info->prev_cpu_nice);
+			cur_nice = kstat_cpu(j).cpustat.nice -
+					j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys