Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  50
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      54
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c          5
-rw-r--r--  drivers/macintosh/rack-meter.c          14
4 files changed, 58 insertions, 65 deletions
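
The change repeated in every hunk below is mechanical: the cputime64_add()/cputime64_sub() helpers and the per-field kstat_cpu(cpu).cpustat structure are replaced by plain u64 arithmetic over the kcpustat_cpu(cpu).cpustat[] array indexed with the CPUTIME_* constants. As a reading aid, here is a minimal sketch of the resulting busy/idle accounting pattern, mirroring the converted get_cpu_idle_time_jiffy() in both governors; the function name example_idle_time_us() is illustrative only (not a symbol introduced by this patch), and the headers are assumed to provide the same declarations they do in this tree.

/* Sketch only: sum the non-idle cpustat buckets of one CPU and return
 * the derived idle time in microseconds, as the converted governors do. */
#include <linux/kernel_stat.h>	/* kcpustat_cpu(), CPUTIME_* indices */
#include <linux/jiffies.h>	/* get_jiffies_64(), jiffies_to_usecs() */

static u64 example_idle_time_us(unsigned int cpu, u64 *wall)
{
	u64 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	u64 busy_time, idle_time;

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;	/* plain u64 math, no cputime64_sub() */
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}
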
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c97b468ee9f7..235a340e81f2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -95,27 +95,26 @@ static struct dbs_tuners {
 	.freq_step = 5,
 };
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-							cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	cputime64_t idle_time;
-	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
 
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	idle_time = cur_wall_time - busy_time;
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);
+	return jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -272,7 +271,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
 	return count;
 }
@@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
+			u64 cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					j_dbs_info->prev_cpu_nice);
+			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+					j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
@@ -374,7 +373,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			cur_nice_jiffies = (unsigned long)
 					cputime64_to_jiffies64(cur_nice);
 
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
@@ -501,10 +500,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
+			if (dbs_tuners_ins.ignore_nice)
 				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
+						kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index fa8af4ebb1d6..3d679eee70a1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -119,27 +119,26 @@ static struct dbs_tuners {
 	.powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-							cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	cputime64_t idle_time;
-	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
 
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	idle_time = cur_wall_time - busy_time;
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);
+	return jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -345,7 +344,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
 	}
 	return count;
@@ -442,24 +441,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
-				j_dbs_info->prev_cpu_iowait);
+		iowait_time = (unsigned int)
+			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
 		j_dbs_info->prev_cpu_iowait = cur_iowait_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
+			u64 cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					 j_dbs_info->prev_cpu_nice);
+			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+					 j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
@@ -467,7 +466,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			cur_nice_jiffies = (unsigned long)
 					cputime64_to_jiffies64(cur_nice);
 
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
@@ -646,10 +645,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
+			if (dbs_tuners_ins.ignore_nice)
 				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
+						kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
 		this_dbs_info->cpu = cpu;
 		this_dbs_info->rate_mult = 1;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c5072a91e848..2a508edd768b 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -61,9 +61,8 @@ static int cpufreq_stats_update(unsigned int cpu)
 	spin_lock(&cpufreq_stats_lock);
 	stat = per_cpu(cpufreq_stats_table, cpu);
 	if (stat->time_in_state)
-		stat->time_in_state[stat->last_index] =
-			cputime64_add(stat->time_in_state[stat->last_index],
-			cputime_sub(cur_time, stat->last_time));
+		stat->time_in_state[stat->last_index] +=
+			cur_time - stat->last_time;
 	stat->last_time = cur_time;
 	spin_unlock(&cpufreq_stats_lock);
 	return 0;
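
The cpufreq_stats.c hunk above shrinks the time_in_state bookkeeping to a single += over u64 values. A hedged sketch of that accumulation step, with the structure abbreviated to the fields the hunk touches (the example_* names are illustrative; the real struct cpufreq_stats carries more state):

/* Sketch only: add the time elapsed since the last update to the slot of
 * the current frequency state, as cpufreq_stats_update() now does. */
struct example_stats {
	u64 *time_in_state;	/* one accumulator per frequency state */
	unsigned int last_index;
	u64 last_time;
};

static void example_stats_update(struct example_stats *stat, u64 cur_time)
{
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] += cur_time - stat->last_time;
	stat->last_time = cur_time;
}
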
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 2637c139777b..6dc26b61219b 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -81,13 +81,13 @@ static int rackmeter_ignore_nice;
  */
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 {
-	cputime64_t retval;
+	u64 retval;
 
-	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-			kstat_cpu(cpu).cpustat.iowait);
+	retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
+		kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
 
 	if (rackmeter_ignore_nice)
-		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+		retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
 	return retval;
 }
@@ -220,13 +220,11 @@ static void rackmeter_do_timer(struct work_struct *work)
 	int i, offset, load, cumm, pause;
 
 	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-	total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
-						  rcpu->prev_wall);
+	total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
 	rcpu->prev_wall = cur_jiffies;
 
 	total_idle_ticks = get_cpu_idle_time(cpu);
-	idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
-						  rcpu->prev_idle);
+	idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
 	rcpu->prev_idle = total_idle_ticks;
 
 	/* We do a very dumb calculation to update the LEDs for now,