 arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 28
 drivers/cpufreq/cpufreq_ondemand.c        | 47
 2 files changed, 47 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 5c28b37dea11..fb039cd345d8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -939,10 +939,25 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
+static int get_transition_latency(struct powernow_k8_data *data)
+{
+	int max_latency = 0;
+	int i;
+	for (i = 0; i < data->acpi_data.state_count; i++) {
+		int cur_latency = data->acpi_data.states[i].transition_latency
+			+ data->acpi_data.states[i].bus_master_latency;
+		if (cur_latency > max_latency)
+			max_latency = cur_latency;
+	}
+	/* value in usecs, needs to be in nanoseconds */
+	return 1000 * max_latency;
+}
+
 #else
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
+static int get_transition_latency(struct powernow_k8_data *data) { return 0; }
 #endif /* CONFIG_X86_POWERNOW_K8_ACPI */
 
 /* Take a frequency, and issue the fid/vid transition command */
@@ -1173,7 +1188,13 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		if (rc) {
 			goto err_out;
 		}
-	}
+		/* Take a crude guess here.
+		 * That guess was in microseconds, so multiply with 1000 */
+		pol->cpuinfo.transition_latency = (
+			((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
+			((1 << data->irt) * 30)) * 1000;
+	} else /* ACPI _PSS objects available */
+		pol->cpuinfo.transition_latency = get_transition_latency(data);
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
@@ -1204,11 +1225,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
 	data->available_cores = pol->cpus;
 
-	/* Take a crude guess here.
-	 * That guess was in microseconds, so multiply with 1000 */
-	pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
-	    + (3 * (1 << data->irt) * 10)) * 1000;
-
 	if (cpu_family == CPU_HW_PSTATE)
 		pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
 	else
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6a2b036c9389..6f45b1658a67 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -117,11 +117,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
 	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
 	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-
-	if (!dbs_tuners_ins.ignore_nice) {
-		busy_time = cputime64_add(busy_time,
-				kstat_cpu(cpu).cpustat.nice);
-	}
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
 
 	idle_time = cputime64_sub(cur_wall_time, busy_time);
 	if (wall)
@@ -137,23 +133,6 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
 	if (idle_time == -1ULL)
 		return get_cpu_idle_time_jiffy(cpu, wall);
 
-	if (dbs_tuners_ins.ignore_nice) {
-		cputime64_t cur_nice;
-		unsigned long cur_nice_jiffies;
-		struct cpu_dbs_info_s *dbs_info;
-
-		dbs_info = &per_cpu(cpu_dbs_info, cpu);
-		cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
-				dbs_info->prev_cpu_nice);
-		/*
-		 * Assumption: nice time between sampling periods will be
-		 * less than 2^32 jiffies for 32 bit sys
-		 */
-		cur_nice_jiffies = (unsigned long)
-				cputime64_to_jiffies64(cur_nice);
-		dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
-		return idle_time + jiffies_to_usecs(cur_nice_jiffies);
-	}
 	return idle_time;
 }
 
@@ -319,6 +298,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		dbs_info = &per_cpu(cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
+		if (dbs_tuners_ins.ignore_nice)
+			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+
 	}
 	mutex_unlock(&dbs_mutex);
 
@@ -419,6 +401,23 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 				j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
+		if (dbs_tuners_ins.ignore_nice) {
+			cputime64_t cur_nice;
+			unsigned long cur_nice_jiffies;
+
+			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+					j_dbs_info->prev_cpu_nice);
+			/*
+			 * Assumption: nice time between sampling periods will
+			 * be less than 2^32 jiffies for 32 bit sys
+			 */
+			cur_nice_jiffies = (unsigned long)
+					cputime64_to_jiffies64(cur_nice);
+
+			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			idle_time += jiffies_to_usecs(cur_nice_jiffies);
+		}
+
 		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
 
@@ -575,6 +574,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
+			if (dbs_tuners_ins.ignore_nice) {
+				j_dbs_info->prev_cpu_nice =
+						kstat_cpu(j).cpustat.nice;
+			}
 		}
 		this_dbs_info->cpu = cpu;
 		/*