author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-08-07 17:11:43 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-08-07 17:11:43 -0400
commit		1133bfa6dcf034639486982629d29472737d5e96 (patch)
tree		8ab074d3dc96defa0e4b2032ecfc9cf38b86caef
parent		d8d3b4711297e101bbad826474013edbe342c333 (diff)
parent		cffe4e0e7413eb29fb8bd035c8b12b33a4b8522a (diff)
Merge branch 'pm-cpufreq-ondemand' into pm-cpufreq
* pm-cpufreq:
  cpufreq: Remove unused function __cpufreq_driver_getavg()
  cpufreq: Remove unused APERF/MPERF support
  cpufreq: ondemand: Change the calculation of target frequency
-rw-r--r--  arch/x86/include/asm/processor.h    |  29
-rw-r--r--  drivers/cpufreq/Makefile            |   2
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c      |   5
-rw-r--r--  drivers/cpufreq/cpufreq.c           |  12
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c  |  10
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h  |   1
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  |  39
-rw-r--r--  drivers/cpufreq/mperf.c             |  51
-rw-r--r--  drivers/cpufreq/mperf.h             |   9
-rw-r--r--  include/linux/cpufreq.h             |   6
10 files changed, 9 insertions(+), 155 deletions(-)
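
The functional change in this merge is the ondemand target-frequency calculation: instead of scaling the sampled load by an average measured frequency and dividing by adj_up_threshold, the governor now maps the plain load percentage onto cpuinfo.max_freq. A minimal user-space sketch of that mapping (the 2.4 GHz maximum, the 60% load, and the 95% MICRO_FREQUENCY_UP_THRESHOLD are example values, not taken literally from this diff):

    /*
     * Illustrative user-space sketch (not kernel code): how the ondemand
     * governor picks a target frequency after this series.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned int max_freq = 2400000;    /* kHz, hypothetical cpuinfo.max_freq */
        unsigned int load = 60;             /* percent busy over the sample window */
        unsigned int up_threshold = 95;     /* MICRO_FREQUENCY_UP_THRESHOLD */

        if (load > up_threshold) {
            /* Fully busy: jump straight to the maximum frequency. */
            printf("jump to max: %u kHz\n", max_freq);
        } else {
            /* New scheme: next frequency proportional to load. */
            unsigned int freq_next = load * max_freq / 100;
            printf("proportional target: %u kHz\n", freq_next);   /* prints 1440000 */
        }
        return 0;
    }
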
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 24cf5aefb704..4f4a3d98c170 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -942,35 +942,6 @@ extern int set_tsc_mode(unsigned int val);
 
 extern u16 amd_get_nb_id(int cpu);
 
-struct aperfmperf {
-        u64 aperf, mperf;
-};
-
-static inline void get_aperfmperf(struct aperfmperf *am)
-{
-        WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
-
-        rdmsrl(MSR_IA32_APERF, am->aperf);
-        rdmsrl(MSR_IA32_MPERF, am->mperf);
-}
-
-#define APERFMPERF_SHIFT 10
-
-static inline
-unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
-                                    struct aperfmperf *new)
-{
-        u64 aperf = new->aperf - old->aperf;
-        u64 mperf = new->mperf - old->mperf;
-        unsigned long ratio = aperf;
-
-        mperf >>= APERFMPERF_SHIFT;
-        if (mperf)
-                ratio = div64_u64(aperf, mperf);
-
-        return ratio;
-}
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
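
For reference, the deleted calc_aperfmperf_ratio() returns APERF/MPERF as a fixed-point fraction scaled by 2^APERFMPERF_SHIFT. A stand-alone sketch of the same arithmetic with made-up counter deltas (stdint types replace the kernel's u64):

    /* Sketch of the removed ratio calculation; the deltas are invented. */
    #include <stdio.h>
    #include <stdint.h>

    #define APERFMPERF_SHIFT 10

    static unsigned long calc_ratio(uint64_t aperf_delta, uint64_t mperf_delta)
    {
        unsigned long ratio = aperf_delta;

        mperf_delta >>= APERFMPERF_SHIFT;
        if (mperf_delta)
            ratio = aperf_delta / mperf_delta;

        return ratio;
    }

    int main(void)
    {
        /* e.g. the CPU ran at about half its maximum frequency over the window */
        uint64_t aperf_delta = 1000000, mperf_delta = 2000000;
        unsigned long ratio = calc_ratio(aperf_delta, mperf_delta);

        /* ratio is scaled by 2^APERFMPERF_SHIFT (1024): 512 here, i.e. ~0.50 */
        printf("ratio = %lu (~%.2f of max)\n", ratio,
               (double)ratio / (1 << APERFMPERF_SHIFT));
        return 0;
    }
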
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index d345b5a7aa71..ad5866c2ada0 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
 # powernow-k8 can load then. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o mperf.o
+obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)		+= pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)		+= powernow-k6.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 39264020b88a..e673670d2321 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -45,7 +45,6 @@
 #include <asm/msr.h>
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
-#include "mperf.h"
 
 MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
@@ -861,10 +860,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         /* notify BIOS that we exist */
         acpi_processor_notify_smm(THIS_MODULE);
 
-        /* Check for APERF/MPERF support in hardware */
-        if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-                acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
-
         pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
         for (i = 0; i < perf->state_count; i++)
                 pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1faf320a5038..e34bd94e12b4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1670,18 +1670,6 @@ fail:
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
-{
-        if (cpufreq_disabled())
-                return 0;
-
-        if (!cpufreq_driver->getavg)
-                return 0;
-
-        return cpufreq_driver->getavg(policy, cpu);
-}
-EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
-
 /*
  * when "event" is CPUFREQ_GOV_LIMITS
  */
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 7b839a8db2a7..7409dbd1d897 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -53,7 +53,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
         policy = cdbs->cur_policy;
 
-        /* Get Absolute Load (in terms of freq for ondemand gov) */
+        /* Get Absolute Load */
         for_each_cpu(j, policy->cpus) {
                 struct cpu_dbs_common_info *j_cdbs;
                 u64 cur_wall_time, cur_idle_time;
@@ -104,14 +104,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
                 load = 100 * (wall_time - idle_time) / wall_time;
 
-                if (dbs_data->cdata->governor == GOV_ONDEMAND) {
-                        int freq_avg = __cpufreq_driver_getavg(policy, j);
-                        if (freq_avg <= 0)
-                                freq_avg = policy->cur;
-
-                        load *= freq_avg;
-                }
-
                 if (load > max_load)
                         max_load = load;
         }
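
With the getavg hook gone, dbs_check_cpu() hands the governors a plain busy percentage rather than a frequency-weighted figure. A tiny worked example of that number (the tick values are invented):

    /* Sketch of the per-CPU load figure after this change. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t wall_time = 10000;   /* time elapsed in the sample window */
        uint64_t idle_time = 2500;    /* portion of that window spent idle */

        unsigned int load = 100 * (wall_time - idle_time) / wall_time;

        printf("load = %u%%\n", load);   /* prints 75% */
        return 0;
    }
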
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 6663ec3b3056..0e0dd4c82020 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -169,7 +169,6 @@ struct od_dbs_tuners {
         unsigned int sampling_rate;
         unsigned int sampling_down_factor;
         unsigned int up_threshold;
-        unsigned int adj_up_threshold;
         unsigned int powersave_bias;
         unsigned int io_is_busy;
 };
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 93eb5cbcc1f6..a3c5574f9b3a 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -29,11 +29,9 @@
29#include "cpufreq_governor.h" 29#include "cpufreq_governor.h"
30 30
31/* On-demand governor macros */ 31/* On-demand governor macros */
32#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
33#define DEF_FREQUENCY_UP_THRESHOLD (80) 32#define DEF_FREQUENCY_UP_THRESHOLD (80)
34#define DEF_SAMPLING_DOWN_FACTOR (1) 33#define DEF_SAMPLING_DOWN_FACTOR (1)
35#define MAX_SAMPLING_DOWN_FACTOR (100000) 34#define MAX_SAMPLING_DOWN_FACTOR (100000)
36#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
37#define MICRO_FREQUENCY_UP_THRESHOLD (95) 35#define MICRO_FREQUENCY_UP_THRESHOLD (95)
38#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) 36#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
39#define MIN_FREQUENCY_UP_THRESHOLD (11) 37#define MIN_FREQUENCY_UP_THRESHOLD (11)
@@ -161,14 +159,10 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
 
 /*
  * Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency. Every sampling_rate, we look
- * for the lowest frequency which can sustain the load while keeping idle time
- * over 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of current frequency
+ * (default), then we try to increase frequency. Else, we adjust the frequency
+ * proportional to load.
  */
-static void od_check_cpu(int cpu, unsigned int load_freq)
+static void od_check_cpu(int cpu, unsigned int load)
 {
         struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
         struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
@@ -178,29 +172,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
         dbs_info->freq_lo = 0;
 
         /* Check for frequency increase */
-        if (load_freq > od_tuners->up_threshold * policy->cur) {
+        if (load > od_tuners->up_threshold) {
                 /* If switching to max speed, apply sampling_down_factor */
                 if (policy->cur < policy->max)
                         dbs_info->rate_mult =
                                 od_tuners->sampling_down_factor;
                 dbs_freq_increase(policy, policy->max);
                 return;
-        }
-
-        /* Check for frequency decrease */
-        /* if we cannot reduce the frequency anymore, break out early */
-        if (policy->cur == policy->min)
-                return;
-
-        /*
-         * The optimal frequency is the frequency that is the lowest that can
-         * support the current CPU usage without triggering the up policy. To be
-         * safe, we focus 10 points under the threshold.
-         */
-        if (load_freq < od_tuners->adj_up_threshold
-                        * policy->cur) {
+        } else {
+                /* Calculate the next frequency proportional to load */
                 unsigned int freq_next;
-                freq_next = load_freq / od_tuners->adj_up_threshold;
+                freq_next = load * policy->cpuinfo.max_freq / 100;
 
                 /* No longer fully busy, reset rate_mult */
                 dbs_info->rate_mult = 1;
@@ -374,9 +356,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
                         input < MIN_FREQUENCY_UP_THRESHOLD) {
                 return -EINVAL;
         }
-        /* Calculate the new adj_up_threshold */
-        od_tuners->adj_up_threshold += input;
-        od_tuners->adj_up_threshold -= od_tuners->up_threshold;
 
         od_tuners->up_threshold = input;
         return count;
@@ -525,8 +504,6 @@ static int od_init(struct dbs_data *dbs_data)
         if (idle_time != -1ULL) {
                 /* Idle micro accounting is supported. Use finer thresholds */
                 tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
-                tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
-                        MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
                 /*
                  * In nohz/micro accounting case we set the minimum frequency
                  * not depending on HZ, but fixed (very low). The deferred
@@ -535,8 +512,6 @@ static int od_init(struct dbs_data *dbs_data)
                 dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
         } else {
                 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-                tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
-                        DEF_FREQUENCY_DOWN_DIFFERENTIAL;
 
                 /* For correct statistics, we need 10 ticks for each measure */
                 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
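
After od_check_cpu() computes freq_next, cpufreq still has to settle on a real P-state; presumably the request is resolved against the driver's frequency table in the usual CPUFREQ_RELATION_L sense (lowest entry at or above the target). A sketch under that assumption, with an invented table:

    /*
     * Illustrative only: how a proportional target might be snapped to a
     * discrete frequency table. The table and the rounding rule are
     * assumptions, not part of this diff.
     */
    #include <stdio.h>

    int main(void)
    {
        const unsigned int table_khz[] = { 800000, 1600000, 2000000, 2400000 };
        const unsigned int n = sizeof(table_khz) / sizeof(table_khz[0]);
        unsigned int freq_next = 1440000;        /* 60% load * 2.4 GHz / 100 */
        unsigned int pick = table_khz[n - 1];

        for (unsigned int i = 0; i < n; i++) {
            if (table_khz[i] >= freq_next) {
                pick = table_khz[i];             /* lowest entry >= target */
                break;
            }
        }
        printf("target %u kHz -> %u kHz\n", freq_next, pick);   /* 1600000 */
        return 0;
    }
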
diff --git a/drivers/cpufreq/mperf.c b/drivers/cpufreq/mperf.c
deleted file mode 100644
index 911e193018ae..000000000000
--- a/drivers/cpufreq/mperf.c
+++ /dev/null
@@ -1,51 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/slab.h>
-
-#include "mperf.h"
-
-static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
-
-/* Called via smp_call_function_single(), on the target CPU */
-static void read_measured_perf_ctrs(void *_cur)
-{
-        struct aperfmperf *am = _cur;
-
-        get_aperfmperf(am);
-}
-
-/*
- * Return the measured active (C0) frequency on this CPU since last call
- * to this function.
- * Input: cpu number
- * Return: Average CPU frequency in terms of max frequency (zero on error)
- *
- * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
- * over a period of time, while CPU is in C0 state.
- * IA32_MPERF counts at the rate of max advertised frequency
- * IA32_APERF counts at the rate of actual CPU frequency
- * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
- * no meaning should be associated with absolute values of these MSRs.
- */
-unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
-                                        unsigned int cpu)
-{
-        struct aperfmperf perf;
-        unsigned long ratio;
-        unsigned int retval;
-
-        if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
-                return 0;
-
-        ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
-        per_cpu(acfreq_old_perf, cpu) = perf;
-
-        retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
-
-        return retval;
-}
-EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
-MODULE_LICENSE("GPL");
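
The deleted cpufreq_get_measured_perf() scaled policy->cpuinfo.max_freq by the fixed-point APERF/MPERF ratio. A sketch of that final step, reusing the example ratio of 512 (~0.5) from the processor.h note above; the 2.4 GHz maximum is made up:

    /* Sketch of the average-frequency calculation the removed helper performed. */
    #include <stdio.h>

    #define APERFMPERF_SHIFT 10

    int main(void)
    {
        unsigned int max_freq = 2400000;   /* kHz, hypothetical cpuinfo.max_freq */
        unsigned long ratio = 512;         /* ~0.5 of max, scaled by 2^10 */

        unsigned int avg_freq =
                ((unsigned long long)max_freq * ratio) >> APERFMPERF_SHIFT;

        printf("measured average: %u kHz\n", avg_freq);   /* prints 1200000 */
        return 0;
    }
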
diff --git a/drivers/cpufreq/mperf.h b/drivers/cpufreq/mperf.h
deleted file mode 100644
index 5dbf2950dc22..000000000000
--- a/drivers/cpufreq/mperf.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * (c) 2010 Advanced Micro Devices, Inc.
- * Your use of this code is subject to the terms and conditions of the
- * GNU general public license version 2. See "COPYING" or
- * http://www.gnu.org/licenses/gpl.html
- */
-
-unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
-                                        unsigned int cpu);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 90d5a15120d5..e1fd215e16c9 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -216,10 +216,6 @@ extern int cpufreq_driver_target(struct cpufreq_policy *policy,
 extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
                                    unsigned int target_freq,
                                    unsigned int relation);
-
-extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
-                                   unsigned int cpu);
-
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 
@@ -258,8 +254,6 @@ struct cpufreq_driver {
         unsigned int    (*get)  (unsigned int cpu);
 
         /* optional */
-        unsigned int    (*getavg)       (struct cpufreq_policy *policy,
-                                         unsigned int cpu);
         int     (*bios_limit)   (int cpu, unsigned int *limit);
 
         int     (*exit)         (struct cpufreq_policy *policy);