aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2014-11-17 19:22:29 -0500
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2014-11-17 19:22:29 -0500
commitbd2a0f6754f18f801ed1e490bf678fc3be013eca (patch)
tree93c1e594bff4a5cbd553d2df428cefa8e28f42e3
parentfc14f9c1272f62c3e8d01300f52467c0d9af50f9 (diff)
parent7e7e8fe69820c6fa31395dbbd8e348e3c69cd2a9 (diff)
Merge back cpufreq material for 3.19-rc1.
-rw-r--r--Documentation/cpu-freq/intel-pstate.txt37
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--arch/x86/include/asm/cpufeature.h5
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h41
-rw-r--r--arch/x86/kernel/cpu/scattered.c5
-rw-r--r--drivers/cpufreq/Kconfig11
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c9
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/intel_pstate.c101
-rw-r--r--drivers/cpufreq/ls1x-cpufreq.c223
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c7
12 files changed, 430 insertions, 19 deletions
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index a69ffe1d54d5..765d7fc0e692 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -1,17 +1,28 @@
1Intel P-state driver 1Intel P-state driver
2-------------------- 2--------------------
3 3
4This driver implements a scaling driver with an internal governor for 4This driver provides an interface to control the P state selection for
5Intel Core processors. The driver follows the same model as the 5SandyBridge+ Intel processors. The driver can operate two different
6Transmeta scaling driver (longrun.c) and implements the setpolicy() 6modes based on the processor model legacy and Hardware P state (HWP)
7instead of target(). Scaling drivers that implement setpolicy() are 7mode.
8assumed to implement internal governors by the cpufreq core. All the 8
9logic for selecting the current P state is contained within the 9In legacy mode the driver implements a scaling driver with an internal
10driver; no external governor is used by the cpufreq core. 10governor for Intel Core processors. The driver follows the same model
11 11as the Transmeta scaling driver (longrun.c) and implements the
12Intel SandyBridge+ processors are supported. 12setpolicy() instead of target(). Scaling drivers that implement
13 13setpolicy() are assumed to implement internal governors by the cpufreq
14New sysfs files for controlling P state selection have been added to 14core. All the logic for selecting the current P state is contained
15within the driver; no external governor is used by the cpufreq core.
16
17In HWP mode P state selection is implemented in the processor
18itself. The driver provides the interfaces between the cpufreq core and
19the processor to control P state selection based on user preferences
20and reporting frequency to the cpufreq core. In this mode the
21internal governor code is disabled.
22
23In addition to the interfaces provided by the cpufreq core for
24controlling frequency the driver provides sysfs files for
25controlling P state selection. These files have been added to
15/sys/devices/system/cpu/intel_pstate/ 26/sys/devices/system/cpu/intel_pstate/
16 27
17 max_perf_pct: limits the maximum P state that will be requested by 28 max_perf_pct: limits the maximum P state that will be requested by
@@ -33,7 +44,9 @@ frequency is fiction for Intel Core processors. Even if the scaling
33driver selects a single P state the actual frequency the processor 44driver selects a single P state the actual frequency the processor
34will run at is selected by the processor itself. 45will run at is selected by the processor itself.
35 46
36New debugfs files have also been added to /sys/kernel/debug/pstate_snb/ 47For legacy mode debugfs files have also been added to allow tuning of
48the internal governor algorithm. These files are located at
49/sys/kernel/debug/pstate_snb/. These files are NOT present in HWP mode.
37 50
38 deadband 51 deadband
39 d_gain_pct 52 d_gain_pct
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 479f33204a37..5fdf714f40a0 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1446,6 +1446,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1446 disable 1446 disable
1447 Do not enable intel_pstate as the default 1447 Do not enable intel_pstate as the default
1448 scaling driver for the supported processors 1448 scaling driver for the supported processors
1449 no_hwp
1450 Do not enable hardware P state control (HWP)
1451 if available.
1449 1452
1450 intremap= [X86-64, Intel-IOMMU] 1453 intremap= [X86-64, Intel-IOMMU]
1451 on enable Interrupt Remapping (default) 1454 on enable Interrupt Remapping (default)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0bb1335313b2..aede2c347bde 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -189,6 +189,11 @@
189#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ 189#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
190#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 190#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
191#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 191#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
192#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
193#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
194#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
195#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
196#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
192 197
193/* Virtualization flags: Linux defined, word 8 */ 198/* Virtualization flags: Linux defined, word 8 */
194#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 199#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index e21331ce368f..62838e54947d 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -152,6 +152,45 @@
152#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 152#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
153#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 153#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
154 154
155/* Hardware P state interface */
156#define MSR_PPERF 0x0000064e
157#define MSR_PERF_LIMIT_REASONS 0x0000064f
158#define MSR_PM_ENABLE 0x00000770
159#define MSR_HWP_CAPABILITIES 0x00000771
160#define MSR_HWP_REQUEST_PKG 0x00000772
161#define MSR_HWP_INTERRUPT 0x00000773
162#define MSR_HWP_REQUEST 0x00000774
163#define MSR_HWP_STATUS 0x00000777
164
165/* CPUID.6.EAX */
166#define HWP_BASE_BIT (1<<7)
167#define HWP_NOTIFICATIONS_BIT (1<<8)
168#define HWP_ACTIVITY_WINDOW_BIT (1<<9)
169#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10)
170#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11)
171
172/* IA32_HWP_CAPABILITIES */
173#define HWP_HIGHEST_PERF(x) (x & 0xff)
174#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8)
175#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16)
176#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24)
177
178/* IA32_HWP_REQUEST */
179#define HWP_MIN_PERF(x) (x & 0xff)
180#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
181#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16)
182#define HWP_ENERGY_PERF_PREFERENCE(x) ((x & 0xff) << 24)
183#define HWP_ACTIVITY_WINDOW(x) ((x & 0xff3) << 32)
184#define HWP_PACKAGE_CONTROL(x) ((x & 0x1) << 42)
185
186/* IA32_HWP_STATUS */
187#define HWP_GUARANTEED_CHANGE(x) (x & 0x1)
188#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4)
189
190/* IA32_HWP_INTERRUPT */
191#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1)
192#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2)
193
155#define MSR_AMD64_MC0_MASK 0xc0010044 194#define MSR_AMD64_MC0_MASK 0xc0010044
156 195
157#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) 196#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
@@ -345,6 +384,8 @@
345 384
346#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 385#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
347 386
387#define MSR_MISC_PWR_MGMT 0x000001aa
388
348#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 389#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
349#define ENERGY_PERF_BIAS_PERFORMANCE 0 390#define ENERGY_PERF_BIAS_PERFORMANCE 0
350#define ENERGY_PERF_BIAS_NORMAL 6 391#define ENERGY_PERF_BIAS_NORMAL 6
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 4a8013d55947..60639093d536 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -36,6 +36,11 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, 36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, 37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
38 { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, 38 { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
39 { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 },
40 { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 },
41 { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
42 { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
43 { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
39 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, 44 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
40 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, 45 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
41 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, 46 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 3489f8f5fada..4de4dfae4ccc 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -63,7 +63,6 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
63 63
64config CPU_FREQ_DEFAULT_GOV_POWERSAVE 64config CPU_FREQ_DEFAULT_GOV_POWERSAVE
65 bool "powersave" 65 bool "powersave"
66 depends on EXPERT
67 select CPU_FREQ_GOV_POWERSAVE 66 select CPU_FREQ_GOV_POWERSAVE
68 help 67 help
69 Use the CPUFreq governor 'powersave' as default. This sets 68 Use the CPUFreq governor 'powersave' as default. This sets
@@ -250,6 +249,16 @@ config LOONGSON2_CPUFREQ
250 249
251 If in doubt, say N. 250 If in doubt, say N.
252 251
252config LOONGSON1_CPUFREQ
253 tristate "Loongson1 CPUFreq Driver"
254 help
255 This option adds a CPUFreq driver for loongson1 processors which
256 support software configurable cpu frequency.
257
258 For details, take a look at <file:Documentation/cpu-freq/>.
259
260 If in doubt, say N.
261
253endmenu 262endmenu
254 263
255menu "PowerPC CPU frequency scaling drivers" 264menu "PowerPC CPU frequency scaling drivers"
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 40c53dc1937e..215e447abec6 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
98obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o 98obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
99obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o 99obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
100obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o 100obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
101obj-$(CONFIG_LOONGSON1_CPUFREQ) += ls1x-cpufreq.o
101obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o 102obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
102obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o 103obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
103obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o 104obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f657c571b18e..8cba13df5f28 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -58,6 +58,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
58 old_freq = clk_get_rate(cpu_clk) / 1000; 58 old_freq = clk_get_rate(cpu_clk) / 1000;
59 59
60 if (!IS_ERR(cpu_reg)) { 60 if (!IS_ERR(cpu_reg)) {
61 unsigned long opp_freq;
62
61 rcu_read_lock(); 63 rcu_read_lock();
62 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); 64 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
63 if (IS_ERR(opp)) { 65 if (IS_ERR(opp)) {
@@ -67,13 +69,16 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
67 return PTR_ERR(opp); 69 return PTR_ERR(opp);
68 } 70 }
69 volt = dev_pm_opp_get_voltage(opp); 71 volt = dev_pm_opp_get_voltage(opp);
72 opp_freq = dev_pm_opp_get_freq(opp);
70 rcu_read_unlock(); 73 rcu_read_unlock();
71 tol = volt * priv->voltage_tolerance / 100; 74 tol = volt * priv->voltage_tolerance / 100;
72 volt_old = regulator_get_voltage(cpu_reg); 75 volt_old = regulator_get_voltage(cpu_reg);
76 dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
77 opp_freq / 1000, volt);
73 } 78 }
74 79
75 dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", 80 dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
76 old_freq / 1000, volt_old ? volt_old / 1000 : -1, 81 old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
77 new_freq / 1000, volt ? volt / 1000 : -1); 82 new_freq / 1000, volt ? volt / 1000 : -1);
78 83
79 /* scaling up? scale voltage before frequency */ 84 /* scaling up? scale voltage before frequency */
@@ -89,7 +94,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
89 ret = clk_set_rate(cpu_clk, freq_exact); 94 ret = clk_set_rate(cpu_clk, freq_exact);
90 if (ret) { 95 if (ret) {
91 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); 96 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
92 if (!IS_ERR(cpu_reg)) 97 if (!IS_ERR(cpu_reg) && volt_old > 0)
93 regulator_set_voltage_tol(cpu_reg, volt_old, tol); 98 regulator_set_voltage_tol(cpu_reg, volt_old, tol);
94 return ret; 99 return ret;
95 } 100 }
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 4473eba1d6b0..c9701e9e53e4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -535,7 +535,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
535static ssize_t store_##file_name \ 535static ssize_t store_##file_name \
536(struct cpufreq_policy *policy, const char *buf, size_t count) \ 536(struct cpufreq_policy *policy, const char *buf, size_t count) \
537{ \ 537{ \
538 int ret; \ 538 int ret, temp; \
539 struct cpufreq_policy new_policy; \ 539 struct cpufreq_policy new_policy; \
540 \ 540 \
541 ret = cpufreq_get_policy(&new_policy, policy->cpu); \ 541 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -546,8 +546,10 @@ static ssize_t store_##file_name \
546 if (ret != 1) \ 546 if (ret != 1) \
547 return -EINVAL; \ 547 return -EINVAL; \
548 \ 548 \
549 temp = new_policy.object; \
549 ret = cpufreq_set_policy(policy, &new_policy); \ 550 ret = cpufreq_set_policy(policy, &new_policy); \
550 policy->user_policy.object = policy->object; \ 551 if (!ret) \
552 policy->user_policy.object = temp; \
551 \ 553 \
552 return ret ? ret : count; \ 554 return ret ? ret : count; \
553} 555}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 27bb6d3877ed..ab2e100a1807 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -137,6 +137,7 @@ struct cpu_defaults {
137 137
138static struct pstate_adjust_policy pid_params; 138static struct pstate_adjust_policy pid_params;
139static struct pstate_funcs pstate_funcs; 139static struct pstate_funcs pstate_funcs;
140static int hwp_active;
140 141
141struct perf_limits { 142struct perf_limits {
142 int no_turbo; 143 int no_turbo;
@@ -244,6 +245,34 @@ static inline void update_turbo_state(void)
244 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 245 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
245} 246}
246 247
248#define PCT_TO_HWP(x) (x * 255 / 100)
249static void intel_pstate_hwp_set(void)
250{
251 int min, max, cpu;
252 u64 value, freq;
253
254 get_online_cpus();
255
256 for_each_online_cpu(cpu) {
257 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
258 min = PCT_TO_HWP(limits.min_perf_pct);
259 value &= ~HWP_MIN_PERF(~0L);
260 value |= HWP_MIN_PERF(min);
261
262 max = PCT_TO_HWP(limits.max_perf_pct);
263 if (limits.no_turbo) {
264 rdmsrl( MSR_HWP_CAPABILITIES, freq);
265 max = HWP_GUARANTEED_PERF(freq);
266 }
267
268 value &= ~HWP_MAX_PERF(~0L);
269 value |= HWP_MAX_PERF(max);
270 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
271 }
272
273 put_online_cpus();
274}
275
247/************************** debugfs begin ************************/ 276/************************** debugfs begin ************************/
248static int pid_param_set(void *data, u64 val) 277static int pid_param_set(void *data, u64 val)
249{ 278{
@@ -279,6 +308,8 @@ static void __init intel_pstate_debug_expose_params(void)
279 struct dentry *debugfs_parent; 308 struct dentry *debugfs_parent;
280 int i = 0; 309 int i = 0;
281 310
311 if (hwp_active)
312 return;
282 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 313 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
283 if (IS_ERR_OR_NULL(debugfs_parent)) 314 if (IS_ERR_OR_NULL(debugfs_parent))
284 return; 315 return;
@@ -329,8 +360,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
329 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 360 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
330 return -EPERM; 361 return -EPERM;
331 } 362 }
363
332 limits.no_turbo = clamp_t(int, input, 0, 1); 364 limits.no_turbo = clamp_t(int, input, 0, 1);
333 365
366 if (hwp_active)
367 intel_pstate_hwp_set();
368
334 return count; 369 return count;
335} 370}
336 371
@@ -348,6 +383,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
348 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 383 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
349 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 384 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
350 385
386 if (hwp_active)
387 intel_pstate_hwp_set();
351 return count; 388 return count;
352} 389}
353 390
@@ -363,6 +400,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
363 limits.min_perf_pct = clamp_t(int, input, 0 , 100); 400 limits.min_perf_pct = clamp_t(int, input, 0 , 100);
364 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 401 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
365 402
403 if (hwp_active)
404 intel_pstate_hwp_set();
366 return count; 405 return count;
367} 406}
368 407
@@ -395,8 +434,16 @@ static void __init intel_pstate_sysfs_expose_params(void)
395 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 434 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
396 BUG_ON(rc); 435 BUG_ON(rc);
397} 436}
398
399/************************** sysfs end ************************/ 437/************************** sysfs end ************************/
438
439static void intel_pstate_hwp_enable(void)
440{
441 hwp_active++;
442 pr_info("intel_pstate HWP enabled\n");
443
444 wrmsrl( MSR_PM_ENABLE, 0x1);
445}
446
400static int byt_get_min_pstate(void) 447static int byt_get_min_pstate(void)
401{ 448{
402 u64 value; 449 u64 value;
@@ -648,6 +695,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
648 cpu->prev_mperf = mperf; 695 cpu->prev_mperf = mperf;
649} 696}
650 697
698static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
699{
700 int delay;
701
702 delay = msecs_to_jiffies(50);
703 mod_timer_pinned(&cpu->timer, jiffies + delay);
704}
705
651static inline void intel_pstate_set_sample_time(struct cpudata *cpu) 706static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
652{ 707{
653 int delay; 708 int delay;
@@ -694,6 +749,14 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
694 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl); 749 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
695} 750}
696 751
752static void intel_hwp_timer_func(unsigned long __data)
753{
754 struct cpudata *cpu = (struct cpudata *) __data;
755
756 intel_pstate_sample(cpu);
757 intel_hwp_set_sample_time(cpu);
758}
759
697static void intel_pstate_timer_func(unsigned long __data) 760static void intel_pstate_timer_func(unsigned long __data)
698{ 761{
699 struct cpudata *cpu = (struct cpudata *) __data; 762 struct cpudata *cpu = (struct cpudata *) __data;
@@ -730,6 +793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
730 ICPU(0x3f, core_params), 793 ICPU(0x3f, core_params),
731 ICPU(0x45, core_params), 794 ICPU(0x45, core_params),
732 ICPU(0x46, core_params), 795 ICPU(0x46, core_params),
796 ICPU(0x47, core_params),
733 ICPU(0x4c, byt_params), 797 ICPU(0x4c, byt_params),
734 ICPU(0x4f, core_params), 798 ICPU(0x4f, core_params),
735 ICPU(0x56, core_params), 799 ICPU(0x56, core_params),
@@ -737,6 +801,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
737}; 801};
738MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 802MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
739 803
804static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
805 ICPU(0x56, core_params),
806 {}
807};
808
740static int intel_pstate_init_cpu(unsigned int cpunum) 809static int intel_pstate_init_cpu(unsigned int cpunum)
741{ 810{
742 struct cpudata *cpu; 811 struct cpudata *cpu;
@@ -753,9 +822,14 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
753 intel_pstate_get_cpu_pstates(cpu); 822 intel_pstate_get_cpu_pstates(cpu);
754 823
755 init_timer_deferrable(&cpu->timer); 824 init_timer_deferrable(&cpu->timer);
756 cpu->timer.function = intel_pstate_timer_func;
757 cpu->timer.data = (unsigned long)cpu; 825 cpu->timer.data = (unsigned long)cpu;
758 cpu->timer.expires = jiffies + HZ/100; 826 cpu->timer.expires = jiffies + HZ/100;
827
828 if (!hwp_active)
829 cpu->timer.function = intel_pstate_timer_func;
830 else
831 cpu->timer.function = intel_hwp_timer_func;
832
759 intel_pstate_busy_pid_reset(cpu); 833 intel_pstate_busy_pid_reset(cpu);
760 intel_pstate_sample(cpu); 834 intel_pstate_sample(cpu);
761 835
@@ -792,6 +866,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
792 limits.no_turbo = 0; 866 limits.no_turbo = 0;
793 return 0; 867 return 0;
794 } 868 }
869
795 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 870 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
796 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 871 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
797 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 872 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
@@ -801,6 +876,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
801 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 876 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
802 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 877 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
803 878
879 if (hwp_active)
880 intel_pstate_hwp_set();
881
804 return 0; 882 return 0;
805} 883}
806 884
@@ -823,6 +901,9 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
823 pr_info("intel_pstate CPU %d exiting\n", cpu_num); 901 pr_info("intel_pstate CPU %d exiting\n", cpu_num);
824 902
825 del_timer_sync(&all_cpu_data[cpu_num]->timer); 903 del_timer_sync(&all_cpu_data[cpu_num]->timer);
904 if (hwp_active)
905 return;
906
826 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 907 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
827} 908}
828 909
@@ -866,6 +947,7 @@ static struct cpufreq_driver intel_pstate_driver = {
866}; 947};
867 948
868static int __initdata no_load; 949static int __initdata no_load;
950static int __initdata no_hwp;
869 951
870static int intel_pstate_msrs_not_valid(void) 952static int intel_pstate_msrs_not_valid(void)
871{ 953{
@@ -959,6 +1041,15 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
959{ 1041{
960 struct acpi_table_header hdr; 1042 struct acpi_table_header hdr;
961 struct hw_vendor_info *v_info; 1043 struct hw_vendor_info *v_info;
1044 const struct x86_cpu_id *id;
1045 u64 misc_pwr;
1046
1047 id = x86_match_cpu(intel_pstate_cpu_oob_ids);
1048 if (id) {
1049 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
1050 if ( misc_pwr & (1 << 8))
1051 return true;
1052 }
962 1053
963 if (acpi_disabled || 1054 if (acpi_disabled ||
964 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) 1055 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
@@ -982,6 +1073,7 @@ static int __init intel_pstate_init(void)
982 int cpu, rc = 0; 1073 int cpu, rc = 0;
983 const struct x86_cpu_id *id; 1074 const struct x86_cpu_id *id;
984 struct cpu_defaults *cpu_info; 1075 struct cpu_defaults *cpu_info;
1076 struct cpuinfo_x86 *c = &boot_cpu_data;
985 1077
986 if (no_load) 1078 if (no_load)
987 return -ENODEV; 1079 return -ENODEV;
@@ -1011,6 +1103,9 @@ static int __init intel_pstate_init(void)
1011 if (!all_cpu_data) 1103 if (!all_cpu_data)
1012 return -ENOMEM; 1104 return -ENOMEM;
1013 1105
1106 if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
1107 intel_pstate_hwp_enable();
1108
1014 rc = cpufreq_register_driver(&intel_pstate_driver); 1109 rc = cpufreq_register_driver(&intel_pstate_driver);
1015 if (rc) 1110 if (rc)
1016 goto out; 1111 goto out;
@@ -1041,6 +1136,8 @@ static int __init intel_pstate_setup(char *str)
1041 1136
1042 if (!strcmp(str, "disable")) 1137 if (!strcmp(str, "disable"))
1043 no_load = 1; 1138 no_load = 1;
1139 if (!strcmp(str, "no_hwp"))
1140 no_hwp = 1;
1044 return 0; 1141 return 0;
1045} 1142}
1046early_param("intel_pstate", intel_pstate_setup); 1143early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
new file mode 100644
index 000000000000..25fbd6a1374f
--- /dev/null
+++ b/drivers/cpufreq/ls1x-cpufreq.c
@@ -0,0 +1,223 @@
1/*
2 * CPU Frequency Scaling for Loongson 1 SoC
3 *
4 * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11#include <linux/clk.h>
12#include <linux/clk-provider.h>
13#include <linux/cpu.h>
14#include <linux/cpufreq.h>
15#include <linux/delay.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19
20#include <asm/mach-loongson1/cpufreq.h>
21#include <asm/mach-loongson1/loongson1.h>
22
23static struct {
24 struct device *dev;
25 struct clk *clk; /* CPU clk */
26 struct clk *mux_clk; /* MUX of CPU clk */
27 struct clk *pll_clk; /* PLL clk */
28 struct clk *osc_clk; /* OSC clk */
29 unsigned int max_freq;
30 unsigned int min_freq;
31} ls1x_cpufreq;
32
33static int ls1x_cpufreq_notifier(struct notifier_block *nb,
34 unsigned long val, void *data)
35{
36 if (val == CPUFREQ_POSTCHANGE)
37 current_cpu_data.udelay_val = loops_per_jiffy;
38
39 return NOTIFY_OK;
40}
41
42static struct notifier_block ls1x_cpufreq_notifier_block = {
43 .notifier_call = ls1x_cpufreq_notifier
44};
45
46static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
47 unsigned int index)
48{
49 unsigned int old_freq, new_freq;
50
51 old_freq = policy->cur;
52 new_freq = policy->freq_table[index].frequency;
53
54 /*
55 * The procedure of reconfiguring CPU clk is as below.
56 *
57 * - Reparent CPU clk to OSC clk
58 * - Reset CPU clock (very important)
59 * - Reconfigure CPU DIV
60 * - Reparent CPU clk back to CPU DIV clk
61 */
62
63 dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
64 clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
65 __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
66 LS1X_CLK_PLL_DIV);
67 __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
68 LS1X_CLK_PLL_DIV);
69 clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
70 clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
71
72 return 0;
73}
74
75static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
76{
77 struct cpufreq_frequency_table *freq_tbl;
78 unsigned int pll_freq, freq;
79 int steps, i, ret;
80
81 pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
82
83 steps = 1 << DIV_CPU_WIDTH;
84 freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
85 if (!freq_tbl) {
86 dev_err(ls1x_cpufreq.dev,
87 "failed to alloc cpufreq_frequency_table\n");
88 ret = -ENOMEM;
89 goto out;
90 }
91
92 for (i = 0; i < (steps - 1); i++) {
93 freq = pll_freq / (i + 1);
94 if ((freq < ls1x_cpufreq.min_freq) ||
95 (freq > ls1x_cpufreq.max_freq))
96 freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
97 else
98 freq_tbl[i].frequency = freq;
99 dev_dbg(ls1x_cpufreq.dev,
100 "cpufreq table: index %d: frequency %d\n", i,
101 freq_tbl[i].frequency);
102 }
103 freq_tbl[i].frequency = CPUFREQ_TABLE_END;
104
105 policy->clk = ls1x_cpufreq.clk;
106 ret = cpufreq_generic_init(policy, freq_tbl, 0);
107 if (ret)
108 kfree(freq_tbl);
109out:
110 return ret;
111}
112
113static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
114{
115 kfree(policy->freq_table);
116 return 0;
117}
118
119static struct cpufreq_driver ls1x_cpufreq_driver = {
120 .name = "cpufreq-ls1x",
121 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
122 .verify = cpufreq_generic_frequency_table_verify,
123 .target_index = ls1x_cpufreq_target,
124 .get = cpufreq_generic_get,
125 .init = ls1x_cpufreq_init,
126 .exit = ls1x_cpufreq_exit,
127 .attr = cpufreq_generic_attr,
128};
129
130static int ls1x_cpufreq_remove(struct platform_device *pdev)
131{
132 cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
133 CPUFREQ_TRANSITION_NOTIFIER);
134 cpufreq_unregister_driver(&ls1x_cpufreq_driver);
135
136 return 0;
137}
138
139static int ls1x_cpufreq_probe(struct platform_device *pdev)
140{
141 struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
142 struct clk *clk;
143 int ret;
144
145 if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
146 return -EINVAL;
147
148 ls1x_cpufreq.dev = &pdev->dev;
149
150 clk = devm_clk_get(&pdev->dev, pdata->clk_name);
151 if (IS_ERR(clk)) {
152 dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
153 pdata->clk_name);
154 ret = PTR_ERR(clk);
155 goto out;
156 }
157 ls1x_cpufreq.clk = clk;
158
159 clk = clk_get_parent(clk);
160 if (IS_ERR(clk)) {
161 dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
162 __clk_get_name(ls1x_cpufreq.clk));
163 ret = PTR_ERR(clk);
164 goto out;
165 }
166 ls1x_cpufreq.mux_clk = clk;
167
168 clk = clk_get_parent(clk);
169 if (IS_ERR(clk)) {
170 dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
171 __clk_get_name(ls1x_cpufreq.mux_clk));
172 ret = PTR_ERR(clk);
173 goto out;
174 }
175 ls1x_cpufreq.pll_clk = clk;
176
177 clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
178 if (IS_ERR(clk)) {
179 dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
180 pdata->osc_clk_name);
181 ret = PTR_ERR(clk);
182 goto out;
183 }
184 ls1x_cpufreq.osc_clk = clk;
185
186 ls1x_cpufreq.max_freq = pdata->max_freq;
187 ls1x_cpufreq.min_freq = pdata->min_freq;
188
189 ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
190 if (ret) {
191 dev_err(ls1x_cpufreq.dev,
192 "failed to register cpufreq driver: %d\n", ret);
193 goto out;
194 }
195
196 ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
197 CPUFREQ_TRANSITION_NOTIFIER);
198
199 if (!ret)
200 goto out;
201
202 dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
203 ret);
204
205 cpufreq_unregister_driver(&ls1x_cpufreq_driver);
206out:
207 return ret;
208}
209
210static struct platform_driver ls1x_cpufreq_platdrv = {
211 .driver = {
212 .name = "ls1x-cpufreq",
213 .owner = THIS_MODULE,
214 },
215 .probe = ls1x_cpufreq_probe,
216 .remove = ls1x_cpufreq_remove,
217};
218
219module_platform_driver(ls1x_cpufreq_platdrv);
220
221MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
222MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
223MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 4d2c8e861089..2a0d58959acf 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -603,6 +603,13 @@ static void __exit pcc_cpufreq_exit(void)
603 free_percpu(pcc_cpu_info); 603 free_percpu(pcc_cpu_info);
604} 604}
605 605
606static const struct acpi_device_id processor_device_ids[] = {
607 {ACPI_PROCESSOR_OBJECT_HID, },
608 {ACPI_PROCESSOR_DEVICE_HID, },
609 {},
610};
611MODULE_DEVICE_TABLE(acpi, processor_device_ids);
612
606MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar"); 613MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
607MODULE_VERSION(PCC_VERSION); 614MODULE_VERSION(PCC_VERSION);
608MODULE_DESCRIPTION("Processor Clocking Control interface driver"); 615MODULE_DESCRIPTION("Processor Clocking Control interface driver");