about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/cpufreq/Kconfig11
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c9
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/intel_pstate.c101
-rw-r--r--drivers/cpufreq/ls1x-cpufreq.c223
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c7
7 files changed, 351 insertions, 7 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 3489f8f5fada..4de4dfae4ccc 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -63,7 +63,6 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
63 63
64config CPU_FREQ_DEFAULT_GOV_POWERSAVE 64config CPU_FREQ_DEFAULT_GOV_POWERSAVE
65 bool "powersave" 65 bool "powersave"
66 depends on EXPERT
67 select CPU_FREQ_GOV_POWERSAVE 66 select CPU_FREQ_GOV_POWERSAVE
68 help 67 help
69 Use the CPUFreq governor 'powersave' as default. This sets 68 Use the CPUFreq governor 'powersave' as default. This sets
@@ -250,6 +249,16 @@ config LOONGSON2_CPUFREQ
250 249
251 If in doubt, say N. 250 If in doubt, say N.
252 251
252config LOONGSON1_CPUFREQ
253 tristate "Loongson1 CPUFreq Driver"
254 help
255 This option adds a CPUFreq driver for loongson1 processors which
256 support software configurable cpu frequency.
257
258 For details, take a look at <file:Documentation/cpu-freq/>.
259
260 If in doubt, say N.
261
253endmenu 262endmenu
254 263
255menu "PowerPC CPU frequency scaling drivers" 264menu "PowerPC CPU frequency scaling drivers"
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 40c53dc1937e..215e447abec6 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
98obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o 98obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
99obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o 99obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
100obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o 100obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
101obj-$(CONFIG_LOONGSON1_CPUFREQ) += ls1x-cpufreq.o
101obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o 102obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
102obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o 103obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
103obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o 104obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f657c571b18e..8cba13df5f28 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -58,6 +58,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
58 old_freq = clk_get_rate(cpu_clk) / 1000; 58 old_freq = clk_get_rate(cpu_clk) / 1000;
59 59
60 if (!IS_ERR(cpu_reg)) { 60 if (!IS_ERR(cpu_reg)) {
61 unsigned long opp_freq;
62
61 rcu_read_lock(); 63 rcu_read_lock();
62 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); 64 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
63 if (IS_ERR(opp)) { 65 if (IS_ERR(opp)) {
@@ -67,13 +69,16 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
67 return PTR_ERR(opp); 69 return PTR_ERR(opp);
68 } 70 }
69 volt = dev_pm_opp_get_voltage(opp); 71 volt = dev_pm_opp_get_voltage(opp);
72 opp_freq = dev_pm_opp_get_freq(opp);
70 rcu_read_unlock(); 73 rcu_read_unlock();
71 tol = volt * priv->voltage_tolerance / 100; 74 tol = volt * priv->voltage_tolerance / 100;
72 volt_old = regulator_get_voltage(cpu_reg); 75 volt_old = regulator_get_voltage(cpu_reg);
76 dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
77 opp_freq / 1000, volt);
73 } 78 }
74 79
75 dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", 80 dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
76 old_freq / 1000, volt_old ? volt_old / 1000 : -1, 81 old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
77 new_freq / 1000, volt ? volt / 1000 : -1); 82 new_freq / 1000, volt ? volt / 1000 : -1);
78 83
79 /* scaling up? scale voltage before frequency */ 84 /* scaling up? scale voltage before frequency */
@@ -89,7 +94,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
89 ret = clk_set_rate(cpu_clk, freq_exact); 94 ret = clk_set_rate(cpu_clk, freq_exact);
90 if (ret) { 95 if (ret) {
91 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); 96 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
92 if (!IS_ERR(cpu_reg)) 97 if (!IS_ERR(cpu_reg) && volt_old > 0)
93 regulator_set_voltage_tol(cpu_reg, volt_old, tol); 98 regulator_set_voltage_tol(cpu_reg, volt_old, tol);
94 return ret; 99 return ret;
95 } 100 }
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 4473eba1d6b0..c9701e9e53e4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -535,7 +535,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
535static ssize_t store_##file_name \ 535static ssize_t store_##file_name \
536(struct cpufreq_policy *policy, const char *buf, size_t count) \ 536(struct cpufreq_policy *policy, const char *buf, size_t count) \
537{ \ 537{ \
538 int ret; \ 538 int ret, temp; \
539 struct cpufreq_policy new_policy; \ 539 struct cpufreq_policy new_policy; \
540 \ 540 \
541 ret = cpufreq_get_policy(&new_policy, policy->cpu); \ 541 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -546,8 +546,10 @@ static ssize_t store_##file_name \
546 if (ret != 1) \ 546 if (ret != 1) \
547 return -EINVAL; \ 547 return -EINVAL; \
548 \ 548 \
549 temp = new_policy.object; \
549 ret = cpufreq_set_policy(policy, &new_policy); \ 550 ret = cpufreq_set_policy(policy, &new_policy); \
550 policy->user_policy.object = policy->object; \ 551 if (!ret) \
552 policy->user_policy.object = temp; \
551 \ 553 \
552 return ret ? ret : count; \ 554 return ret ? ret : count; \
553} 555}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 27bb6d3877ed..ab2e100a1807 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -137,6 +137,7 @@ struct cpu_defaults {
137 137
138static struct pstate_adjust_policy pid_params; 138static struct pstate_adjust_policy pid_params;
139static struct pstate_funcs pstate_funcs; 139static struct pstate_funcs pstate_funcs;
140static int hwp_active;
140 141
141struct perf_limits { 142struct perf_limits {
142 int no_turbo; 143 int no_turbo;
@@ -244,6 +245,34 @@ static inline void update_turbo_state(void)
244 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 245 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
245} 246}
246 247
248#define PCT_TO_HWP(x) (x * 255 / 100)
249static void intel_pstate_hwp_set(void)
250{
251 int min, max, cpu;
252 u64 value, freq;
253
254 get_online_cpus();
255
256 for_each_online_cpu(cpu) {
257 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
258 min = PCT_TO_HWP(limits.min_perf_pct);
259 value &= ~HWP_MIN_PERF(~0L);
260 value |= HWP_MIN_PERF(min);
261
262 max = PCT_TO_HWP(limits.max_perf_pct);
263 if (limits.no_turbo) {
264 rdmsrl( MSR_HWP_CAPABILITIES, freq);
265 max = HWP_GUARANTEED_PERF(freq);
266 }
267
268 value &= ~HWP_MAX_PERF(~0L);
269 value |= HWP_MAX_PERF(max);
270 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
271 }
272
273 put_online_cpus();
274}
275
247/************************** debugfs begin ************************/ 276/************************** debugfs begin ************************/
248static int pid_param_set(void *data, u64 val) 277static int pid_param_set(void *data, u64 val)
249{ 278{
@@ -279,6 +308,8 @@ static void __init intel_pstate_debug_expose_params(void)
279 struct dentry *debugfs_parent; 308 struct dentry *debugfs_parent;
280 int i = 0; 309 int i = 0;
281 310
311 if (hwp_active)
312 return;
282 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 313 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
283 if (IS_ERR_OR_NULL(debugfs_parent)) 314 if (IS_ERR_OR_NULL(debugfs_parent))
284 return; 315 return;
@@ -329,8 +360,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
329 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 360 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
330 return -EPERM; 361 return -EPERM;
331 } 362 }
363
332 limits.no_turbo = clamp_t(int, input, 0, 1); 364 limits.no_turbo = clamp_t(int, input, 0, 1);
333 365
366 if (hwp_active)
367 intel_pstate_hwp_set();
368
334 return count; 369 return count;
335} 370}
336 371
@@ -348,6 +383,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
348 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 383 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
349 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 384 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
350 385
386 if (hwp_active)
387 intel_pstate_hwp_set();
351 return count; 388 return count;
352} 389}
353 390
@@ -363,6 +400,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
363 limits.min_perf_pct = clamp_t(int, input, 0 , 100); 400 limits.min_perf_pct = clamp_t(int, input, 0 , 100);
364 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 401 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
365 402
403 if (hwp_active)
404 intel_pstate_hwp_set();
366 return count; 405 return count;
367} 406}
368 407
@@ -395,8 +434,16 @@ static void __init intel_pstate_sysfs_expose_params(void)
395 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 434 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
396 BUG_ON(rc); 435 BUG_ON(rc);
397} 436}
398
399/************************** sysfs end ************************/ 437/************************** sysfs end ************************/
438
439static void intel_pstate_hwp_enable(void)
440{
441 hwp_active++;
442 pr_info("intel_pstate HWP enabled\n");
443
444 wrmsrl( MSR_PM_ENABLE, 0x1);
445}
446
400static int byt_get_min_pstate(void) 447static int byt_get_min_pstate(void)
401{ 448{
402 u64 value; 449 u64 value;
@@ -648,6 +695,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
648 cpu->prev_mperf = mperf; 695 cpu->prev_mperf = mperf;
649} 696}
650 697
698static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
699{
700 int delay;
701
702 delay = msecs_to_jiffies(50);
703 mod_timer_pinned(&cpu->timer, jiffies + delay);
704}
705
651static inline void intel_pstate_set_sample_time(struct cpudata *cpu) 706static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
652{ 707{
653 int delay; 708 int delay;
@@ -694,6 +749,14 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
694 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl); 749 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
695} 750}
696 751
/*
 * Timer callback used while HWP is active: only gather statistics —
 * the hardware chooses P-states on its own, so no PID adjustment runs.
 */
static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpudata = (struct cpudata *)__data;

	intel_pstate_sample(cpudata);
	intel_hwp_set_sample_time(cpudata);
}
759
697static void intel_pstate_timer_func(unsigned long __data) 760static void intel_pstate_timer_func(unsigned long __data)
698{ 761{
699 struct cpudata *cpu = (struct cpudata *) __data; 762 struct cpudata *cpu = (struct cpudata *) __data;
@@ -730,6 +793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
730 ICPU(0x3f, core_params), 793 ICPU(0x3f, core_params),
731 ICPU(0x45, core_params), 794 ICPU(0x45, core_params),
732 ICPU(0x46, core_params), 795 ICPU(0x46, core_params),
796 ICPU(0x47, core_params),
733 ICPU(0x4c, byt_params), 797 ICPU(0x4c, byt_params),
734 ICPU(0x4f, core_params), 798 ICPU(0x4f, core_params),
735 ICPU(0x56, core_params), 799 ICPU(0x56, core_params),
@@ -737,6 +801,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
737}; 801};
738MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 802MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
739 803
804static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
805 ICPU(0x56, core_params),
806 {}
807};
808
740static int intel_pstate_init_cpu(unsigned int cpunum) 809static int intel_pstate_init_cpu(unsigned int cpunum)
741{ 810{
742 struct cpudata *cpu; 811 struct cpudata *cpu;
@@ -753,9 +822,14 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
753 intel_pstate_get_cpu_pstates(cpu); 822 intel_pstate_get_cpu_pstates(cpu);
754 823
755 init_timer_deferrable(&cpu->timer); 824 init_timer_deferrable(&cpu->timer);
756 cpu->timer.function = intel_pstate_timer_func;
757 cpu->timer.data = (unsigned long)cpu; 825 cpu->timer.data = (unsigned long)cpu;
758 cpu->timer.expires = jiffies + HZ/100; 826 cpu->timer.expires = jiffies + HZ/100;
827
828 if (!hwp_active)
829 cpu->timer.function = intel_pstate_timer_func;
830 else
831 cpu->timer.function = intel_hwp_timer_func;
832
759 intel_pstate_busy_pid_reset(cpu); 833 intel_pstate_busy_pid_reset(cpu);
760 intel_pstate_sample(cpu); 834 intel_pstate_sample(cpu);
761 835
@@ -792,6 +866,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
792 limits.no_turbo = 0; 866 limits.no_turbo = 0;
793 return 0; 867 return 0;
794 } 868 }
869
795 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 870 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
796 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 871 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
797 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 872 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
@@ -801,6 +876,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
801 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 876 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
802 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 877 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
803 878
879 if (hwp_active)
880 intel_pstate_hwp_set();
881
804 return 0; 882 return 0;
805} 883}
806 884
@@ -823,6 +901,9 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
823 pr_info("intel_pstate CPU %d exiting\n", cpu_num); 901 pr_info("intel_pstate CPU %d exiting\n", cpu_num);
824 902
825 del_timer_sync(&all_cpu_data[cpu_num]->timer); 903 del_timer_sync(&all_cpu_data[cpu_num]->timer);
904 if (hwp_active)
905 return;
906
826 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 907 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
827} 908}
828 909
@@ -866,6 +947,7 @@ static struct cpufreq_driver intel_pstate_driver = {
866}; 947};
867 948
868static int __initdata no_load; 949static int __initdata no_load;
950static int __initdata no_hwp;
869 951
870static int intel_pstate_msrs_not_valid(void) 952static int intel_pstate_msrs_not_valid(void)
871{ 953{
@@ -959,6 +1041,15 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
959{ 1041{
960 struct acpi_table_header hdr; 1042 struct acpi_table_header hdr;
961 struct hw_vendor_info *v_info; 1043 struct hw_vendor_info *v_info;
1044 const struct x86_cpu_id *id;
1045 u64 misc_pwr;
1046
1047 id = x86_match_cpu(intel_pstate_cpu_oob_ids);
1048 if (id) {
1049 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
1050 if ( misc_pwr & (1 << 8))
1051 return true;
1052 }
962 1053
963 if (acpi_disabled || 1054 if (acpi_disabled ||
964 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) 1055 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
@@ -982,6 +1073,7 @@ static int __init intel_pstate_init(void)
982 int cpu, rc = 0; 1073 int cpu, rc = 0;
983 const struct x86_cpu_id *id; 1074 const struct x86_cpu_id *id;
984 struct cpu_defaults *cpu_info; 1075 struct cpu_defaults *cpu_info;
1076 struct cpuinfo_x86 *c = &boot_cpu_data;
985 1077
986 if (no_load) 1078 if (no_load)
987 return -ENODEV; 1079 return -ENODEV;
@@ -1011,6 +1103,9 @@ static int __init intel_pstate_init(void)
1011 if (!all_cpu_data) 1103 if (!all_cpu_data)
1012 return -ENOMEM; 1104 return -ENOMEM;
1013 1105
1106 if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
1107 intel_pstate_hwp_enable();
1108
1014 rc = cpufreq_register_driver(&intel_pstate_driver); 1109 rc = cpufreq_register_driver(&intel_pstate_driver);
1015 if (rc) 1110 if (rc)
1016 goto out; 1111 goto out;
@@ -1041,6 +1136,8 @@ static int __init intel_pstate_setup(char *str)
1041 1136
1042 if (!strcmp(str, "disable")) 1137 if (!strcmp(str, "disable"))
1043 no_load = 1; 1138 no_load = 1;
1139 if (!strcmp(str, "no_hwp"))
1140 no_hwp = 1;
1044 return 0; 1141 return 0;
1045} 1142}
1046early_param("intel_pstate", intel_pstate_setup); 1143early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
new file mode 100644
index 000000000000..25fbd6a1374f
--- /dev/null
+++ b/drivers/cpufreq/ls1x-cpufreq.c
@@ -0,0 +1,223 @@
1/*
2 * CPU Frequency Scaling for Loongson 1 SoC
3 *
4 * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11#include <linux/clk.h>
12#include <linux/clk-provider.h>
13#include <linux/cpu.h>
14#include <linux/cpufreq.h>
15#include <linux/delay.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19
20#include <asm/mach-loongson1/cpufreq.h>
21#include <asm/mach-loongson1/loongson1.h>
22
23static struct {
24 struct device *dev;
25 struct clk *clk; /* CPU clk */
26 struct clk *mux_clk; /* MUX of CPU clk */
27 struct clk *pll_clk; /* PLL clk */
28 struct clk *osc_clk; /* OSC clk */
29 unsigned int max_freq;
30 unsigned int min_freq;
31} ls1x_cpufreq;
32
33static int ls1x_cpufreq_notifier(struct notifier_block *nb,
34 unsigned long val, void *data)
35{
36 if (val == CPUFREQ_POSTCHANGE)
37 current_cpu_data.udelay_val = loops_per_jiffy;
38
39 return NOTIFY_OK;
40}
41
42static struct notifier_block ls1x_cpufreq_notifier_block = {
43 .notifier_call = ls1x_cpufreq_notifier
44};
45
46static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
47 unsigned int index)
48{
49 unsigned int old_freq, new_freq;
50
51 old_freq = policy->cur;
52 new_freq = policy->freq_table[index].frequency;
53
54 /*
55 * The procedure of reconfiguring CPU clk is as below.
56 *
57 * - Reparent CPU clk to OSC clk
58 * - Reset CPU clock (very important)
59 * - Reconfigure CPU DIV
60 * - Reparent CPU clk back to CPU DIV clk
61 */
62
63 dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
64 clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
65 __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
66 LS1X_CLK_PLL_DIV);
67 __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
68 LS1X_CLK_PLL_DIV);
69 clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
70 clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
71
72 return 0;
73}
74
75static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
76{
77 struct cpufreq_frequency_table *freq_tbl;
78 unsigned int pll_freq, freq;
79 int steps, i, ret;
80
81 pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
82
83 steps = 1 << DIV_CPU_WIDTH;
84 freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
85 if (!freq_tbl) {
86 dev_err(ls1x_cpufreq.dev,
87 "failed to alloc cpufreq_frequency_table\n");
88 ret = -ENOMEM;
89 goto out;
90 }
91
92 for (i = 0; i < (steps - 1); i++) {
93 freq = pll_freq / (i + 1);
94 if ((freq < ls1x_cpufreq.min_freq) ||
95 (freq > ls1x_cpufreq.max_freq))
96 freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
97 else
98 freq_tbl[i].frequency = freq;
99 dev_dbg(ls1x_cpufreq.dev,
100 "cpufreq table: index %d: frequency %d\n", i,
101 freq_tbl[i].frequency);
102 }
103 freq_tbl[i].frequency = CPUFREQ_TABLE_END;
104
105 policy->clk = ls1x_cpufreq.clk;
106 ret = cpufreq_generic_init(policy, freq_tbl, 0);
107 if (ret)
108 kfree(freq_tbl);
109out:
110 return ret;
111}
112
113static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
114{
115 kfree(policy->freq_table);
116 return 0;
117}
118
119static struct cpufreq_driver ls1x_cpufreq_driver = {
120 .name = "cpufreq-ls1x",
121 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
122 .verify = cpufreq_generic_frequency_table_verify,
123 .target_index = ls1x_cpufreq_target,
124 .get = cpufreq_generic_get,
125 .init = ls1x_cpufreq_init,
126 .exit = ls1x_cpufreq_exit,
127 .attr = cpufreq_generic_attr,
128};
129
130static int ls1x_cpufreq_remove(struct platform_device *pdev)
131{
132 cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
133 CPUFREQ_TRANSITION_NOTIFIER);
134 cpufreq_unregister_driver(&ls1x_cpufreq_driver);
135
136 return 0;
137}
138
139static int ls1x_cpufreq_probe(struct platform_device *pdev)
140{
141 struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
142 struct clk *clk;
143 int ret;
144
145 if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
146 return -EINVAL;
147
148 ls1x_cpufreq.dev = &pdev->dev;
149
150 clk = devm_clk_get(&pdev->dev, pdata->clk_name);
151 if (IS_ERR(clk)) {
152 dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
153 pdata->clk_name);
154 ret = PTR_ERR(clk);
155 goto out;
156 }
157 ls1x_cpufreq.clk = clk;
158
159 clk = clk_get_parent(clk);
160 if (IS_ERR(clk)) {
161 dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
162 __clk_get_name(ls1x_cpufreq.clk));
163 ret = PTR_ERR(clk);
164 goto out;
165 }
166 ls1x_cpufreq.mux_clk = clk;
167
168 clk = clk_get_parent(clk);
169 if (IS_ERR(clk)) {
170 dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
171 __clk_get_name(ls1x_cpufreq.mux_clk));
172 ret = PTR_ERR(clk);
173 goto out;
174 }
175 ls1x_cpufreq.pll_clk = clk;
176
177 clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
178 if (IS_ERR(clk)) {
179 dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
180 pdata->osc_clk_name);
181 ret = PTR_ERR(clk);
182 goto out;
183 }
184 ls1x_cpufreq.osc_clk = clk;
185
186 ls1x_cpufreq.max_freq = pdata->max_freq;
187 ls1x_cpufreq.min_freq = pdata->min_freq;
188
189 ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
190 if (ret) {
191 dev_err(ls1x_cpufreq.dev,
192 "failed to register cpufreq driver: %d\n", ret);
193 goto out;
194 }
195
196 ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
197 CPUFREQ_TRANSITION_NOTIFIER);
198
199 if (!ret)
200 goto out;
201
202 dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
203 ret);
204
205 cpufreq_unregister_driver(&ls1x_cpufreq_driver);
206out:
207 return ret;
208}
209
210static struct platform_driver ls1x_cpufreq_platdrv = {
211 .driver = {
212 .name = "ls1x-cpufreq",
213 .owner = THIS_MODULE,
214 },
215 .probe = ls1x_cpufreq_probe,
216 .remove = ls1x_cpufreq_remove,
217};
218
219module_platform_driver(ls1x_cpufreq_platdrv);
220
221MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
222MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
223MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 4d2c8e861089..2a0d58959acf 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -603,6 +603,13 @@ static void __exit pcc_cpufreq_exit(void)
603 free_percpu(pcc_cpu_info); 603 free_percpu(pcc_cpu_info);
604} 604}
605 605
606static const struct acpi_device_id processor_device_ids[] = {
607 {ACPI_PROCESSOR_OBJECT_HID, },
608 {ACPI_PROCESSOR_DEVICE_HID, },
609 {},
610};
611MODULE_DEVICE_TABLE(acpi, processor_device_ids);
612
606MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar"); 613MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
607MODULE_VERSION(PCC_VERSION); 614MODULE_VERSION(PCC_VERSION);
608MODULE_DESCRIPTION("Processor Clocking Control interface driver"); 615MODULE_DESCRIPTION("Processor Clocking Control interface driver");