 Documentation/kernel-parameters.txt    |   3
 arch/x86/include/asm/msr-index.h       |   7
 drivers/cpufreq/Kconfig.x86            |   1
 drivers/cpufreq/cpufreq.c              | 112
 drivers/cpufreq/cpufreq_conservative.c |  31
 drivers/cpufreq/cpufreq_governor.c     |  32
 drivers/cpufreq/cpufreq_governor.h     |   1
 drivers/cpufreq/cpufreq_ondemand.c     |  10
 drivers/cpufreq/imx6q-cpufreq.c        |  50
 drivers/cpufreq/integrator-cpufreq.c   |   2
 drivers/cpufreq/intel_pstate.c         | 390
 drivers/cpufreq/powernv-cpufreq.c      |  10
 drivers/cpufreq/tegra20-cpufreq.c      |   2
 drivers/cpuidle/cpuidle-mvebu-v7.c     |  46
 include/linux/cpufreq.h                |   5
 15 files changed, 469 insertions(+), 233 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index bbf4ee6c96da..a491aaecc8bb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1547,6 +1547,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 		hwp_only
 			Only load intel_pstate on systems which support
 			hardware P state control (HWP) if available.
+		no_acpi
+			Don't use ACPI processor performance control objects
+			_PSS and _PPC specified limits.
 
 	intremap=	[X86-64, Intel-IOMMU]
 		on	enable Interrupt Remapping (default)
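With this, the ACPI _PSS/_PPC limits can be bypassed from the command line. A usage sketch, based on the intel_pstate= parameter parsing extended in intel_pstate_setup() further below:

    intel_pstate=no_acpi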
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b8c14bb7fc8f..9f3905697f12 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -206,6 +206,13 @@
 #define MSR_GFX_PERF_LIMIT_REASONS	0x000006B0
 #define MSR_RING_PERF_LIMIT_REASONS	0x000006B1
 
+/* Config TDP MSRs */
+#define MSR_CONFIG_TDP_NOMINAL		0x00000648
+#define MSR_CONFIG_TDP_LEVEL1		0x00000649
+#define MSR_CONFIG_TDP_LEVEL2		0x0000064A
+#define MSR_CONFIG_TDP_CONTROL		0x0000064B
+#define MSR_TURBO_ACTIVATION_RATIO	0x0000064C
+
 /* Hardware P state interface */
 #define MSR_PPERF			0x0000064e
 #define MSR_PERF_LIMIT_REASONS		0x0000064f
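For context, a minimal sketch (not part of this patch) of how the new MSRs are meant to be probed. MSR_TURBO_ACTIVATION_RATIO and the Config TDP registers are not implemented on every CPU, so reads should go through rdmsrl_safe(), which returns non-zero instead of faulting when the MSR is absent. The helper name and the low-byte field assumption are illustrative only:

/* Hypothetical helper: read the turbo activation ratio, failing
 * cleanly on CPUs without the Config TDP MSRs.  rdmsrl_safe()
 * returns non-zero when the MSR read faults.
 */
static int example_read_turbo_activation_ratio(int *ratio)
{
        u64 tar;

        if (rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar))
                return -ENODEV;         /* MSR not implemented */

        *ratio = tar & 0xFF;            /* assumed: ratio in the low byte */
        return 0;
}

This mirrors how core_get_max_pstate() in the intel_pstate.c hunks below consumes these registers.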
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index c59bdcb83217..adbd1de1cea5 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,6 +5,7 @@
 config X86_INTEL_PSTATE
 	bool "Intel P state control"
 	depends on X86
+	select ACPI_PROCESSOR if ACPI
 	help
 	  This driver provides a P state for Intel core processors.
 	  The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 25c4c15103a0..7c48e7316d91 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -843,18 +843,11 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
 	down_write(&policy->rwsem);
 
-	/* Updating inactive policies is invalid, so avoid doing that. */
-	if (unlikely(policy_is_inactive(policy))) {
-		ret = -EBUSY;
-		goto unlock_policy_rwsem;
-	}
-
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;
 
-unlock_policy_rwsem:
 	up_write(&policy->rwsem);
 unlock:
 	put_online_cpus();
@@ -880,49 +873,6 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };
 
-struct kobject *cpufreq_global_kobject;
-EXPORT_SYMBOL(cpufreq_global_kobject);
-
-static int cpufreq_global_kobject_usage;
-
-int cpufreq_get_global_kobject(void)
-{
-	if (!cpufreq_global_kobject_usage++)
-		return kobject_add(cpufreq_global_kobject,
-				   &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
-
-	return 0;
-}
-EXPORT_SYMBOL(cpufreq_get_global_kobject);
-
-void cpufreq_put_global_kobject(void)
-{
-	if (!--cpufreq_global_kobject_usage)
-		kobject_del(cpufreq_global_kobject);
-}
-EXPORT_SYMBOL(cpufreq_put_global_kobject);
-
-int cpufreq_sysfs_create_file(const struct attribute *attr)
-{
-	int ret = cpufreq_get_global_kobject();
-
-	if (!ret) {
-		ret = sysfs_create_file(cpufreq_global_kobject, attr);
-		if (ret)
-			cpufreq_put_global_kobject();
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(cpufreq_sysfs_create_file);
-
-void cpufreq_sysfs_remove_file(const struct attribute *attr)
-{
-	sysfs_remove_file(cpufreq_global_kobject, attr);
-	cpufreq_put_global_kobject();
-}
-EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
-
 static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
 {
 	struct device *cpu_dev;
@@ -960,9 +910,6 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
 
 	/* Some related CPUs might not be present (physically hotplugged) */
 	for_each_cpu(j, policy->real_cpus) {
-		if (j == policy->kobj_cpu)
-			continue;
-
 		ret = add_cpu_dev_symlink(policy, j);
 		if (ret)
 			break;
@@ -976,12 +923,8 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
 	unsigned int j;
 
 	/* Some related CPUs might not be present (physically hotplugged) */
-	for_each_cpu(j, policy->real_cpus) {
-		if (j == policy->kobj_cpu)
-			continue;
-
+	for_each_cpu(j, policy->real_cpus)
 		remove_cpu_dev_symlink(policy, j);
-	}
 }
 
 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
@@ -1079,7 +1022,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 {
 	struct device *dev = get_cpu_device(cpu);
 	struct cpufreq_policy *policy;
-	int ret;
 
 	if (WARN_ON(!dev))
 		return NULL;
@@ -1097,13 +1039,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
 		goto err_free_rcpumask;
 
-	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
-				   "cpufreq");
-	if (ret) {
-		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
-		goto err_free_real_cpus;
-	}
-
+	kobject_init(&policy->kobj, &ktype_cpufreq);
 	INIT_LIST_HEAD(&policy->policy_list);
 	init_rwsem(&policy->rwsem);
 	spin_lock_init(&policy->transition_lock);
@@ -1112,14 +1048,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 	INIT_WORK(&policy->update, handle_update);
 
 	policy->cpu = cpu;
-
-	/* Set this once on allocation */
-	policy->kobj_cpu = cpu;
-
 	return policy;
 
-err_free_real_cpus:
-	free_cpumask_var(policy->real_cpus);
 err_free_rcpumask:
 	free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
@@ -1221,9 +1151,19 @@ static int cpufreq_online(unsigned int cpu)
 
 	if (new_policy) {
 		/* related_cpus should at least include policy->cpus. */
-		cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+		cpumask_copy(policy->related_cpus, policy->cpus);
 		/* Remember CPUs present at the policy creation time. */
 		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+
+		/* Name and add the kobject */
+		ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
+				  "policy%u",
+				  cpumask_first(policy->related_cpus));
+		if (ret) {
+			pr_err("%s: failed to add policy->kobj: %d\n", __func__,
+			       ret);
+			goto out_exit_policy;
+		}
 	}
 
 	/*
@@ -1467,22 +1407,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 		return;
 	}
 
-	if (cpu != policy->kobj_cpu) {
-		remove_cpu_dev_symlink(policy, cpu);
-	} else {
-		/*
-		 * The CPU owning the policy object is going away. Move it to
-		 * another suitable CPU.
-		 */
-		unsigned int new_cpu = cpumask_first(policy->real_cpus);
-		struct device *new_dev = get_cpu_device(new_cpu);
-
-		dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
-
-		sysfs_remove_link(&new_dev->kobj, "cpufreq");
-		policy->kobj_cpu = new_cpu;
-		WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
-	}
+	remove_cpu_dev_symlink(policy, cpu);
 }
 
 static void handle_update(struct work_struct *work)
@@ -2425,7 +2350,7 @@ static int create_boost_sysfs_file(void)
 	if (!cpufreq_driver->set_boost)
 		cpufreq_driver->set_boost = cpufreq_boost_set_sw;
 
-	ret = cpufreq_sysfs_create_file(&boost.attr);
+	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
 	if (ret)
 		pr_err("%s: cannot register global BOOST sysfs file\n",
 		       __func__);
@@ -2436,7 +2361,7 @@
 static void remove_boost_sysfs_file(void)
 {
 	if (cpufreq_boost_supported())
-		cpufreq_sysfs_remove_file(&boost.attr);
+		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
 }
 
 int cpufreq_enable_boost_support(void)
@@ -2584,12 +2509,15 @@ static struct syscore_ops cpufreq_syscore_ops = {
 	.shutdown = cpufreq_suspend,
 };
 
+struct kobject *cpufreq_global_kobject;
+EXPORT_SYMBOL(cpufreq_global_kobject);
+
 static int __init cpufreq_core_init(void)
 {
 	if (cpufreq_disabled())
 		return -ENODEV;
 
-	cpufreq_global_kobject = kobject_create();
+	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
 	BUG_ON(!cpufreq_global_kobject);
 
 	register_syscore_ops(&cpufreq_syscore_ops);
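For context, the lifecycle replacing the removed get/put helpers reduces to this pattern (a sketch under the assumption of a built-in, never-unloaded core, which is why no kobject_del() is needed): the global kobject is created once at init, so callers use plain sysfs_create_file()/sysfs_remove_file() with no reference counting. Names prefixed example_ are hypothetical:

static struct kobject *example_kobj;

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "ok\n");
}
static struct kobj_attribute example_attr = __ATTR_RO(example);

static int __init example_init(void)
{
        /* Created once at core init and never deleted. */
        example_kobj = kobject_create_and_add("example",
                                              &cpu_subsys.dev_root->kobj);
        if (!example_kobj)
                return -ENOMEM;

        /* No get/put dance: the kobject outlives every user. */
        return sysfs_create_file(example_kobj, &example_attr.attr);
}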
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 84a1506950a7..1fa1deb6e91f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -23,6 +23,19 @@
 
 static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
 
+static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+				   unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_conservative = {
+	.name			= "conservative",
+	.governor		= cs_cpufreq_governor_dbs,
+	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
+	.owner			= THIS_MODULE,
+};
+
 static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 					   struct cpufreq_policy *policy)
 {
@@ -119,12 +132,14 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpufreq_freqs *freq = data;
 	struct cs_cpu_dbs_info_s *dbs_info =
 			&per_cpu(cs_cpu_dbs_info, freq->cpu);
-	struct cpufreq_policy *policy;
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
 
-	if (!dbs_info->enable)
+	if (!policy)
 		return 0;
 
-	policy = dbs_info->cdbs.shared->policy;
+	/* policy isn't governed by conservative governor */
+	if (policy->governor != &cpufreq_gov_conservative)
+		return 0;
 
 	/*
 	 * we only care if our internally tracked freq moves outside the 'valid'
@@ -367,16 +382,6 @@ static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
 }
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_conservative = {
-	.name			= "conservative",
-	.governor		= cs_cpufreq_governor_dbs,
-	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
-	.owner			= THIS_MODULE,
-};
-
 static int __init cpufreq_gov_dbs_init(void)
 {
 	return cpufreq_register_governor(&cpufreq_gov_conservative);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 939197ffa4ac..11258c4c1b17 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -348,29 +348,21 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
 	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
 					latency * LATENCY_MULTIPLIER));
 
-	if (!have_governor_per_policy()) {
-		if (WARN_ON(cpufreq_get_global_kobject())) {
-			ret = -EINVAL;
-			goto cdata_exit;
-		}
+	if (!have_governor_per_policy())
 		cdata->gdbs_data = dbs_data;
-	}
 
 	ret = sysfs_create_group(get_governor_parent_kobj(policy),
 				 get_sysfs_attr(dbs_data));
 	if (ret)
-		goto put_kobj;
+		goto reset_gdbs_data;
 
 	policy->governor_data = dbs_data;
 
 	return 0;
 
-put_kobj:
-	if (!have_governor_per_policy()) {
+reset_gdbs_data:
+	if (!have_governor_per_policy())
 		cdata->gdbs_data = NULL;
-		cpufreq_put_global_kobject();
-	}
-cdata_exit:
 	cdata->exit(dbs_data, !policy->governor->initialized);
 free_common_dbs_info:
 	free_common_dbs_info(policy, cdata);
@@ -394,10 +386,8 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
 	sysfs_remove_group(get_governor_parent_kobj(policy),
 			   get_sysfs_attr(dbs_data));
 
-	if (!have_governor_per_policy()) {
+	if (!have_governor_per_policy())
 		cdata->gdbs_data = NULL;
-		cpufreq_put_global_kobject();
-	}
 
 	cdata->exit(dbs_data, policy->governor->initialized == 1);
 	kfree(dbs_data);
@@ -463,7 +453,6 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 			cdata->get_cpu_dbs_info_s(cpu);
 
 		cs_dbs_info->down_skip = 0;
-		cs_dbs_info->enable = 1;
 		cs_dbs_info->requested_freq = policy->cur;
 	} else {
 		struct od_ops *od_ops = cdata->gov_ops;
@@ -482,9 +471,7 @@
 static int cpufreq_governor_stop(struct cpufreq_policy *policy,
 				 struct dbs_data *dbs_data)
 {
-	struct common_dbs_data *cdata = dbs_data->cdata;
-	unsigned int cpu = policy->cpu;
-	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
 	struct cpu_common_dbs_info *shared = cdbs->shared;
 
 	/* State should be equivalent to START */
@@ -493,13 +480,6 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy,
 
 	gov_cancel_work(dbs_data, policy);
 
-	if (cdata->governor == GOV_CONSERVATIVE) {
-		struct cs_cpu_dbs_info_s *cs_dbs_info =
-			cdata->get_cpu_dbs_info_s(cpu);
-
-		cs_dbs_info->enable = 0;
-	}
-
 	shared->policy = NULL;
 	mutex_destroy(&shared->timer_mutex);
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 50f171796632..5621bb03e874 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -170,7 +170,6 @@ struct cs_cpu_dbs_info_s {
 	struct cpu_dbs_info cdbs;
 	unsigned int down_skip;
 	unsigned int requested_freq;
-	unsigned int enable:1;
 };
 
 /* Per policy Governors sysfs tunables */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1fa9088c84a8..03ac6ce54042 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -267,27 +267,19 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
 		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 		cpufreq_cpu_put(policy);
 
-		mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
-
-		if (!delayed_work_pending(&dbs_info->cdbs.dwork)) {
-			mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
+		if (!delayed_work_pending(&dbs_info->cdbs.dwork))
 			continue;
-		}
 
 		next_sampling = jiffies + usecs_to_jiffies(new_rate);
 		appointed_at = dbs_info->cdbs.dwork.timer.expires;
 
 		if (time_before(next_sampling, appointed_at)) {
-
-			mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
 			cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
-			mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
 
 			gov_queue_work(dbs_data, policy,
 				       usecs_to_jiffies(new_rate), true);
 
 		}
-		mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
 	}
 }
 
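The lock juggling removed above worked around a classic hazard: cancel_delayed_work_sync() must not be called while holding a mutex that the work handler itself takes, or the cancel waits on the handler while the handler waits on the mutex. A schematic of that hazard (as a comment; no new API involved):

/*
 * thread A                             dbs work handler
 * --------                             ----------------
 * mutex_lock(&shared->timer_mutex);    mutex_lock(&shared->timer_mutex);
 * cancel_delayed_work_sync(&dwork);        ... blocks on thread A ...
 *     ... waits for the handler ...
 *
 * A waits on the handler, the handler waits on A: deadlock.  Dropping
 * the mutex around the cancel (old code) or not taking it at all for
 * this check (new code) breaks the cycle.
 */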
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 380a90d3c57e..9b4a7bd04dea 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -30,6 +30,10 @@ static struct clk *pll1_sw_clk;
 static struct clk *step_clk;
 static struct clk *pll2_pfd2_396m_clk;
 
+/* clk used by i.MX6UL */
+static struct clk *pll2_bus_clk;
+static struct clk *secondary_sel_clk;
+
 static struct device *cpu_dev;
 static bool free_opp;
 static struct cpufreq_frequency_table *freq_table;
@@ -91,16 +95,36 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 	 * The setpoints are selected per PLL/PDF frequencies, so we need to
 	 * reprogram PLL for frequency scaling.  The procedure of reprogramming
 	 * PLL1 is as below.
-	 *
+	 * For i.MX6UL, it has a secondary clk mux, so the cpu frequency change
+	 * flow is slightly different from other i.MX6 SoCs.
+	 * The cpu frequency change flow for i.MX6 (except i.MX6UL) is as below:
 	 *  - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
 	 *  - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
 	 *  - Disable pll2_pfd2_396m_clk
 	 */
-	clk_set_parent(step_clk, pll2_pfd2_396m_clk);
-	clk_set_parent(pll1_sw_clk, step_clk);
-	if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
-		clk_set_rate(pll1_sys_clk, new_freq * 1000);
+	if (of_machine_is_compatible("fsl,imx6ul")) {
+		/*
+		 * When changing pll1_sw_clk's parent to pll1_sys_clk,
+		 * CPU may run at higher than 528MHz, this will lead to
+		 * the system unstable if the voltage is lower than the
+		 * voltage of 528MHz, so lower the CPU frequency to one
+		 * half before changing CPU frequency.
+		 */
+		clk_set_rate(arm_clk, (old_freq >> 1) * 1000);
 		clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+		if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk))
+			clk_set_parent(secondary_sel_clk, pll2_bus_clk);
+		else
+			clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk);
+		clk_set_parent(step_clk, secondary_sel_clk);
+		clk_set_parent(pll1_sw_clk, step_clk);
+	} else {
+		clk_set_parent(step_clk, pll2_pfd2_396m_clk);
+		clk_set_parent(pll1_sw_clk, step_clk);
+		if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
+			clk_set_rate(pll1_sys_clk, new_freq * 1000);
+			clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+		}
 	}
 
 	/* Ensure the arm clock divider is what we expect */
@@ -186,6 +210,16 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 		goto put_clk;
 	}
 
+	if (of_machine_is_compatible("fsl,imx6ul")) {
+		pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
+		secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
+		if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
+			dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n");
+			ret = -ENOENT;
+			goto put_clk;
+		}
+	}
+
 	arm_reg = regulator_get(cpu_dev, "arm");
 	pu_reg = regulator_get_optional(cpu_dev, "pu");
 	soc_reg = regulator_get(cpu_dev, "soc");
@@ -331,6 +365,10 @@ put_clk:
 		clk_put(step_clk);
 	if (!IS_ERR(pll2_pfd2_396m_clk))
 		clk_put(pll2_pfd2_396m_clk);
+	if (!IS_ERR(pll2_bus_clk))
+		clk_put(pll2_bus_clk);
+	if (!IS_ERR(secondary_sel_clk))
+		clk_put(secondary_sel_clk);
 	of_node_put(np);
 	return ret;
 }
@@ -350,6 +388,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
 	clk_put(pll1_sw_clk);
 	clk_put(step_clk);
 	clk_put(pll2_pfd2_396m_clk);
+	clk_put(pll2_bus_clk);
+	clk_put(secondary_sel_clk);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 2faa4216bf2a..79e3ff2771a6 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -221,6 +221,8 @@ static const struct of_device_id integrator_cpufreq_match[] = {
 	{ },
 };
 
+MODULE_DEVICE_TABLE(of, integrator_cpufreq_match);
+
 static struct platform_driver integrator_cpufreq_driver = {
 	.driver = {
 		.name = "integrator-cpufreq",
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aa33b92b3e3e..93a3c635ea27 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,6 +34,10 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 
+#if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
+#endif
+
 #define BYT_RATIOS		0x66a
 #define BYT_VIDS		0x66b
 #define BYT_TURBO_RATIOS	0x66c
@@ -43,7 +47,6 @@
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
-
 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
@@ -78,6 +81,7 @@ struct pstate_data {
 	int	current_pstate;
 	int	min_pstate;
 	int	max_pstate;
+	int	max_pstate_physical;
 	int	scaling;
 	int	turbo_pstate;
 };
@@ -113,6 +117,9 @@ struct cpudata {
 	u64	prev_mperf;
 	u64	prev_tsc;
 	struct sample sample;
+#if IS_ENABLED(CONFIG_ACPI)
+	struct acpi_processor_performance acpi_perf_data;
+#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -127,6 +134,7 @@ struct pstate_adjust_policy {
 
 struct pstate_funcs {
 	int (*get_max)(void);
+	int (*get_max_physical)(void);
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	int (*get_scaling)(void);
@@ -142,6 +150,7 @@ struct cpu_defaults {
 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
+static int no_acpi_perf;
 
 struct perf_limits {
 	int no_turbo;
@@ -154,9 +163,24 @@ struct perf_limits {
 	int max_sysfs_pct;
 	int min_policy_pct;
 	int min_sysfs_pct;
+	int max_perf_ctl;
+	int min_perf_ctl;
+};
+
+static struct perf_limits performance_limits = {
+	.no_turbo = 0,
+	.turbo_disabled = 0,
+	.max_perf_pct = 100,
+	.max_perf = int_tofp(1),
+	.min_perf_pct = 100,
+	.min_perf = int_tofp(1),
+	.max_policy_pct = 100,
+	.max_sysfs_pct = 100,
+	.min_policy_pct = 0,
+	.min_sysfs_pct = 0,
 };
 
-static struct perf_limits limits = {
+static struct perf_limits powersave_limits = {
 	.no_turbo = 0,
 	.turbo_disabled = 0,
 	.max_perf_pct = 100,
@@ -167,8 +191,163 @@ static struct perf_limits limits = {
 	.max_sysfs_pct = 100,
 	.min_policy_pct = 0,
 	.min_sysfs_pct = 0,
+	.max_perf_ctl = 0,
+	.min_perf_ctl = 0,
 };
 
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+static struct perf_limits *limits = &performance_limits;
+#else
+static struct perf_limits *limits = &powersave_limits;
+#endif
+
+#if IS_ENABLED(CONFIG_ACPI)
+/*
+ * The max target pstate ratio is an 8 bit value in both the PLATFORM_INFO MSR
+ * and the TURBO_RATIO_LIMIT MSR, which the pstate driver stores in the
+ * max_pstate and max_turbo_pstate fields. The PERF_CTL MSR contains a 16 bit
+ * value for the P state ratio, of which only the high 8 bits are used. For
+ * example, 0x1700 sets target ratio 0x17. The _PSS control value is stored in
+ * a format which can be directly written to the PERF_CTL MSR, but in the
+ * intel_pstate driver this shift occurs during the write to PERF_CTL (e.g.
+ * for cores, core_set_pstate()). This function converts the _PSS control
+ * value to the intel_pstate driver format for comparison and assignment.
+ */
+static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
+{
+	return cpu->acpi_perf_data.states[index].control >> 8;
+}
+
+static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu;
+	int ret;
+	bool turbo_absent = false;
+	int max_pstate_index;
+	int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
+	int i;
+
+	cpu = all_cpu_data[policy->cpu];
+
+	pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
+		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
+		 cpu->pstate.turbo_pstate);
+
+	if (!cpu->acpi_perf_data.shared_cpu_map &&
+	    zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
+				    GFP_KERNEL, cpu_to_node(policy->cpu))) {
+		return -ENOMEM;
+	}
+
+	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
+						  policy->cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
+	 * guarantee that the states returned by it map to the states in our
+	 * list directly.
+	 */
+	if (cpu->acpi_perf_data.control_register.space_id !=
+	    ACPI_ADR_SPACE_FIXED_HARDWARE)
+		return -EIO;
+
+	pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
+	for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
+		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
+			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
+			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
+			 (u32) cpu->acpi_perf_data.states[i].power,
+			 (u32) cpu->acpi_perf_data.states[i].control);
+
+	/*
+	 * If there is only one entry in _PSS, simply ignore _PSS and continue
+	 * as usual without taking _PSS into account.
+	 */
+	if (cpu->acpi_perf_data.state_count < 2)
+		return 0;
+
+	turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
+	min_pss_ctl = convert_to_native_pstate_format(cpu,
+					cpu->acpi_perf_data.state_count - 1);
+	/* Check if there is a turbo freq in _PSS */
+	if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
+	    turbo_pss_ctl > cpu->pstate.min_pstate) {
+		pr_debug("intel_pstate: no turbo range exists in _PSS\n");
+		limits->no_turbo = limits->turbo_disabled = 1;
+		cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
+		turbo_absent = true;
+	}
+
+	/* Check if the max non turbo p state < Intel P state max */
+	max_pstate_index = turbo_absent ? 0 : 1;
+	max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
+	if (max_pss_ctl < cpu->pstate.max_pstate &&
+	    max_pss_ctl > cpu->pstate.min_pstate)
+		cpu->pstate.max_pstate = max_pss_ctl;
+
+	/* Check if min perf > Intel P state min */
+	if (min_pss_ctl > cpu->pstate.min_pstate &&
+	    min_pss_ctl < cpu->pstate.max_pstate) {
+		cpu->pstate.min_pstate = min_pss_ctl;
+		policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
+	}
+
+	if (turbo_absent)
+		policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
+						cpu->pstate.scaling;
+	else {
+		policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
+						cpu->pstate.scaling;
+		/*
+		 * The _PSS table doesn't contain the whole turbo frequency
+		 * range. It just contains +1 MHz above the max non turbo
+		 * frequency, with a control value corresponding to the max
+		 * turbo ratio. But when cpufreq set_policy is called, it will
+		 * call with this max frequency, which will cause reduced
+		 * performance, as this driver uses the real max turbo
+		 * frequency as the max frequency. So correct this frequency
+		 * in the _PSS table to the correct max turbo frequency based
+		 * on the turbo ratio. Also need to convert to MHz as the _PSS
+		 * freq is in MHz.
+		 */
+		cpu->acpi_perf_data.states[0].core_frequency =
+						turbo_pss_ctl * 100;
+	}
+
+	pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
+		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
+		 cpu->pstate.turbo_pstate);
+	pr_debug("intel_pstate: policy max_freq=%d KHz min_freq=%d KHz\n",
+		 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
+
+	return 0;
+}
+
+static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu;
+
+	if (!no_acpi_perf)
+		return 0;
+
+	cpu = all_cpu_data[policy->cpu];
+	acpi_processor_unregister_performance(policy->cpu);
+	return 0;
+}
+
+#else
+static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+
+static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+#endif
+
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 			     int deadband, int integral) {
 	pid->setpoint = setpoint;
@@ -255,7 +434,7 @@ static inline void update_turbo_state(void)
 
 	cpu = all_cpu_data[0];
 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
-	limits.turbo_disabled =
+	limits->turbo_disabled =
 		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
@@ -274,14 +453,14 @@ static void intel_pstate_hwp_set(void)
 
 	for_each_online_cpu(cpu) {
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-		adj_range = limits.min_perf_pct * range / 100;
+		adj_range = limits->min_perf_pct * range / 100;
 		min = hw_min + adj_range;
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		adj_range = limits.max_perf_pct * range / 100;
+		adj_range = limits->max_perf_pct * range / 100;
 		max = hw_min + adj_range;
-		if (limits.no_turbo) {
+		if (limits->no_turbo) {
 			hw_max = HWP_GUARANTEED_PERF(cap);
 			if (hw_max < max)
 				max = hw_max;
@@ -350,7 +529,7 @@ static void __init intel_pstate_debug_expose_params(void)
 	static ssize_t show_##file_name					\
 	(struct kobject *kobj, struct attribute *attr, char *buf)	\
 	{								\
-		return sprintf(buf, "%u\n", limits.object);		\
+		return sprintf(buf, "%u\n", limits->object);		\
 	}
 
 static ssize_t show_turbo_pct(struct kobject *kobj,
@@ -386,10 +565,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
 	ssize_t ret;
 
 	update_turbo_state();
-	if (limits.turbo_disabled)
-		ret = sprintf(buf, "%u\n", limits.turbo_disabled);
+	if (limits->turbo_disabled)
+		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
 	else
-		ret = sprintf(buf, "%u\n", limits.no_turbo);
+		ret = sprintf(buf, "%u\n", limits->no_turbo);
 
 	return ret;
 }
@@ -405,12 +584,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 		return -EINVAL;
 
 	update_turbo_state();
-	if (limits.turbo_disabled) {
+	if (limits->turbo_disabled) {
 		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
 		return -EPERM;
 	}
 
-	limits.no_turbo = clamp_t(int, input, 0, 1);
+	limits->no_turbo = clamp_t(int, input, 0, 1);
 
 	if (hwp_active)
 		intel_pstate_hwp_set();
@@ -428,11 +607,15 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
-	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
-	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
-	limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
-	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+	limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
+	limits->max_perf_pct = min(limits->max_policy_pct,
+				   limits->max_sysfs_pct);
+	limits->max_perf_pct = max(limits->min_policy_pct,
+				   limits->max_perf_pct);
+	limits->max_perf_pct = max(limits->min_perf_pct,
+				   limits->max_perf_pct);
+	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
+				  int_tofp(100));
 
 	if (hwp_active)
 		intel_pstate_hwp_set();
@@ -449,11 +632,15 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
-	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
-	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
-	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+	limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
+	limits->min_perf_pct = max(limits->min_policy_pct,
+				   limits->min_sysfs_pct);
+	limits->min_perf_pct = min(limits->max_policy_pct,
+				   limits->min_perf_pct);
+	limits->min_perf_pct = min(limits->max_perf_pct,
+				   limits->min_perf_pct);
+	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
+				  int_tofp(100));
 
 	if (hwp_active)
 		intel_pstate_hwp_set();
@@ -533,7 +720,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
 	u32 vid;
 
 	val = (u64)pstate << 8;
-	if (limits.no_turbo && !limits.turbo_disabled)
+	if (limits->no_turbo && !limits->turbo_disabled)
 		val |= (u64)1 << 32;
 
 	vid_fp = cpudata->vid.min + mul_fp(
@@ -591,7 +778,7 @@ static int core_get_min_pstate(void)
 	return (value >> 40) & 0xFF;
 }
 
-static int core_get_max_pstate(void)
+static int core_get_max_pstate_physical(void)
 {
 	u64 value;
 
@@ -599,6 +786,46 @@
 	return (value >> 8) & 0xFF;
 }
 
+static int core_get_max_pstate(void)
+{
+	u64 tar;
+	u64 plat_info;
+	int max_pstate;
+	int err;
+
+	rdmsrl(MSR_PLATFORM_INFO, plat_info);
+	max_pstate = (plat_info >> 8) & 0xFF;
+
+	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
+	if (!err) {
+		/* Do some sanity checking for safety */
+		if (plat_info & 0x600000000) {
+			u64 tdp_ctrl;
+			u64 tdp_ratio;
+			int tdp_msr;
+
+			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+			if (err)
+				goto skip_tar;
+
+			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
+			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
+			if (err)
+				goto skip_tar;
+
+			if (tdp_ratio - 1 == tar) {
+				max_pstate = tar;
+				pr_debug("max_pstate=TAC %x\n", max_pstate);
+			} else {
+				goto skip_tar;
+			}
+		}
+	}
+
+skip_tar:
+	return max_pstate;
+}
+
 static int core_get_turbo_pstate(void)
 {
 	u64 value;
@@ -622,7 +849,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 	u64 val;
 
 	val = (u64)pstate << 8;
-	if (limits.no_turbo && !limits.turbo_disabled)
+	if (limits->no_turbo && !limits->turbo_disabled)
 		val |= (u64)1 << 32;
 
 	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -652,6 +879,7 @@ static struct cpu_defaults core_params = {
 	},
 	.funcs = {
 		.get_max = core_get_max_pstate,
+		.get_max_physical = core_get_max_pstate_physical,
 		.get_min = core_get_min_pstate,
 		.get_turbo = core_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
@@ -670,6 +898,7 @@ static struct cpu_defaults byt_params = {
 	},
 	.funcs = {
 		.get_max = byt_get_max_pstate,
+		.get_max_physical = byt_get_max_pstate,
 		.get_min = byt_get_min_pstate,
 		.get_turbo = byt_get_turbo_pstate,
 		.set = byt_set_pstate,
@@ -689,6 +918,7 @@ static struct cpu_defaults knl_params = {
 	},
 	.funcs = {
 		.get_max = core_get_max_pstate,
+		.get_max_physical = core_get_max_pstate_physical,
 		.get_min = core_get_min_pstate,
 		.get_turbo = knl_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
@@ -702,7 +932,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	int max_perf_adj;
 	int min_perf;
 
-	if (limits.no_turbo || limits.turbo_disabled)
+	if (limits->no_turbo || limits->turbo_disabled)
 		max_perf = cpu->pstate.max_pstate;
 
 	/*
@@ -710,12 +940,23 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	 * policy, or by cpu specific default values determined through
 	 * experimentation.
 	 */
-	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
-	*max = clamp_t(int, max_perf_adj,
-			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
+	if (limits->max_perf_ctl && limits->max_sysfs_pct >=
+	    limits->max_policy_pct) {
+		*max = limits->max_perf_ctl;
+	} else {
+		max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
+					       limits->max_perf));
+		*max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
+			       cpu->pstate.turbo_pstate);
+	}
 
-	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
-	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
+	if (limits->min_perf_ctl) {
+		*min = limits->min_perf_ctl;
+	} else {
+		min_perf = fp_toint(mul_fp(int_tofp(max_perf),
+					   limits->min_perf));
+		*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
+	}
 }
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@@ -743,6 +984,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
 	cpu->pstate.min_pstate = pstate_funcs.get_min();
 	cpu->pstate.max_pstate = pstate_funcs.get_max();
+	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
 	cpu->pstate.scaling = pstate_funcs.get_scaling();
 
@@ -761,7 +1003,8 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
 
 	sample->freq = fp_toint(
 		mul_fp(int_tofp(
-			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
+			cpu->pstate.max_pstate_physical *
+			cpu->pstate.scaling / 100),
 			core_pct));
 
 	sample->core_pct_busy = (int32_t)core_pct;
@@ -834,7 +1077,7 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	 * specified pstate.
 	 */
 	core_busy = cpu->sample.core_pct_busy;
-	max_pstate = int_tofp(cpu->pstate.max_pstate);
+	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
@@ -988,37 +1231,63 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
+#if IS_ENABLED(CONFIG_ACPI)
+	struct cpudata *cpu;
+	int i;
+#endif
+	pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
+		 policy->cpuinfo.max_freq, policy->max);
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
 
 	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
 	    policy->max >= policy->cpuinfo.max_freq) {
-		limits.min_policy_pct = 100;
-		limits.min_perf_pct = 100;
-		limits.min_perf = int_tofp(1);
-		limits.max_policy_pct = 100;
-		limits.max_perf_pct = 100;
-		limits.max_perf = int_tofp(1);
-		limits.no_turbo = 0;
+		pr_debug("intel_pstate: set performance\n");
+		limits = &performance_limits;
 		return 0;
 	}
 
-	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
-	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
-	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
-	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+	pr_debug("intel_pstate: set powersave\n");
+	limits = &powersave_limits;
+	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
+	limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
+	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
 
 	/* Normalize user input to [min_policy_pct, max_policy_pct] */
-	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
-	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
-	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+	limits->min_perf_pct = max(limits->min_policy_pct,
+				   limits->min_sysfs_pct);
+	limits->min_perf_pct = min(limits->max_policy_pct,
+				   limits->min_perf_pct);
+	limits->max_perf_pct = min(limits->max_policy_pct,
+				   limits->max_sysfs_pct);
+	limits->max_perf_pct = max(limits->min_policy_pct,
+				   limits->max_perf_pct);
 
 	/* Make sure min_perf_pct <= max_perf_pct */
-	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
+	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
-	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
+				  int_tofp(100));
+	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
+				  int_tofp(100));
+
+#if IS_ENABLED(CONFIG_ACPI)
+	cpu = all_cpu_data[policy->cpu];
+	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
+		int control;
+
+		control = convert_to_native_pstate_format(cpu, i);
+		if (control * cpu->pstate.scaling == policy->max)
+			limits->max_perf_ctl = control;
+		if (control * cpu->pstate.scaling == policy->min)
+			limits->min_perf_ctl = control;
+	}
+
+	pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
+		 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
+		 limits->max_perf_ctl);
+#endif
 
 	if (hwp_active)
 		intel_pstate_hwp_set();
@@ -1062,7 +1331,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpu = all_cpu_data[policy->cpu];
 
-	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
 		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
 	else
 		policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -1074,18 +1343,30 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->cpuinfo.max_freq =
 		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+	if (!no_acpi_perf)
+		intel_pstate_init_perf_limits(policy);
+	/*
+	 * If there is no ACPI perf data, or on error, we ignore it and use
+	 * the Intel P state calculated limits, so this is not a fatal error.
+	 */
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
 
 	return 0;
 }
 
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+	return intel_pstate_exit_perf_limits(policy);
+}
+
 static struct cpufreq_driver intel_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
 	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
+	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
 	.name		= "intel_pstate",
 };
@@ -1118,6 +1399,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
 static void copy_cpu_funcs(struct pstate_funcs *funcs)
 {
 	pstate_funcs.get_max   = funcs->get_max;
+	pstate_funcs.get_max_physical = funcs->get_max_physical;
 	pstate_funcs.get_min   = funcs->get_min;
 	pstate_funcs.get_turbo = funcs->get_turbo;
 	pstate_funcs.get_scaling = funcs->get_scaling;
@@ -1126,7 +1408,6 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1318,6 +1599,9 @@ static int __init intel_pstate_setup(char *str)
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
 		hwp_only = 1;
+	if (!strcmp(str, "no_acpi"))
+		no_acpi_perf = 1;
+
 	return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
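As a worked example of the FRAC_BITS fixed-point arithmetic that the limits bookkeeping above relies on (a standalone sketch; div_fp() is written with plain division for illustration where the driver uses div_s64()):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
        return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
        /* min_perf_pct = 75 becomes the 8.8 fixed-point fraction 192/256, */
        int32_t min_perf = div_fp(int_tofp(75), int_tofp(100));

        /* which scales a max P-state of 32 down to 32 * 0.75 = 24. */
        printf("%d\n", fp_toint(mul_fp(int_tofp(32), min_perf)));
        return 0;
}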
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 64994e10638e..cb501386eb6e 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -327,8 +327,14 @@ static void powernv_cpufreq_throttle_check(void *data)
 			if (chips[i].throttled)
 				goto next;
 			chips[i].throttled = true;
-			pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
-				chips[i].id, pmsr_pmax);
+			if (pmsr_pmax < powernv_pstate_info.nominal)
+				pr_crit("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+					cpu, chips[i].id, pmsr_pmax,
+					powernv_pstate_info.nominal);
+			else
+				pr_info("CPU %d on Chip %u has Pmax reduced below turbo frequency (%d < %d)\n",
+					cpu, chips[i].id, pmsr_pmax,
+					powernv_pstate_info.max);
 		} else if (chips[i].throttled) {
 			chips[i].throttled = false;
 			pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index 8084c7f7e206..2bd62845e9d5 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -175,9 +175,7 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
 	.exit		= tegra_cpu_exit,
 	.name		= "tegra",
 	.attr		= cpufreq_generic_attr,
-#ifdef CONFIG_PM
 	.suspend	= cpufreq_generic_suspend,
-#endif
 };
 
 static int __init tegra_cpufreq_init(void)
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 980151f34707..01a856971f05 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -99,44 +99,40 @@ static struct cpuidle_driver armada38x_idle_driver = {
 
 static int mvebu_v7_cpuidle_probe(struct platform_device *pdev)
 {
-	mvebu_v7_cpu_suspend = pdev->dev.platform_data;
+	const struct platform_device_id *id = pdev->id_entry;
 
-	if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-xp"))
-		return cpuidle_register(&armadaxp_idle_driver, NULL);
-	else if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-370"))
-		return cpuidle_register(&armada370_idle_driver, NULL);
-	else if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-38x"))
-		return cpuidle_register(&armada38x_idle_driver, NULL);
-	else
+	if (!id)
 		return -EINVAL;
-}
 
-static struct platform_driver armadaxp_cpuidle_plat_driver = {
-	.driver = {
-		.name = "cpuidle-armada-xp",
-	},
-	.probe = mvebu_v7_cpuidle_probe,
-};
+	mvebu_v7_cpu_suspend = pdev->dev.platform_data;
 
-module_platform_driver(armadaxp_cpuidle_plat_driver);
+	return cpuidle_register((struct cpuidle_driver *)id->driver_data, NULL);
+}
 
-static struct platform_driver armada370_cpuidle_plat_driver = {
-	.driver = {
+static const struct platform_device_id mvebu_cpuidle_ids[] = {
+	{
+		.name = "cpuidle-armada-xp",
+		.driver_data = (unsigned long)&armadaxp_idle_driver,
+	}, {
 		.name = "cpuidle-armada-370",
+		.driver_data = (unsigned long)&armada370_idle_driver,
+	}, {
+		.name = "cpuidle-armada-38x",
+		.driver_data = (unsigned long)&armada38x_idle_driver,
 	},
-	.probe = mvebu_v7_cpuidle_probe,
+	{}
 };
 
-module_platform_driver(armada370_cpuidle_plat_driver);
-
-static struct platform_driver armada38x_cpuidle_plat_driver = {
-	.driver = {
-		.name = "cpuidle-armada-38x",
+static struct platform_driver mvebu_cpuidle_driver = {
+	.probe = mvebu_v7_cpuidle_probe,
+	.driver = {
+		.name = "cpuidle-mbevu",
+		.suppress_bind_attrs = true,
 	},
-	.probe = mvebu_v7_cpuidle_probe,
+	.id_table = mvebu_cpuidle_ids,
 };
 
-module_platform_driver(armada38x_cpuidle_plat_driver);
+builtin_platform_driver(mvebu_cpuidle_driver);
 
 MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
 MODULE_DESCRIPTION("Marvell EBU v7 cpuidle driver");
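The consolidation above is the standard platform_device_id pattern: one platform driver serving several device names, with the per-variant cpuidle driver recovered from the matched id-table entry. A minimal sketch of that lookup (function name hypothetical):

/* The platform core sets pdev->id_entry to the id-table entry that
 * matched the device name; driver_data carries the variant pointer.
 */
static int example_cpuidle_probe(struct platform_device *pdev)
{
        const struct platform_device_id *id = pdev->id_entry;

        if (!id)        /* matched by driver name only; no id entry */
                return -EINVAL;

        return cpuidle_register((struct cpuidle_driver *)id->driver_data,
                                NULL);
}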
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index dca22de98d94..ef4c5b1a860f 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -65,7 +65,6 @@ struct cpufreq_policy {
 	unsigned int		shared_type; /* ACPI: ANY or ALL affected CPUs
 						should set cpufreq */
 	unsigned int		cpu;    /* cpu managing this policy, must be online */
-	unsigned int		kobj_cpu; /* cpu managing sysfs files, can be offline */
 
 	struct clk		*clk;
 	struct cpufreq_cpuinfo	cpuinfo;/* see above */
@@ -149,10 +148,6 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy)
 
 /* /sys/devices/system/cpu/cpufreq: entry point for global variables */
 extern struct kobject *cpufreq_global_kobject;
-int cpufreq_get_global_kobject(void);
-void cpufreq_put_global_kobject(void);
-int cpufreq_sysfs_create_file(const struct attribute *attr);
-void cpufreq_sysfs_remove_file(const struct attribute *attr);
 
 #ifdef CONFIG_CPU_FREQ
 unsigned int cpufreq_get(unsigned int cpu);