diff options
author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-04-25 09:44:01 -0400 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-04-25 09:44:01 -0400 |
commit | 1cbc99dfe5d7d686fd022647f4e489b5eb8e9068 (patch) | |
tree | 735e0254f6f491442d75e683eb9eb3c7ac685111 /drivers/cpufreq/cpufreq.c | |
parent | 94862a62dfe3ba1c7601115a2dc80721c5b256f0 (diff) | |
parent | 8cee1eed8e78143aa2ed60308fb88e2d6fa46205 (diff) |
Merge back cpufreq changes for v4.7.
Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r-- | drivers/cpufreq/cpufreq.c | 164 |
1 file changed, 141 insertions, 23 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index e93405f0eac4..a48b998b3304 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -78,6 +78,11 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); | |||
78 | static unsigned int __cpufreq_get(struct cpufreq_policy *policy); | 78 | static unsigned int __cpufreq_get(struct cpufreq_policy *policy); |
79 | static int cpufreq_start_governor(struct cpufreq_policy *policy); | 79 | static int cpufreq_start_governor(struct cpufreq_policy *policy); |
80 | 80 | ||
81 | static inline int cpufreq_exit_governor(struct cpufreq_policy *policy) | ||
82 | { | ||
83 | return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); | ||
84 | } | ||
85 | |||
81 | /** | 86 | /** |
82 | * Two notifier lists: the "policy" list is involved in the | 87 | * Two notifier lists: the "policy" list is involved in the |
83 | * validation process for a new CPU frequency policy; the | 88 | * validation process for a new CPU frequency policy; the |
@@ -429,6 +434,73 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy, | |||
429 | } | 434 | } |
430 | EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); | 435 | EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); |
431 | 436 | ||
437 | /* | ||
438 | * Fast frequency switching status count. Positive means "enabled", negative | ||
439 | * means "disabled" and 0 means "not decided yet". | ||
440 | */ | ||
441 | static int cpufreq_fast_switch_count; | ||
442 | static DEFINE_MUTEX(cpufreq_fast_switch_lock); | ||
443 | |||
444 | static void cpufreq_list_transition_notifiers(void) | ||
445 | { | ||
446 | struct notifier_block *nb; | ||
447 | |||
448 | pr_info("Registered transition notifiers:\n"); | ||
449 | |||
450 | mutex_lock(&cpufreq_transition_notifier_list.mutex); | ||
451 | |||
452 | for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next) | ||
453 | pr_info("%pF\n", nb->notifier_call); | ||
454 | |||
455 | mutex_unlock(&cpufreq_transition_notifier_list.mutex); | ||
456 | } | ||
457 | |||
458 | /** | ||
459 | * cpufreq_enable_fast_switch - Enable fast frequency switching for policy. | ||
460 | * @policy: cpufreq policy to enable fast frequency switching for. | ||
461 | * | ||
462 | * Try to enable fast frequency switching for @policy. | ||
463 | * | ||
464 | * The attempt will fail if there is at least one transition notifier registered | ||
465 | * at this point, as fast frequency switching is quite fundamentally at odds | ||
466 | * with transition notifiers. Thus if successful, it will make registration of | ||
467 | * transition notifiers fail going forward. | ||
468 | */ | ||
469 | void cpufreq_enable_fast_switch(struct cpufreq_policy *policy) | ||
470 | { | ||
471 | lockdep_assert_held(&policy->rwsem); | ||
472 | |||
473 | if (!policy->fast_switch_possible) | ||
474 | return; | ||
475 | |||
476 | mutex_lock(&cpufreq_fast_switch_lock); | ||
477 | if (cpufreq_fast_switch_count >= 0) { | ||
478 | cpufreq_fast_switch_count++; | ||
479 | policy->fast_switch_enabled = true; | ||
480 | } else { | ||
481 | pr_warn("CPU%u: Fast frequency switching not enabled\n", | ||
482 | policy->cpu); | ||
483 | cpufreq_list_transition_notifiers(); | ||
484 | } | ||
485 | mutex_unlock(&cpufreq_fast_switch_lock); | ||
486 | } | ||
487 | EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch); | ||
488 | |||
489 | /** | ||
490 | * cpufreq_disable_fast_switch - Disable fast frequency switching for policy. | ||
491 | * @policy: cpufreq policy to disable fast frequency switching for. | ||
492 | */ | ||
493 | void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) | ||
494 | { | ||
495 | mutex_lock(&cpufreq_fast_switch_lock); | ||
496 | if (policy->fast_switch_enabled) { | ||
497 | policy->fast_switch_enabled = false; | ||
498 | if (!WARN_ON(cpufreq_fast_switch_count <= 0)) | ||
499 | cpufreq_fast_switch_count--; | ||
500 | } | ||
501 | mutex_unlock(&cpufreq_fast_switch_lock); | ||
502 | } | ||
503 | EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); | ||
432 | 504 | ||
433 | /********************************************************************* | 505 | /********************************************************************* |
434 | * SYSFS INTERFACE * | 506 | * SYSFS INTERFACE * |
@@ -1248,26 +1320,24 @@ out_free_policy: | |||
1248 | */ | 1320 | */ |
1249 | static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) | 1321 | static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) |
1250 | { | 1322 | { |
1323 | struct cpufreq_policy *policy; | ||
1251 | unsigned cpu = dev->id; | 1324 | unsigned cpu = dev->id; |
1252 | int ret; | ||
1253 | 1325 | ||
1254 | dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu); | 1326 | dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu); |
1255 | 1327 | ||
1256 | if (cpu_online(cpu)) { | 1328 | if (cpu_online(cpu)) |
1257 | ret = cpufreq_online(cpu); | 1329 | return cpufreq_online(cpu); |
1258 | } else { | ||
1259 | /* | ||
1260 | * A hotplug notifier will follow and we will handle it as CPU | ||
1261 | * online then. For now, just create the sysfs link, unless | ||
1262 | * there is no policy or the link is already present. | ||
1263 | */ | ||
1264 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); | ||
1265 | 1330 | ||
1266 | ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus) | 1331 | /* |
1267 | ? add_cpu_dev_symlink(policy, cpu) : 0; | 1332 | * A hotplug notifier will follow and we will handle it as CPU online |
1268 | } | 1333 | * then. For now, just create the sysfs link, unless there is no policy |
1334 | * or the link is already present. | ||
1335 | */ | ||
1336 | policy = per_cpu(cpufreq_cpu_data, cpu); | ||
1337 | if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus)) | ||
1338 | return 0; | ||
1269 | 1339 | ||
1270 | return ret; | 1340 | return add_cpu_dev_symlink(policy, cpu); |
1271 | } | 1341 | } |
1272 | 1342 | ||
1273 | static void cpufreq_offline(unsigned int cpu) | 1343 | static void cpufreq_offline(unsigned int cpu) |
@@ -1319,7 +1389,7 @@ static void cpufreq_offline(unsigned int cpu) | |||
1319 | 1389 | ||
1320 | /* If cpu is last user of policy, free policy */ | 1390 | /* If cpu is last user of policy, free policy */ |
1321 | if (has_target()) { | 1391 | if (has_target()) { |
1322 | ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); | 1392 | ret = cpufreq_exit_governor(policy); |
1323 | if (ret) | 1393 | if (ret) |
1324 | pr_err("%s: Failed to exit governor\n", __func__); | 1394 | pr_err("%s: Failed to exit governor\n", __func__); |
1325 | } | 1395 | } |
@@ -1447,8 +1517,12 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy) | |||
1447 | 1517 | ||
1448 | ret_freq = cpufreq_driver->get(policy->cpu); | 1518 | ret_freq = cpufreq_driver->get(policy->cpu); |
1449 | 1519 | ||
1450 | /* Updating inactive policies is invalid, so avoid doing that. */ | 1520 | /* |
1451 | if (unlikely(policy_is_inactive(policy))) | 1521 | * Updating inactive policies is invalid, so avoid doing that. Also |
1522 | * if fast frequency switching is used with the given policy, the check | ||
1523 | * against policy->cur is pointless, so skip it in that case too. | ||
1524 | */ | ||
1525 | if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) | ||
1452 | return ret_freq; | 1526 | return ret_freq; |
1453 | 1527 | ||
1454 | if (ret_freq && policy->cur && | 1528 | if (ret_freq && policy->cur && |
@@ -1675,8 +1749,18 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) | |||
1675 | 1749 | ||
1676 | switch (list) { | 1750 | switch (list) { |
1677 | case CPUFREQ_TRANSITION_NOTIFIER: | 1751 | case CPUFREQ_TRANSITION_NOTIFIER: |
1752 | mutex_lock(&cpufreq_fast_switch_lock); | ||
1753 | |||
1754 | if (cpufreq_fast_switch_count > 0) { | ||
1755 | mutex_unlock(&cpufreq_fast_switch_lock); | ||
1756 | return -EBUSY; | ||
1757 | } | ||
1678 | ret = srcu_notifier_chain_register( | 1758 | ret = srcu_notifier_chain_register( |
1679 | &cpufreq_transition_notifier_list, nb); | 1759 | &cpufreq_transition_notifier_list, nb); |
1760 | if (!ret) | ||
1761 | cpufreq_fast_switch_count--; | ||
1762 | |||
1763 | mutex_unlock(&cpufreq_fast_switch_lock); | ||
1680 | break; | 1764 | break; |
1681 | case CPUFREQ_POLICY_NOTIFIER: | 1765 | case CPUFREQ_POLICY_NOTIFIER: |
1682 | ret = blocking_notifier_chain_register( | 1766 | ret = blocking_notifier_chain_register( |
@@ -1709,8 +1793,14 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) | |||
1709 | 1793 | ||
1710 | switch (list) { | 1794 | switch (list) { |
1711 | case CPUFREQ_TRANSITION_NOTIFIER: | 1795 | case CPUFREQ_TRANSITION_NOTIFIER: |
1796 | mutex_lock(&cpufreq_fast_switch_lock); | ||
1797 | |||
1712 | ret = srcu_notifier_chain_unregister( | 1798 | ret = srcu_notifier_chain_unregister( |
1713 | &cpufreq_transition_notifier_list, nb); | 1799 | &cpufreq_transition_notifier_list, nb); |
1800 | if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0)) | ||
1801 | cpufreq_fast_switch_count++; | ||
1802 | |||
1803 | mutex_unlock(&cpufreq_fast_switch_lock); | ||
1714 | break; | 1804 | break; |
1715 | case CPUFREQ_POLICY_NOTIFIER: | 1805 | case CPUFREQ_POLICY_NOTIFIER: |
1716 | ret = blocking_notifier_chain_unregister( | 1806 | ret = blocking_notifier_chain_unregister( |
@@ -1729,6 +1819,37 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); | |||
1729 | * GOVERNORS * | 1819 | * GOVERNORS * |
1730 | *********************************************************************/ | 1820 | *********************************************************************/ |
1731 | 1821 | ||
1822 | /** | ||
1823 | * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch. | ||
1824 | * @policy: cpufreq policy to switch the frequency for. | ||
1825 | * @target_freq: New frequency to set (may be approximate). | ||
1826 | * | ||
1827 | * Carry out a fast frequency switch without sleeping. | ||
1828 | * | ||
1829 | * The driver's ->fast_switch() callback invoked by this function must be | ||
1830 | * suitable for being called from within RCU-sched read-side critical sections | ||
1831 | * and it is expected to select the minimum available frequency greater than or | ||
1832 | * equal to @target_freq (CPUFREQ_RELATION_L). | ||
1833 | * | ||
1834 | * This function must not be called if policy->fast_switch_enabled is unset. | ||
1835 | * | ||
1836 | * Governors calling this function must guarantee that it will never be invoked | ||
1837 | * twice in parallel for the same policy and that it will never be called in | ||
1838 | * parallel with either ->target() or ->target_index() for the same policy. | ||
1839 | * | ||
1840 | * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch() | ||
1841 | * callback to indicate an error condition, the hardware configuration must be | ||
1842 | * preserved. | ||
1843 | */ | ||
1844 | unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, | ||
1845 | unsigned int target_freq) | ||
1846 | { | ||
1847 | target_freq = clamp_val(target_freq, policy->min, policy->max); | ||
1848 | |||
1849 | return cpufreq_driver->fast_switch(policy, target_freq); | ||
1850 | } | ||
1851 | EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch); | ||
1852 | |||
1732 | /* Must set freqs->new to intermediate frequency */ | 1853 | /* Must set freqs->new to intermediate frequency */ |
1733 | static int __target_intermediate(struct cpufreq_policy *policy, | 1854 | static int __target_intermediate(struct cpufreq_policy *policy, |
1734 | struct cpufreq_freqs *freqs, int index) | 1855 | struct cpufreq_freqs *freqs, int index) |
@@ -2104,7 +2225,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, | |||
2104 | return ret; | 2225 | return ret; |
2105 | } | 2226 | } |
2106 | 2227 | ||
2107 | ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); | 2228 | ret = cpufreq_exit_governor(policy); |
2108 | if (ret) { | 2229 | if (ret) { |
2109 | pr_err("%s: Failed to Exit Governor: %s (%d)\n", | 2230 | pr_err("%s: Failed to Exit Governor: %s (%d)\n", |
2110 | __func__, old_gov->name, ret); | 2231 | __func__, old_gov->name, ret); |
@@ -2121,7 +2242,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, | |||
2121 | pr_debug("cpufreq: governor change\n"); | 2242 | pr_debug("cpufreq: governor change\n"); |
2122 | return 0; | 2243 | return 0; |
2123 | } | 2244 | } |
2124 | cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); | 2245 | cpufreq_exit_governor(policy); |
2125 | } | 2246 | } |
2126 | 2247 | ||
2127 | /* new governor failed, so re-start old one */ | 2248 | /* new governor failed, so re-start old one */ |
@@ -2189,16 +2310,13 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, | |||
2189 | 2310 | ||
2190 | switch (action & ~CPU_TASKS_FROZEN) { | 2311 | switch (action & ~CPU_TASKS_FROZEN) { |
2191 | case CPU_ONLINE: | 2312 | case CPU_ONLINE: |
2313 | case CPU_DOWN_FAILED: | ||
2192 | cpufreq_online(cpu); | 2314 | cpufreq_online(cpu); |
2193 | break; | 2315 | break; |
2194 | 2316 | ||
2195 | case CPU_DOWN_PREPARE: | 2317 | case CPU_DOWN_PREPARE: |
2196 | cpufreq_offline(cpu); | 2318 | cpufreq_offline(cpu); |
2197 | break; | 2319 | break; |
2198 | |||
2199 | case CPU_DOWN_FAILED: | ||
2200 | cpufreq_online(cpu); | ||
2201 | break; | ||
2202 | } | 2320 | } |
2203 | return NOTIFY_OK; | 2321 | return NOTIFY_OK; |
2204 | } | 2322 | } |