diff options
author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-03-29 21:47:49 -0400 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-04-01 19:09:03 -0400 |
commit | b7898fda5bc7e786e76ce24fbd2ec993b08ec518 (patch) | |
tree | 2bf721763c79a7fe0eca25349a549c52ba6ce483 | |
parent | 379480d8258056bfdbaa65e4d3f024bb5b34b52b (diff) |
cpufreq: Support for fast frequency switching
Modify the ACPI cpufreq driver to provide a method for switching
CPU frequencies from interrupt context and update the cpufreq core
to support that method if available.
Introduce a new cpufreq driver callback, ->fast_switch, to be
invoked for frequency switching from interrupt context by (future)
governors supporting that feature via (new) helper function
cpufreq_driver_fast_switch().
Add two new policy flags: fast_switch_possible, to be set by the
cpufreq driver if fast frequency switching can be used for the
given policy, and fast_switch_enabled, to be set by the governor
if it is going to use fast frequency switching for the given
policy. Also add a helper for setting the latter.
Since fast frequency switching is inherently incompatible with
cpufreq transition notifiers, make it possible to set
fast_switch_enabled only if there are no transition notifiers
already registered, and make the registration of new transition
notifiers fail if fast_switch_enabled is set for at least one
policy.
Implement the ->fast_switch callback in the ACPI cpufreq driver
and make it set fast_switch_possible during policy initialization
as appropriate.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
-rw-r--r-- | drivers/cpufreq/acpi-cpufreq.c | 42 | ||||
-rw-r--r-- | drivers/cpufreq/cpufreq.c | 130 | ||||
-rw-r--r-- | include/linux/cpufreq.h | 16 |
3 files changed, 183 insertions, 5 deletions
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index fb5712141040..7f38fb55f223 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c | |||
@@ -458,6 +458,43 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
458 | return result; | 458 | return result; |
459 | } | 459 | } |
460 | 460 | ||
/*
 * acpi_cpufreq_fast_switch - Switch frequency without sleeping.
 * @policy: cpufreq policy for the CPU(s) being switched.
 * @target_freq: Desired frequency in kHz (may be approximate).
 *
 * Select the lowest table frequency at or above @target_freq
 * (CPUFREQ_RELATION_L semantics) and program the corresponding P-state.
 * Invoked from interrupt/scheduler context via the driver's ->fast_switch
 * callback, so it must not sleep.
 *
 * Returns the frequency actually selected.
 *
 * NOTE(review): not declared static although its only visible user is the
 * .fast_switch member of acpi_cpufreq_driver below - confirm there is no
 * external declaration before adding static.
 */
unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
				      unsigned int target_freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *entry;
	unsigned int next_perf_state, next_freq, freq;

	/*
	 * Find the closest frequency above target_freq.
	 *
	 * The table is sorted in the reverse order with respect to the
	 * frequency and all of the entries are valid (see the initialization).
	 */
	entry = data->freq_table;
	do {
		entry++;
		freq = entry->frequency;
	} while (freq >= target_freq && freq != CPUFREQ_TABLE_END);
	/* Step back to the last entry whose frequency is >= target_freq. */
	entry--;
	next_freq = entry->frequency;
	next_perf_state = entry->driver_data;

	perf = to_perf_data(data);
	if (perf->state == next_perf_state) {
		/*
		 * Already in the target P-state.  After resume, though, the
		 * hardware state may not match our cached one, so write the
		 * control register once anyway and clear the resume flag;
		 * otherwise skip the redundant write.
		 */
		if (unlikely(data->resume))
			data->resume = 0;
		else
			return next_freq;
	}

	/*
	 * Program the new P-state via the per-platform write helper
	 * (presumably an MSR or I/O port write chosen at init - set
	 * elsewhere, not visible in this hunk).
	 */
	data->cpu_freq_write(&perf->control_register,
			     perf->states[next_perf_state].control);
	perf->state = next_perf_state;
	return next_freq;
}
497 | |||
461 | static unsigned long | 498 | static unsigned long |
462 | acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) | 499 | acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) |
463 | { | 500 | { |
@@ -821,6 +858,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
821 | */ | 858 | */ |
822 | data->resume = 1; | 859 | data->resume = 1; |
823 | 860 | ||
861 | policy->fast_switch_possible = !acpi_pstate_strict && | ||
862 | !(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY); | ||
863 | |||
824 | return result; | 864 | return result; |
825 | 865 | ||
826 | err_freqfree: | 866 | err_freqfree: |
@@ -843,6 +883,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
843 | pr_debug("acpi_cpufreq_cpu_exit\n"); | 883 | pr_debug("acpi_cpufreq_cpu_exit\n"); |
844 | 884 | ||
845 | if (data) { | 885 | if (data) { |
886 | policy->fast_switch_possible = false; | ||
846 | policy->driver_data = NULL; | 887 | policy->driver_data = NULL; |
847 | acpi_processor_unregister_performance(data->acpi_perf_cpu); | 888 | acpi_processor_unregister_performance(data->acpi_perf_cpu); |
848 | free_cpumask_var(data->freqdomain_cpus); | 889 | free_cpumask_var(data->freqdomain_cpus); |
@@ -876,6 +917,7 @@ static struct freq_attr *acpi_cpufreq_attr[] = { | |||
876 | static struct cpufreq_driver acpi_cpufreq_driver = { | 917 | static struct cpufreq_driver acpi_cpufreq_driver = { |
877 | .verify = cpufreq_generic_frequency_table_verify, | 918 | .verify = cpufreq_generic_frequency_table_verify, |
878 | .target_index = acpi_cpufreq_target, | 919 | .target_index = acpi_cpufreq_target, |
920 | .fast_switch = acpi_cpufreq_fast_switch, | ||
879 | .bios_limit = acpi_processor_get_bios_limit, | 921 | .bios_limit = acpi_processor_get_bios_limit, |
880 | .init = acpi_cpufreq_cpu_init, | 922 | .init = acpi_cpufreq_cpu_init, |
881 | .exit = acpi_cpufreq_cpu_exit, | 923 | .exit = acpi_cpufreq_cpu_exit, |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b87596b591b3..a5b7d77d4816 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -77,6 +77,7 @@ static inline bool has_target(void) | |||
77 | static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); | 77 | static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); |
78 | static unsigned int __cpufreq_get(struct cpufreq_policy *policy); | 78 | static unsigned int __cpufreq_get(struct cpufreq_policy *policy); |
79 | static int cpufreq_start_governor(struct cpufreq_policy *policy); | 79 | static int cpufreq_start_governor(struct cpufreq_policy *policy); |
80 | static int cpufreq_exit_governor(struct cpufreq_policy *policy); | ||
80 | 81 | ||
81 | /** | 82 | /** |
82 | * Two notifier lists: the "policy" list is involved in the | 83 | * Two notifier lists: the "policy" list is involved in the |
@@ -429,6 +430,68 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy, | |||
429 | } | 430 | } |
430 | EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); | 431 | EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); |
431 | 432 | ||
433 | /* | ||
434 | * Fast frequency switching status count. Positive means "enabled", negative | ||
435 | * means "disabled" and 0 means "not decided yet". | ||
436 | */ | ||
437 | static int cpufreq_fast_switch_count; | ||
438 | static DEFINE_MUTEX(cpufreq_fast_switch_lock); | ||
439 | |||
/*
 * Log the callbacks currently on the transition notifier chain, to help
 * diagnose why cpufreq_enable_fast_switch() refused to enable fast
 * switching for a policy.
 *
 * Walks the SRCU notifier chain's list directly under the chain's own
 * mutex; %pF prints the symbolic name of each notifier callback.
 */
static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pF\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
453 | |||
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	/* Governors call this with the policy rwsem held. */
	lockdep_assert_held(&policy->rwsem);

	/* The driver must have opted in (fast_switch_possible) at init. */
	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	/*
	 * cpufreq_fast_switch_count: positive means fast switching is in
	 * use, negative means transition notifiers are registered, zero
	 * means not decided yet (see its declaration above).
	 */
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
484 | |||
/*
 * cpufreq_disable_fast_switch - Disable fast frequency switching for @policy.
 *
 * Counterpart of cpufreq_enable_fast_switch(), called from
 * cpufreq_exit_governor().  Drops this policy's reference on
 * cpufreq_fast_switch_count so that transition notifiers can register
 * again once the count returns to zero.
 */
static void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		/* Count must be positive here; WARN if bookkeeping broke. */
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
432 | 495 | ||
433 | /********************************************************************* | 496 | /********************************************************************* |
434 | * SYSFS INTERFACE * | 497 | * SYSFS INTERFACE * |
@@ -1319,7 +1382,7 @@ static void cpufreq_offline(unsigned int cpu) | |||
1319 | 1382 | ||
1320 | /* If cpu is last user of policy, free policy */ | 1383 | /* If cpu is last user of policy, free policy */ |
1321 | if (has_target()) { | 1384 | if (has_target()) { |
1322 | ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); | 1385 | ret = cpufreq_exit_governor(policy); |
1323 | if (ret) | 1386 | if (ret) |
1324 | pr_err("%s: Failed to exit governor\n", __func__); | 1387 | pr_err("%s: Failed to exit governor\n", __func__); |
1325 | } | 1388 | } |
@@ -1447,8 +1510,12 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy) | |||
1447 | 1510 | ||
1448 | ret_freq = cpufreq_driver->get(policy->cpu); | 1511 | ret_freq = cpufreq_driver->get(policy->cpu); |
1449 | 1512 | ||
1450 | /* Updating inactive policies is invalid, so avoid doing that. */ | 1513 | /* |
1451 | if (unlikely(policy_is_inactive(policy))) | 1514 | * Updating inactive policies is invalid, so avoid doing that. Also |
1515 | * if fast frequency switching is used with the given policy, the check | ||
1516 | * against policy->cur is pointless, so skip it in that case too. | ||
1517 | */ | ||
1518 | if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) | ||
1452 | return ret_freq; | 1519 | return ret_freq; |
1453 | 1520 | ||
1454 | if (ret_freq && policy->cur && | 1521 | if (ret_freq && policy->cur && |
@@ -1672,8 +1739,18 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) | |||
1672 | 1739 | ||
1673 | switch (list) { | 1740 | switch (list) { |
1674 | case CPUFREQ_TRANSITION_NOTIFIER: | 1741 | case CPUFREQ_TRANSITION_NOTIFIER: |
1742 | mutex_lock(&cpufreq_fast_switch_lock); | ||
1743 | |||
1744 | if (cpufreq_fast_switch_count > 0) { | ||
1745 | mutex_unlock(&cpufreq_fast_switch_lock); | ||
1746 | return -EBUSY; | ||
1747 | } | ||
1675 | ret = srcu_notifier_chain_register( | 1748 | ret = srcu_notifier_chain_register( |
1676 | &cpufreq_transition_notifier_list, nb); | 1749 | &cpufreq_transition_notifier_list, nb); |
1750 | if (!ret) | ||
1751 | cpufreq_fast_switch_count--; | ||
1752 | |||
1753 | mutex_unlock(&cpufreq_fast_switch_lock); | ||
1677 | break; | 1754 | break; |
1678 | case CPUFREQ_POLICY_NOTIFIER: | 1755 | case CPUFREQ_POLICY_NOTIFIER: |
1679 | ret = blocking_notifier_chain_register( | 1756 | ret = blocking_notifier_chain_register( |
@@ -1706,8 +1783,14 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) | |||
1706 | 1783 | ||
1707 | switch (list) { | 1784 | switch (list) { |
1708 | case CPUFREQ_TRANSITION_NOTIFIER: | 1785 | case CPUFREQ_TRANSITION_NOTIFIER: |
1786 | mutex_lock(&cpufreq_fast_switch_lock); | ||
1787 | |||
1709 | ret = srcu_notifier_chain_unregister( | 1788 | ret = srcu_notifier_chain_unregister( |
1710 | &cpufreq_transition_notifier_list, nb); | 1789 | &cpufreq_transition_notifier_list, nb); |
1790 | if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0)) | ||
1791 | cpufreq_fast_switch_count++; | ||
1792 | |||
1793 | mutex_unlock(&cpufreq_fast_switch_lock); | ||
1711 | break; | 1794 | break; |
1712 | case CPUFREQ_POLICY_NOTIFIER: | 1795 | case CPUFREQ_POLICY_NOTIFIER: |
1713 | ret = blocking_notifier_chain_unregister( | 1796 | ret = blocking_notifier_chain_unregister( |
@@ -1726,6 +1809,37 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); | |||
1726 | * GOVERNORS * | 1809 | * GOVERNORS * |
1727 | *********************************************************************/ | 1810 | *********************************************************************/ |
1728 | 1811 | ||
1812 | /** | ||
1813 | * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch. | ||
1814 | * @policy: cpufreq policy to switch the frequency for. | ||
1815 | * @target_freq: New frequency to set (may be approximate). | ||
1816 | * | ||
1817 | * Carry out a fast frequency switch without sleeping. | ||
1818 | * | ||
1819 | * The driver's ->fast_switch() callback invoked by this function must be | ||
1820 | * suitable for being called from within RCU-sched read-side critical sections | ||
1821 | * and it is expected to select the minimum available frequency greater than or | ||
1822 | * equal to @target_freq (CPUFREQ_RELATION_L). | ||
1823 | * | ||
1824 | * This function must not be called if policy->fast_switch_enabled is unset. | ||
1825 | * | ||
1826 | * Governors calling this function must guarantee that it will never be invoked | ||
1827 | * twice in parallel for the same policy and that it will never be called in | ||
1828 | * parallel with either ->target() or ->target_index() for the same policy. | ||
1829 | * | ||
1830 | * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch() | ||
1831 | * callback to indicate an error condition, the hardware configuration must be | ||
1832 | * preserved. | ||
1833 | */ | ||
1834 | unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, | ||
1835 | unsigned int target_freq) | ||
1836 | { | ||
1837 | clamp_val(target_freq, policy->min, policy->max); | ||
1838 | |||
1839 | return cpufreq_driver->fast_switch(policy, target_freq); | ||
1840 | } | ||
1841 | EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch); | ||
1842 | |||
1729 | /* Must set freqs->new to intermediate frequency */ | 1843 | /* Must set freqs->new to intermediate frequency */ |
1730 | static int __target_intermediate(struct cpufreq_policy *policy, | 1844 | static int __target_intermediate(struct cpufreq_policy *policy, |
1731 | struct cpufreq_freqs *freqs, int index) | 1845 | struct cpufreq_freqs *freqs, int index) |
@@ -1946,6 +2060,12 @@ static int cpufreq_start_governor(struct cpufreq_policy *policy) | |||
1946 | return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); | 2060 | return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); |
1947 | } | 2061 | } |
1948 | 2062 | ||
/*
 * Tear down the policy's governor: disable fast frequency switching first,
 * so the fast-switch reference count is balanced before the governor's
 * CPUFREQ_GOV_POLICY_EXIT handler runs.
 */
static int cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	cpufreq_disable_fast_switch(policy);
	return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
}
2068 | |||
1949 | int cpufreq_register_governor(struct cpufreq_governor *governor) | 2069 | int cpufreq_register_governor(struct cpufreq_governor *governor) |
1950 | { | 2070 | { |
1951 | int err; | 2071 | int err; |
@@ -2101,7 +2221,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, | |||
2101 | return ret; | 2221 | return ret; |
2102 | } | 2222 | } |
2103 | 2223 | ||
2104 | ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); | 2224 | ret = cpufreq_exit_governor(policy); |
2105 | if (ret) { | 2225 | if (ret) { |
2106 | pr_err("%s: Failed to Exit Governor: %s (%d)\n", | 2226 | pr_err("%s: Failed to Exit Governor: %s (%d)\n", |
2107 | __func__, old_gov->name, ret); | 2227 | __func__, old_gov->name, ret); |
@@ -2118,7 +2238,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, | |||
2118 | pr_debug("cpufreq: governor change\n"); | 2238 | pr_debug("cpufreq: governor change\n"); |
2119 | return 0; | 2239 | return 0; |
2120 | } | 2240 | } |
2121 | cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); | 2241 | cpufreq_exit_governor(policy); |
2122 | } | 2242 | } |
2123 | 2243 | ||
2124 | /* new governor failed, so re-start old one */ | 2244 | /* new governor failed, so re-start old one */ |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 2b4f248b8ef1..55e69ebb035c 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -102,6 +102,17 @@ struct cpufreq_policy { | |||
102 | */ | 102 | */ |
103 | struct rw_semaphore rwsem; | 103 | struct rw_semaphore rwsem; |
104 | 104 | ||
105 | /* | ||
106 | * Fast switch flags: | ||
107 | * - fast_switch_possible should be set by the driver if it can | ||
108 | * guarantee that frequency can be changed on any CPU sharing the | ||
109 | * policy and that the change will affect all of the policy CPUs then. | ||
110 | * - fast_switch_enabled is to be set by governors that support fast | ||
111 |  *   frequency switching with the help of cpufreq_enable_fast_switch(). | ||
112 | */ | ||
113 | bool fast_switch_possible; | ||
114 | bool fast_switch_enabled; | ||
115 | |||
105 | /* Synchronization for frequency transitions */ | 116 | /* Synchronization for frequency transitions */ |
106 | bool transition_ongoing; /* Tracks transition status */ | 117 | bool transition_ongoing; /* Tracks transition status */ |
107 | spinlock_t transition_lock; | 118 | spinlock_t transition_lock; |
@@ -156,6 +167,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); | |||
156 | int cpufreq_update_policy(unsigned int cpu); | 167 | int cpufreq_update_policy(unsigned int cpu); |
157 | bool have_governor_per_policy(void); | 168 | bool have_governor_per_policy(void); |
158 | struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); | 169 | struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); |
170 | void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); | ||
159 | #else | 171 | #else |
160 | static inline unsigned int cpufreq_get(unsigned int cpu) | 172 | static inline unsigned int cpufreq_get(unsigned int cpu) |
161 | { | 173 | { |
@@ -236,6 +248,8 @@ struct cpufreq_driver { | |||
236 | unsigned int relation); /* Deprecated */ | 248 | unsigned int relation); /* Deprecated */ |
237 | int (*target_index)(struct cpufreq_policy *policy, | 249 | int (*target_index)(struct cpufreq_policy *policy, |
238 | unsigned int index); | 250 | unsigned int index); |
251 | unsigned int (*fast_switch)(struct cpufreq_policy *policy, | ||
252 | unsigned int target_freq); | ||
239 | /* | 253 | /* |
240 | * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION | 254 | * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION |
241 | * unset. | 255 | * unset. |
@@ -464,6 +478,8 @@ struct cpufreq_governor { | |||
464 | }; | 478 | }; |
465 | 479 | ||
466 | /* Pass a target to the cpufreq driver */ | 480 | /* Pass a target to the cpufreq driver */ |
481 | unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, | ||
482 | unsigned int target_freq); | ||
467 | int cpufreq_driver_target(struct cpufreq_policy *policy, | 483 | int cpufreq_driver_target(struct cpufreq_policy *policy, |
468 | unsigned int target_freq, | 484 | unsigned int target_freq, |
469 | unsigned int relation); | 485 | unsigned int relation); |