author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2014-06-12 07:43:02 -0400
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2014-06-12 07:43:02 -0400
commit     589e18a973bd6bb8abd2c6d4d8a1dcf5ae1dff61
tree       5c6ac2441bbe93ef5ccee8d1d46b8d97959c1f1c
parent     de815a6d00da0f8a59e8aebf8efe12e289552a8f
parent     5fbfbcd3e842ddfe9dbbe8865feba909963a87ec
Merge branch 'pm-cpufreq'
* pm-cpufreq:
cpufreq: cpufreq-cpu0: remove dependency on THERMAL and REGULATOR
cpufreq: tegra: update comment for clarity
cpufreq: intel_pstate: Remove duplicate CPU ID check
cpufreq: Mark CPU0 driver with CPUFREQ_NEED_INITIAL_FREQ_CHECK flag
cpufreq: governor: remove copy_prev_load from 'struct cpu_dbs_common_info'
cpufreq: governor: Be friendly towards latency-sensitive bursty workloads
cpufreq: ppc-corenet-cpu-freq: do_div use quotient
Revert "cpufreq: Enable big.LITTLE cpufreq driver on arm64"
cpufreq: Tegra: implement intermediate frequency callbacks
cpufreq: add support for intermediate (stable) frequencies
 Documentation/cpu-freq/cpu-drivers.txt |  29
 drivers/cpufreq/Kconfig                |   2
 drivers/cpufreq/Kconfig.arm            |   3
 drivers/cpufreq/cpufreq-cpu0.c         |   2
 drivers/cpufreq/cpufreq.c              |  67
 drivers/cpufreq/cpufreq_governor.c     |  67
 drivers/cpufreq/cpufreq_governor.h     |   7
 drivers/cpufreq/intel_pstate.c         |   6
 drivers/cpufreq/ppc-corenet-cpufreq.c  |   9
 drivers/cpufreq/tegra-cpufreq.c        | 100
 include/linux/cpufreq.h                |  25
 11 files changed, 256 insertions(+), 61 deletions(-)
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index b045fe54986a..14f4e6336d88 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -26,6 +26,7 @@ Contents:
 1.4  target/target_index or setpolicy?
 1.5  target/target_index
 1.6  setpolicy
+1.7  get_intermediate and target_intermediate
 2.   Frequency Table Helpers
 
 
@@ -79,6 +80,10 @@ cpufreq_driver.attr - A pointer to a NULL-terminated list of
 				"struct freq_attr" which allow to
 				export values to sysfs.
 
+cpufreq_driver.get_intermediate
+and target_intermediate		Used to switch to stable frequency while
+				changing CPU frequency.
+
 
 1.2 Per-CPU Initialization
 --------------------------
@@ -151,7 +156,7 @@ Some cpufreq-capable processors switch the frequency between certain
 limits on their own. These shall use the ->setpolicy call
 
 
-1.4. target/target_index
+1.5. target/target_index
 -------------
 
 The target_index call has two arguments: struct cpufreq_policy *policy,
@@ -160,6 +165,9 @@ and unsigned int index (into the exposed frequency table).
 The CPUfreq driver must set the new frequency when called here. The
 actual frequency must be determined by freq_table[index].frequency.
 
+It should always restore to earlier frequency (i.e. policy->restore_freq) in
+case of errors, even if we switched to intermediate frequency earlier.
+
 Deprecated:
 ----------
 The target call has three arguments: struct cpufreq_policy *policy,
@@ -179,7 +187,7 @@ Here again the frequency table helper might assist you - see section 2
 for details.
 
 
-1.5 setpolicy
+1.6 setpolicy
 ---------------
 
 The setpolicy call only takes a struct cpufreq_policy *policy as
@@ -190,6 +198,23 @@ setting when policy->policy is CPUFREQ_POLICY_PERFORMANCE, and a
 powersaving-oriented setting when CPUFREQ_POLICY_POWERSAVE. Also check
 the reference implementation in drivers/cpufreq/longrun.c
 
+1.7 get_intermediate and target_intermediate
+--------------------------------------------
+
+Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION unset.
+
+get_intermediate should return a stable intermediate frequency platform wants to
+switch to, and target_intermediate() should set CPU to to that frequency, before
+jumping to the frequency corresponding to 'index'. Core will take care of
+sending notifications and driver doesn't have to handle them in
+target_intermediate() or target_index().
+
+Drivers can return '0' from get_intermediate() in case they don't wish to switch
+to intermediate frequency for some target frequency. In that case core will
+directly call ->target_index().
+
+NOTE: ->target_index() should restore to policy->restore_freq in case of
+failures as core would send notifications for that.
 
 
 2. Frequency Table Helpers
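
The new section 1.7 above spells out the driver-side contract for the two callbacks. As a rough sketch of that contract only (it is not part of this patch), a hypothetical driver might wire them up as below; FOO_STABLE_FREQ, foo_switch_clock() and the foo_* callbacks are invented names standing in for platform-specific code.

#include <linux/cpufreq.h>

#define FOO_STABLE_FREQ		500000	/* hypothetical stable frequency, in kHz */

static struct cpufreq_frequency_table *foo_freq_table;

/* Hypothetical platform hook; a real driver would program clocks/regulators. */
static int foo_switch_clock(unsigned int khz)
{
	return 0;
}

/* Return the stable frequency to pass through, or 0 to skip the extra step. */
static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
					 unsigned int index)
{
	if (policy->cur == FOO_STABLE_FREQ ||
	    foo_freq_table[index].frequency == FOO_STABLE_FREQ)
		return 0;

	return FOO_STABLE_FREQ;
}

/* Move to the stable frequency; the core sends the transition notifications. */
static int foo_target_intermediate(struct cpufreq_policy *policy,
				   unsigned int index)
{
	return foo_switch_clock(FOO_STABLE_FREQ);
}

/*
 * Jump to the requested frequency; on failure the driver must fall back to
 * policy->restore_freq, as the documentation above requires.
 */
static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	int ret = foo_switch_clock(foo_freq_table[index].frequency);

	if (ret)
		foo_switch_clock(policy->restore_freq);

	return ret;
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.verify			= cpufreq_generic_frequency_table_verify,
	.get_intermediate	= foo_get_intermediate,
	.target_intermediate	= foo_target_intermediate,
	.target_index		= foo_target_index,
};

A complete driver would of course also provide .init, .get and the other mandatory callbacks; the fragment only shows how the intermediate-frequency pair fits together.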
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 1fbe11f2a146..e473d6555f96 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -185,7 +185,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
 config GENERIC_CPUFREQ_CPU0
 	tristate "Generic CPU0 cpufreq driver"
-	depends on HAVE_CLK && REGULATOR && OF && THERMAL && CPU_THERMAL
+	depends on HAVE_CLK && OF
 	select PM_OPP
 	help
 	  This adds a generic cpufreq driver for CPU0 frequency management.
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 36d20d0fce27..ebac67115009 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,8 +5,7 @@
 # big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
-	depends on (BIG_LITTLE && ARM_CPU_TOPOLOGY) || (ARM64 && SMP)
-	depends on HAVE_CLK
+	depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
 	select PM_OPP
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 09b9129c7bd3..ee1ae303a07c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -104,7 +104,7 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver cpu0_cpufreq_driver = {
-	.flags = CPUFREQ_STICKY,
+	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = cpu0_set_target,
 	.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ae11dd51f81d..aed2b0cb83dc 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1816,20 +1816,55 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *                              GOVERNORS                            *
  *********************************************************************/
 
+/* Must set freqs->new to intermediate frequency */
+static int __target_intermediate(struct cpufreq_policy *policy,
+				 struct cpufreq_freqs *freqs, int index)
+{
+	int ret;
+
+	freqs->new = cpufreq_driver->get_intermediate(policy, index);
+
+	/* We don't need to switch to intermediate freq */
+	if (!freqs->new)
+		return 0;
+
+	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
+		 __func__, policy->cpu, freqs->old, freqs->new);
+
+	cpufreq_freq_transition_begin(policy, freqs);
+	ret = cpufreq_driver->target_intermediate(policy, index);
+	cpufreq_freq_transition_end(policy, freqs, ret);
+
+	if (ret)
+		pr_err("%s: Failed to change to intermediate frequency: %d\n",
+		       __func__, ret);
+
+	return ret;
+}
+
 static int __target_index(struct cpufreq_policy *policy,
 			  struct cpufreq_frequency_table *freq_table, int index)
 {
-	struct cpufreq_freqs freqs;
+	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
+	unsigned int intermediate_freq = 0;
 	int retval = -EINVAL;
 	bool notify;
 
 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
-
 	if (notify) {
-		freqs.old = policy->cur;
-		freqs.new = freq_table[index].frequency;
-		freqs.flags = 0;
+		/* Handle switching to intermediate frequency */
+		if (cpufreq_driver->get_intermediate) {
+			retval = __target_intermediate(policy, &freqs, index);
+			if (retval)
+				return retval;
+
+			intermediate_freq = freqs.new;
+			/* Set old freq to intermediate */
+			if (intermediate_freq)
+				freqs.old = freqs.new;
+		}
 
+		freqs.new = freq_table[index].frequency;
 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
 			 __func__, policy->cpu, freqs.old, freqs.new);
 
@@ -1841,9 +1876,23 @@ static int __target_index(struct cpufreq_policy *policy,
 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
 		       retval);
 
-	if (notify)
+	if (notify) {
 		cpufreq_freq_transition_end(policy, &freqs, retval);
 
+		/*
+		 * Failed after setting to intermediate freq? Driver should have
+		 * reverted back to initial frequency and so should we. Check
+		 * here for intermediate_freq instead of get_intermediate, in
+		 * case we have't switched to intermediate freq at all.
+		 */
+		if (unlikely(retval && intermediate_freq)) {
+			freqs.old = intermediate_freq;
+			freqs.new = policy->restore_freq;
+			cpufreq_freq_transition_begin(policy, &freqs);
+			cpufreq_freq_transition_end(policy, &freqs, 0);
+		}
+	}
+
 	return retval;
 }
 
@@ -1875,6 +1924,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (target_freq == policy->cur)
 		return 0;
 
+	/* Save last value to restore later on errors */
+	policy->restore_freq = policy->cur;
+
 	if (cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 	else if (cpufreq_driver->target_index) {
@@ -2361,7 +2413,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	    !(driver_data->setpolicy || driver_data->target_index ||
 		    driver_data->target) ||
 	    (driver_data->setpolicy && (driver_data->target_index ||
-		    driver_data->target)))
+		    driver_data->target)) ||
+	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
 		return -EINVAL;
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
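
The extra condition added to cpufreq_register_driver() above rejects drivers that supply only one of get_intermediate()/target_intermediate(): the !! double negation collapses each pointer to 0 or 1, so the inequality behaves like a boolean XOR. A small standalone illustration of the idiom (not taken from the patch):

#include <stdio.h>

/* Accept a driver only if it defines both callbacks or neither of them. */
static int callbacks_consistent(void *get_intermediate, void *target_intermediate)
{
	/* !! turns any non-NULL pointer into 1, so this compares "present" flags. */
	return !!get_intermediate == !!target_intermediate;
}

int main(void)
{
	int dummy;

	printf("%d\n", callbacks_consistent(NULL, NULL));     /* 1: neither set */
	printf("%d\n", callbacks_consistent(&dummy, &dummy)); /* 1: both set */
	printf("%d\n", callbacks_consistent(&dummy, NULL));   /* 0: rejected */
	return 0;
}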
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index e1c6433b16e0..1b44496b2d2b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -36,14 +36,29 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	struct cpufreq_policy *policy;
+	unsigned int sampling_rate;
 	unsigned int max_load = 0;
 	unsigned int ignore_nice;
 	unsigned int j;
 
-	if (dbs_data->cdata->governor == GOV_ONDEMAND)
+	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
+		struct od_cpu_dbs_info_s *od_dbs_info =
+				dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+
+		/*
+		 * Sometimes, the ondemand governor uses an additional
+		 * multiplier to give long delays. So apply this multiplier to
+		 * the 'sampling_rate', so as to keep the wake-up-from-idle
+		 * detection logic a bit conservative.
+		 */
+		sampling_rate = od_tuners->sampling_rate;
+		sampling_rate *= od_dbs_info->rate_mult;
+
 		ignore_nice = od_tuners->ignore_nice_load;
-	else
+	} else {
+		sampling_rate = cs_tuners->sampling_rate;
 		ignore_nice = cs_tuners->ignore_nice_load;
+	}
 
 	policy = cdbs->cur_policy;
 
@@ -96,7 +111,46 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
 
-		load = 100 * (wall_time - idle_time) / wall_time;
+		/*
+		 * If the CPU had gone completely idle, and a task just woke up
+		 * on this CPU now, it would be unfair to calculate 'load' the
+		 * usual way for this elapsed time-window, because it will show
+		 * near-zero load, irrespective of how CPU intensive that task
+		 * actually is. This is undesirable for latency-sensitive bursty
+		 * workloads.
+		 *
+		 * To avoid this, we reuse the 'load' from the previous
+		 * time-window and give this task a chance to start with a
+		 * reasonably high CPU frequency. (However, we shouldn't over-do
+		 * this copy, lest we get stuck at a high load (high frequency)
+		 * for too long, even when the current system load has actually
+		 * dropped down. So we perform the copy only once, upon the
+		 * first wake-up from idle.)
+		 *
+		 * Detecting this situation is easy: the governor's deferrable
+		 * timer would not have fired during CPU-idle periods. Hence
+		 * an unusually large 'wall_time' (as compared to the sampling
+		 * rate) indicates this scenario.
+		 *
+		 * prev_load can be zero in two cases and we must recalculate it
+		 * for both cases:
+		 * - during long idle intervals
+		 * - explicitly set to zero
+		 */
+		if (unlikely(wall_time > (2 * sampling_rate) &&
+			     j_cdbs->prev_load)) {
+			load = j_cdbs->prev_load;
+
+			/*
+			 * Perform a destructive copy, to ensure that we copy
+			 * the previous load only once, upon the first wake-up
+			 * from idle.
+			 */
+			j_cdbs->prev_load = 0;
+		} else {
+			load = 100 * (wall_time - idle_time) / wall_time;
+			j_cdbs->prev_load = load;
+		}
 
 		if (load > max_load)
 			max_load = load;
@@ -318,11 +372,18 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_common_info *j_cdbs =
 				dbs_data->cdata->get_cpu_cdbs(j);
+			unsigned int prev_load;
 
 			j_cdbs->cpu = j;
 			j_cdbs->cur_policy = policy;
 			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
 					       &j_cdbs->prev_cpu_wall, io_busy);
+
+			prev_load = (unsigned int)
+				(j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
+			j_cdbs->prev_load = 100 * prev_load /
+					(unsigned int) j_cdbs->prev_cpu_wall;
+
 			if (ignore_nice)
 				j_cdbs->prev_cpu_nice =
 					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
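
The dbs_check_cpu() hunk above decides, per sample window, whether to trust the measured load or to reuse the previous one after a long idle. The standalone sketch below (plain userspace C with made-up sample numbers, not kernel code) mimics that decision: an unusually long wall_time combined with a non-zero prev_load reuses the cached load exactly once; otherwise the load is computed normally and cached for next time.

#include <stdio.h>

static unsigned int prev_load;	/* doubles as the "already copied once" flag */

static unsigned int compute_load(unsigned int wall_time, unsigned int idle_time,
				 unsigned int sampling_rate)
{
	unsigned int load;

	if (wall_time > 2 * sampling_rate && prev_load) {
		/* Woke up from a long idle: reuse the previous load once. */
		load = prev_load;
		prev_load = 0;	/* destructive copy, as in the patch */
	} else {
		load = 100 * (wall_time - idle_time) / wall_time;
		prev_load = load;
	}
	return load;
}

int main(void)
{
	unsigned int sampling_rate = 10000;	/* usecs, sample value */

	/* Busy window: load computed normally and remembered. */
	printf("%u\n", compute_load(10000, 2000, sampling_rate));	/* 80 */
	/* Long idle, then a burst: previous load (80) is reused once. */
	printf("%u\n", compute_load(500000, 499000, sampling_rate));	/* 80 */
	/* Next long window: falls back to the measured, near-zero load. */
	printf("%u\n", compute_load(500000, 499000, sampling_rate));	/* 0 */
	return 0;
}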
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index bfb9ae14142c..cc401d147e72 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -134,6 +134,13 @@ struct cpu_dbs_common_info {
 	u64 prev_cpu_idle;
 	u64 prev_cpu_wall;
 	u64 prev_cpu_nice;
+	/*
+	 * Used to keep track of load in the previous interval. However, when
+	 * explicitly set to zero, it is used as a flag to ensure that we copy
+	 * the previous load to the current interval only once, upon the first
+	 * wake-up from idle.
+	 */
+	unsigned int prev_load;
 	struct cpufreq_policy *cur_policy;
 	struct delayed_work work;
 	/*
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aebd4572eb6d..4e7f492ad583 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -691,14 +691,8 @@ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
-
-	const struct x86_cpu_id *id;
 	struct cpudata *cpu;
 
-	id = x86_match_cpu(intel_pstate_cpu_ids);
-	if (!id)
-		return -ENODEV;
-
 	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
 	if (!all_cpu_data[cpunum])
 		return -ENOMEM;
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 0af618abebaf..3607070797af 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -138,7 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	struct cpufreq_frequency_table *table;
 	struct cpu_data *data;
 	unsigned int cpu = policy->cpu;
-	u64 transition_latency_hz;
+	u64 u64temp;
 
 	np = of_get_cpu_node(cpu, NULL);
 	if (!np)
@@ -206,9 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	for_each_cpu(i, per_cpu(cpu_mask, cpu))
 		per_cpu(cpu_data, i) = data;
 
-	transition_latency_hz = 12ULL * NSEC_PER_SEC;
-	policy->cpuinfo.transition_latency =
-		do_div(transition_latency_hz, fsl_get_sys_freq());
+	/* Minimum transition latency is 12 platform clocks */
+	u64temp = 12ULL * NSEC_PER_SEC;
+	do_div(u64temp, fsl_get_sys_freq());
+	policy->cpuinfo.transition_latency = u64temp + 1;
 
 	of_node_put(np);
 
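
The ppc-corenet change exists because the kernel's do_div(n, base) macro divides n in place and returns the remainder; the old code stored that remainder in transition_latency instead of the quotient. The userspace approximation below reproduces the corrected arithmetic with ordinary 64-bit division; the 666666666 Hz platform clock is an assumed value, since fsl_get_sys_freq() is board specific.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t latency = 12ULL * NSEC_PER_SEC;	/* 12 platform clocks, scaled to ns */
	uint64_t sys_freq = 666666666;			/* assumed platform clock, in Hz */
	uint64_t remainder = latency % sys_freq;	/* what do_div() returns */

	latency /= sys_freq;				/* what do_div() leaves in 'n' */

	/* The fix uses the quotient (+1 to round up), not the remainder. */
	printf("transition_latency = %llu ns (old code would have stored %llu)\n",
	       (unsigned long long)(latency + 1), (unsigned long long)remainder);
	return 0;	/* prints 19 ns versus the bogus 12 */
}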
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index 6e774c6ac20b..8084c7f7e206 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -45,46 +45,54 @@ static struct clk *cpu_clk;
 static struct clk *pll_x_clk;
 static struct clk *pll_p_clk;
 static struct clk *emc_clk;
+static bool pll_x_prepared;
 
-static int tegra_cpu_clk_set_rate(unsigned long rate)
+static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
+					   unsigned int index)
+{
+	unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
+
+	/*
+	 * Don't switch to intermediate freq if:
+	 * - we are already at it, i.e. policy->cur == ifreq
+	 * - index corresponds to ifreq
+	 */
+	if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq))
+		return 0;
+
+	return ifreq;
+}
+
+static int tegra_target_intermediate(struct cpufreq_policy *policy,
+				     unsigned int index)
 {
 	int ret;
 
 	/*
 	 * Take an extra reference to the main pll so it doesn't turn
-	 * off when we move the cpu off of it
+	 * off when we move the cpu off of it as enabling it again while we
+	 * switch to it from tegra_target() would take additional time.
+	 *
+	 * When target-freq is equal to intermediate freq we don't need to
+	 * switch to an intermediate freq and so this routine isn't called.
+	 * Also, we wouldn't be using pll_x anymore and must not take extra
+	 * reference to it, as it can be disabled now to save some power.
 	 */
 	clk_prepare_enable(pll_x_clk);
 
 	ret = clk_set_parent(cpu_clk, pll_p_clk);
-	if (ret) {
-		pr_err("Failed to switch cpu to clock pll_p\n");
-		goto out;
-	}
-
-	if (rate == clk_get_rate(pll_p_clk))
-		goto out;
-
-	ret = clk_set_rate(pll_x_clk, rate);
-	if (ret) {
-		pr_err("Failed to change pll_x to %lu\n", rate);
-		goto out;
-	}
-
-	ret = clk_set_parent(cpu_clk, pll_x_clk);
-	if (ret) {
-		pr_err("Failed to switch cpu to clock pll_x\n");
-		goto out;
-	}
+	if (ret)
+		clk_disable_unprepare(pll_x_clk);
+	else
+		pll_x_prepared = true;
 
-out:
-	clk_disable_unprepare(pll_x_clk);
 	return ret;
 }
 
 static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	unsigned long rate = freq_table[index].frequency;
+	unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
 	int ret = 0;
 
 	/*
@@ -98,10 +106,30 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 	else
 		clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */
 
-	ret = tegra_cpu_clk_set_rate(rate * 1000);
+	/*
+	 * target freq == pll_p, don't need to take extra reference to pll_x_clk
+	 * as it isn't used anymore.
+	 */
+	if (rate == ifreq)
+		return clk_set_parent(cpu_clk, pll_p_clk);
+
+	ret = clk_set_rate(pll_x_clk, rate * 1000);
+	/* Restore to earlier frequency on error, i.e. pll_x */
 	if (ret)
-		pr_err("cpu-tegra: Failed to set cpu frequency to %lu kHz\n",
-			rate);
+		pr_err("Failed to change pll_x to %lu\n", rate);
+
+	ret = clk_set_parent(cpu_clk, pll_x_clk);
+	/* This shouldn't fail while changing or restoring */
+	WARN_ON(ret);
+
+	/*
+	 * Drop count to pll_x clock only if we switched to intermediate freq
+	 * earlier while transitioning to a target frequency.
+	 */
+	if (pll_x_prepared) {
+		clk_disable_unprepare(pll_x_clk);
+		pll_x_prepared = false;
+	}
 
 	return ret;
 }
@@ -137,16 +165,18 @@ static int tegra_cpu_exit(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver tegra_cpufreq_driver = {
-	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
-	.verify		= cpufreq_generic_frequency_table_verify,
-	.target_index	= tegra_target,
-	.get		= cpufreq_generic_get,
-	.init		= tegra_cpu_init,
-	.exit		= tegra_cpu_exit,
-	.name		= "tegra",
-	.attr		= cpufreq_generic_attr,
+	.flags			= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify			= cpufreq_generic_frequency_table_verify,
+	.get_intermediate	= tegra_get_intermediate,
+	.target_intermediate	= tegra_target_intermediate,
+	.target_index		= tegra_target,
+	.get			= cpufreq_generic_get,
+	.init			= tegra_cpu_init,
+	.exit			= tegra_cpu_exit,
+	.name			= "tegra",
+	.attr			= cpufreq_generic_attr,
 #ifdef CONFIG_PM
 	.suspend	= cpufreq_generic_suspend,
 #endif
 };
 
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 3f458896d45c..ec4112d257bc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -75,6 +75,7 @@ struct cpufreq_policy {
 	unsigned int		max;    /* in kHz */
 	unsigned int		cur;    /* in kHz, only needed if cpufreq
 					 * governors are used */
+	unsigned int		restore_freq; /* = policy->cur before transition */
 	unsigned int		suspend_freq; /* freq to set during suspend */
 
 	unsigned int		policy; /* see above */
@@ -221,11 +222,35 @@ struct cpufreq_driver {
 
 	/* define one out of two */
 	int	(*setpolicy)	(struct cpufreq_policy *policy);
+
+	/*
+	 * On failure, should always restore frequency to policy->restore_freq
+	 * (i.e. old freq).
+	 */
 	int	(*target)	(struct cpufreq_policy *policy,	/* Deprecated */
 				 unsigned int target_freq,
 				 unsigned int relation);
 	int	(*target_index)	(struct cpufreq_policy *policy,
 				 unsigned int index);
+	/*
+	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
+	 * unset.
+	 *
+	 * get_intermediate should return a stable intermediate frequency
+	 * platform wants to switch to and target_intermediate() should set CPU
+	 * to to that frequency, before jumping to the frequency corresponding
+	 * to 'index'. Core will take care of sending notifications and driver
+	 * doesn't have to handle them in target_intermediate() or
+	 * target_index().
+	 *
+	 * Drivers can return '0' from get_intermediate() in case they don't
+	 * wish to switch to intermediate frequency for some target frequency.
+	 * In that case core will directly call ->target_index().
+	 */
+	unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
+					 unsigned int index);
+	int	(*target_intermediate)(struct cpufreq_policy *policy,
+				       unsigned int index);
 
 	/* should be defined, if possible */
 	unsigned int	(*get)	(unsigned int cpu);