author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2014-06-12 07:43:02 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2014-06-12 07:43:02 -0400
commit		589e18a973bd6bb8abd2c6d4d8a1dcf5ae1dff61 (patch)
tree		5c6ac2441bbe93ef5ccee8d1d46b8d97959c1f1c /drivers/cpufreq
parent		de815a6d00da0f8a59e8aebf8efe12e289552a8f (diff)
parent		5fbfbcd3e842ddfe9dbbe8865feba909963a87ec (diff)
Merge branch 'pm-cpufreq'
* pm-cpufreq:
  cpufreq: cpufreq-cpu0: remove dependency on THERMAL and REGULATOR
  cpufreq: tegra: update comment for clarity
  cpufreq: intel_pstate: Remove duplicate CPU ID check
  cpufreq: Mark CPU0 driver with CPUFREQ_NEED_INITIAL_FREQ_CHECK flag
  cpufreq: governor: remove copy_prev_load from 'struct cpu_dbs_common_info'
  cpufreq: governor: Be friendly towards latency-sensitive bursty workloads
  cpufreq: ppc-corenet-cpu-freq: do_div use quotient
  Revert "cpufreq: Enable big.LITTLE cpufreq driver on arm64"
  cpufreq: Tegra: implement intermediate frequency callbacks
  cpufreq: add support for intermediate (stable) frequencies
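For reference, the API added by the last commit in this series pairs two optional cpufreq_driver callbacks: get_intermediate() returns the stable frequency to switch to first (or 0 to skip the step) and target_intermediate() performs that switch. Below is a minimal sketch of how a platform driver might wire them up; the foo_* names and helper are hypothetical, while the callback signatures and the both-or-neither registration rule come from the diff itself.

/*
 * Minimal sketch only. The foo_* identifiers are made up for
 * illustration; the signatures match those used by the tegra
 * driver in this merge.
 */
static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
					 unsigned int index)
{
	/*
	 * Returning 0 tells the core to skip the intermediate step,
	 * e.g. when we are already at the stable frequency or the
	 * target index corresponds to it.
	 */
	if (freq_table[index].frequency == foo_stable_freq_khz ||
	    policy->cur == foo_stable_freq_khz)
		return 0;

	return foo_stable_freq_khz;
}

static int foo_target_intermediate(struct cpufreq_policy *policy,
				   unsigned int index)
{
	/* Re-parent/re-rate clocks to the stable frequency (hypothetical) */
	return foo_switch_to_stable_clk();
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.flags			= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify			= cpufreq_generic_frequency_table_verify,
	/* cpufreq_register_driver() rejects setting only one of these */
	.get_intermediate	= foo_get_intermediate,
	.target_intermediate	= foo_target_intermediate,
	.target_index		= foo_target_index,
	.get			= cpufreq_generic_get,
};

If a transition fails after the intermediate switch, the core notifies a transition back to policy->restore_freq (saved in __cpufreq_driver_target()), as the cpufreq.c hunks below show.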
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/Kconfig			|   2
-rw-r--r--	drivers/cpufreq/Kconfig.arm		|   3
-rw-r--r--	drivers/cpufreq/cpufreq-cpu0.c		|   2
-rw-r--r--	drivers/cpufreq/cpufreq.c		|  67
-rw-r--r--	drivers/cpufreq/cpufreq_governor.c	|  67
-rw-r--r--	drivers/cpufreq/cpufreq_governor.h	|   7
-rw-r--r--	drivers/cpufreq/intel_pstate.c		|   6
-rw-r--r--	drivers/cpufreq/ppc-corenet-cpufreq.c	|   9
-rw-r--r--	drivers/cpufreq/tegra-cpufreq.c		| 100
9 files changed, 204 insertions(+), 59 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 1fbe11f2a146..e473d6555f96 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -185,7 +185,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
 config GENERIC_CPUFREQ_CPU0
 	tristate "Generic CPU0 cpufreq driver"
-	depends on HAVE_CLK && REGULATOR && OF && THERMAL && CPU_THERMAL
+	depends on HAVE_CLK && OF
 	select PM_OPP
 	help
 	  This adds a generic cpufreq driver for CPU0 frequency management.
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 36d20d0fce27..ebac67115009 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,8 +5,7 @@
 # big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
-	depends on (BIG_LITTLE && ARM_CPU_TOPOLOGY) || (ARM64 && SMP)
-	depends on HAVE_CLK
+	depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
 	select PM_OPP
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 09b9129c7bd3..ee1ae303a07c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -104,7 +104,7 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver cpu0_cpufreq_driver = {
-	.flags = CPUFREQ_STICKY,
+	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = cpu0_set_target,
 	.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ae11dd51f81d..aed2b0cb83dc 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1816,20 +1816,55 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *                              GOVERNORS                            *
  *********************************************************************/
 
+/* Must set freqs->new to intermediate frequency */
+static int __target_intermediate(struct cpufreq_policy *policy,
+				 struct cpufreq_freqs *freqs, int index)
+{
+	int ret;
+
+	freqs->new = cpufreq_driver->get_intermediate(policy, index);
+
+	/* We don't need to switch to intermediate freq */
+	if (!freqs->new)
+		return 0;
+
+	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
+		 __func__, policy->cpu, freqs->old, freqs->new);
+
+	cpufreq_freq_transition_begin(policy, freqs);
+	ret = cpufreq_driver->target_intermediate(policy, index);
+	cpufreq_freq_transition_end(policy, freqs, ret);
+
+	if (ret)
+		pr_err("%s: Failed to change to intermediate frequency: %d\n",
+		       __func__, ret);
+
+	return ret;
+}
+
 static int __target_index(struct cpufreq_policy *policy,
 			  struct cpufreq_frequency_table *freq_table, int index)
 {
-	struct cpufreq_freqs freqs;
+	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
+	unsigned int intermediate_freq = 0;
 	int retval = -EINVAL;
 	bool notify;
 
 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
-
 	if (notify) {
-		freqs.old = policy->cur;
-		freqs.new = freq_table[index].frequency;
-		freqs.flags = 0;
+		/* Handle switching to intermediate frequency */
+		if (cpufreq_driver->get_intermediate) {
+			retval = __target_intermediate(policy, &freqs, index);
+			if (retval)
+				return retval;
+
+			intermediate_freq = freqs.new;
+			/* Set old freq to intermediate */
+			if (intermediate_freq)
+				freqs.old = freqs.new;
+		}
 
+		freqs.new = freq_table[index].frequency;
 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
 			 __func__, policy->cpu, freqs.old, freqs.new);
 
@@ -1841,9 +1876,23 @@ static int __target_index(struct cpufreq_policy *policy,
 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
 		       retval);
 
-	if (notify)
+	if (notify) {
 		cpufreq_freq_transition_end(policy, &freqs, retval);
 
+		/*
+		 * Failed after setting to intermediate freq? Driver should have
+		 * reverted back to initial frequency and so should we. Check
+		 * here for intermediate_freq instead of get_intermediate, in
+		 * case we haven't switched to intermediate freq at all.
+		 */
+		if (unlikely(retval && intermediate_freq)) {
+			freqs.old = intermediate_freq;
+			freqs.new = policy->restore_freq;
+			cpufreq_freq_transition_begin(policy, &freqs);
+			cpufreq_freq_transition_end(policy, &freqs, 0);
+		}
+	}
+
 	return retval;
 }
 
@@ -1875,6 +1924,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (target_freq == policy->cur)
 		return 0;
 
+	/* Save last value to restore later on errors */
+	policy->restore_freq = policy->cur;
+
 	if (cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 	else if (cpufreq_driver->target_index) {
@@ -2361,7 +2413,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	    !(driver_data->setpolicy || driver_data->target_index ||
 		    driver_data->target) ||
 	     (driver_data->setpolicy && (driver_data->target_index ||
-		    driver_data->target)))
+		    driver_data->target)) ||
+	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
 		return -EINVAL;
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index e1c6433b16e0..1b44496b2d2b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -36,14 +36,29 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	struct cpufreq_policy *policy;
+	unsigned int sampling_rate;
 	unsigned int max_load = 0;
 	unsigned int ignore_nice;
 	unsigned int j;
 
-	if (dbs_data->cdata->governor == GOV_ONDEMAND)
+	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
+		struct od_cpu_dbs_info_s *od_dbs_info =
+				dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+
+		/*
+		 * Sometimes, the ondemand governor uses an additional
+		 * multiplier to give long delays. So apply this multiplier to
+		 * the 'sampling_rate', so as to keep the wake-up-from-idle
+		 * detection logic a bit conservative.
+		 */
+		sampling_rate = od_tuners->sampling_rate;
+		sampling_rate *= od_dbs_info->rate_mult;
+
 		ignore_nice = od_tuners->ignore_nice_load;
-	else
+	} else {
+		sampling_rate = cs_tuners->sampling_rate;
 		ignore_nice = cs_tuners->ignore_nice_load;
+	}
 
 	policy = cdbs->cur_policy;
 
@@ -96,7 +111,46 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
 
-		load = 100 * (wall_time - idle_time) / wall_time;
+		/*
+		 * If the CPU had gone completely idle, and a task just woke up
+		 * on this CPU now, it would be unfair to calculate 'load' the
+		 * usual way for this elapsed time-window, because it will show
+		 * near-zero load, irrespective of how CPU intensive that task
+		 * actually is. This is undesirable for latency-sensitive bursty
+		 * workloads.
+		 *
+		 * To avoid this, we reuse the 'load' from the previous
+		 * time-window and give this task a chance to start with a
+		 * reasonably high CPU frequency. (However, we shouldn't over-do
+		 * this copy, lest we get stuck at a high load (high frequency)
+		 * for too long, even when the current system load has actually
+		 * dropped down. So we perform the copy only once, upon the
+		 * first wake-up from idle.)
+		 *
+		 * Detecting this situation is easy: the governor's deferrable
+		 * timer would not have fired during CPU-idle periods. Hence
+		 * an unusually large 'wall_time' (as compared to the sampling
+		 * rate) indicates this scenario.
+		 *
+		 * prev_load can be zero in two cases and we must recalculate it
+		 * for both cases:
+		 *  - during long idle intervals
+		 *  - explicitly set to zero
+		 */
+		if (unlikely(wall_time > (2 * sampling_rate) &&
+			     j_cdbs->prev_load)) {
+			load = j_cdbs->prev_load;
+
+			/*
+			 * Perform a destructive copy, to ensure that we copy
+			 * the previous load only once, upon the first wake-up
+			 * from idle.
+			 */
+			j_cdbs->prev_load = 0;
+		} else {
+			load = 100 * (wall_time - idle_time) / wall_time;
+			j_cdbs->prev_load = load;
+		}
 
 		if (load > max_load)
 			max_load = load;
@@ -318,11 +372,18 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_common_info *j_cdbs =
 				dbs_data->cdata->get_cpu_cdbs(j);
+			unsigned int prev_load;
 
 			j_cdbs->cpu = j;
 			j_cdbs->cur_policy = policy;
 			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
 					       &j_cdbs->prev_cpu_wall, io_busy);
+
+			prev_load = (unsigned int)
+				(j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
+			j_cdbs->prev_load = 100 * prev_load /
+					(unsigned int) j_cdbs->prev_cpu_wall;
+
 			if (ignore_nice)
 				j_cdbs->prev_cpu_nice =
 					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index bfb9ae14142c..cc401d147e72 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -134,6 +134,13 @@ struct cpu_dbs_common_info {
 	u64 prev_cpu_idle;
 	u64 prev_cpu_wall;
 	u64 prev_cpu_nice;
+	/*
+	 * Used to keep track of load in the previous interval. However, when
+	 * explicitly set to zero, it is used as a flag to ensure that we copy
+	 * the previous load to the current interval only once, upon the first
+	 * wake-up from idle.
+	 */
+	unsigned int prev_load;
 	struct cpufreq_policy *cur_policy;
 	struct delayed_work work;
 	/*
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aebd4572eb6d..4e7f492ad583 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -691,14 +691,8 @@ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
-
-	const struct x86_cpu_id *id;
 	struct cpudata *cpu;
 
-	id = x86_match_cpu(intel_pstate_cpu_ids);
-	if (!id)
-		return -ENODEV;
-
 	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
 	if (!all_cpu_data[cpunum])
 		return -ENOMEM;
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 0af618abebaf..3607070797af 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -138,7 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	struct cpufreq_frequency_table *table;
 	struct cpu_data *data;
 	unsigned int cpu = policy->cpu;
-	u64 transition_latency_hz;
+	u64 u64temp;
 
 	np = of_get_cpu_node(cpu, NULL);
 	if (!np)
@@ -206,9 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	for_each_cpu(i, per_cpu(cpu_mask, cpu))
 		per_cpu(cpu_data, i) = data;
 
-	transition_latency_hz = 12ULL * NSEC_PER_SEC;
-	policy->cpuinfo.transition_latency =
-		do_div(transition_latency_hz, fsl_get_sys_freq());
+	/* Minimum transition latency is 12 platform clocks */
+	u64temp = 12ULL * NSEC_PER_SEC;
+	do_div(u64temp, fsl_get_sys_freq());
+	policy->cpuinfo.transition_latency = u64temp + 1;
 
 	of_node_put(np);
 
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index 6e774c6ac20b..8084c7f7e206 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -45,46 +45,54 @@ static struct clk *cpu_clk;
 static struct clk *pll_x_clk;
 static struct clk *pll_p_clk;
 static struct clk *emc_clk;
+static bool pll_x_prepared;
 
-static int tegra_cpu_clk_set_rate(unsigned long rate)
+static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
+					   unsigned int index)
+{
+	unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
+
+	/*
+	 * Don't switch to intermediate freq if:
+	 * - we are already at it, i.e. policy->cur == ifreq
+	 * - index corresponds to ifreq
+	 */
+	if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq))
+		return 0;
+
+	return ifreq;
+}
+
+static int tegra_target_intermediate(struct cpufreq_policy *policy,
+				     unsigned int index)
 {
 	int ret;
 
 	/*
 	 * Take an extra reference to the main pll so it doesn't turn
-	 * off when we move the cpu off of it
+	 * off when we move the cpu off of it as enabling it again while we
+	 * switch to it from tegra_target() would take additional time.
+	 *
+	 * When target-freq is equal to intermediate freq we don't need to
+	 * switch to an intermediate freq and so this routine isn't called.
+	 * Also, we wouldn't be using pll_x anymore and must not take extra
+	 * reference to it, as it can be disabled now to save some power.
 	 */
 	clk_prepare_enable(pll_x_clk);
 
 	ret = clk_set_parent(cpu_clk, pll_p_clk);
-	if (ret) {
-		pr_err("Failed to switch cpu to clock pll_p\n");
-		goto out;
-	}
-
-	if (rate == clk_get_rate(pll_p_clk))
-		goto out;
-
-	ret = clk_set_rate(pll_x_clk, rate);
-	if (ret) {
-		pr_err("Failed to change pll_x to %lu\n", rate);
-		goto out;
-	}
-
-	ret = clk_set_parent(cpu_clk, pll_x_clk);
-	if (ret) {
-		pr_err("Failed to switch cpu to clock pll_x\n");
-		goto out;
-	}
+	if (ret)
+		clk_disable_unprepare(pll_x_clk);
+	else
+		pll_x_prepared = true;
 
-out:
-	clk_disable_unprepare(pll_x_clk);
 	return ret;
 }
 
 static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	unsigned long rate = freq_table[index].frequency;
+	unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
 	int ret = 0;
 
 	/*
@@ -98,10 +106,30 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 	else
 		clk_set_rate(emc_clk, 100000000);  /* emc 50Mhz */
 
-	ret = tegra_cpu_clk_set_rate(rate * 1000);
+	/*
+	 * target freq == pll_p, don't need to take extra reference to pll_x_clk
+	 * as it isn't used anymore.
+	 */
+	if (rate == ifreq)
+		return clk_set_parent(cpu_clk, pll_p_clk);
+
+	ret = clk_set_rate(pll_x_clk, rate * 1000);
+	/* Restore to earlier frequency on error, i.e. pll_x */
 	if (ret)
-		pr_err("cpu-tegra: Failed to set cpu frequency to %lu kHz\n",
-		       rate);
+		pr_err("Failed to change pll_x to %lu\n", rate);
+
+	ret = clk_set_parent(cpu_clk, pll_x_clk);
+	/* This shouldn't fail while changing or restoring */
+	WARN_ON(ret);
+
+	/*
+	 * Drop count to pll_x clock only if we switched to intermediate freq
+	 * earlier while transitioning to a target frequency.
+	 */
+	if (pll_x_prepared) {
+		clk_disable_unprepare(pll_x_clk);
+		pll_x_prepared = false;
+	}
 
 	return ret;
 }
@@ -137,16 +165,18 @@ static int tegra_cpu_exit(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver tegra_cpufreq_driver = {
-	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
-	.verify		= cpufreq_generic_frequency_table_verify,
-	.target_index	= tegra_target,
-	.get		= cpufreq_generic_get,
-	.init		= tegra_cpu_init,
-	.exit		= tegra_cpu_exit,
-	.name		= "tegra",
-	.attr		= cpufreq_generic_attr,
+	.flags			= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify			= cpufreq_generic_frequency_table_verify,
+	.get_intermediate	= tegra_get_intermediate,
+	.target_intermediate	= tegra_target_intermediate,
+	.target_index		= tegra_target,
+	.get			= cpufreq_generic_get,
+	.init			= tegra_cpu_init,
+	.exit			= tegra_cpu_exit,
+	.name			= "tegra",
+	.attr			= cpufreq_generic_attr,
 #ifdef CONFIG_PM
 	.suspend		= cpufreq_generic_suspend,
 #endif
 };
 