Diffstat (limited to 'arch/arm/mach-tegra/cpu-tegra.c')
 arch/arm/mach-tegra/cpu-tegra.c | 618 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 579 insertions(+), 39 deletions(-)
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index 0e1016a827a..b4ba093952c 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -7,6 +7,8 @@
  * Colin Cross <ccross@google.com>
  * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
  *
+ * Copyright (C) 2010-2012 NVIDIA Corporation
+ *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
  * may be copied, distributed, and modified under those terms.
@@ -29,33 +31,425 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/suspend.h>
+#include <linux/debugfs.h>
+#include <linux/cpu.h>
 
 #include <asm/system.h>
 
-#include <mach/hardware.h>
 #include <mach/clk.h>
+#include <mach/edp.h>
 
-/* Frequency table index must be sequential starting at 0 */
-static struct cpufreq_frequency_table freq_table[] = {
-	{ 0, 216000 },
-	{ 1, 312000 },
-	{ 2, 456000 },
-	{ 3, 608000 },
-	{ 4, 760000 },
-	{ 5, 816000 },
-	{ 6, 912000 },
-	{ 7, 1000000 },
-	{ 8, CPUFREQ_TABLE_END },
-};
+#include "clock.h"
+#include "cpu-tegra.h"
+#include "dvfs.h"
 
-#define NUM_CPUS	2
+/* tegra throttling and edp governors require frequencies in the table
+   to be in ascending order */
+static struct cpufreq_frequency_table *freq_table;
 
 static struct clk *cpu_clk;
 static struct clk *emc_clk;
 
-static unsigned long target_cpu_speed[NUM_CPUS];
+static unsigned long policy_max_speed[CONFIG_NR_CPUS];
+static unsigned long target_cpu_speed[CONFIG_NR_CPUS];
 static DEFINE_MUTEX(tegra_cpu_lock);
 static bool is_suspended;
+static int suspend_index;
+
+static bool force_policy_max;
+
+static int force_policy_max_set(const char *arg, const struct kernel_param *kp)
+{
+	int ret;
+	bool old_policy = force_policy_max;
+
+	mutex_lock(&tegra_cpu_lock);
+
+	ret = param_set_bool(arg, kp);
+	if ((ret == 0) && (old_policy != force_policy_max))
+		tegra_cpu_set_speed_cap(NULL);
+
+	mutex_unlock(&tegra_cpu_lock);
+	return ret;
+}
+
+static int force_policy_max_get(char *buffer, const struct kernel_param *kp)
+{
+	return param_get_bool(buffer, kp);
+}
+
+static struct kernel_param_ops policy_ops = {
+	.set = force_policy_max_set,
+	.get = force_policy_max_get,
+};
+module_param_cb(force_policy_max, &policy_ops, &force_policy_max, 0644);
+
+
+static unsigned int cpu_user_cap;
+
+static inline void _cpu_user_cap_set_locked(void)
+{
+#ifndef CONFIG_TEGRA_CPU_CAP_EXACT_FREQ
+	if (cpu_user_cap != 0) {
+		int i;
+		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+			if (freq_table[i].frequency > cpu_user_cap)
+				break;
+		}
+		i = (i == 0) ? 0 : i - 1;
+		cpu_user_cap = freq_table[i].frequency;
+	}
+#endif
+	tegra_cpu_set_speed_cap(NULL);
+}
+
+void tegra_cpu_user_cap_set(unsigned int speed_khz)
+{
+	mutex_lock(&tegra_cpu_lock);
+
+	cpu_user_cap = speed_khz;
+	_cpu_user_cap_set_locked();
+
+	mutex_unlock(&tegra_cpu_lock);
+}
+
+static int cpu_user_cap_set(const char *arg, const struct kernel_param *kp)
+{
+	int ret;
+
+	mutex_lock(&tegra_cpu_lock);
+
+	ret = param_set_uint(arg, kp);
+	if (ret == 0)
+		_cpu_user_cap_set_locked();
+
+	mutex_unlock(&tegra_cpu_lock);
+	return ret;
+}
+
+static int cpu_user_cap_get(char *buffer, const struct kernel_param *kp)
+{
+	return param_get_uint(buffer, kp);
+}
+
+static struct kernel_param_ops cap_ops = {
+	.set = cpu_user_cap_set,
+	.get = cpu_user_cap_get,
+};
+module_param_cb(cpu_user_cap, &cap_ops, &cpu_user_cap, 0644);
+
+static unsigned int user_cap_speed(unsigned int requested_speed)
+{
+	if ((cpu_user_cap) && (requested_speed > cpu_user_cap))
+		return cpu_user_cap;
+	return requested_speed;
+}
+
+#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
+
+static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
+{
+	return sprintf(buf, "%u\n", tegra_is_throttling());
+}
+
+cpufreq_freq_attr_ro(throttle);
+#endif /* CONFIG_TEGRA_THERMAL_THROTTLE */
+
+#ifdef CONFIG_TEGRA_EDP_LIMITS
+
+static const struct tegra_edp_limits *cpu_edp_limits;
+static int cpu_edp_limits_size;
+
+static const unsigned int *system_edp_limits;
+static bool system_edp_alarm;
+
+static int edp_thermal_index;
+static cpumask_t edp_cpumask;
+static unsigned int edp_limit;
+
+unsigned int tegra_get_edp_limit(void)
+{
+	return edp_limit;
+}
+
+static unsigned int edp_predict_limit(unsigned int cpus)
+{
+	unsigned int limit = 0;
+
+	BUG_ON(cpus == 0);
+	if (cpu_edp_limits) {
+		BUG_ON(edp_thermal_index >= cpu_edp_limits_size);
+		limit = cpu_edp_limits[edp_thermal_index].freq_limits[cpus - 1];
+	}
+	if (system_edp_limits && system_edp_alarm)
+		limit = min(limit, system_edp_limits[cpus - 1]);
+
+	return limit;
+}
+
+static void edp_update_limit(void)
+{
+	unsigned int limit = edp_predict_limit(cpumask_weight(&edp_cpumask));
+
+#ifdef CONFIG_TEGRA_EDP_EXACT_FREQ
+	edp_limit = limit;
+#else
+	unsigned int i;
+	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		if (freq_table[i].frequency > limit) {
+			break;
+		}
+	}
+	BUG_ON(i == 0);	/* min freq above the limit or table empty */
+	edp_limit = freq_table[i-1].frequency;
+#endif
+}
+
+static unsigned int edp_governor_speed(unsigned int requested_speed)
+{
+	if ((!edp_limit) || (requested_speed <= edp_limit))
+		return requested_speed;
+	else
+		return edp_limit;
+}
+
+int tegra_edp_update_thermal_zone(int temperature)
+{
+	int i;
+	int ret = 0;
+	int nlimits = cpu_edp_limits_size;
+	int index;
+
+	if (!cpu_edp_limits)
+		return -EINVAL;
+
+	index = nlimits - 1;
+
+	if (temperature < cpu_edp_limits[0].temperature) {
+		index = 0;
+	} else {
+		for (i = 0; i < (nlimits - 1); i++) {
+			if (temperature >= cpu_edp_limits[i].temperature &&
+			    temperature < cpu_edp_limits[i + 1].temperature) {
+				index = i + 1;
+				break;
+			}
+		}
+	}
+
+	mutex_lock(&tegra_cpu_lock);
+	edp_thermal_index = index;
+
+	/* Update cpu rate if cpufreq (at least on cpu0) is already started;
+	   alter cpu dvfs table for this thermal zone if necessary */
+	tegra_cpu_dvfs_alter(edp_thermal_index, true);
+	if (target_cpu_speed[0]) {
+		edp_update_limit();
+		tegra_cpu_set_speed_cap(NULL);
+	}
+	tegra_cpu_dvfs_alter(edp_thermal_index, false);
+	mutex_unlock(&tegra_cpu_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tegra_edp_update_thermal_zone);
+
+int tegra_system_edp_alarm(bool alarm)
+{
+	int ret = -ENODEV;
+
+	mutex_lock(&tegra_cpu_lock);
+	system_edp_alarm = alarm;
+
+	/* Update cpu rate if cpufreq (at least on cpu0) is already started
+	   and cancel emergency throttling after edp limit is applied */
+	if (target_cpu_speed[0]) {
+		edp_update_limit();
+		ret = tegra_cpu_set_speed_cap(NULL);
+		if (!ret && alarm)
+			tegra_edp_throttle_cpu_now(0);
+	}
+	mutex_unlock(&tegra_cpu_lock);
+
+	return ret;
+}
+
+bool tegra_cpu_edp_favor_up(unsigned int n, int mp_overhead)
+{
+	unsigned int current_limit, next_limit;
+
+	if (n == 0)
+		return true;
+
+	if (n >= ARRAY_SIZE(cpu_edp_limits->freq_limits))
+		return false;
+
+	current_limit = edp_predict_limit(n);
+	next_limit = edp_predict_limit(n + 1);
+
+	return ((next_limit * (n + 1)) >=
+		(current_limit * n * (100 + mp_overhead) / 100));
+}
+
+bool tegra_cpu_edp_favor_down(unsigned int n, int mp_overhead)
+{
+	unsigned int current_limit, next_limit;
+
+	if (n <= 1)
+		return false;
+
+	if (n > ARRAY_SIZE(cpu_edp_limits->freq_limits))
+		return true;
+
+	current_limit = edp_predict_limit(n);
+	next_limit = edp_predict_limit(n - 1);
+
+	return ((next_limit * (n - 1) * (100 + mp_overhead) / 100)) >
+		(current_limit * n);
+}
+
+static int tegra_cpu_edp_notify(
+	struct notifier_block *nb, unsigned long event, void *hcpu)
+{
+	int ret = 0;
+	unsigned int cpu_speed, new_speed;
+	int cpu = (long)hcpu;
+
+	switch (event) {
+	case CPU_UP_PREPARE:
+		mutex_lock(&tegra_cpu_lock);
+		cpu_set(cpu, edp_cpumask);
+		edp_update_limit();
+
+		cpu_speed = tegra_getspeed(0);
+		new_speed = edp_governor_speed(cpu_speed);
+		if (new_speed < cpu_speed) {
+			ret = tegra_cpu_set_speed_cap(NULL);
+			if (ret) {
+				cpu_clear(cpu, edp_cpumask);
+				edp_update_limit();
+			}
+
+			printk(KERN_DEBUG "tegra CPU:%sforce EDP limit %u kHz"
+				"\n", ret ? " failed to " : " ", new_speed);
+		}
+		mutex_unlock(&tegra_cpu_lock);
+		break;
+	case CPU_DEAD:
+		mutex_lock(&tegra_cpu_lock);
+		cpu_clear(cpu, edp_cpumask);
+		edp_update_limit();
+		tegra_cpu_set_speed_cap(NULL);
+		mutex_unlock(&tegra_cpu_lock);
+		break;
+	}
+	return notifier_from_errno(ret);
+}
+
+static struct notifier_block tegra_cpu_edp_notifier = {
+	.notifier_call = tegra_cpu_edp_notify,
+};
+
+static void tegra_cpu_edp_init(bool resume)
+{
+	tegra_get_system_edp_limits(&system_edp_limits);
+	tegra_get_cpu_edp_limits(&cpu_edp_limits, &cpu_edp_limits_size);
+
+	if (!(cpu_edp_limits || system_edp_limits)) {
+		if (!resume)
+			pr_info("cpu-tegra: no EDP table is provided\n");
+		return;
+	}
+
+	/* FIXME: use the highest temperature limits if sensor is not on-line?
+	 * If thermal zone is not set yet by the sensor, edp_thermal_index = 0.
+	 * Boot frequency allowed SoC to get here, should work till sensor is
+	 * initialized.
+	 */
+	edp_cpumask = *cpu_online_mask;
+	edp_update_limit();
+
+	if (!resume) {
+		register_hotcpu_notifier(&tegra_cpu_edp_notifier);
+		pr_info("cpu-tegra: init EDP limit: %u MHz\n", edp_limit/1000);
+	}
+}
+
+static void tegra_cpu_edp_exit(void)
+{
+	if (!(cpu_edp_limits || system_edp_limits))
+		return;
+
+	unregister_hotcpu_notifier(&tegra_cpu_edp_notifier);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int system_edp_alarm_get(void *data, u64 *val)
+{
+	*val = (u64)system_edp_alarm;
+	return 0;
+}
+static int system_edp_alarm_set(void *data, u64 val)
+{
+	if (val > 1) {	/* emulate emergency throttling */
+		tegra_edp_throttle_cpu_now(val);
+		return 0;
+	}
+	return tegra_system_edp_alarm((bool)val);
+}
+DEFINE_SIMPLE_ATTRIBUTE(system_edp_alarm_fops,
+			system_edp_alarm_get, system_edp_alarm_set, "%llu\n");
+
+static int __init tegra_edp_debug_init(struct dentry *cpu_tegra_debugfs_root)
+{
+	if (!debugfs_create_file("edp_alarm", 0644, cpu_tegra_debugfs_root,
+				 NULL, &system_edp_alarm_fops))
+		return -ENOMEM;
+
+	return 0;
+}
+#endif
+
+#else	/* CONFIG_TEGRA_EDP_LIMITS */
+#define edp_governor_speed(requested_speed) (requested_speed)
+#define tegra_cpu_edp_init(resume)
+#define tegra_cpu_edp_exit()
+#define tegra_edp_debug_init(cpu_tegra_debugfs_root) (0)
+#endif	/* CONFIG_TEGRA_EDP_LIMITS */
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *cpu_tegra_debugfs_root;
+
+static int __init tegra_cpu_debug_init(void)
+{
+	cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", 0);
+
+	if (!cpu_tegra_debugfs_root)
+		return -ENOMEM;
+
+	if (tegra_throttle_debug_init(cpu_tegra_debugfs_root))
+		goto err_out;
+
+	if (tegra_edp_debug_init(cpu_tegra_debugfs_root))
+		goto err_out;
+
+	return 0;
+
+err_out:
+	debugfs_remove_recursive(cpu_tegra_debugfs_root);
+	return -ENOMEM;
+}
+
+static void __exit tegra_cpu_debug_exit(void)
+{
+	debugfs_remove_recursive(cpu_tegra_debugfs_root);
+}
+
+late_initcall(tegra_cpu_debug_init);
+module_exit(tegra_cpu_debug_exit);
+#endif /* CONFIG_DEBUG_FS */
 
 int tegra_verify_speed(struct cpufreq_policy *policy)
 {
@@ -66,7 +460,7 @@ unsigned int tegra_getspeed(unsigned int cpu)
 {
 	unsigned long rate;
 
-	if (cpu >= NUM_CPUS)
+	if (cpu >= CONFIG_NR_CPUS)
 		return 0;
 
 	rate = clk_get_rate(cpu_clk) / 1000;
@@ -81,6 +475,10 @@ static int tegra_update_cpu_speed(unsigned long rate)
 	freqs.old = tegra_getspeed(0);
 	freqs.new = rate;
 
+	rate = clk_round_rate(cpu_clk, rate * 1000);
+	if (!IS_ERR_VALUE(rate))
+		freqs.new = rate / 1000;
+
 	if (freqs.old == freqs.new)
 		return ret;
 
@@ -88,12 +486,20 @@ static int tegra_update_cpu_speed(unsigned long rate)
 	 * Vote on memory bus frequency based on cpu frequency
 	 * This sets the minimum frequency, display or avp may request higher
 	 */
-	if (rate >= 816000)
-		clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
-	else if (rate >= 456000)
-		clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */
-	else
-		clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */
+	if (freqs.old < freqs.new) {
+		ret = tegra_update_mselect_rate(freqs.new);
+		if (ret) {
+			pr_err("cpu-tegra: Failed to scale mselect for cpu"
+				" frequency %u kHz\n", freqs.new);
+			return ret;
+		}
+		ret = clk_set_rate(emc_clk, tegra_emc_to_cpu_ratio(freqs.new));
+		if (ret) {
+			pr_err("cpu-tegra: Failed to scale emc for cpu"
+				" frequency %u kHz\n", freqs.new);
+			return ret;
+		}
+	}
 
 	for_each_online_cpu(freqs.cpu)
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -113,48 +519,122 @@ static int tegra_update_cpu_speed(unsigned long rate)
 	for_each_online_cpu(freqs.cpu)
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 
+	if (freqs.old > freqs.new) {
+		clk_set_rate(emc_clk, tegra_emc_to_cpu_ratio(freqs.new));
+		tegra_update_mselect_rate(freqs.new);
+	}
+
 	return 0;
 }
 
-static unsigned long tegra_cpu_highest_speed(void)
+unsigned int tegra_count_slow_cpus(unsigned long speed_limit)
 {
-	unsigned long rate = 0;
+	unsigned int cnt = 0;
+	int i;
+
+	for_each_online_cpu(i)
+		if (target_cpu_speed[i] <= speed_limit)
+			cnt++;
+	return cnt;
+}
+
+unsigned int tegra_get_slowest_cpu_n(void) {
+	unsigned int cpu = nr_cpu_ids;
+	unsigned long rate = ULONG_MAX;
 	int i;
 
 	for_each_online_cpu(i)
+		if ((i > 0) && (rate > target_cpu_speed[i])) {
+			cpu = i;
+			rate = target_cpu_speed[i];
+		}
+	return cpu;
+}
+
+unsigned long tegra_cpu_lowest_speed(void) {
+	unsigned long rate = ULONG_MAX;
+	int i;
+
+	for_each_online_cpu(i)
+		rate = min(rate, target_cpu_speed[i]);
+	return rate;
+}
+
+unsigned long tegra_cpu_highest_speed(void) {
+	unsigned long policy_max = ULONG_MAX;
+	unsigned long rate = 0;
+	int i;
+
+	for_each_online_cpu(i) {
+		if (force_policy_max)
+			policy_max = min(policy_max, policy_max_speed[i]);
 		rate = max(rate, target_cpu_speed[i]);
+	}
+	rate = min(rate, policy_max);
 	return rate;
 }
 
+int tegra_cpu_set_speed_cap(unsigned int *speed_cap)
+{
+	int ret = 0;
+	unsigned int new_speed = tegra_cpu_highest_speed();
+
+	if (is_suspended)
+		return -EBUSY;
+
+	new_speed = tegra_throttle_governor_speed(new_speed);
+	new_speed = edp_governor_speed(new_speed);
+	new_speed = user_cap_speed(new_speed);
+	if (speed_cap)
+		*speed_cap = new_speed;
+
+	ret = tegra_update_cpu_speed(new_speed);
+	if (ret == 0)
+		tegra_auto_hotplug_governor(new_speed, false);
+	return ret;
+}
+
+int tegra_suspended_target(unsigned int target_freq)
+{
+	unsigned int new_speed = target_freq;
+
+	if (!is_suspended)
+		return -EBUSY;
+
+	/* apply only "hard" caps */
+	new_speed = tegra_throttle_governor_speed(new_speed);
+	new_speed = edp_governor_speed(new_speed);
+
+	return tegra_update_cpu_speed(new_speed);
+}
+
 static int tegra_target(struct cpufreq_policy *policy,
 	unsigned int target_freq,
 	unsigned int relation)
 {
 	int idx;
 	unsigned int freq;
+	unsigned int new_speed;
 	int ret = 0;
 
 	mutex_lock(&tegra_cpu_lock);
 
-	if (is_suspended) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	cpufreq_frequency_table_target(policy, freq_table, target_freq,
+	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
 		relation, &idx);
+	if (ret)
+		goto _out;
 
 	freq = freq_table[idx].frequency;
 
 	target_cpu_speed[policy->cpu] = freq;
-
-	ret = tegra_update_cpu_speed(tegra_cpu_highest_speed());
-
-out:
+	ret = tegra_cpu_set_speed_cap(&new_speed);
+_out:
 	mutex_unlock(&tegra_cpu_lock);
+
 	return ret;
 }
 
+
 static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
 	void *dummy)
 {
@@ -162,10 +642,17 @@ static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
 	if (event == PM_SUSPEND_PREPARE) {
 		is_suspended = true;
 		pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
-			freq_table[0].frequency);
-		tegra_update_cpu_speed(freq_table[0].frequency);
+			freq_table[suspend_index].frequency);
+		tegra_update_cpu_speed(freq_table[suspend_index].frequency);
+		tegra_auto_hotplug_governor(
+			freq_table[suspend_index].frequency, true);
 	} else if (event == PM_POST_SUSPEND) {
+		unsigned int freq;
 		is_suspended = false;
+		tegra_cpu_edp_init(true);
+		tegra_cpu_set_speed_cap(&freq);
+		pr_info("Tegra cpufreq resume: restoring frequency to %d kHz\n",
+			freq);
 	}
 	mutex_unlock(&tegra_cpu_lock);
 
@@ -178,7 +665,7 @@ static struct notifier_block tegra_cpu_pm_notifier = {
 
 static int tegra_cpu_init(struct cpufreq_policy *policy)
 {
-	if (policy->cpu >= NUM_CPUS)
+	if (policy->cpu >= CONFIG_NR_CPUS)
 		return -EINVAL;
 
 	cpu_clk = clk_get_sys(NULL, "cpu");
@@ -205,8 +692,9 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
 	policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 	cpumask_copy(policy->related_cpus, cpu_possible_mask);
 
-	if (policy->cpu == 0)
+	if (policy->cpu == 0) {
 		register_pm_notifier(&tegra_cpu_pm_notifier);
+	}
 
 	return 0;
 }
@@ -220,8 +708,30 @@ static int tegra_cpu_exit(struct cpufreq_policy *policy)
 	return 0;
 }
 
+static int tegra_cpufreq_policy_notifier(
+	struct notifier_block *nb, unsigned long event, void *data)
+{
+	int i, ret;
+	struct cpufreq_policy *policy = data;
+
+	if (event == CPUFREQ_NOTIFY) {
+		ret = cpufreq_frequency_table_target(policy, freq_table,
+			policy->max, CPUFREQ_RELATION_H, &i);
+		policy_max_speed[policy->cpu] =
+			ret ? policy->max : freq_table[i].frequency;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block tegra_cpufreq_policy_nb = {
+	.notifier_call = tegra_cpufreq_policy_notifier,
+};
+
 static struct freq_attr *tegra_cpufreq_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
+#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
+	&throttle,
+#endif
 	NULL,
 };
 
@@ -237,12 +747,42 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
 
 static int __init tegra_cpufreq_init(void)
 {
+	int ret = 0;
+
+	struct tegra_cpufreq_table_data *table_data =
+		tegra_cpufreq_table_get();
+	if (IS_ERR_OR_NULL(table_data))
+		return -EINVAL;
+
+	suspend_index = table_data->suspend_index;
+
+	ret = tegra_throttle_init(&tegra_cpu_lock);
+	if (ret)
+		return ret;
+
+	ret = tegra_auto_hotplug_init(&tegra_cpu_lock);
+	if (ret)
+		return ret;
+
+	freq_table = table_data->freq_table;
+	tegra_cpu_edp_init(false);
+
+	ret = cpufreq_register_notifier(
+		&tegra_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
+	if (ret)
+		return ret;
+
 	return cpufreq_register_driver(&tegra_cpufreq_driver);
 }
 
 static void __exit tegra_cpufreq_exit(void)
 {
-	cpufreq_unregister_driver(&tegra_cpufreq_driver);
+	tegra_throttle_exit();
+	tegra_cpu_edp_exit();
+	tegra_auto_hotplug_exit();
+	cpufreq_unregister_driver(&tegra_cpufreq_driver);
+	cpufreq_unregister_notifier(
+		&tegra_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
 }
 
 
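
Note on the capping chain introduced by this patch: tegra_cpu_set_speed_cap() starts from the highest per-CPU request and clamps it through the thermal-throttle governor, the EDP governor, and the user cap, in that order, before programming the clock. The standalone sketch below illustrates only that clamping order; the cap values and the apply_cap() helper are stand-ins made up for the example, not part of the kernel code.

/* cap-chain.c: user-space illustration of the speed-cap ordering above.
 * Build with any C compiler, e.g. "cc cap-chain.c -o cap-chain".
 */
#include <stdio.h>

/* Stand-in caps, in kHz. In the kernel these come from the throttle
 * governor, the EDP tables, and the cpu_user_cap module parameter. */
static unsigned int throttle_cap = 760000;
static unsigned int edp_cap      = 912000;
static unsigned int user_cap     = 0;	/* 0 means "no cap active" */

static unsigned int apply_cap(unsigned int speed, unsigned int cap)
{
	/* A zero cap is treated as inactive, mirroring how
	 * edp_governor_speed() and user_cap_speed() behave in the patch. */
	if (cap && speed > cap)
		return cap;
	return speed;
}

int main(void)
{
	unsigned int requested = 1000000;	/* highest per-CPU request */
	unsigned int new_speed = requested;

	/* Same ordering as tegra_cpu_set_speed_cap():
	 * throttle cap, then EDP cap, then user cap. */
	new_speed = apply_cap(new_speed, throttle_cap);
	new_speed = apply_cap(new_speed, edp_cap);
	new_speed = apply_cap(new_speed, user_cap);

	printf("requested %u kHz -> capped to %u kHz\n", requested, new_speed);
	return 0;
}

With the sample numbers above the program prints "requested 1000000 kHz -> capped to 760000 kHz", i.e. the effective frequency is the minimum of all active limits.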