aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mach-tegra/cpu-tegra3.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm/mach-tegra/cpu-tegra3.c')
-rw-r--r--arch/arm/mach-tegra/cpu-tegra3.c555
1 files changed, 555 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/cpu-tegra3.c b/arch/arm/mach-tegra/cpu-tegra3.c
new file mode 100644
index 00000000000..76ff94435a1
--- /dev/null
+++ b/arch/arm/mach-tegra/cpu-tegra3.c
@@ -0,0 +1,555 @@
1/*
2 * arch/arm/mach-tegra/cpu-tegra3.c
3 *
4 * CPU auto-hotplug for Tegra3 CPUs
5 *
6 * Copyright (c) 2011-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/sched.h>
27#include <linux/cpufreq.h>
28#include <linux/delay.h>
29#include <linux/err.h>
30#include <linux/io.h>
31#include <linux/cpu.h>
32#include <linux/clk.h>
33#include <linux/debugfs.h>
34#include <linux/seq_file.h>
35#include <linux/pm_qos_params.h>
36
37#include "pm.h"
38#include "cpu-tegra.h"
39#include "clock.h"
40
/* Hotplug governor tunables (runtime-adjustable via module params). */
#define INITIAL_STATE	TEGRA_HP_DISABLED
#define UP2G0_DELAY_MS	70	/* delay before leaving the LP cluster */
#define UP2Gn_DELAY_MS	100	/* delay between G-cluster hotplug steps */
#define DOWN_DELAY_MS	2000	/* delay before offlining a CPU */

/* cpufreq driver lock, handed in by tegra_auto_hotplug_init(). */
static struct mutex *tegra3_cpu_lock;

static struct workqueue_struct *hotplug_wq;	/* runs hotplug_work */
static struct delayed_work hotplug_work;

/* When true, never switch down to the low-power (LP) CPU cluster. */
static bool no_lp;
module_param(no_lp, bool, 0644);

/* The *_DELAY_MS values above, converted to jiffies at init time. */
static unsigned long up2gn_delay;
static unsigned long up2g0_delay;
static unsigned long down_delay;
module_param(up2gn_delay, ulong, 0644);
module_param(up2g0_delay, ulong, 0644);
module_param(down_delay, ulong, 0644);

/* Cluster-switch frequency thresholds (kHz), derived from clock limits. */
static unsigned int idle_top_freq;
static unsigned int idle_bottom_freq;
module_param(idle_top_freq, uint, 0644);
module_param(idle_bottom_freq, uint, 0644);

/* Overhead figure fed to the EDP favor-up/favor-down heuristics. */
static int mp_overhead = 10;
module_param(mp_overhead, int, 0644);

/* Percentage of the highest CPU speed used as the "balanced" threshold. */
static int balance_level = 75;
module_param(balance_level, int, 0644);

/* Virtual CPU clock and per-cluster parents; resolved at init. */
static struct clk *cpu_clk;
static struct clk *cpu_g_clk;
static struct clk *cpu_lp_clk;

/* Per-CPU hotplug bookkeeping; up_down_count LSB == "currently up". */
static struct {
	cputime64_t time_up_total;	/* cumulative jiffies spent online */
	u64 last_update;		/* jiffies at last stats update */
	unsigned int up_down_count;	/* transition count; LSB = online */
} hp_stats[CONFIG_NR_CPUS + 1];	/* Append LP CPU entry at the end */
81
82static void hp_init_stats(void)
83{
84 int i;
85 u64 cur_jiffies = get_jiffies_64();
86
87 for (i = 0; i <= CONFIG_NR_CPUS; i++) {
88 hp_stats[i].time_up_total = 0;
89 hp_stats[i].last_update = cur_jiffies;
90
91 hp_stats[i].up_down_count = 0;
92 if (is_lp_cluster()) {
93 if (i == CONFIG_NR_CPUS)
94 hp_stats[i].up_down_count = 1;
95 } else {
96 if ((i < nr_cpu_ids) && cpu_online(i))
97 hp_stats[i].up_down_count = 1;
98 }
99 }
100
101}
102
103static void hp_stats_update(unsigned int cpu, bool up)
104{
105 u64 cur_jiffies = get_jiffies_64();
106 bool was_up = hp_stats[cpu].up_down_count & 0x1;
107
108 if (was_up)
109 hp_stats[cpu].time_up_total = cputime64_add(
110 hp_stats[cpu].time_up_total, cputime64_sub(
111 cur_jiffies, hp_stats[cpu].last_update));
112
113 if (was_up != up) {
114 hp_stats[cpu].up_down_count++;
115 if ((hp_stats[cpu].up_down_count & 0x1) != up) {
116 /* FIXME: sysfs user space CPU control breaks stats */
117 pr_err("tegra hotplug stats out of sync with %s CPU%d",
118 (cpu < CONFIG_NR_CPUS) ? "G" : "LP",
119 (cpu < CONFIG_NR_CPUS) ? cpu : 0);
120 hp_stats[cpu].up_down_count ^= 0x1;
121 }
122 }
123 hp_stats[cpu].last_update = cur_jiffies;
124}
125
126
/* Auto-hotplug state machine states. */
enum {
	TEGRA_HP_DISABLED = 0,	/* governor inactive, no hotplug actions */
	TEGRA_HP_IDLE,		/* enabled, no up/down trend pending */
	TEGRA_HP_DOWN,		/* trending down: offline CPUs / enter LP */
	TEGRA_HP_UP,		/* trending up: leave LP / online CPUs */
};
/* Current state; exposed via the "auto_hotplug" module parameter below. */
static int hp_state;
134
/*
 * "auto_hotplug" module-param setter: toggles the governor between
 * TEGRA_HP_DISABLED and TEGRA_HP_IDLE (param_set_bool writes 0/1, which
 * map onto those two enum values). Re-initializes the stats and kicks the
 * cpufreq governor when transitioning from disabled to enabled.
 */
static int hp_state_set(const char *arg, const struct kernel_param *kp)
{
	int ret = 0;
	int old_state;

	/* Init has not run yet: nothing to update, report success. */
	if (!tegra3_cpu_lock)
		return ret;

	mutex_lock(tegra3_cpu_lock);

	old_state = hp_state;
	ret = param_set_bool(arg, kp);	/* set idle or disabled only */

	if (ret == 0) {
		if ((hp_state == TEGRA_HP_DISABLED) &&
		    (old_state != TEGRA_HP_DISABLED))
			pr_info("Tegra auto-hotplug disabled\n");
		else if (hp_state != TEGRA_HP_DISABLED) {
			if (old_state == TEGRA_HP_DISABLED) {
				pr_info("Tegra auto-hotplug enabled\n");
				hp_init_stats();
			}
			/* catch-up with governor target speed */
			tegra_cpu_set_speed_cap(NULL);
		}
	} else
		pr_warn("%s: unable to set tegra hotplug state %s\n",
			__func__, arg);

	mutex_unlock(tegra3_cpu_lock);
	return ret;
}
167
/* "auto_hotplug" module-param getter: report hp_state as a plain int. */
static int hp_state_get(char *buffer, const struct kernel_param *kp)
{
	return param_get_int(buffer, kp);
}
172
173static struct kernel_param_ops tegra_hp_state_ops = {
174 .set = hp_state_set,
175 .get = hp_state_get,
176};
177module_param_cb(auto_hotplug, &tegra_hp_state_ops, &hp_state, 0644);
178
179
/* Verdict of the load-balance heuristic below. */
enum {
	TEGRA_CPU_SPEED_BALANCED,	/* room to online one more CPU */
	TEGRA_CPU_SPEED_BIASED,		/* keep the current CPU count */
	TEGRA_CPU_SPEED_SKEWED,		/* offline one CPU */
};

/*
 * Decide whether the current per-CPU frequency targets justify onlining
 * another core, keeping the count, or offlining one. PM QoS min/max
 * online-CPU constraints and EDP (electrical design point) limits can
 * override the pure frequency-based verdict. Caller holds tegra3_cpu_lock.
 */
static noinline int tegra_cpu_speed_balance(void)
{
	unsigned long highest_speed = tegra_cpu_highest_speed();
	unsigned long balanced_speed = highest_speed * balance_level / 100;
	unsigned long skewed_speed = balanced_speed / 2;
	unsigned int nr_cpus = num_online_cpus();
	/* QoS max of 0 (no request) is treated as "up to 4 CPUs". */
	unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);

	/* balanced: freq targets for all CPUs are above 50% of highest speed
	   biased: freq target for at least one CPU is below 50% threshold
	   skewed: freq targets for at least 2 CPUs are below 25% threshold */
	if (((tegra_count_slow_cpus(skewed_speed) >= 2) ||
	     tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) ||
	     (highest_speed <= idle_bottom_freq) || (nr_cpus > max_cpus)) &&
	    (nr_cpus > min_cpus))
		return TEGRA_CPU_SPEED_SKEWED;

	if (((tegra_count_slow_cpus(balanced_speed) >= 1) ||
	     (!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) ||
	     (highest_speed <= idle_bottom_freq) || (nr_cpus == max_cpus)) &&
	    (nr_cpus >= min_cpus))
		return TEGRA_CPU_SPEED_BIASED;

	return TEGRA_CPU_SPEED_BALANCED;
}
212
/*
 * Deferred hotplug worker. Under tegra3_cpu_lock it selects at most one
 * action per invocation — online a CPU, offline a CPU, or switch between
 * the LP and G clusters — then performs the cpu_up()/cpu_down() call
 * after dropping the lock (hotplug cannot run under the cpufreq lock).
 */
static void tegra_auto_hotplug_work_func(struct work_struct *work)
{
	bool up = false;
	/* nr_cpu_ids acts as the "no CPU selected" sentinel. */
	unsigned int cpu = nr_cpu_ids;

	mutex_lock(tegra3_cpu_lock);

	switch (hp_state) {
	case TEGRA_HP_DISABLED:
	case TEGRA_HP_IDLE:
		break;
	case TEGRA_HP_DOWN:
		cpu = tegra_get_slowest_cpu_n();
		if (cpu < nr_cpu_ids) {
			up = false;
			/* Re-arm: keep shedding CPUs while trending down. */
			queue_delayed_work(
				hotplug_wq, &hotplug_work, down_delay);
			hp_stats_update(cpu, false);
		} else if (!is_lp_cluster() && !no_lp) {
			/* No CPU left to offline: try the LP cluster. */
			if(!clk_set_parent(cpu_clk, cpu_lp_clk)) {
				hp_stats_update(CONFIG_NR_CPUS, true);
				hp_stats_update(0, false);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap(NULL);
			} else
				queue_delayed_work(
					hotplug_wq, &hotplug_work, down_delay);
		}
		break;
	case TEGRA_HP_UP:
		if (is_lp_cluster() && !no_lp) {
			/* Leave the LP cluster before onlining more CPUs. */
			if(!clk_set_parent(cpu_clk, cpu_g_clk)) {
				hp_stats_update(CONFIG_NR_CPUS, false);
				hp_stats_update(0, true);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap(NULL);
			}
		} else {
			switch (tegra_cpu_speed_balance()) {
			/* cpu speed is up and balanced - one more on-line */
			case TEGRA_CPU_SPEED_BALANCED:
				cpu = cpumask_next_zero(0, cpu_online_mask);
				if (cpu < nr_cpu_ids) {
					up = true;
					hp_stats_update(cpu, true);
				}
				break;
			/* cpu speed is up, but skewed - remove one core */
			case TEGRA_CPU_SPEED_SKEWED:
				cpu = tegra_get_slowest_cpu_n();
				if (cpu < nr_cpu_ids) {
					up = false;
					hp_stats_update(cpu, false);
				}
				break;
			/* cpu speed is up, but under-utilized - do nothing */
			case TEGRA_CPU_SPEED_BIASED:
			default:
				break;
			}
		}
		/* Always re-arm while in UP state to keep re-evaluating. */
		queue_delayed_work(
			hotplug_wq, &hotplug_work, up2gn_delay);
		break;
	default:
		pr_err("%s: invalid tegra hotplug state %d\n",
		       __func__, hp_state);
	}
	mutex_unlock(tegra3_cpu_lock);

	/* Perform the actual hotplug outside the lock. */
	if (cpu < nr_cpu_ids) {
		if (up)
			cpu_up(cpu);
		else
			cpu_down(cpu);
	}
}
290
/*
 * PM QoS notifier for PM_QOS_MIN_ONLINE_CPUS: if the new floor requires
 * two or more CPUs while we are on the single-CPU LP cluster, switch to
 * the G cluster immediately, then nudge the cpufreq governor.
 */
static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p)
{
	mutex_lock(tegra3_cpu_lock);

	if ((n >= 2) && is_lp_cluster()) {
		if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
			hp_stats_update(CONFIG_NR_CPUS, false);
			hp_stats_update(0, true);
		}
	}
	/* update governor state machine */
	tegra_cpu_set_speed_cap(NULL);
	mutex_unlock(tegra3_cpu_lock);
	return NOTIFY_OK;
}

static struct notifier_block min_cpus_notifier = {
	.notifier_call = min_cpus_notify,
};
310
/*
 * Governor hook: advance the hotplug state machine based on the new target
 * frequency (kHz) and schedule the hotplug worker when a trend starts.
 *
 * NOTE(review): this mutates hp_state and queues work without taking
 * tegra3_cpu_lock — presumably the cpufreq caller already holds it; verify
 * against cpu-tegra.c.
 */
void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
{
	unsigned long up_delay, top_freq, bottom_freq;

	if (!is_g_cluster_present())
		return;

	/* On suspend, park the state machine in IDLE (unless disabled). */
	if (suspend && (hp_state != TEGRA_HP_DISABLED)) {
		hp_state = TEGRA_HP_IDLE;

		/* Switch to G-mode if suspend rate is high enough */
		if (is_lp_cluster() && (cpu_freq >= idle_bottom_freq)) {
			if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
				hp_stats_update(CONFIG_NR_CPUS, false);
				hp_stats_update(0, true);
			}
		}
		return;
	}

	/* Thresholds and delay depend on which cluster is active. */
	if (is_lp_cluster()) {
		up_delay = up2g0_delay;
		top_freq = idle_top_freq;
		bottom_freq = 0;	/* never trend DOWN while already in LP */
	} else {
		up_delay = up2gn_delay;
		top_freq = idle_bottom_freq;
		bottom_freq = idle_bottom_freq;
	}

	/* A QoS floor of >= 2 CPUs forces the UP direction at any freq. */
	if (pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) >= 2) {
		if (hp_state != TEGRA_HP_UP) {
			hp_state = TEGRA_HP_UP;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		}
		return;
	}

	switch (hp_state) {
	case TEGRA_HP_DISABLED:
		break;
	case TEGRA_HP_IDLE:
		if (cpu_freq > top_freq) {
			hp_state = TEGRA_HP_UP;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		} else if (cpu_freq <= bottom_freq) {
			hp_state = TEGRA_HP_DOWN;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, down_delay);
		}
		break;
	case TEGRA_HP_DOWN:
		if (cpu_freq > top_freq) {
			hp_state = TEGRA_HP_UP;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		} else if (cpu_freq > bottom_freq) {
			hp_state = TEGRA_HP_IDLE;
		}
		break;
	case TEGRA_HP_UP:
		if (cpu_freq <= bottom_freq) {
			hp_state = TEGRA_HP_DOWN;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, down_delay);
		} else if (cpu_freq <= top_freq) {
			hp_state = TEGRA_HP_IDLE;
		}
		break;
	default:
		pr_err("%s: invalid tegra hotplug state %d\n",
		       __func__, hp_state);
		BUG();
	}
}
388
389int tegra_auto_hotplug_init(struct mutex *cpu_lock)
390{
391 /*
392 * Not bound to the issuer CPU (=> high-priority), has rescue worker
393 * task, single-threaded, freezable.
394 */
395 hotplug_wq = alloc_workqueue(
396 "cpu-tegra3", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1);
397 if (!hotplug_wq)
398 return -ENOMEM;
399 INIT_DELAYED_WORK(&hotplug_work, tegra_auto_hotplug_work_func);
400
401 cpu_clk = clk_get_sys(NULL, "cpu");
402 cpu_g_clk = clk_get_sys(NULL, "cpu_g");
403 cpu_lp_clk = clk_get_sys(NULL, "cpu_lp");
404 if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk))
405 return -ENOENT;
406
407 idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000;
408 idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000;
409
410 up2g0_delay = msecs_to_jiffies(UP2G0_DELAY_MS);
411 up2gn_delay = msecs_to_jiffies(UP2Gn_DELAY_MS);
412 down_delay = msecs_to_jiffies(DOWN_DELAY_MS);
413
414 tegra3_cpu_lock = cpu_lock;
415 hp_state = INITIAL_STATE;
416 hp_init_stats();
417 pr_info("Tegra auto-hotplug initialized: %s\n",
418 (hp_state == TEGRA_HP_DISABLED) ? "disabled" : "enabled");
419
420 if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier))
421 pr_err("%s: Failed to register min cpus PM QoS notifier\n",
422 __func__);
423
424 return 0;
425}
426
427#ifdef CONFIG_DEBUG_FS
428
429static struct dentry *hp_debugfs_root;
430
431struct pm_qos_request_list min_cpu_req;
432struct pm_qos_request_list max_cpu_req;
433
/*
 * debugfs "stats" dump: one column per G CPU plus a trailing LP column,
 * showing transition counts and cumulative online time (clock_t units).
 */
static int hp_stats_show(struct seq_file *s, void *data)
{
	int i;
	u64 cur_jiffies = get_jiffies_64();

	mutex_lock(tegra3_cpu_lock);
	if (hp_state != TEGRA_HP_DISABLED) {
		/* Fold elapsed time into the stats without changing state. */
		for (i = 0; i <= CONFIG_NR_CPUS; i++) {
			bool was_up = (hp_stats[i].up_down_count & 0x1);
			hp_stats_update(i, was_up);
		}
	}
	mutex_unlock(tegra3_cpu_lock);

	seq_printf(s, "%-15s ", "cpu:");
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		seq_printf(s, "G%-9d ", i);
	}
	seq_printf(s, "LP\n");

	seq_printf(s, "%-15s ", "transitions:");
	for (i = 0; i <= CONFIG_NR_CPUS; i++) {
		seq_printf(s, "%-10u ", hp_stats[i].up_down_count);
	}
	seq_printf(s, "\n");

	seq_printf(s, "%-15s ", "time plugged:");
	for (i = 0; i <= CONFIG_NR_CPUS; i++) {
		seq_printf(s, "%-10llu ",
			   cputime64_to_clock_t(hp_stats[i].time_up_total));
	}
	seq_printf(s, "\n");

	seq_printf(s, "%-15s %llu\n", "time-stamp:",
		   cputime64_to_clock_t(cur_jiffies));

	return 0;
}

/* seq_file open hook for the "stats" debugfs file. */
static int hp_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, hp_stats_show, inode->i_private);
}

static const struct file_operations hp_stats_fops = {
	.open = hp_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
484
/* debugfs "min_cpus": read/update the PM QoS minimum online-CPU count. */
static int min_cpus_get(void *data, u64 *val)
{
	*val = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
	return 0;
}
static int min_cpus_set(void *data, u64 val)
{
	pm_qos_update_request(&min_cpu_req, (s32)val);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(min_cpus_fops, min_cpus_get, min_cpus_set, "%llu\n");

/* debugfs "max_cpus": read/update the PM QoS maximum online-CPU count. */
static int max_cpus_get(void *data, u64 *val)
{
	*val = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
	return 0;
}
static int max_cpus_set(void *data, u64 val)
{
	pm_qos_update_request(&max_cpu_req, (s32)val);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(max_cpus_fops, max_cpus_get, max_cpus_set, "%llu\n");
508
509static int __init tegra_auto_hotplug_debug_init(void)
510{
511 if (!tegra3_cpu_lock)
512 return -ENOENT;
513
514 hp_debugfs_root = debugfs_create_dir("tegra_hotplug", NULL);
515 if (!hp_debugfs_root)
516 return -ENOMEM;
517
518 pm_qos_add_request(&min_cpu_req, PM_QOS_MIN_ONLINE_CPUS,
519 PM_QOS_DEFAULT_VALUE);
520 pm_qos_add_request(&max_cpu_req, PM_QOS_MAX_ONLINE_CPUS,
521 PM_QOS_DEFAULT_VALUE);
522
523 if (!debugfs_create_file(
524 "min_cpus", S_IRUGO, hp_debugfs_root, NULL, &min_cpus_fops))
525 goto err_out;
526
527 if (!debugfs_create_file(
528 "max_cpus", S_IRUGO, hp_debugfs_root, NULL, &max_cpus_fops))
529 goto err_out;
530
531 if (!debugfs_create_file(
532 "stats", S_IRUGO, hp_debugfs_root, NULL, &hp_stats_fops))
533 goto err_out;
534
535 return 0;
536
537err_out:
538 debugfs_remove_recursive(hp_debugfs_root);
539 pm_qos_remove_request(&min_cpu_req);
540 pm_qos_remove_request(&max_cpu_req);
541 return -ENOMEM;
542}
543
544late_initcall(tegra_auto_hotplug_debug_init);
545#endif
546
/*
 * Teardown: destroy the hotplug workqueue and, when debugfs is enabled,
 * remove the debugfs tree and the PM QoS requests added by
 * tegra_auto_hotplug_debug_init().
 */
void tegra_auto_hotplug_exit(void)
{
	destroy_workqueue(hotplug_wq);
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(hp_debugfs_root);
	pm_qos_remove_request(&min_cpu_req);
	pm_qos_remove_request(&max_cpu_req);
#endif
}