Diffstat (limited to 'arch/arm/mach-tegra/pm.c')
-rw-r--r--  arch/arm/mach-tegra/pm.c  1326
1 files changed, 1326 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
new file mode 100644
index 00000000000..6e8b0782e9b
--- /dev/null
+++ b/arch/arm/mach-tegra/pm.c
@@ -0,0 +1,1326 @@
1/*
2 * arch/arm/mach-tegra/pm.c
3 *
4 * CPU complex suspend & resume functions for Tegra SoCs
5 *
6 * Copyright (c) 2009-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/ctype.h>
25#include <linux/init.h>
26#include <linux/io.h>
27#include <linux/sched.h>
28#include <linux/smp.h>
29#include <linux/irq.h>
30#include <linux/interrupt.h>
31#include <linux/clk.h>
32#include <linux/err.h>
33#include <linux/debugfs.h>
34#include <linux/delay.h>
35#include <linux/suspend.h>
36#include <linux/earlysuspend.h>
37#include <linux/slab.h>
38#include <linux/serial_reg.h>
39#include <linux/seq_file.h>
40#include <linux/uaccess.h>
41#include <linux/syscore_ops.h>
42#include <linux/vmalloc.h>
43#include <linux/memblock.h>
44#include <linux/console.h>
45#include <linux/pm_qos_params.h>
46
47#include <asm/cacheflush.h>
48#include <asm/cpu_pm.h>
49#include <asm/hardware/gic.h>
50#include <asm/localtimer.h>
51#include <asm/pgalloc.h>
52#include <asm/pgtable.h>
53#include <asm/tlbflush.h>
54
55#include <mach/clk.h>
56#include <mach/iomap.h>
57#include <mach/irqs.h>
58#include <mach/powergate.h>
59
60#include "board.h"
61#include "clock.h"
62#include "cpuidle.h"
63#include "fuse.h"
64#include "gic.h"
65#include "pm.h"
66#include "pm-irq.h"
67#include "reset.h"
68#include "sleep.h"
69#include "timer.h"
70#include "dvfs.h"
71#include "cpu-tegra.h"
72
73struct suspend_context {
74 /*
75 * The next 7 values are referenced by offset in __restart_plls
76 * in headsmp-t2.S, and should not be moved
77 */
78 u32 pllx_misc;
79 u32 pllx_base;
80 u32 pllp_misc;
81 u32 pllp_base;
82 u32 pllp_outa;
83 u32 pllp_outb;
84 u32 pll_timeout;
85
86 u32 cpu_burst;
87 u32 clk_csite_src;
88 u32 cclk_divider;
89
90 u32 mc[3];
91 u8 uart[5];
92
93 struct tegra_twd_context twd;
94};
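/* Note: a single instance of this structure (tegra_sctx, below) is filled in
 * piecemeal by suspend_cpu_complex(), tegra_common_suspend() and the debug
 * UART syscore suspend hook, and read back by the corresponding resume
 * paths on wakeup. */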
95
96#ifdef CONFIG_PM_SLEEP
97#if USE_TEGRA_CPU_SUSPEND
98void *tegra_cpu_context; /* non-cacheable page for CPU context */
99#endif
100phys_addr_t tegra_pgd_phys; /* pgd used by hotplug & LP2 bootup */
101static pgd_t *tegra_pgd;
102static DEFINE_SPINLOCK(tegra_lp2_lock);
103static cpumask_t tegra_in_lp2;
104static cpumask_t *iram_cpu_lp2_mask;
105static unsigned long *iram_cpu_lp1_mask;
106static u8 *iram_save;
107static unsigned long iram_save_size;
108static void __iomem *iram_code = IO_ADDRESS(TEGRA_IRAM_CODE_AREA);
109static void __iomem *clk_rst = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
110static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
111static int tegra_last_pclk;
112#endif
113
114struct suspend_context tegra_sctx;
115
116#define TEGRA_POWER_PWRREQ_POLARITY (1 << 8) /* core power request polarity */
117#define TEGRA_POWER_PWRREQ_OE (1 << 9) /* core power request enable */
118#define TEGRA_POWER_SYSCLK_POLARITY (1 << 10) /* sys clk polarity */
119#define TEGRA_POWER_SYSCLK_OE (1 << 11) /* system clock enable */
120#define TEGRA_POWER_PWRGATE_DIS (1 << 12) /* power gate disabled */
121#define TEGRA_POWER_EFFECT_LP0 (1 << 14) /* enter LP0 when CPU pwr gated */
122#define TEGRA_POWER_CPU_PWRREQ_POLARITY (1 << 15) /* CPU power request polarity */
123#define TEGRA_POWER_CPU_PWRREQ_OE (1 << 16) /* CPU power request enable */
124
125#define PMC_CTRL 0x0
126#define PMC_CTRL_LATCH_WAKEUPS (1 << 5)
127#define PMC_WAKE_MASK 0xc
128#define PMC_WAKE_LEVEL 0x10
129#define PMC_DPAD_ORIDE 0x1C
130#define PMC_WAKE_DELAY 0xe0
131#define PMC_DPD_SAMPLE 0x20
132
133#define PMC_WAKE_STATUS 0x14
134#define PMC_SW_WAKE_STATUS 0x18
135#define PMC_COREPWRGOOD_TIMER 0x3c
136#define PMC_CPUPWRGOOD_TIMER 0xc8
137#define PMC_CPUPWROFF_TIMER 0xcc
138#define PMC_COREPWROFF_TIMER PMC_WAKE_DELAY
139
140#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
141#define PMC_SCRATCH4_WAKE_CLUSTER_MASK (1<<31)
142#endif
143
144#define CLK_RESET_CCLK_BURST 0x20
145#define CLK_RESET_CCLK_DIVIDER 0x24
146#define CLK_RESET_PLLC_BASE 0x80
147#define CLK_RESET_PLLM_BASE 0x90
148#define CLK_RESET_PLLX_BASE 0xe0
149#define CLK_RESET_PLLX_MISC 0xe4
150#define CLK_RESET_PLLP_BASE 0xa0
151#define CLK_RESET_PLLP_OUTA 0xa4
152#define CLK_RESET_PLLP_OUTB 0xa8
153#define CLK_RESET_PLLP_MISC 0xac
154
155#define CLK_RESET_SOURCE_CSITE 0x1d4
156
157#define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
158#define CLK_RESET_CCLK_RUN_POLICY_SHIFT 4
159#define CLK_RESET_CCLK_IDLE_POLICY_SHIFT 0
160#define CLK_RESET_CCLK_IDLE_POLICY 1
161#define CLK_RESET_CCLK_RUN_POLICY 2
162#define CLK_RESET_CCLK_BURST_POLICY_PLLM 3
163#define CLK_RESET_CCLK_BURST_POLICY_PLLX 8
164
165#define EMC_MRW_0 0x0e8
166#define EMC_MRW_DEV_SELECTN 30
167#define EMC_MRW_DEV_NONE (3 << EMC_MRW_DEV_SELECTN)
168
169#define MC_SECURITY_START 0x6c
170#define MC_SECURITY_SIZE 0x70
171#define MC_SECURITY_CFG2 0x7c
172
173#define AWAKE_CPU_FREQ_MIN 100000
174static struct pm_qos_request_list awake_cpu_freq_req;
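/* This PM QoS request keeps the CPU frequency floor at AWAKE_CPU_FREQ_MIN
 * (presumably kHz, matching the kHz units used for cpu_resume_boost later in
 * this file) while the system is not early-suspended; when
 * CONFIG_HAS_EARLYSUSPEND is enabled, the hooks at the bottom of this file
 * drop the request to the default on early suspend and restore it on late
 * resume. */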
175
176struct dvfs_rail *tegra_cpu_rail;
177static struct dvfs_rail *tegra_core_rail;
178static struct clk *tegra_pclk;
179static const struct tegra_suspend_platform_data *pdata;
180static enum tegra_suspend_mode current_suspend_mode = TEGRA_SUSPEND_NONE;
181
182#if defined(CONFIG_TEGRA_CLUSTER_CONTROL) && INSTRUMENT_CLUSTER_SWITCH
183enum tegra_cluster_switch_time_id {
184 tegra_cluster_switch_time_id_start = 0,
185 tegra_cluster_switch_time_id_prolog,
186 tegra_cluster_switch_time_id_switch,
187 tegra_cluster_switch_time_id_epilog,
188 tegra_cluster_switch_time_id_max
189};
190
191static unsigned long
192 tegra_cluster_switch_times[tegra_cluster_switch_time_id_max];
193#define tegra_cluster_switch_time(flags, id) \
194 do { \
195 barrier(); \
196 if (flags & TEGRA_POWER_CLUSTER_MASK) { \
197 void __iomem *timer_us = \
198 IO_ADDRESS(TEGRA_TMRUS_BASE); \
199 if (id < tegra_cluster_switch_time_id_max) \
200 tegra_cluster_switch_times[id] = \
201 readl(timer_us); \
202 wmb(); \
203 } \
204 barrier(); \
205 } while(0)
206#else
207#define tegra_cluster_switch_time(flags, id) do {} while(0)
208#endif
209
210#ifdef CONFIG_PM_SLEEP
211static const char *tegra_suspend_name[TEGRA_MAX_SUSPEND_MODE] = {
212 [TEGRA_SUSPEND_NONE] = "none",
213 [TEGRA_SUSPEND_LP2] = "lp2",
214 [TEGRA_SUSPEND_LP1] = "lp1",
215 [TEGRA_SUSPEND_LP0] = "lp0",
216};
217
218unsigned long tegra_cpu_power_good_time(void)
219{
220 if (WARN_ON_ONCE(!pdata))
221 return 5000;
222
223 return pdata->cpu_timer;
224}
225
226unsigned long tegra_cpu_power_off_time(void)
227{
228 if (WARN_ON_ONCE(!pdata))
229 return 5000;
230
231 return pdata->cpu_off_timer;
232}
233
234unsigned long tegra_cpu_lp2_min_residency(void)
235{
236 if (WARN_ON_ONCE(!pdata))
237 return 2000;
238
239 return pdata->cpu_lp2_min_residency;
240}
241
242/*
243 * create_suspend_pgtable
244 *
245 * Creates a page table with identity mappings of physical memory and IRAM
246 * for use when the MMU is off, in addition to all the regular kernel mappings.
247 */
248static __init int create_suspend_pgtable(void)
249{
250 tegra_pgd = pgd_alloc(&init_mm);
251 if (!tegra_pgd)
252 return -ENOMEM;
253
254 /* Only identity-map size of lowmem (high_memory - PAGE_OFFSET) */
255 identity_mapping_add(tegra_pgd, PLAT_PHYS_OFFSET,
256 PLAT_PHYS_OFFSET + (unsigned long)high_memory - PAGE_OFFSET);
257 identity_mapping_add(tegra_pgd, IO_IRAM_PHYS,
258 IO_IRAM_PHYS + SECTION_SIZE);
259
260 /* inner/outer write-back/write-allocate, sharable */
261 tegra_pgd_phys = (virt_to_phys(tegra_pgd) & PAGE_MASK) | 0x4A;
262
263 return 0;
264}
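/* The bits OR'd in above are TTBR-style memory attributes; tegra_pgd_phys is
 * presumably loaded into TTBR0 by the low-level sleep/reset code (outside
 * this file) so these identity mappings are live while the MMU is being
 * turned off or back on. */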
265
266/*
267 * alloc_suspend_context
268 *
269 * Allocate a non-cacheable page to hold the CPU contexts.
270 * The standard ARM CPU context save functions don't work if there's
 271 * an external L2 cache controller (like a PL310) in the system.
272 */
273static __init int alloc_suspend_context(void)
274{
275#if USE_TEGRA_CPU_SUSPEND
276 pgprot_t prot = __pgprot_modify(pgprot_kernel, L_PTE_MT_MASK,
277 L_PTE_MT_BUFFERABLE | L_PTE_XN);
278 struct page *ctx_page;
279 unsigned long ctx_virt = 0;
280 pgd_t *pgd;
281 pmd_t *pmd;
282 pte_t *pte;
283
284 ctx_page = alloc_pages(GFP_KERNEL, 0);
285 if (IS_ERR_OR_NULL(ctx_page))
286 goto fail;
287
288 tegra_cpu_context = vm_map_ram(&ctx_page, 1, -1, prot);
289 if (IS_ERR_OR_NULL(tegra_cpu_context))
290 goto fail;
291
292 /* Add the context page to our private pgd. */
293 ctx_virt = (unsigned long)tegra_cpu_context;
294
295 pgd = tegra_pgd + pgd_index(ctx_virt);
296 if (!pgd_present(*pgd))
297 goto fail;
298 pmd = pmd_offset(pgd, ctx_virt);
299 if (!pmd_none(*pmd))
300 goto fail;
301 pte = pte_alloc_kernel(pmd, ctx_virt);
302 if (!pte)
303 goto fail;
304
305 set_pte_ext(pte, mk_pte(ctx_page, prot), 0);
306
307 outer_clean_range(__pa(pmd), __pa(pmd + 1));
308
309 return 0;
310
311fail:
312 if (ctx_page)
313 __free_page(ctx_page);
314 if (ctx_virt)
315 vm_unmap_ram((void*)ctx_virt, 1);
316 tegra_cpu_context = NULL;
317 return -ENOMEM;
318#else
319 return 0;
320#endif
321}
322
323/* Ensures that sufficient time has passed for a register write to
324 * serialize into the 32KHz domain */
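/* (One period of the nominal 32.768 kHz clock is ~30.5 us, so the 130 us
 * delay below spans a little over four 32 kHz periods.) */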
325static void pmc_32kwritel(u32 val, unsigned long offs)
326{
327 writel(val, pmc + offs);
328 udelay(130);
329}
330
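/*
 * set_power_timers
 *
 * Converts the requested power-good (us_on) and power-off (us_off) intervals
 * from microseconds into pclk ticks (rounding up) and programs the PMC CPU
 * power-good and power-off timers, skipping the writes when neither the pclk
 * rate nor the off time has changed. Worked example with hypothetical
 * numbers: for us_on = 2000 and pclk = 108 MHz,
 * ticks = (2000 * 108000000 + 999999) / 1000000 = 216000.
 */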
331static void set_power_timers(unsigned long us_on, unsigned long us_off,
332 long rate)
333{
334 static unsigned long last_us_off = 0;
335 unsigned long long ticks;
336 unsigned long long pclk;
337
338 if (WARN_ON_ONCE(rate <= 0))
339 pclk = 100000000;
340 else
341 pclk = rate;
342
343 if ((rate != tegra_last_pclk) || (us_off != last_us_off)) {
344 ticks = (us_on * pclk) + 999999ull;
345 do_div(ticks, 1000000);
346 writel((unsigned long)ticks, pmc + PMC_CPUPWRGOOD_TIMER);
347
348 ticks = (us_off * pclk) + 999999ull;
349 do_div(ticks, 1000000);
350 writel((unsigned long)ticks, pmc + PMC_CPUPWROFF_TIMER);
351 wmb();
352 }
353 tegra_last_pclk = pclk;
354 last_us_off = us_off;
355}
356
357/*
358 * restore_cpu_complex
359 *
360 * restores cpu clock setting, clears flow controller
361 *
362 * Always called on CPU 0.
363 */
364static void restore_cpu_complex(u32 mode)
365{
366 int cpu = smp_processor_id();
367 unsigned int reg, policy;
368
369 BUG_ON(cpu != 0);
370
371 /* restore original PLL settings */
372#ifdef CONFIG_ARCH_TEGRA_2x_SOC
373 writel(tegra_sctx.pllp_misc, clk_rst + CLK_RESET_PLLP_MISC);
374 writel(tegra_sctx.pllp_base, clk_rst + CLK_RESET_PLLP_BASE);
375 writel(tegra_sctx.pllp_outa, clk_rst + CLK_RESET_PLLP_OUTA);
376 writel(tegra_sctx.pllp_outb, clk_rst + CLK_RESET_PLLP_OUTB);
377#endif
378
379 /* Is CPU complex already running on PLLX? */
380 reg = readl(clk_rst + CLK_RESET_CCLK_BURST);
381 policy = (reg >> CLK_RESET_CCLK_BURST_POLICY_SHIFT) & 0xF;
382
383 if (policy == CLK_RESET_CCLK_IDLE_POLICY)
384 reg = (reg >> CLK_RESET_CCLK_IDLE_POLICY_SHIFT) & 0xF;
385 else if (policy == CLK_RESET_CCLK_RUN_POLICY)
386 reg = (reg >> CLK_RESET_CCLK_RUN_POLICY_SHIFT) & 0xF;
387 else
388 BUG();
389
390 if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
391 /* restore PLLX settings if CPU is on different PLL */
392 writel(tegra_sctx.pllx_misc, clk_rst + CLK_RESET_PLLX_MISC);
393 writel(tegra_sctx.pllx_base, clk_rst + CLK_RESET_PLLX_BASE);
394
395 /* wait for PLL stabilization if PLLX was enabled */
396 if (tegra_sctx.pllx_base & (1<<30)) {
397#if USE_PLL_LOCK_BITS
398 /* Enable lock detector */
399 reg = readl(clk_rst + CLK_RESET_PLLX_MISC);
400 reg |= 1<<18;
401 writel(reg, clk_rst + CLK_RESET_PLLX_MISC);
402 while (!(readl(clk_rst + CLK_RESET_PLLX_BASE) &
403 (1<<27)))
404 cpu_relax();
405
406 udelay(PLL_POST_LOCK_DELAY);
407#else
408 udelay(300);
409#endif
410 }
411 }
412
413 /* Restore original burst policy setting for calls resulting from CPU
414 LP2 in idle or system suspend; keep cluster switch prolog setting
415 intact. */
416 if (!(mode & TEGRA_POWER_CLUSTER_MASK)) {
417 writel(tegra_sctx.cclk_divider, clk_rst +
418 CLK_RESET_CCLK_DIVIDER);
419 writel(tegra_sctx.cpu_burst, clk_rst +
420 CLK_RESET_CCLK_BURST);
421 }
422
423 writel(tegra_sctx.clk_csite_src, clk_rst + CLK_RESET_SOURCE_CSITE);
424
425 /* Do not power-gate CPU 0 when flow controlled */
426 reg = readl(FLOW_CTRL_CPU_CSR(cpu));
427 reg &= ~FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfe bitmap */
428 reg &= ~FLOW_CTRL_CSR_WFI_BITMAP; /* clear wfi bitmap */
429 reg &= ~FLOW_CTRL_CSR_ENABLE; /* clear enable */
430 reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr */
431 reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event */
432 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
433
 434 /* If an immediate cluster switch is being performed, restore the
435 local timer registers. For calls resulting from CPU LP2 in
436 idle or system suspend, the local timer was shut down and
437 timekeeping switched over to the global system timer. In this
438 case keep local timer disabled, and restore only periodic load. */
439 if (!(mode & (TEGRA_POWER_CLUSTER_MASK |
440 TEGRA_POWER_CLUSTER_IMMEDIATE)))
441 tegra_sctx.twd.twd_ctrl = 0;
442 tegra_twd_resume(&tegra_sctx.twd);
443}
444
445/*
446 * suspend_cpu_complex
447 *
448 * saves pll state for use by restart_plls, prepares flow controller for
449 * transition to suspend state
450 *
451 * Must always be called on cpu 0.
452 */
453static void suspend_cpu_complex(u32 mode)
454{
455 int cpu = smp_processor_id();
456 unsigned int reg;
457 int i;
458
459 BUG_ON(cpu != 0);
460
461 /* switch coresite to clk_m, save off original source */
462 tegra_sctx.clk_csite_src = readl(clk_rst + CLK_RESET_SOURCE_CSITE);
463 writel(3<<30, clk_rst + CLK_RESET_SOURCE_CSITE);
464
465 tegra_sctx.cpu_burst = readl(clk_rst + CLK_RESET_CCLK_BURST);
466 tegra_sctx.pllx_base = readl(clk_rst + CLK_RESET_PLLX_BASE);
467 tegra_sctx.pllx_misc = readl(clk_rst + CLK_RESET_PLLX_MISC);
468 tegra_sctx.pllp_base = readl(clk_rst + CLK_RESET_PLLP_BASE);
469 tegra_sctx.pllp_outa = readl(clk_rst + CLK_RESET_PLLP_OUTA);
470 tegra_sctx.pllp_outb = readl(clk_rst + CLK_RESET_PLLP_OUTB);
471 tegra_sctx.pllp_misc = readl(clk_rst + CLK_RESET_PLLP_MISC);
472 tegra_sctx.cclk_divider = readl(clk_rst + CLK_RESET_CCLK_DIVIDER);
473
474 tegra_twd_suspend(&tegra_sctx.twd);
475
476 reg = readl(FLOW_CTRL_CPU_CSR(cpu));
477 reg &= ~FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfe bitmap */
478 reg &= ~FLOW_CTRL_CSR_WFI_BITMAP; /* clear wfi bitmap */
479 reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */
480 reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event flag */
481#ifdef CONFIG_ARCH_TEGRA_2x_SOC
482 reg |= FLOW_CTRL_CSR_WFE_CPU0 << cpu; /* enable power gating on wfe */
483#else
484 reg |= FLOW_CTRL_CSR_WFI_CPU0 << cpu; /* enable power gating on wfi */
485#endif
486 reg |= FLOW_CTRL_CSR_ENABLE; /* enable power gating */
487 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
488
489 for (i = 0; i < num_possible_cpus(); i++) {
490 if (i == cpu)
491 continue;
492 reg = readl(FLOW_CTRL_CPU_CSR(i));
493 reg |= FLOW_CTRL_CSR_EVENT_FLAG;
494 reg |= FLOW_CTRL_CSR_INTR_FLAG;
495 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(i));
496 }
497
498 tegra_gic_cpu_disable();
499#ifndef CONFIG_ARCH_TEGRA_2x_SOC
500 /* Tegra3 enters LPx states via WFI - do not propagate legacy IRQs
 501 to the CPU core to avoid falling through WFI (IRQ-to-flow controller wake
502 path is not affected). */
503 tegra_gic_pass_through_disable();
504#endif
505}
506
507void tegra_clear_cpu_in_lp2(int cpu)
508{
509 spin_lock(&tegra_lp2_lock);
510 BUG_ON(!cpumask_test_cpu(cpu, &tegra_in_lp2));
511 cpumask_clear_cpu(cpu, &tegra_in_lp2);
512
513 /* Update the IRAM copy used by the reset handler. The IRAM copy
 514 can't be used directly by cpumask_clear_cpu() because it uses
515 LDREX/STREX which requires the addressed location to be inner
516 cacheable and sharable which IRAM isn't. */
517 writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
518 dsb();
519
520 spin_unlock(&tegra_lp2_lock);
521}
522
523bool tegra_set_cpu_in_lp2(int cpu)
524{
525 bool last_cpu = false;
526
527 spin_lock(&tegra_lp2_lock);
528 BUG_ON(cpumask_test_cpu(cpu, &tegra_in_lp2));
529 cpumask_set_cpu(cpu, &tegra_in_lp2);
530
531 /* Update the IRAM copy used by the reset handler. The IRAM copy
 532 can't be used directly by cpumask_set_cpu() because it uses
533 LDREX/STREX which requires the addressed location to be inner
534 cacheable and sharable which IRAM isn't. */
535 writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
536 dsb();
537
538 if ((cpu == 0) && cpumask_equal(&tegra_in_lp2, cpu_online_mask))
539 last_cpu = true;
540#ifdef CONFIG_ARCH_TEGRA_2x_SOC
541 else if (cpu == 1)
542 tegra2_cpu_set_resettable_soon();
543#endif
544
545 spin_unlock(&tegra_lp2_lock);
546 return last_cpu;
547}
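/* The return value is true only when CPU 0 is the last online CPU to enter
 * LP2 (tegra_in_lp2 equals the online mask). The LP2 idle path presumably
 * uses this to decide when to call tegra_idle_lp2_last() below, which
 * performs the final suspend steps. */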
548
549static void tegra_sleep_core(enum tegra_suspend_mode mode,
550 unsigned long v2p)
551{
552#ifdef CONFIG_TRUSTED_FOUNDATIONS
553 if (mode == TEGRA_SUSPEND_LP0) {
554 tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE3,
555 virt_to_phys(tegra_resume));
556 } else {
557 tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE6,
558 (TEGRA_RESET_HANDLER_BASE +
559 tegra_cpu_reset_handler_offset));
560 }
561#endif
562#ifdef CONFIG_ARCH_TEGRA_2x_SOC
563 tegra2_sleep_core(v2p);
564#else
565 tegra3_sleep_core(v2p);
566#endif
567}
568
569static inline void tegra_sleep_cpu(unsigned long v2p)
570{
571#ifdef CONFIG_TRUSTED_FOUNDATIONS
572 tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE4,
573 (TEGRA_RESET_HANDLER_BASE +
574 tegra_cpu_reset_handler_offset));
575#endif
576 tegra_sleep_cpu_save(v2p);
577}
578
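/*
 * tegra_idle_lp2_last
 *
 * Rough sequence, as implemented below: program the PMC power-request bits
 * and power timers, optionally arm the LP2 wake timer, save the CPU complex
 * state, clean/flush the caches and disable the outer cache, then enter the
 * sleep state via tegra_sleep_cpu(). Cluster-switch requests
 * (TEGRA_POWER_CLUSTER_MASK) additionally run the switch prolog/epilog.
 * On wakeup the cache and CPU complex state are restored and the remaining
 * LP2 timer time is returned.
 */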
579unsigned int tegra_idle_lp2_last(unsigned int sleep_time, unsigned int flags)
580{
581 u32 mode; /* hardware + software power mode flags */
582 unsigned int remain;
583 pgd_t *pgd;
584
585 /* Only the last cpu down does the final suspend steps */
586 mode = readl(pmc + PMC_CTRL);
587 mode |= TEGRA_POWER_CPU_PWRREQ_OE;
588 if (pdata->combined_req)
589 mode &= ~TEGRA_POWER_PWRREQ_OE;
590 else
591 mode |= TEGRA_POWER_PWRREQ_OE;
592 mode &= ~TEGRA_POWER_EFFECT_LP0;
593 pmc_32kwritel(mode, PMC_CTRL);
594 mode |= flags;
595
596 tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);
597
598 /*
599 * We can use clk_get_rate_all_locked() here, because all other cpus
600 * are in LP2 state and irqs are disabled
601 */
602 if (flags & TEGRA_POWER_CLUSTER_MASK) {
603 set_power_timers(pdata->cpu_timer, 0,
604 clk_get_rate_all_locked(tegra_pclk));
605 tegra_cluster_switch_prolog(mode);
606 } else {
607 set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
608 clk_get_rate_all_locked(tegra_pclk));
609 }
610
611 if (sleep_time)
612 tegra_lp2_set_trigger(sleep_time);
613
614 cpu_complex_pm_enter();
615 suspend_cpu_complex(mode);
616 tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_prolog);
617 flush_cache_all();
618 /*
619 * No need to flush complete L2. Cleaning kernel and IO mappings
620 * is enough for the LP code sequence that has L2 disabled but
621 * MMU on.
622 */
623 pgd = cpu_get_pgd();
624 outer_clean_range(__pa(pgd + USER_PTRS_PER_PGD),
625 __pa(pgd + PTRS_PER_PGD));
626 outer_disable();
627
628 tegra_sleep_cpu(PLAT_PHYS_OFFSET - PAGE_OFFSET);
629
630 tegra_init_cache(false);
631 tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_switch);
632 restore_cpu_complex(mode);
633 cpu_complex_pm_exit();
634
635 remain = tegra_lp2_timer_remain();
636 if (sleep_time)
637 tegra_lp2_set_trigger(0);
638
639 if (flags & TEGRA_POWER_CLUSTER_MASK)
640 tegra_cluster_switch_epilog(mode);
641
642 tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_epilog);
643
644#if INSTRUMENT_CLUSTER_SWITCH
645 if (flags & TEGRA_POWER_CLUSTER_MASK) {
646 pr_err("%s: prolog %lu us, switch %lu us, epilog %lu us, total %lu us\n",
647 is_lp_cluster() ? "G=>LP" : "LP=>G",
648 tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog] -
649 tegra_cluster_switch_times[tegra_cluster_switch_time_id_start],
650 tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch] -
651 tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog],
652 tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
653 tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch],
654 tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
655 tegra_cluster_switch_times[tegra_cluster_switch_time_id_start]);
656 }
657#endif
658 return remain;
659}
660
661static int tegra_common_suspend(void)
662{
663 void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
664
665 tegra_sctx.mc[0] = readl(mc + MC_SECURITY_START);
666 tegra_sctx.mc[1] = readl(mc + MC_SECURITY_SIZE);
667 tegra_sctx.mc[2] = readl(mc + MC_SECURITY_CFG2);
668
669 /* copy the reset vector and SDRAM shutdown code into IRAM */
670 memcpy(iram_save, iram_code, iram_save_size);
671 memcpy(iram_code, tegra_iram_start(), iram_save_size);
672
673 return 0;
674}
675
676static void tegra_common_resume(void)
677{
678 void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
679#ifdef CONFIG_ARCH_TEGRA_2x_SOC
680 void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
681#endif
682
683 /* Clear DPD sample */
684 writel(0x0, pmc + PMC_DPD_SAMPLE);
685
686 writel(tegra_sctx.mc[0], mc + MC_SECURITY_START);
687 writel(tegra_sctx.mc[1], mc + MC_SECURITY_SIZE);
688 writel(tegra_sctx.mc[2], mc + MC_SECURITY_CFG2);
689#ifdef CONFIG_ARCH_TEGRA_2x_SOC
690 /* trigger emc mode write */
691 writel(EMC_MRW_DEV_NONE, emc + EMC_MRW_0);
692#endif
693 /* clear scratch registers shared by suspend and the reset pen */
694 writel(0x0, pmc + PMC_SCRATCH39);
695 writel(0x0, pmc + PMC_SCRATCH41);
696
697 /* restore IRAM */
698 memcpy(iram_code, iram_save, iram_save_size);
699}
700
701static int tegra_suspend_prepare_late(void)
702{
703#ifdef CONFIG_ARCH_TEGRA_2x_SOC
704 disable_irq(INT_SYS_STATS_MON);
705#endif
706 return 0;
707}
708
709static void tegra_suspend_wake(void)
710{
711#ifdef CONFIG_ARCH_TEGRA_2x_SOC
712 enable_irq(INT_SYS_STATS_MON);
713#endif
714}
715
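/*
 * tegra_pm_set
 *
 * Programs the PMC for the requested suspend mode: LP0 additionally sets the
 * warmboot flag, points SCRATCH1 at the LP0 resume vector and enables DPD
 * sampling; LP0 and LP1 both store the tegra_resume address in SCRATCH41;
 * LP2 just refreshes the power timers using the current pclk rate. The
 * set_power_timers() call and final PMC_CTRL write apply to every mode.
 */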
716static void tegra_pm_set(enum tegra_suspend_mode mode)
717{
718 u32 reg, boot_flag;
719 unsigned long rate = 32768;
720
721 reg = readl(pmc + PMC_CTRL);
722 reg |= TEGRA_POWER_CPU_PWRREQ_OE;
723 if (pdata->combined_req)
724 reg &= ~TEGRA_POWER_PWRREQ_OE;
725 else
726 reg |= TEGRA_POWER_PWRREQ_OE;
727 reg &= ~TEGRA_POWER_EFFECT_LP0;
728
729 switch (mode) {
730 case TEGRA_SUSPEND_LP0:
731#ifdef CONFIG_ARCH_TEGRA_3x_SOC
732 rate = clk_get_rate_all_locked(tegra_pclk);
733#endif
734 if (pdata->combined_req) {
735 reg |= TEGRA_POWER_PWRREQ_OE;
736 reg &= ~TEGRA_POWER_CPU_PWRREQ_OE;
737 }
738
739 /*
 740 * LP0 boots through the AVP, which then resumes the AVP at
 741 * the address in scratch 39, and the CPU at the address in
 742 * scratch 41 (tegra_resume)
743 */
744 writel(0x0, pmc + PMC_SCRATCH39);
745
 746 /* Enable DPD sample to trigger sampling of the pads' data and the
 747 * direction in which each pad will be driven during LP0 mode */
748 writel(0x1, pmc + PMC_DPD_SAMPLE);
749
750 /* Set warmboot flag */
751 boot_flag = readl(pmc + PMC_SCRATCH0);
752 pmc_32kwritel(boot_flag | 1, PMC_SCRATCH0);
753
754 pmc_32kwritel(tegra_lp0_vec_start, PMC_SCRATCH1);
755
756 reg |= TEGRA_POWER_EFFECT_LP0;
757 /* No break here. LP0 code falls through to write SCRATCH41 */
758 case TEGRA_SUSPEND_LP1:
759 __raw_writel(virt_to_phys(tegra_resume), pmc + PMC_SCRATCH41);
760 wmb();
761 break;
762 case TEGRA_SUSPEND_LP2:
763 rate = clk_get_rate(tegra_pclk);
764 break;
765 case TEGRA_SUSPEND_NONE:
766 return;
767 default:
768 BUG();
769 }
770
771 set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer, rate);
772
773 pmc_32kwritel(reg, PMC_CTRL);
774}
775
776static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
777 [TEGRA_SUSPEND_NONE] = "none",
778 [TEGRA_SUSPEND_LP2] = "LP2",
779 [TEGRA_SUSPEND_LP1] = "LP1",
780 [TEGRA_SUSPEND_LP0] = "LP0",
781};
782
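/*
 * tegra_suspend_enter
 *
 * Wraps tegra_suspend_dram() with the board suspend/resume hooks and, using
 * the persistent clock, measures how long the system was actually suspended
 * so the interval can be reported to the DVFS rail accounting via
 * tegra_dvfs_rail_pause().
 */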
783static int tegra_suspend_enter(suspend_state_t state)
784{
785 int ret;
786 ktime_t delta;
787 struct timespec ts_entry, ts_exit;
788
789 if (pdata && pdata->board_suspend)
790 pdata->board_suspend(current_suspend_mode, TEGRA_SUSPEND_BEFORE_PERIPHERAL);
791
792 read_persistent_clock(&ts_entry);
793
794 ret = tegra_suspend_dram(current_suspend_mode, 0);
795 if (ret) {
796 pr_info("Aborting suspend, tegra_suspend_dram error=%d\n", ret);
797 goto abort_suspend;
798 }
799
800 read_persistent_clock(&ts_exit);
801
802 if (timespec_compare(&ts_exit, &ts_entry) > 0) {
803 delta = timespec_to_ktime(timespec_sub(ts_exit, ts_entry));
804
805 tegra_dvfs_rail_pause(tegra_cpu_rail, delta, false);
806 if (current_suspend_mode == TEGRA_SUSPEND_LP0)
807 tegra_dvfs_rail_pause(tegra_core_rail, delta, false);
808 else
809 tegra_dvfs_rail_pause(tegra_core_rail, delta, true);
810 }
811
812abort_suspend:
813 if (pdata && pdata->board_resume)
814 pdata->board_resume(current_suspend_mode, TEGRA_RESUME_AFTER_PERIPHERAL);
815
816 return ret;
817}
818
819static void tegra_suspend_check_pwr_stats(void)
820{
821 /* cpus and l2 are powered off later */
822 unsigned long pwrgate_partid_mask =
823#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
824 (1 << TEGRA_POWERGATE_HEG) |
825 (1 << TEGRA_POWERGATE_SATA) |
826 (1 << TEGRA_POWERGATE_3D1) |
827#endif
828 (1 << TEGRA_POWERGATE_3D) |
829 (1 << TEGRA_POWERGATE_VENC) |
830 (1 << TEGRA_POWERGATE_PCIE) |
831 (1 << TEGRA_POWERGATE_VDEC) |
832 (1 << TEGRA_POWERGATE_MPE);
833
834 int partid;
835
836 for (partid = 0; partid < TEGRA_NUM_POWERGATE; partid++)
837 if ((1 << partid) & pwrgate_partid_mask)
838 if (tegra_powergate_is_powered(partid))
839 pr_warning("partition %s is left on before suspend\n",
840 tegra_powergate_get_name(partid));
841
842 return;
843}
844
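/*
 * tegra_suspend_dram
 *
 * Full system-suspend path: validates the mode (falling back from LP0 to LP1
 * if the pending wake events can't be handled in LP0), warns about
 * partitions left powered, saves IRAM and the MC security registers,
 * programs the PMC via tegra_pm_set(), saves the CPU complex, flushes and
 * disables the caches, and enters the sleep state via tegra_sleep_cpu()
 * (LP2) or tegra_sleep_core() (LP0/LP1). The second half of the function
 * undoes all of this on wakeup.
 */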
845int tegra_suspend_dram(enum tegra_suspend_mode mode, unsigned int flags)
846{
847 int err = 0;
848
849 if (WARN_ON(mode <= TEGRA_SUSPEND_NONE ||
850 mode >= TEGRA_MAX_SUSPEND_MODE)) {
851 err = -ENXIO;
852 goto fail;
853 }
854
855 if ((mode == TEGRA_SUSPEND_LP0) && !tegra_pm_irq_lp0_allowed()) {
856 pr_info("LP0 not used due to unsupported wakeup events\n");
857 mode = TEGRA_SUSPEND_LP1;
858 }
859
860 if ((mode == TEGRA_SUSPEND_LP0) || (mode == TEGRA_SUSPEND_LP1))
861 tegra_suspend_check_pwr_stats();
862
863 tegra_common_suspend();
864
865 tegra_pm_set(mode);
866
867 if (pdata && pdata->board_suspend)
868 pdata->board_suspend(mode, TEGRA_SUSPEND_BEFORE_CPU);
869
870 local_fiq_disable();
871
872 cpu_pm_enter();
873 cpu_complex_pm_enter();
874
875 if (mode == TEGRA_SUSPEND_LP0) {
876#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
877 u32 reg = readl(pmc + PMC_SCRATCH4);
878 if (is_lp_cluster())
879 reg |= PMC_SCRATCH4_WAKE_CLUSTER_MASK;
880 else
881 reg &= (~PMC_SCRATCH4_WAKE_CLUSTER_MASK);
882 pmc_32kwritel(reg, PMC_SCRATCH4);
883#endif
884 tegra_lp0_suspend_mc();
885 tegra_cpu_reset_handler_save();
886
887 }
888 else if (mode == TEGRA_SUSPEND_LP1)
889 *iram_cpu_lp1_mask = 1;
890
891 suspend_cpu_complex(flags);
892
893 flush_cache_all();
894 outer_flush_all();
895 outer_disable();
896
897 if (mode == TEGRA_SUSPEND_LP2)
898 tegra_sleep_cpu(PLAT_PHYS_OFFSET - PAGE_OFFSET);
899 else
900 tegra_sleep_core(mode, PLAT_PHYS_OFFSET - PAGE_OFFSET);
901
902 tegra_init_cache(true);
903
904 if (mode == TEGRA_SUSPEND_LP0) {
905 tegra_cpu_reset_handler_restore();
906 tegra_lp0_resume_mc();
907 } else if (mode == TEGRA_SUSPEND_LP1)
908 *iram_cpu_lp1_mask = 0;
909
910 restore_cpu_complex(flags);
911
912 /* for platforms where the core & CPU power requests are
913 * combined as a single request to the PMU, transition out
914 * of LP0 state by temporarily enabling both requests
915 */
916 if (mode == TEGRA_SUSPEND_LP0 && pdata->combined_req) {
917 u32 reg;
918 reg = readl(pmc + PMC_CTRL);
919 reg |= TEGRA_POWER_CPU_PWRREQ_OE;
920 pmc_32kwritel(reg, PMC_CTRL);
921 reg &= ~TEGRA_POWER_PWRREQ_OE;
922 pmc_32kwritel(reg, PMC_CTRL);
923 }
924
925 cpu_complex_pm_exit();
926 cpu_pm_exit();
927
928 if (pdata && pdata->board_resume)
929 pdata->board_resume(mode, TEGRA_RESUME_AFTER_CPU);
930
931 local_fiq_enable();
932
933 tegra_common_resume();
934
935fail:
936 return err;
937}
938
939/*
940 * Function pointers to optional board specific function
941 */
942void (*tegra_deep_sleep)(int);
943EXPORT_SYMBOL(tegra_deep_sleep);
944
945static int tegra_suspend_prepare(void)
946{
947 if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
948 tegra_deep_sleep(1);
949 return 0;
950}
951
952static void tegra_suspend_finish(void)
953{
954 if (pdata && pdata->cpu_resume_boost) {
955 int ret = tegra_suspended_target(pdata->cpu_resume_boost);
956 pr_info("Tegra: resume CPU boost to %u KHz: %s (%d)\n",
957 pdata->cpu_resume_boost, ret ? "Failed" : "OK", ret);
958 }
959
960 if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
961 tegra_deep_sleep(0);
962}
963
964static const struct platform_suspend_ops tegra_suspend_ops = {
965 .valid = suspend_valid_only_mem,
966 .prepare = tegra_suspend_prepare,
967 .finish = tegra_suspend_finish,
968 .prepare_late = tegra_suspend_prepare_late,
969 .wake = tegra_suspend_wake,
970 .enter = tegra_suspend_enter,
971};
972
973static ssize_t suspend_mode_show(struct kobject *kobj,
974 struct kobj_attribute *attr, char *buf)
975{
976 char *start = buf;
977 char *end = buf + PAGE_SIZE;
978
979 start += scnprintf(start, end - start, "%s ", \
980 tegra_suspend_name[current_suspend_mode]);
981 start += scnprintf(start, end - start, "\n");
982
983 return start - buf;
984}
985
986static ssize_t suspend_mode_store(struct kobject *kobj,
987 struct kobj_attribute *attr,
988 const char *buf, size_t n)
989{
990 int len;
991 const char *name_ptr;
992 enum tegra_suspend_mode new_mode;
993
994 name_ptr = buf;
995 while (*name_ptr && !isspace(*name_ptr))
996 name_ptr++;
997 len = name_ptr - buf;
998 if (!len)
999 goto bad_name;
 1000 /* TEGRA_SUSPEND_NONE and TEGRA_SUSPEND_LP2 are not allowed as suspend states */
1001 if (!(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_NONE], len))
1002 || !(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_LP2], len))) {
1003 pr_info("Illegal tegra suspend state: %s\n", buf);
1004 goto bad_name;
1005 }
1006
1007 for (new_mode = TEGRA_SUSPEND_NONE; \
1008 new_mode < TEGRA_MAX_SUSPEND_MODE; ++new_mode) {
1009 if (!strncmp(buf, tegra_suspend_name[new_mode], len)) {
1010 current_suspend_mode = new_mode;
1011 break;
1012 }
1013 }
1014
1015bad_name:
1016 return n;
1017}
1018
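/* The attribute below appears as /sys/power/suspend/mode (the "suspend"
 * kobject is created under power_kobj in tegra_init_suspend()); e.g.
 * "echo lp0 > /sys/power/suspend/mode" selects LP0, while "none" and "lp2"
 * are rejected by suspend_mode_store() above. */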
1019static struct kobj_attribute suspend_mode_attribute =
1020 __ATTR(mode, 0644, suspend_mode_show, suspend_mode_store);
1021
1022static struct kobject *suspend_kobj;
1023
1024static int tegra_pm_enter_suspend(void)
1025{
1026 pr_info("Entering suspend state %s\n", lp_state[current_suspend_mode]);
1027 if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1028 tegra_lp0_cpu_mode(true);
1029 return 0;
1030}
1031
1032static void tegra_pm_enter_resume(void)
1033{
1034 if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1035 tegra_lp0_cpu_mode(false);
1036 pr_info("Exited suspend state %s\n", lp_state[current_suspend_mode]);
1037}
1038
1039static struct syscore_ops tegra_pm_enter_syscore_ops = {
1040 .suspend = tegra_pm_enter_suspend,
1041 .resume = tegra_pm_enter_resume,
1042};
1043
1044static __init int tegra_pm_enter_syscore_init(void)
1045{
1046 register_syscore_ops(&tegra_pm_enter_syscore_ops);
1047 return 0;
1048}
1049subsys_initcall(tegra_pm_enter_syscore_init);
1050#endif
1051
1052void __init tegra_init_suspend(struct tegra_suspend_platform_data *plat)
1053{
1054 u32 reg;
1055 u32 mode;
1056
1057 tegra_cpu_rail = tegra_dvfs_get_rail_by_name("vdd_cpu");
1058 tegra_core_rail = tegra_dvfs_get_rail_by_name("vdd_core");
1059 pm_qos_add_request(&awake_cpu_freq_req, PM_QOS_CPU_FREQ_MIN,
1060 AWAKE_CPU_FREQ_MIN);
1061
1062 tegra_pclk = clk_get_sys(NULL, "pclk");
1063 BUG_ON(IS_ERR(tegra_pclk));
1064 pdata = plat;
1065 (void)reg;
1066 (void)mode;
1067
1068 if (plat->suspend_mode == TEGRA_SUSPEND_LP2)
1069 plat->suspend_mode = TEGRA_SUSPEND_LP0;
1070
1071#ifndef CONFIG_PM_SLEEP
1072 if (plat->suspend_mode != TEGRA_SUSPEND_NONE) {
1073 pr_warning("%s: Suspend requires CONFIG_PM_SLEEP -- "
1074 "disabling suspend\n", __func__);
1075 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1076 }
1077#else
1078 if (create_suspend_pgtable() < 0) {
1079 pr_err("%s: PGD memory alloc failed -- LP0/LP1/LP2 unavailable\n",
1080 __func__);
1081 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1082 goto fail;
1083 }
1084
1085 if (alloc_suspend_context() < 0) {
1086 pr_err("%s: CPU context alloc failed -- LP0/LP1/LP2 unavailable\n",
1087 __func__);
1088 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1089 goto fail;
1090 }
1091
1092 if ((tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) &&
1093 (tegra_get_revision() == TEGRA_REVISION_A01) &&
1094 (plat->suspend_mode == TEGRA_SUSPEND_LP0)) {
1095 /* Tegra 3 A01 supports only LP1 */
1096 pr_warning("%s: Suspend mode LP0 is not supported on A01 "
1097 "-- disabling LP0\n", __func__);
1098 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1099 }
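	/* When requested via tegra_lp0_vec_relocate, the bootloader-provided
	 * LP0 resume vector is copied into a cache-line-aligned kernel buffer
	 * below and tegra_lp0_vec_start is updated to point at the copy,
	 * presumably so the original bootloader memory no longer needs to be
	 * reserved. */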
1100 if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && tegra_lp0_vec_size &&
1101 tegra_lp0_vec_relocate) {
1102 unsigned char *reloc_lp0;
1103 unsigned long tmp;
1104 void __iomem *orig;
1105 reloc_lp0 = kmalloc(tegra_lp0_vec_size + L1_CACHE_BYTES - 1,
1106 GFP_KERNEL);
1107 WARN_ON(!reloc_lp0);
1108 if (!reloc_lp0) {
1109 pr_err("%s: Failed to allocate reloc_lp0\n",
1110 __func__);
1111 goto out;
1112 }
1113
1114 orig = ioremap(tegra_lp0_vec_start, tegra_lp0_vec_size);
1115 WARN_ON(!orig);
1116 if (!orig) {
1117 pr_err("%s: Failed to map tegra_lp0_vec_start %08lx\n",
1118 __func__, tegra_lp0_vec_start);
1119 kfree(reloc_lp0);
1120 goto out;
1121 }
1122
1123 tmp = (unsigned long) reloc_lp0;
1124 tmp = (tmp + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
1125 reloc_lp0 = (unsigned char *)tmp;
1126 memcpy(reloc_lp0, orig, tegra_lp0_vec_size);
1127 iounmap(orig);
1128 tegra_lp0_vec_start = virt_to_phys(reloc_lp0);
1129 }
1130
1131out:
1132 if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && !tegra_lp0_vec_size) {
1133 pr_warning("%s: Suspend mode LP0 requested, no lp0_vec "
1134 "provided by bootlader -- disabling LP0\n",
1135 __func__);
1136 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1137 }
1138
1139 iram_save_size = tegra_iram_end() - tegra_iram_start();
1140
1141 iram_save = kmalloc(iram_save_size, GFP_KERNEL);
1142 if (!iram_save && (plat->suspend_mode >= TEGRA_SUSPEND_LP1)) {
1143 pr_err("%s: unable to allocate memory for SDRAM self-refresh "
1144 "-- LP0/LP1 unavailable\n", __func__);
1145 plat->suspend_mode = TEGRA_SUSPEND_LP2;
1146 }
1147
1148 /* !!!FIXME!!! THIS IS TEGRA2 ONLY */
1149 /* Initialize scratch registers used for CPU LP2 synchronization */
1150 writel(0, pmc + PMC_SCRATCH37);
1151 writel(0, pmc + PMC_SCRATCH38);
1152 writel(0, pmc + PMC_SCRATCH39);
1153 writel(0, pmc + PMC_SCRATCH41);
1154
 1155 /* Always enable CPU power request; only normal polarity is supported */
1156 reg = readl(pmc + PMC_CTRL);
1157 BUG_ON(reg & TEGRA_POWER_CPU_PWRREQ_POLARITY);
1158 reg |= TEGRA_POWER_CPU_PWRREQ_OE;
1159 pmc_32kwritel(reg, PMC_CTRL);
1160
1161 /* Configure core power request and system clock control if LP0
1162 is supported */
1163 __raw_writel(pdata->core_timer, pmc + PMC_COREPWRGOOD_TIMER);
1164 __raw_writel(pdata->core_off_timer, pmc + PMC_COREPWROFF_TIMER);
1165
1166 reg = readl(pmc + PMC_CTRL);
1167
1168 if (!pdata->sysclkreq_high)
1169 reg |= TEGRA_POWER_SYSCLK_POLARITY;
1170 else
1171 reg &= ~TEGRA_POWER_SYSCLK_POLARITY;
1172
1173 if (!pdata->corereq_high)
1174 reg |= TEGRA_POWER_PWRREQ_POLARITY;
1175 else
1176 reg &= ~TEGRA_POWER_PWRREQ_POLARITY;
1177
1178 /* configure output inverters while the request is tristated */
1179 pmc_32kwritel(reg, PMC_CTRL);
1180
1181 /* now enable requests */
1182 reg |= TEGRA_POWER_SYSCLK_OE;
1183 if (!pdata->combined_req)
1184 reg |= TEGRA_POWER_PWRREQ_OE;
1185 pmc_32kwritel(reg, PMC_CTRL);
1186
1187 if (pdata->suspend_mode == TEGRA_SUSPEND_LP0)
1188 tegra_lp0_suspend_init();
1189
1190 suspend_set_ops(&tegra_suspend_ops);
1191
 1192 /* Create /sys/power/suspend/mode */
1193 suspend_kobj = kobject_create_and_add("suspend", power_kobj);
1194 if (suspend_kobj) {
1195 if (sysfs_create_file(suspend_kobj, \
1196 &suspend_mode_attribute.attr))
1197 pr_err("%s: sysfs_create_file suspend type failed!\n",
1198 __func__);
1199 }
1200
1201 iram_cpu_lp2_mask = tegra_cpu_lp2_mask;
1202 iram_cpu_lp1_mask = tegra_cpu_lp1_mask;
1203fail:
1204#endif
1205 if (plat->suspend_mode == TEGRA_SUSPEND_NONE)
1206 tegra_lp2_in_idle(false);
1207
1208 current_suspend_mode = plat->suspend_mode;
1209}
1210
1211unsigned long debug_uart_port_base = 0;
1212EXPORT_SYMBOL(debug_uart_port_base);
1213
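/* The debug UART is a 16550-style port: the DLAB bit in LCR switches
 * registers 0/1 between the data/IER registers and the DLL/DLM divisor
 * latch, and the "* 4" offsets below reflect the 32-bit register stride
 * used on Tegra. */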
1214static int tegra_debug_uart_suspend(void)
1215{
1216 void __iomem *uart;
1217 u32 lcr;
1218
1219 if (!debug_uart_port_base)
1220 return 0;
1221
1222 uart = IO_ADDRESS(debug_uart_port_base);
1223
1224 lcr = readb(uart + UART_LCR * 4);
1225
1226 tegra_sctx.uart[0] = lcr;
1227 tegra_sctx.uart[1] = readb(uart + UART_MCR * 4);
1228
1229 /* DLAB = 0 */
1230 writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
1231
1232 tegra_sctx.uart[2] = readb(uart + UART_IER * 4);
1233
1234 /* DLAB = 1 */
1235 writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
1236
1237 tegra_sctx.uart[3] = readb(uart + UART_DLL * 4);
1238 tegra_sctx.uart[4] = readb(uart + UART_DLM * 4);
1239
1240 writeb(lcr, uart + UART_LCR * 4);
1241
1242 return 0;
1243}
1244
1245static void tegra_debug_uart_resume(void)
1246{
1247 void __iomem *uart;
1248 u32 lcr;
1249
1250 if (!debug_uart_port_base)
1251 return;
1252
1253 uart = IO_ADDRESS(debug_uart_port_base);
1254
1255 lcr = tegra_sctx.uart[0];
1256
1257 writeb(tegra_sctx.uart[1], uart + UART_MCR * 4);
1258
1259 /* DLAB = 0 */
1260 writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
1261
1262 writeb(UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_01 | UART_FCR_R_TRIG_01,
1263 uart + UART_FCR * 4);
1264
1265 writeb(tegra_sctx.uart[2], uart + UART_IER * 4);
1266
1267 /* DLAB = 1 */
1268 writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
1269
1270 writeb(tegra_sctx.uart[3], uart + UART_DLL * 4);
1271 writeb(tegra_sctx.uart[4], uart + UART_DLM * 4);
1272
1273 writeb(lcr, uart + UART_LCR * 4);
1274}
1275
1276static struct syscore_ops tegra_debug_uart_syscore_ops = {
1277 .suspend = tegra_debug_uart_suspend,
1278 .resume = tegra_debug_uart_resume,
1279};
1280
1281struct clk *debug_uart_clk = NULL;
1282EXPORT_SYMBOL(debug_uart_clk);
1283
1284void tegra_console_uart_suspend(void)
1285{
1286 if (console_suspend_enabled && debug_uart_clk)
1287 clk_disable(debug_uart_clk);
1288}
1289
1290void tegra_console_uart_resume(void)
1291{
1292 if (console_suspend_enabled && debug_uart_clk)
1293 clk_enable(debug_uart_clk);
1294}
1295
1296static int tegra_debug_uart_syscore_init(void)
1297{
1298 register_syscore_ops(&tegra_debug_uart_syscore_ops);
1299 return 0;
1300}
1301arch_initcall(tegra_debug_uart_syscore_init);
1302
1303#ifdef CONFIG_HAS_EARLYSUSPEND
1304static void pm_early_suspend(struct early_suspend *h)
1305{
1306 pm_qos_update_request(&awake_cpu_freq_req, PM_QOS_DEFAULT_VALUE);
1307}
1308
1309static void pm_late_resume(struct early_suspend *h)
1310{
1311 pm_qos_update_request(&awake_cpu_freq_req, (s32)AWAKE_CPU_FREQ_MIN);
1312}
1313
1314static struct early_suspend pm_early_suspender = {
1315 .suspend = pm_early_suspend,
1316 .resume = pm_late_resume,
1317};
1318
1319static int pm_init_wake_behavior(void)
1320{
1321 register_early_suspend(&pm_early_suspender);
1322 return 0;
1323}
1324
1325late_initcall(pm_init_wake_behavior);
1326#endif