diff options
author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
commit | fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch) | |
tree | a57612d1888735a2ec7972891b68c1ac5ec8faea /arch/arm/mach-tegra/pm-t3.c | |
parent | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff) |
Diffstat (limited to 'arch/arm/mach-tegra/pm-t3.c')
-rw-r--r-- | arch/arm/mach-tegra/pm-t3.c | 529 |
1 files changed, 529 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/pm-t3.c b/arch/arm/mach-tegra/pm-t3.c new file mode 100644 index 00000000000..a8317422449 --- /dev/null +++ b/arch/arm/mach-tegra/pm-t3.c | |||
@@ -0,0 +1,529 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/pm-t3.c | ||
3 | * | ||
4 | * Tegra3 SOC-specific power and cluster management | ||
5 | * | ||
6 | * Copyright (c) 2009-2012, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/smp.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/clk.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/device.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/clockchips.h> | ||
30 | |||
31 | #include <mach/gpio.h> | ||
32 | #include <mach/iomap.h> | ||
33 | #include <mach/irqs.h> | ||
34 | |||
35 | #include <asm/cpu_pm.h> | ||
36 | #include <asm/hardware/gic.h> | ||
37 | |||
38 | #include <trace/events/power.h> | ||
39 | |||
40 | #include "clock.h" | ||
41 | #include "cpuidle.h" | ||
42 | #include "pm.h" | ||
43 | #include "sleep.h" | ||
44 | #include "tegra3_emc.h" | ||
45 | #include "dvfs.h" | ||
46 | |||
#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
/*
 * Clock-and-reset (CAR) controller register apertures used by the
 * G<->LP cluster switch code. Offsets are from TEGRA_CLK_RESET_BASE.
 */

/* Legacy (active-cluster) CPU burst policy and super-clock divider. */
#define CAR_CCLK_BURST_POLICY \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20)

#define CAR_SUPER_CCLK_DIVIDER \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24)

/* Per-cluster burst policy / divider registers (G cluster). */
#define CAR_CCLKG_BURST_POLICY \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368)

#define CAR_SUPER_CCLKG_DIVIDER \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C)

/* Per-cluster burst policy / divider registers (LP cluster). */
#define CAR_CCLKLP_BURST_POLICY \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370)
/* PLLX divide-by-2 bypass control bit within the LP burst policy. */
#define PLLX_DIV2_BYPASS_LP	(1<<16)

#define CAR_SUPER_CCLKLP_DIVIDER \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374)

/*
 * Bond-out register: a set bit means the corresponding CPU complex is
 * fused away (not present) on this chip.
 */
#define CAR_BOND_OUT_V \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
#define CAR_BOND_OUT_V_CPU_G	(1<<0)
#define CAR_BOND_OUT_V_CPU_LP	(1<<1)

/* Clock-enable set register for the V group (CPU complex clocks). */
#define CAR_CLK_ENB_V_SET \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440)
#define CAR_CLK_ENB_V_CPU_G	(1<<0)
#define CAR_CLK_ENB_V_CPU_LP	(1<<1)

/* Per-cluster CPU reset set/clear registers. */
#define CAR_RST_CPUG_CMPLX_SET \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450)

#define CAR_RST_CPUG_CMPLX_CLR \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454)

#define CAR_RST_CPULP_CMPLX_SET \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458)

#define CAR_RST_CPULP_CMPLX_CLR \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C)

/* Per-cluster CPU clock-gate set/clear registers. */
#define CAR_CLK_CPUG_CMPLX_SET \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460)

#define CAR_CLK_CPUG_CMPLX_CLR \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464)

#define CAR_CLK_CPULP_CMPLX_SET \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468)

#define CAR_CLK_CPULP_CMPLX_CLR \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C)

/* Clock-stop bit for @cpu in the CLK_CPU*_CMPLX registers. */
#define CPU_CLOCK(cpu)	(0x1<<(8+cpu))
/* All four reset lines (core/debug/etc.) for @cpu in RST_CPU*_CMPLX. */
#define CPU_RESET(cpu)	(0x1111ul<<(cpu))
/*
 * cluster_switch_prolog_clock - program CAR registers ahead of a switch.
 * @flags: TEGRA_POWER_CLUSTER_* target selector plus modifier bits.
 *
 * Sets up clocks and resets so that after the flow controller performs
 * the cluster switch, CPU0 of the target cluster comes up clocked and
 * out of reset while the remaining cores stay held (G cluster only;
 * the LP cluster has a single core).
 *
 * Returns 0 on success, -ENXIO if the requested cluster is bonded out
 * (not present on this chip).
 */
static int cluster_switch_prolog_clock(unsigned int flags)
{
	u32 reg;
	u32 CclkBurstPolicy;
	u32 SuperCclkDivier;	/* (sic) original spelling preserved */

	/* Read the bond out register containing the G and LP CPUs. */
	reg = readl(CAR_BOND_OUT_V);

	/* Sync G-PLLX divider bypass with LP (no effect on G, just to prevent
	   LP settings overwrite by save/restore code */
	CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY);
	CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY);
	writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);

	/* Switching to G? */
	if (flags & TEGRA_POWER_CLUSTER_G) {
		/* Do the G CPUs exist? (bond-out bit set => fused away) */
		if (reg & CAR_BOND_OUT_V_CPU_G)
			return -ENXIO;

		/* Keep G CPU clock policy set by upper layer, with the
		   exception of the transition via LP1 */
		if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
			/* In LP1 power mode come up on CLKM (oscillator) */
			CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY);
			CclkBurstPolicy &= ~0xF;	/* burst source field */
			SuperCclkDivier = 0;

			writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
			writel(SuperCclkDivier, CAR_SUPER_CCLKG_DIVIDER);
		}

		/* Hold G CPUs 1-3 in reset after the switch */
		reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
		writel(reg, CAR_RST_CPUG_CMPLX_SET);

		/* Take G CPU 0 out of reset after the switch */
		reg = CPU_RESET(0);
		writel(reg, CAR_RST_CPUG_CMPLX_CLR);

		/* Disable the clocks on G CPUs 1-3 after the switch */
		reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);
		writel(reg, CAR_CLK_CPUG_CMPLX_SET);

		/* Enable the clock on G CPU 0 after the switch */
		reg = CPU_CLOCK(0);
		writel(reg, CAR_CLK_CPUG_CMPLX_CLR);

		/* Enable the G CPU complex clock after the switch */
		reg = CAR_CLK_ENB_V_CPU_G;
		writel(reg, CAR_CLK_ENB_V_SET);
	}
	/* Switching to LP? */
	else if (flags & TEGRA_POWER_CLUSTER_LP) {
		/* Does the LP CPU exist? */
		if (reg & CAR_BOND_OUT_V_CPU_LP)
			return -ENXIO;

		/* Keep LP CPU clock policy set by upper layer, with the
		   exception of the transition via LP1 */
		if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
			/* In LP1 power mode come up on CLKM (oscillator) */
			CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY);
			CclkBurstPolicy &= ~0xF;	/* burst source field */
			SuperCclkDivier = 0;

			writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY);
			writel(SuperCclkDivier, CAR_SUPER_CCLKLP_DIVIDER);
		}

		/* Take the LP CPU out of reset after the switch */
		reg = CPU_RESET(0);
		writel(reg, CAR_RST_CPULP_CMPLX_CLR);

		/* Enable the clock on the LP CPU after the switch */
		reg = CPU_CLOCK(0);
		writel(reg, CAR_CLK_CPULP_CMPLX_CLR);

		/* Enable the LP CPU complex clock after the switch */
		reg = CAR_CLK_ENB_V_CPU_LP;
		writel(reg, CAR_CLK_ENB_V_SET);
	}

	return 0;
}
190 | |||
/*
 * tegra_cluster_switch_prolog - arm (or disarm) a cluster switch.
 * @flags: TEGRA_POWER_CLUSTER_* selector plus modifier bits.
 *
 * Clears the switch/immediate-wake bits in CPU0's flow controller CSR,
 * then re-arms them only if a real switch is requested (target differs
 * from the current cluster, or TEGRA_POWER_CLUSTER_FORCE is set) and
 * the target cluster's clocks could be set up. The CSR is written back
 * in every case so stale flags never linger.
 */
void tegra_cluster_switch_prolog(unsigned int flags)
{
	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	u32 reg;

	/* Read the flow controller CSR register and clear the CPU switch
	   and immediate flags. If an actual CPU switch is to be performed,
	   re-write the CSR register with the desired values. */
	reg = readl(FLOW_CTRL_CPU_CSR(0));
	reg &= ~(FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER);

	/* Program flow controller for immediate wake if requested */
	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		reg |= FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE;

	/* Do nothing if no switch actions requested */
	if (!target_cluster)
		goto done;

	if ((current_cluster != target_cluster) ||
	    (flags & TEGRA_POWER_CLUSTER_FORCE)) {
		if (current_cluster != target_cluster) {
			/* Set up the clocks for the target CPU. */
			if (cluster_switch_prolog_clock(flags)) {
				/* The target CPU does not exist */
				goto done;
			}

			/* Set up the flow controller to switch CPUs. */
			reg |= FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER;
		}
	}

done:
	writel(reg, FLOW_CTRL_CPU_CSR(0));
}
231 | |||
232 | |||
/*
 * cluster_switch_epilog_actlr - re-sync ACTLR bits after LP=>G switch.
 *
 * TLB maintenance broadcast bit (FW) is stubbed out on LP CPU (reads
 * as zero, writes ignored). Hence, it is not preserved across G=>LP=>G
 * switch by CPU save/restore code, but SMP bit is restored correctly.
 * Synchronize these two bits here after LP=>G transition. Note that
 * only CPU0 core is powered on before and after the switch. See also
 * bug 807595.
 *
 * NOTE(review): bit 6 appears to be the SMP bit and bit 0 the FW bit
 * of the Cortex-A9 ACTLR — confirm against the CPU TRM.
 */
static void cluster_switch_epilog_actlr(void)
{
	u32 actlr;

	/* Read the Auxiliary Control Register (cp15 c1/c0/1). */
	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	/* If SMP mode was restored, restore FW (broadcast) alongside it. */
	if (actlr & (0x1 << 6)) {
		actlr |= 0x1;
		__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
	}
}
251 | |||
/*
 * cluster_switch_epilog_gic - restore GIC interrupt routing after a
 * LP=>G cluster switch.
 *
 * Reprogram the interrupt affinity because on the LP CPU,
 * the interrupt distributor affinity registers are stubbed out
 * by ARM (reads as zero, writes ignored). So when the LP CPU
 * context save code runs, the affinity registers will read
 * as all zero. This causes all interrupts to be effectively
 * disabled when back on the G CPU because they aren't routable
 * to any CPU. See bug 667720 for details.
 */
static void cluster_switch_epilog_gic(void)
{
	unsigned int max_irq, i;
	void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);

	/* GIC_DIST_CTR[4:0] encodes (number of IRQ lines / 32) - 1. */
	max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/* Each GIC_DIST_TARGET word covers 4 IRQs (one byte per IRQ);
	   start at 32 to skip the banked SGI/PPI range. */
	for (i = 32; i < max_irq; i += 4) {
		/* Default: route all 4 IRQs in this word to CPU0. */
		u32 val = 0x01010101;
#ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
		unsigned int irq;
		for (irq = i; irq < (i + 4); irq++) {
			struct cpumask mask;
			struct irq_desc *desc = irq_to_desc(irq);

			/* Merge in any affinity hint recorded for the IRQ. */
			if (desc && desc->affinity_hint &&
			    desc->irq_data.affinity) {
				if (cpumask_and(&mask, desc->affinity_hint,
						desc->irq_data.affinity))
					val |= (*cpumask_bits(&mask) & 0xff) <<
						((irq & 3) * 8);
			}
		}
#endif
		/* NOTE(review): "i * 4 / 4" is just i (byte offset of the
		   4-IRQ TARGET word) — kept as written. */
		writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4);
	}
}
288 | |||
/*
 * tegra_cluster_switch_epilog - post-switch cleanup.
 * @flags: the same TEGRA_POWER_CLUSTER_* flags passed to the prolog.
 *
 * Clears the switch/immediate-wake CSR bits and, when now running on
 * the G cluster, repairs state the LP CPU could not preserve (ACTLR
 * FW bit, GIC affinity registers).
 */
void tegra_cluster_switch_epilog(unsigned int flags)
{
	u32 reg;

	/* Make sure the switch and immediate flags are cleared in
	   the flow controller to prevent undesirable side-effects
	   for future users of the flow controller. */
	reg = readl(FLOW_CTRL_CPU_CSR(0));
	reg &= ~(FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER);
	writel(reg, FLOW_CTRL_CPU_CSR(0));

	/* Perform post-switch LP=>G clean-up */
	if (!is_lp_cluster()) {
		cluster_switch_epilog_actlr();
		cluster_switch_epilog_gic();
	}

#if DEBUG_CLUSTER_SWITCH
	{
		/* FIXME: clock functions below are taking mutex */
		struct clk *c = tegra_get_clock_by_name(
			is_lp_cluster() ? "cpu_lp" : "cpu_g");
		DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__,
			is_lp_cluster() ? "LP" : "G", clk_get_rate(c)));
	}
#endif
}
317 | |||
/*
 * tegra_cluster_control - perform a G<->LP cluster switch.
 * @us:    requested sleep time in microseconds (0 with IMMEDIATE flag).
 * @flags: TEGRA_POWER_CLUSTER_* target plus modifier bits.
 *
 * Must be called with exactly one CPU online; the whole switch runs
 * with local interrupts disabled. The switch is carried either through
 * LP1 (SDRAM self-refresh) or through LP2 via the cpuidle path.
 *
 * Returns 0 on success; -EINVAL for a bad/missing target, -EBUSY if
 * more than one CPU is online, -EEXIST if already on the target
 * cluster (and not forced), -EPERM if the G cluster is absent.
 */
int tegra_cluster_control(unsigned int us, unsigned int flags)
{
	/* Timestamp of the last G=>LP transition, used to guarantee a
	   minimum power-off time before powering the G rail back on. */
	static ktime_t last_g2lp;

	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	unsigned long irq_flags;

	/* Exactly one target cluster must be selected. */
	if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster)
		return -EINVAL;

	if (num_online_cpus() > 1)
		return -EBUSY;

	if ((current_cluster == target_cluster)
	&& !(flags & TEGRA_POWER_CLUSTER_FORCE))
		return -EEXIST;

	if (target_cluster == TEGRA_POWER_CLUSTER_G)
		if (!is_g_cluster_present())
			return -EPERM;

	trace_power_start(POWER_PSTATE, target_cluster, 0);

	/* Immediate wake implies no sleep interval. */
	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		us = 0;

	DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
		(flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
		is_lp_cluster() ? "LP" : "G",
		(target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
		(flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
		(flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
		us));

	local_irq_save(irq_flags);

	/* Manage the CPU DVFS rail around the switch (only when the
	   timekeeper is running, so ktime_get() is valid). */
	if (current_cluster != target_cluster && !timekeeping_suspended) {
		ktime_t now = ktime_get();
		if (target_cluster == TEGRA_POWER_CLUSTER_G) {
			/* Enforce the minimum rail power-off time before
			   switching back to G. */
			s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
			s64 t_off = tegra_cpu_power_off_time();
			if (t_off > t)
				udelay((unsigned int)(t_off - t));

			tegra_dvfs_rail_on(tegra_cpu_rail, now);

		} else {
			last_g2lp = now;
			tegra_dvfs_rail_off(tegra_cpu_rail, now);
		}
	}

	if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
		/* LP1 path: wrap the suspend-to-LP1 in prolog/epilog. */
		if (us)
			tegra_lp2_set_trigger(us);

		tegra_cluster_switch_prolog(flags);
		tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
		tegra_cluster_switch_epilog(flags);

		if (us)
			tegra_lp2_set_trigger(0);
	} else {
		/* LP2 path: go through cpu_pm / broadcast clockevents. */
		int cpu = 0;

		tegra_set_cpu_in_lp2(0);
		cpu_pm_enter();
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
					   &cpu);
		tegra_idle_lp2_last(0, flags);
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
					   &cpu);
		cpu_pm_exit();
		tegra_clear_cpu_in_lp2(0);
	}
	local_irq_restore(irq_flags);

	DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G"));

	return 0;
}
404 | #endif | ||
405 | |||
406 | #ifdef CONFIG_PM_SLEEP | ||
407 | |||
/*
 * tegra_lp0_suspend_mc - LP0 entry hook for the memory controller.
 *
 * Memory frequency is restored to the boot rate after LP0, so the MC
 * timing is captured once at init rather than on each LP0 entry.
 * Nothing needs saving here; the hook is kept as a placeholder for
 * symmetry with tegra_lp0_resume_mc().
 */
void tegra_lp0_suspend_mc(void)
{
}
414 | |||
/* LP0 exit hook: restore the MC timing that was saved at init. */
void tegra_lp0_resume_mc(void)
{
	tegra_mc_timing_restore();
}
419 | |||
420 | void tegra_lp0_cpu_mode(bool enter) | ||
421 | { | ||
422 | static bool entered_on_g = false; | ||
423 | unsigned int flags; | ||
424 | |||
425 | if (enter) | ||
426 | entered_on_g = !is_lp_cluster(); | ||
427 | |||
428 | if (entered_on_g) { | ||
429 | flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G; | ||
430 | flags |= TEGRA_POWER_CLUSTER_IMMEDIATE; | ||
431 | tegra_cluster_control(0, flags); | ||
432 | pr_info("Tegra: switched to %s cluster\n", enter ? "LP" : "G"); | ||
433 | } | ||
434 | } | ||
435 | #endif | ||
436 | |||
/* Initializer helper for struct tegra_io_dpd table entries. */
#define IO_DPD_INFO(_name, _index, _bit)	\
	{					\
		.name = _name,			\
		.io_dpd_reg_index = _index,	\
		.io_dpd_bit = _bit,		\
	}

/* PMC IO DPD register offsets */
#define APBDEV_PMC_IO_DPD_REQ_0		0x1b8
#define APBDEV_PMC_IO_DPD_STATUS_0	0x1bc
#define APBDEV_PMC_SEL_DPD_TIM_0	0x1c8
/* LSB of the enable-code field in DPD_REQ (bank 0) / DPD2_REQ (bank 1). */
#define APBDEV_DPD_ENABLE_LSB		30
#define APBDEV_DPD2_ENABLE_LSB		5
#define PMC_DPD_SAMPLE			0x20
451 | |||
/* Table of pads supporting deep power down; matched by tegra_io_dpd_get(). */
struct tegra_io_dpd tegra_list_io_dpd[] = {
	/* Empty DPD list - sd dpd entries removed */
};
455 | |||
456 | struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev) | ||
457 | { | ||
458 | int i; | ||
459 | const char *name = dev ? dev_name(dev) : NULL; | ||
460 | if (name) { | ||
461 | for (i = 0; i < (sizeof(tegra_list_io_dpd) / | ||
462 | sizeof(struct tegra_io_dpd)); i++) { | ||
463 | if (!(strncmp(tegra_list_io_dpd[i].name, name, | ||
464 | strlen(name)))) { | ||
465 | return &tegra_list_io_dpd[i]; | ||
466 | } | ||
467 | } | ||
468 | } | ||
469 | dev_info(dev, "Error: tegra3 io dpd not supported for %s\n", | ||
470 | ((name) ? name : "NULL")); | ||
471 | return NULL; | ||
472 | } | ||
473 | EXPORT_SYMBOL(tegra_io_dpd_get); | ||
474 | |||
/* Mapped base of the PMC aperture used by the IO DPD helpers below. */
static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
/* Serializes PMC DPD request/sample sequences across callers. */
static DEFINE_SPINLOCK(tegra_io_dpd_lock);
477 | |||
478 | void tegra_io_dpd_enable(struct tegra_io_dpd *hnd) | ||
479 | { | ||
480 | unsigned int enable_mask; | ||
481 | unsigned int dpd_status; | ||
482 | unsigned int dpd_enable_lsb; | ||
483 | |||
484 | if ((!hnd)) | ||
485 | return; | ||
486 | spin_lock(&tegra_io_dpd_lock); | ||
487 | dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB : | ||
488 | APBDEV_DPD_ENABLE_LSB; | ||
489 | writel(0x1, pmc + PMC_DPD_SAMPLE); | ||
490 | writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0); | ||
491 | enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb)); | ||
492 | writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 + | ||
493 | hnd->io_dpd_reg_index * 8)); | ||
494 | udelay(1); | ||
495 | dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 + | ||
496 | hnd->io_dpd_reg_index * 8)); | ||
497 | if (!(dpd_status & (1 << hnd->io_dpd_bit))) | ||
498 | pr_info("Error: dpd%d enable failed, status=%#x\n", | ||
499 | (hnd->io_dpd_reg_index + 1), dpd_status); | ||
500 | /* Sample register must be reset before next sample operation */ | ||
501 | writel(0x0, pmc + PMC_DPD_SAMPLE); | ||
502 | spin_unlock(&tegra_io_dpd_lock); | ||
503 | return; | ||
504 | } | ||
505 | EXPORT_SYMBOL(tegra_io_dpd_enable); | ||
506 | |||
507 | void tegra_io_dpd_disable(struct tegra_io_dpd *hnd) | ||
508 | { | ||
509 | unsigned int enable_mask; | ||
510 | unsigned int dpd_status; | ||
511 | unsigned int dpd_enable_lsb; | ||
512 | |||
513 | if ((!hnd)) | ||
514 | return; | ||
515 | spin_lock(&tegra_io_dpd_lock); | ||
516 | dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB : | ||
517 | APBDEV_DPD_ENABLE_LSB; | ||
518 | enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb)); | ||
519 | writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 + | ||
520 | hnd->io_dpd_reg_index * 8)); | ||
521 | dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 + | ||
522 | hnd->io_dpd_reg_index * 8)); | ||
523 | if (dpd_status & (1 << hnd->io_dpd_bit)) | ||
524 | pr_info("Error: dpd%d disable failed, status=%#x\n", | ||
525 | (hnd->io_dpd_reg_index + 1), dpd_status); | ||
526 | spin_unlock(&tegra_io_dpd_lock); | ||
527 | return; | ||
528 | } | ||
529 | EXPORT_SYMBOL(tegra_io_dpd_disable); | ||