Diffstat (limited to 'arch/arm/mach-tegra/tegra3_emc.c')
-rw-r--r--  arch/arm/mach-tegra/tegra3_emc.c  1254
1 file changed, 1254 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/tegra3_emc.c b/arch/arm/mach-tegra/tegra3_emc.c
new file mode 100644
index 00000000000..0ceed669fa5
--- /dev/null
+++ b/arch/arm/mach-tegra/tegra3_emc.c
@@ -0,0 +1,1254 @@
/*
 * arch/arm/mach-tegra/tegra3_emc.c
 *
 * Copyright (C) 2011 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/cputime.h>
#include <asm/cacheflush.h>

#include <mach/iomap.h>

#include "clock.h"
#include "dvfs.h"
#include "tegra3_emc.h"

#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
static bool emc_enable = true;
#else
static bool emc_enable;
#endif
module_param(emc_enable, bool, 0644);
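/* Note: with this code built in, the emc_enable knob above is typically
 * exposed at /sys/module/tegra3_emc/parameters/emc_enable (path assumed
 * from the object name); tegra_emc_round_rate() fails while it is false.
 */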

#define EMC_MIN_RATE_DDR3		50000000
#define EMC_STATUS_UPDATE_TIMEOUT	100
#define TEGRA_EMC_TABLE_MAX_SIZE	16

enum {
	DLL_CHANGE_NONE = 0,
	DLL_CHANGE_ON,
	DLL_CHANGE_OFF,
};

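/* Layout of the EMC clock source register as used below: bits 7:0 hold a
 * 7.1 fixed-point divider (see find_matching_input()), bits 31:30 select
 * the clock source, bit 29 enables the low-jitter clock path, and bit 16
 * asks the MC to run at the same frequency as the EMC.
 */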
#define EMC_CLK_DIV_SHIFT		0
#define EMC_CLK_DIV_MASK		(0xFF << EMC_CLK_DIV_SHIFT)
#define EMC_CLK_SOURCE_SHIFT		30
#define EMC_CLK_SOURCE_MASK		(0x3 << EMC_CLK_SOURCE_SHIFT)
#define EMC_CLK_LOW_JITTER_ENABLE	(0x1 << 29)
#define EMC_CLK_MC_SAME_FREQ		(0x1 << 16)

#define BURST_REG_LIST \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RC), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RP), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QSAFE), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_EXTRA), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG6), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS0), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS1), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS2), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS3), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE0), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE1), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE2), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE3), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS0), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS1), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS2), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS3), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ0), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ1), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ2), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ3), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2), \
	DEFINE_REG(0, EMC_XM2CLKPADCTRL), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2QUSEPADCTRL), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL3), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_TERM_CTRL), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL), \
	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE), \
	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE), \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_RSV),

#define DEFINE_REG(base, reg)	((base) ? ((u32)IO_ADDRESS((base)) + (reg)) : 0)
static const u32 burst_reg_addr[TEGRA_EMC_NUM_REGS] = {
	BURST_REG_LIST
};
#undef DEFINE_REG

#define DEFINE_REG(base, reg)	reg##_INDEX
enum {
	BURST_REG_LIST
};
#undef DEFINE_REG
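
/* BURST_REG_LIST is expanded twice with different DEFINE_REG() definitions
 * (an "X-macro"): once into the burst_reg_addr[] table of absolute register
 * addresses, and once into the matching <reg>_INDEX enum used to index
 * burst_regs[] in the DFS table. The zero-base entry (EMC_XM2CLKPADCTRL)
 * yields a zero address, so the burst copy loop skips it and the register
 * is written separately in step 8.1 of emc_set_clock().
 */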

static int emc_num_burst_regs;

static struct clk_mux_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
static struct tegra_emc_table start_timing;
static const struct tegra_emc_table *emc_timing;
static unsigned long dram_over_temp_state = DRAM_OVER_TEMP_NONE;

static const u32 *dram_to_soc_bit_map;
static const struct tegra_emc_table *tegra_emc_table;
static int tegra_emc_table_size;

static u32 dram_dev_num;
static u32 emc_cfg_saved;
static u32 dram_type = -1;

static struct clk *emc;
static struct clk *bridge;

static struct {
	cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
	int last_sel;
	u64 last_update;
	u64 clkchange_count;
	spinlock_t spinlock;
} emc_stats;

static DEFINE_SPINLOCK(emc_access_lock);

static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);

static inline void emc_writel(u32 val, unsigned long addr)
{
	writel(val, (u32)emc_base + addr);
	barrier();
}
static inline u32 emc_readl(unsigned long addr)
{
	return readl((u32)emc_base + addr);
}
static inline void mc_writel(u32 val, unsigned long addr)
{
	writel(val, (u32)mc_base + addr);
	barrier();
}
static inline u32 mc_readl(unsigned long addr)
{
	return readl((u32)mc_base + addr);
}

static void emc_last_stats_update(int last_sel)
{
	unsigned long flags;
	u64 cur_jiffies = get_jiffies_64();

	spin_lock_irqsave(&emc_stats.spinlock, flags);

	if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
		emc_stats.time_at_clock[emc_stats.last_sel] = cputime64_add(
			emc_stats.time_at_clock[emc_stats.last_sel],
			cputime64_sub(cur_jiffies, emc_stats.last_update));

	emc_stats.last_update = cur_jiffies;

	if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
		emc_stats.clkchange_count++;
		emc_stats.last_sel = last_sel;
	}
	spin_unlock_irqrestore(&emc_stats.spinlock, flags);
}

static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
{
	int i;
	for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
		if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static inline void emc_timing_update(void)
{
	int err;

	emc_writel(0x1, EMC_TIMING_CONTROL);
	err = wait_for_update(EMC_STATUS,
			      EMC_STATUS_TIMING_UPDATE_STALLED, false);
	if (err) {
		pr_err("%s: timing update error: %d\n", __func__, err);
		BUG();
	}
}

static inline void auto_cal_disable(void)
{
	int err;

	emc_writel(0, EMC_AUTO_CAL_INTERVAL);
	err = wait_for_update(EMC_AUTO_CAL_STATUS,
			      EMC_AUTO_CAL_STATUS_ACTIVE, false);
	if (err) {
		pr_err("%s: disable auto-cal error: %d\n", __func__, err);
		BUG();
	}
}

static inline void set_over_temp_timing(
	const struct tegra_emc_table *next_timing, unsigned long state)
{
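/* The low 16 bits of these shadow registers hold refresh interval counts;
 * shifting them right by 2 quarters the interval, i.e. the DRAM is
 * refreshed roughly 4x more often while it reports over-temperature.
 */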
#define REFRESH_SPEEDUP(val) \
	do { \
		val = ((val) & 0xFFFF0000) | (((val) & 0xFFFF) >> 2); \
	} while (0)

	u32 ref = next_timing->burst_regs[EMC_REFRESH_INDEX];
	u32 pre_ref = next_timing->burst_regs[EMC_PRE_REFRESH_REQ_CNT_INDEX];
	u32 dsr_cntrl = next_timing->burst_regs[EMC_DYN_SELF_REF_CONTROL_INDEX];

	switch (state) {
	case DRAM_OVER_TEMP_NONE:
		break;
	case DRAM_OVER_TEMP_REFRESH:
		REFRESH_SPEEDUP(ref);
		REFRESH_SPEEDUP(pre_ref);
		REFRESH_SPEEDUP(dsr_cntrl);
		break;
	default:
		pr_err("%s: Failed to set dram over temp state %lu\n",
		       __func__, state);
		BUG();
	}

	__raw_writel(ref, burst_reg_addr[EMC_REFRESH_INDEX]);
	__raw_writel(pre_ref, burst_reg_addr[EMC_PRE_REFRESH_REQ_CNT_INDEX]);
	__raw_writel(dsr_cntrl, burst_reg_addr[EMC_DYN_SELF_REF_CONTROL_INDEX]);
}

static inline void set_mc_arbiter_limits(void)
{
	u32 reg = mc_readl(MC_EMEM_ARB_OUTSTANDING_REQ);
	u32 max_val = 0x50 << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;

	if (!(reg & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
	    ((reg & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > max_val)) {
		reg = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
			MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | max_val;
		mc_writel(reg, MC_EMEM_ARB_OUTSTANDING_REQ);
		mc_writel(0x1, MC_TIMING_CONTROL);
	}
}

static inline void disable_early_ack(u32 mc_override)
{
	static u32 override_val;

	override_val = mc_override & (~MC_EMEM_ARB_OVERRIDE_EACK_MASK);
	mc_writel(override_val, MC_EMEM_ARB_OVERRIDE);
	__cpuc_flush_dcache_area(&override_val, sizeof(override_val));
	outer_clean_range(__pa(&override_val), __pa(&override_val + 1));
	override_val |= mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK;
}

static inline bool dqs_preset(const struct tegra_emc_table *next_timing,
			      const struct tegra_emc_table *last_timing)
{
	bool ret = false;

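/* Enable a pad vref ahead of the clock change when the next timing needs
 * it but the current one has it off; the caller then waits a few
 * microseconds for vref to settle (step 2.5 in emc_set_clock()).
 */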
#define DQS_SET(reg, bit) \
	do { \
		if ((next_timing->burst_regs[EMC_##reg##_INDEX] & \
		     EMC_##reg##_##bit##_ENABLE) && \
		    (!(last_timing->burst_regs[EMC_##reg##_INDEX] & \
		       EMC_##reg##_##bit##_ENABLE))) { \
			emc_writel(last_timing->burst_regs[EMC_##reg##_INDEX] \
				   | EMC_##reg##_##bit##_ENABLE, EMC_##reg); \
			ret = true; \
		} \
	} while (0)

	DQS_SET(XM2DQSPADCTRL2, VREF);
	DQS_SET(XM2DQSPADCTRL3, VREF);
	DQS_SET(XM2QUSEPADCTRL, IVREF);

	return ret;
}

static inline void overwrite_mrs_wait_cnt(
	const struct tegra_emc_table *next_timing,
	bool zcal_long)
{
	u32 reg;
	u32 cnt = 512;

	/* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
	   for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
	   expected operation length. Reduce the latter by the overlapping
	   zq-calibration, if any */
	if (zcal_long)
		cnt -= dram_dev_num * 256;

	reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
	       EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
	      EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
	if (cnt < reg)
		cnt = reg;

	reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
	       (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
	reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
	       EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;

	emc_writel(reg, EMC_MRS_WAIT_CNT);
}

static inline bool need_qrst(const struct tegra_emc_table *next_timing,
			     const struct tegra_emc_table *last_timing,
			     u32 emc_dpd_reg)
{
	u32 last_mode = (last_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
		EMC_CFG5_QUSE_MODE_MASK) >> EMC_CFG5_QUSE_MODE_SHIFT;
	u32 next_mode = (next_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
		EMC_CFG5_QUSE_MODE_MASK) >> EMC_CFG5_QUSE_MODE_SHIFT;

	/* QUSE DPD is disabled */
	bool ret = !(emc_dpd_reg & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) &&

	/* QUSE uses external mode before or after clock change */
		(((last_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN) &&
		  (last_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK)) ||
		 ((next_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN) &&
		  (next_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK))) &&

	/* QUSE pad switches from schmitt to vref mode */
		(((last_timing->burst_regs[EMC_XM2QUSEPADCTRL_INDEX] &
		   EMC_XM2QUSEPADCTRL_IVREF_ENABLE) == 0) &&
		 ((next_timing->burst_regs[EMC_XM2QUSEPADCTRL_INDEX] &
		   EMC_XM2QUSEPADCTRL_IVREF_ENABLE) != 0));

	return ret;
}

static inline void periodic_qrst_enable(u32 emc_cfg_reg, u32 emc_dbg_reg)
{
	/* enable write mux => enable periodic QRST => restore mux */
	emc_writel(emc_dbg_reg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
	emc_writel(emc_cfg_reg | EMC_CFG_PERIODIC_QRST, EMC_CFG);
	emc_writel(emc_dbg_reg, EMC_DBG);
}

static inline int get_dll_change(const struct tegra_emc_table *next_timing,
				 const struct tegra_emc_table *last_timing)
{
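	/* Bit 0 of the DDR3 MR1 image is the DLL disable bit, so a clear
	   bit 0 means the DLL is enabled for that timing. */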
	bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
	bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);

	if (next_dll_enabled == last_dll_enabled)
		return DLL_CHANGE_NONE;
	else if (next_dll_enabled)
		return DLL_CHANGE_ON;
	else
		return DLL_CHANGE_OFF;
}

static inline void set_dram_mode(const struct tegra_emc_table *next_timing,
				 const struct tegra_emc_table *last_timing,
				 int dll_change)
{
	if (dram_type == DRAM_TYPE_DDR3) {
		/* first mode_1, then mode_2, then mode_reset */
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			emc_writel(next_timing->emc_mode_1, EMC_EMRS);
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			emc_writel(next_timing->emc_mode_2, EMC_EMRS);

		if ((next_timing->emc_mode_reset !=
		     last_timing->emc_mode_reset) ||
		    (dll_change == DLL_CHANGE_ON)) {
			u32 reg = next_timing->emc_mode_reset &
				(~EMC_MODE_SET_DLL_RESET);
			if (dll_change == DLL_CHANGE_ON) {
				reg |= EMC_MODE_SET_DLL_RESET;
				reg |= EMC_MODE_SET_LONG_CNT;
			}
			emc_writel(reg, EMC_MRS);
		}
	} else {
		/* first mode_2, then mode_1; mode_reset is not applicable */
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			emc_writel(next_timing->emc_mode_2, EMC_MRW);
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			emc_writel(next_timing->emc_mode_1, EMC_MRW);
	}
}

static inline void do_clock_change(u32 clk_setting)
{
	int err;

	mc_readl(MC_EMEM_ADR_CFG);	/* completes prev writes */
	writel(clk_setting, (u32)clk_base + emc->reg);

	err = wait_for_update(EMC_INTSTATUS,
			      EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
	if (err) {
		pr_err("%s: clock change completion error: %d\n",
		       __func__, err);
		BUG();
	}
}

static noinline void emc_set_clock(const struct tegra_emc_table *next_timing,
				   const struct tegra_emc_table *last_timing,
				   u32 clk_setting)
{
	int i, dll_change, pre_wait;
	bool dyn_sref_enabled, vref_cal_toggle, qrst_used, zcal_long;

	u32 mc_override = mc_readl(MC_EMEM_ARB_OVERRIDE);
	u32 emc_cfg_reg = emc_readl(EMC_CFG);
	u32 emc_dbg_reg = emc_readl(EMC_DBG);

	dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
	dll_change = get_dll_change(next_timing, last_timing);
	zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
		(last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);

	/* FIXME: remove steps enumeration below? */

	/* 1. clear clkchange_complete interrupts */
	emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);

	/* 2. disable dynamic self-refresh and preset dqs vref, then wait for
	   possible self-refresh entry/exit and/or dqs vref settled - waiting
	   before the clock change decreases worst case change stall time */
	pre_wait = 0;
	if (dyn_sref_enabled) {
		emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
		pre_wait = 5;		/* 5us+ for self-refresh entry/exit */
	}

	/* 2.25 update MC arbiter settings */
	set_mc_arbiter_limits();
	if (mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK)
		disable_early_ack(mc_override);

	/* 2.5 check dq/dqs vref delay */
	if (dqs_preset(next_timing, last_timing)) {
		if (pre_wait < 3)
			pre_wait = 3;	/* 3us+ for dqs vref settled */
	}
	if (pre_wait) {
		emc_timing_update();
		udelay(pre_wait);
	}

	/* 3. disable auto-cal if vref mode is switching */
	vref_cal_toggle = (next_timing->emc_acal_interval != 0) &&
		((next_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX] ^
		  last_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX]) &
		 EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE);
	if (vref_cal_toggle)
		auto_cal_disable();

	/* 4. program burst shadow registers */
	for (i = 0; i < emc_num_burst_regs; i++) {
		if (!burst_reg_addr[i])
			continue;
		__raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
	}
	if ((dram_type == DRAM_TYPE_LPDDR2) &&
	    (dram_over_temp_state != DRAM_OVER_TEMP_NONE))
		set_over_temp_timing(next_timing, dram_over_temp_state);
	wmb();
	barrier();
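	/* Note: __raw_writel() avoids a barrier per register; the single
	   wmb()/barrier() pair above orders all shadow writes before the
	   flow control markers below. */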

	/* On ddr3 when DLL is re-started predict MRS long wait count and
	   overwrite DFS table setting */
	if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
		overwrite_mrs_wait_cnt(next_timing, zcal_long);

	/* the last read below makes sure prev writes are completed */
	qrst_used = need_qrst(next_timing, last_timing,
			      emc_readl(EMC_SEL_DPD_CTRL));

	/* 5. flow control marker 1 (no EMC read access after this) */
	emc_writel(1, EMC_STALL_BEFORE_CLKCHANGE);

	/* 6. enable periodic QRST */
	if (qrst_used)
		periodic_qrst_enable(emc_cfg_reg, emc_dbg_reg);

	/* 6.1 disable auto-refresh to save time after clock change */
	emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 7. turn off DLL and enter self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (dll_change == DLL_CHANGE_OFF)
			emc_writel(next_timing->emc_mode_1, EMC_EMRS);
		emc_writel(DRAM_BROADCAST(dram_dev_num) |
			   EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
	}

	/* 8. flow control marker 2 */
	emc_writel(1, EMC_STALL_AFTER_CLKCHANGE);

	/* 8.1 enable write mux, update unshadowed pad control */
	emc_writel(emc_dbg_reg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
	emc_writel(next_timing->burst_regs[EMC_XM2CLKPADCTRL_INDEX],
		   EMC_XM2CLKPADCTRL);

	/* 9. restore periodic QRST, and disable write mux */
	if ((qrst_used) || (next_timing->emc_periodic_qrst !=
			    last_timing->emc_periodic_qrst)) {
		emc_cfg_reg = next_timing->emc_periodic_qrst ?
			emc_cfg_reg | EMC_CFG_PERIODIC_QRST :
			emc_cfg_reg & (~EMC_CFG_PERIODIC_QRST);
		emc_writel(emc_cfg_reg, EMC_CFG);
	}
	emc_writel(emc_dbg_reg, EMC_DBG);

	/* 10. exit self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3)
		emc_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);

	/* 11. set dram mode registers */
	set_dram_mode(next_timing, last_timing, dll_change);

	/* 12. issue zcal command if turning zcal on */
	if (zcal_long) {
		emc_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
		if (dram_dev_num > 1)
			emc_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
	}

	/* 13. flow control marker 3 */
	emc_writel(1, EMC_UNSTALL_RW_AFTER_CLKCHANGE);

	/* 14. read any MC register to ensure the programming is done,
	   change EMC clock source register (EMC read access restored),
	   wait for clk change completion */
	do_clock_change(clk_setting);

	/* 14.1 re-enable auto-refresh */
	emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 15. restore auto-cal */
	if (vref_cal_toggle)
		emc_writel(next_timing->emc_acal_interval,
			   EMC_AUTO_CAL_INTERVAL);

	/* 16. restore dynamic self-refresh */
	if (next_timing->rev >= 0x32)
		dyn_sref_enabled = next_timing->emc_dsr;
	if (dyn_sref_enabled) {
		emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
	}

	/* 17. set zcal wait count */
	if (zcal_long)
		emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);

	/* 18. update restored timing */
	udelay(2);
	emc_timing_update();

	/* 18.a restore early ACK */
	mc_writel(mc_override, MC_EMEM_ARB_OVERRIDE);
}

static inline void emc_get_timing(struct tegra_emc_table *timing)
{
	int i;

	for (i = 0; i < emc_num_burst_regs; i++) {
		if (burst_reg_addr[i])
			timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
		else
			timing->burst_regs[i] = 0;
	}
	timing->emc_acal_interval = 0;
	timing->emc_zcal_cnt_long = 0;
	timing->emc_mode_reset = 0;
	timing->emc_mode_1 = 0;
	timing->emc_mode_2 = 0;
	timing->emc_periodic_qrst = (emc_readl(EMC_CFG) &
				     EMC_CFG_PERIODIC_QRST) ? 1 : 0;
}

/* After deep sleep EMC power features are not restored.
 * Do it at run-time after the 1st clock change.
 */
static inline void emc_cfg_power_restore(void)
{
	u32 reg = emc_readl(EMC_CFG);
	u32 pwr_mask = EMC_CFG_PWR_MASK;

	if (tegra_emc_table[0].rev >= 0x32)
		pwr_mask &= ~EMC_CFG_DYN_SREF_ENABLE;

	if ((reg ^ emc_cfg_saved) & pwr_mask) {
		reg = (reg & (~pwr_mask)) | (emc_cfg_saved & pwr_mask);
		emc_writel(reg, EMC_CFG);
		emc_timing_update();
	}
}

/* The EMC registers have shadow registers. When the EMC clock is updated
 * in the clock controller, the shadow registers are copied to the active
 * registers, allowing glitchless memory bus frequency changes.
 * This function updates the shadow registers for a new clock frequency,
 * and relies on the clock lock on the emc clock to avoid races between
 * multiple frequency changes */
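/* Note: this is normally reached through the emc clock's set_rate
 * operation in the tegra3 clock code (assumed call path), under the
 * clock lock mentioned above, rather than called directly.
 */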
int tegra_emc_set_rate(unsigned long rate)
{
	int i;
	u32 clk_setting;
	const struct tegra_emc_table *last_timing;
	unsigned long flags;

	if (!tegra_emc_table)
		return -EINVAL;

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	for (i = 0; i < tegra_emc_table_size; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		if (tegra_emc_table[i].rate == rate)
			break;
	}

	if (i >= tegra_emc_table_size)
		return -EINVAL;

	if (!emc_timing) {
		/* cannot assume that boot timing matches dfs table even
		   if boot frequency matches one of the table nodes */
		emc_get_timing(&start_timing);
		last_timing = &start_timing;
	} else
		last_timing = emc_timing;

	clk_setting = tegra_emc_clk_sel[i].value;

	spin_lock_irqsave(&emc_access_lock, flags);
	emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
	if (!emc_timing)
		emc_cfg_power_restore();
	emc_timing = &tegra_emc_table[i];
	spin_unlock_irqrestore(&emc_access_lock, flags);

	emc_last_stats_update(i);

	pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);

	return 0;
}

/* Select the closest EMC rate that is at or above the requested rate */
long tegra_emc_round_rate(unsigned long rate)
{
	int i;
	int best = -1;
	unsigned long distance = ULONG_MAX;

	if (!tegra_emc_table)
		return clk_get_rate_locked(emc); /* no table - no rate change */

	if (!emc_enable)
		return -EINVAL;

	pr_debug("%s: %lu\n", __func__, rate);

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	for (i = 0; i < tegra_emc_table_size; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		if (tegra_emc_table[i].rate >= rate &&
		    (tegra_emc_table[i].rate - rate) < distance) {
			distance = tegra_emc_table[i].rate - rate;
			best = i;
		}
	}

	if (best < 0)
		return -EINVAL;

	pr_debug("%s: using %lu\n", __func__, tegra_emc_table[best].rate);

	return tegra_emc_table[best].rate * 1000;
}

struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
{
	int i;

	if (!tegra_emc_table)
		return NULL;

	pr_debug("%s: %lu\n", __func__, rate);

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	for (i = 0; i < tegra_emc_table_size; i++) {
		if (tegra_emc_table[i].rate == rate) {
			*div_value = (tegra_emc_clk_sel[i].value &
				      EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
			return tegra_emc_clk_sel[i].input;
		}
	}

	return NULL;
}

static const struct clk_mux_sel *find_matching_input(
	unsigned long table_rate,
	u32 *div_value)
{
	unsigned long inp_rate;
	const struct clk_mux_sel *sel;

	for (sel = emc->inputs; sel->input != NULL; sel++) {
		/* Table entries specify rate in kHz */
		inp_rate = clk_get_rate(sel->input) / 1000;

		if ((inp_rate >= table_rate) &&
		    (inp_rate % table_rate == 0)) {
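			/* Tegra peripheral dividers are 7.1 fixed point: a
			   register value n divides by (n + 2) / 2, so
			   n = 2 * input / target - 2 (exact here, since
			   inp_rate is an integer multiple of table_rate). */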
			*div_value = 2 * inp_rate / table_rate - 2;
			return sel;
		}
	}
	return NULL;
}

static void adjust_emc_dvfs_table(const struct tegra_emc_table *table,
				  int table_size)
{
	int i, j;
	unsigned long rate;

	if (table[0].rev < 0x33)
		return;

	for (i = 0; i < MAX_DVFS_FREQS; i++) {
		int mv = emc->dvfs->millivolts[i];
		if (!mv)
			break;

		/* For each dvfs voltage find maximum supported rate;
		   use 1MHz placeholder if not found */
		for (rate = 1000, j = 0; j < table_size; j++) {
			if (tegra_emc_clk_sel[j].input == NULL)
				continue;	/* invalid entry */

			if ((mv >= table[j].emc_min_mv) &&
			    (rate < table[j].rate))
				rate = table[j].rate;
		}
		/* Table entries specify rate in kHz */
		emc->dvfs->freqs[i] = rate * 1000;
	}
}

static bool is_emc_bridge(void)
{
	int mv;
	unsigned long rate;

	bridge = tegra_get_clock_by_name("bridge.emc");
	BUG_ON(!bridge);

	/* LPDDR2 does not need a bridge entry in DFS table: just lock bridge
	   rate at minimum so it won't interfere with emc bus operations */
	if (dram_type == DRAM_TYPE_LPDDR2) {
		clk_set_rate(bridge, 0);
		return true;
	}

	/* DDR3 requires EMC DFS table to include a bridge entry with frequency
	   above minimum bridge threshold, and voltage below bridge threshold */
	rate = clk_round_rate(bridge, TEGRA_EMC_BRIDGE_RATE_MIN);
	if (IS_ERR_VALUE(rate))
		return false;

	mv = tegra_dvfs_predict_millivolts(emc, rate);
	if (IS_ERR_VALUE(mv) || (mv > TEGRA_EMC_BRIDGE_MVOLTS_MIN))
		return false;

	if (clk_set_rate(bridge, rate))
		return false;

	return true;
}

static int tegra_emc_suspend_notify(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	if (event != PM_SUSPEND_PREPARE)
		return NOTIFY_OK;

	if (dram_type == DRAM_TYPE_DDR3) {
		if (clk_enable(bridge)) {
			pr_info("Tegra emc suspend: failed to enable bridge.emc\n");
			return NOTIFY_STOP;
		}
		pr_info("Tegra emc suspend: enabled bridge.emc\n");
	}
	return NOTIFY_OK;
}
static struct notifier_block tegra_emc_suspend_nb = {
	.notifier_call = tegra_emc_suspend_notify,
	.priority = 2,
};

static int tegra_emc_resume_notify(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	if (event != PM_POST_SUSPEND)
		return NOTIFY_OK;

	if (dram_type == DRAM_TYPE_DDR3) {
		clk_disable(bridge);
		pr_info("Tegra emc resume: disabled bridge.emc\n");
	}
	return NOTIFY_OK;
}
static struct notifier_block tegra_emc_resume_nb = {
	.notifier_call = tegra_emc_resume_notify,
	.priority = -1,
};

void tegra_init_emc(const struct tegra_emc_table *table, int table_size)
{
	int i, mv;
	u32 reg, div_value;
	bool max_entry = false;
	unsigned long boot_rate, max_rate;
	const struct clk_mux_sel *sel;

	emc_stats.clkchange_count = 0;
	spin_lock_init(&emc_stats.spinlock);
	emc_stats.last_update = get_jiffies_64();
	emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;

	boot_rate = clk_get_rate(emc) / 1000;
	max_rate = clk_get_max_rate(emc) / 1000;

	if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
		pr_err("tegra: unsupported DRAM type %u\n", dram_type);
		return;
	}

	if (emc->parent != tegra_get_clock_by_name("pll_m")) {
		pr_err("tegra: boot parent %s is not supported by EMC DFS\n",
		       emc->parent->name);
		return;
	}

	if (!table || !table_size) {
		pr_err("tegra: EMC DFS table is empty\n");
		return;
	}

	tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
	switch (table[0].rev) {
	case 0x30:
		emc_num_burst_regs = 105;
		break;
	case 0x31:
	case 0x32:
	case 0x33:
		emc_num_burst_regs = 107;
		break;
	default:
		pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
		       table[0].rev);
		return;
	}

	/* Match EMC source/divider settings with table entries */
	for (i = 0; i < tegra_emc_table_size; i++) {
		unsigned long table_rate = table[i].rate;
		if (!table_rate)
			continue;

		BUG_ON(table[i].rev != table[0].rev);

		sel = find_matching_input(table_rate, &div_value);
		if (!sel)
			continue;

		if (table_rate == boot_rate)
			emc_stats.last_sel = i;

		if (table_rate == max_rate)
			max_entry = true;

		tegra_emc_clk_sel[i] = *sel;
		BUG_ON(div_value >
		       (EMC_CLK_DIV_MASK >> EMC_CLK_DIV_SHIFT));
		tegra_emc_clk_sel[i].value <<= EMC_CLK_SOURCE_SHIFT;
		tegra_emc_clk_sel[i].value |= (div_value << EMC_CLK_DIV_SHIFT);

		if ((div_value == 0) &&
		    (tegra_emc_clk_sel[i].input == emc->parent)) {
			tegra_emc_clk_sel[i].value |= EMC_CLK_LOW_JITTER_ENABLE;
		}

		if (table[i].burst_regs[MC_EMEM_ARB_MISC0_INDEX] &
		    MC_EMEM_ARB_MISC0_EMC_SAME_FREQ)
			tegra_emc_clk_sel[i].value |= EMC_CLK_MC_SAME_FREQ;
	}

	/* Validate EMC rate and voltage limits */
	if (!max_entry) {
		pr_err("tegra: invalid EMC DFS table: entry for max rate %lu kHz is not found\n",
		       max_rate);
		return;
	}

	tegra_emc_table = table;

	adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
	mv = tegra_dvfs_predict_millivolts(emc, max_rate * 1000);
	if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
		tegra_emc_table = NULL;
		pr_err("tegra: invalid EMC DFS table: maximum rate %lu kHz does not match nominal voltage %d\n",
		       max_rate, emc->dvfs->max_millivolts);
		return;
	}

	if (!is_emc_bridge()) {
		tegra_emc_table = NULL;
		pr_err("tegra: invalid EMC DFS table: emc bridge not found\n");
		return;
	}
	pr_info("tegra: validated EMC DFS table\n");

	/* Configure clock change mode according to dram type */
	reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
	reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
		EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
	emc_writel(reg, EMC_CFG_2);

	register_pm_notifier(&tegra_emc_suspend_nb);
	register_pm_notifier(&tegra_emc_resume_nb);
}

void tegra_emc_timing_invalidate(void)
{
	emc_timing = NULL;
}

void tegra_init_dram_bit_map(const u32 *bit_map, int map_size)
{
	BUG_ON(map_size != 32);
	dram_to_soc_bit_map = bit_map;
}

void tegra_emc_dram_type_init(struct clk *c)
{
	emc = c;

	dram_type = (emc_readl(EMC_FBIO_CFG5) &
		     EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;
	if (dram_type == DRAM_TYPE_DDR3)
		emc->min_rate = EMC_MIN_RATE_DDR3;

	dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
	emc_cfg_saved = emc_readl(EMC_CFG);
}

int tegra_emc_get_dram_type(void)
{
	return dram_type;
}

static u32 soc_to_dram_bit_swap(u32 soc_val, u32 dram_mask, u32 dram_shift)
{
	int bit;
	u32 dram_val = 0;

	/* tegra clock definitions always use shifted masks */
	if (!dram_to_soc_bit_map)
		return soc_val & dram_mask;

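	/* Walk the DRAM-side bits covered by dram_mask; for each,
	   dram_to_soc_bit_map[bit] gives the SoC-side mask that carries that
	   DRAM bit, accounting for board-level pin swizzling. */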
	for (bit = dram_shift; bit < 32; bit++) {
		u32 dram_bit_mask = 0x1 << bit;
		u32 soc_bit_mask = dram_to_soc_bit_map[bit];

		if (!(dram_bit_mask & dram_mask))
			break;

		if (soc_bit_mask & soc_val)
			dram_val |= dram_bit_mask;
	}

	return dram_val;
}

static int emc_read_mrr(int dev, int addr)
{
	int ret;
	u32 val;

	if (dram_type != DRAM_TYPE_LPDDR2)
		return -ENODEV;

	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
	if (ret)
		return ret;

	val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
	val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
	emc_writel(val, EMC_MRR);

	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
	if (ret)
		return ret;

	val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
	return val;
}

int tegra_emc_get_dram_temperature(void)
{
	int mr4;
	unsigned long flags;

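	/* LPDDR2 mode register 4 reports the device temperature band that
	   sets the required refresh rate (JEDEC JESD209-2). */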
	spin_lock_irqsave(&emc_access_lock, flags);

	mr4 = emc_read_mrr(0, 4);
	if (IS_ERR_VALUE(mr4)) {
		spin_unlock_irqrestore(&emc_access_lock, flags);
		return mr4;
	}
	spin_unlock_irqrestore(&emc_access_lock, flags);

	mr4 = soc_to_dram_bit_swap(
		mr4, LPDDR2_MR4_TEMP_MASK, LPDDR2_MR4_TEMP_SHIFT);
	return mr4;
}

int tegra_emc_set_over_temp_state(unsigned long state)
{
	unsigned long flags;

	if (dram_type != DRAM_TYPE_LPDDR2)
		return -ENODEV;

	spin_lock_irqsave(&emc_access_lock, flags);

	/* Update refresh timing if state changed */
	if (emc_timing && (dram_over_temp_state != state)) {
		set_over_temp_timing(emc_timing, state);
		emc_timing_update();
		if (state != DRAM_OVER_TEMP_NONE)
			emc_writel(EMC_REF_FORCE_CMD, EMC_REF);
		dram_over_temp_state = state;
	}
	spin_unlock_irqrestore(&emc_access_lock, flags);
	return 0;
}

#ifdef CONFIG_DEBUG_FS

static struct dentry *emc_debugfs_root;

static int emc_stats_show(struct seq_file *s, void *data)
{
	int i;

	emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);

	seq_printf(s, "%-10s %-10s\n", "rate kHz", "time");
	for (i = 0; i < tegra_emc_table_size; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		seq_printf(s, "%-10lu %-10llu\n", tegra_emc_table[i].rate,
			   cputime64_to_clock_t(emc_stats.time_at_clock[i]));
	}
	seq_printf(s, "%-15s %llu\n", "transitions:",
		   emc_stats.clkchange_count);
	seq_printf(s, "%-15s %llu\n", "time-stamp:",
		   cputime64_to_clock_t(emc_stats.last_update));

	return 0;
}

static int emc_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, emc_stats_show, inode->i_private);
}

static const struct file_operations emc_stats_fops = {
	.open		= emc_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dram_temperature_get(void *data, u64 *val)
{
	*val = tegra_emc_get_dram_temperature();
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
			NULL, "%lld\n");

static int over_temp_state_get(void *data, u64 *val)
{
	*val = dram_over_temp_state;
	return 0;
}
static int over_temp_state_set(void *data, u64 val)
{
	tegra_emc_set_over_temp_state(val);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(over_temp_state_fops, over_temp_state_get,
			over_temp_state_set, "%llu\n");

static int __init tegra_emc_debug_init(void)
{
	if (!tegra_emc_table)
		return 0;

	emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
	if (!emc_debugfs_root)
		return -ENOMEM;

	if (!debugfs_create_file(
		"stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
		goto err_out;

	if (!debugfs_create_file("dram_temperature", S_IRUGO, emc_debugfs_root,
				 NULL, &dram_temperature_fops))
		goto err_out;

	if (!debugfs_create_file("over_temp_state", S_IRUGO | S_IWUSR,
				 emc_debugfs_root, NULL, &over_temp_state_fops))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(emc_debugfs_root);
	return -ENOMEM;
}

late_initcall(tegra_emc_debug_init);
#endif