author	Aneesh V <aneesh@ti.com>	2012-04-27 08:24:06 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-05-02 13:52:09 -0400
commit	a93de288aad3b046935d626065d4bcbb7d93b093 (patch)
tree	69fb0c026b801c22f166fabf2844440985dbbd79
parent	7ec944538dde3d7f490bd4d2619051789db5c3c3 (diff)
memory: emif: handle frequency and voltage change events
Change SDRAM timings and other settings as necessary on voltage and
frequency changes. We calculate these register settings based on data
from the device data sheet and inputs such as frequency, voltage state
(stable or ramping), temperature level, etc.

TODO: frequency and voltage change handling needs to be integrated with
the clock framework and regulator framework respectively. This is not
done today due to missing pieces in the kernel.

Signed-off-by: Aneesh V <aneesh@ti.com>
Reviewed-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Reviewed-by: Benoit Cousson <b-cousson@ti.com>
[santosh.shilimkar@ti.com: Moved to drivers/memory from drivers/misc]
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-by: Lokesh Vutla <lokeshvutla@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	drivers/memory/emif.c	894
-rw-r--r--	drivers/memory/emif.h	130
2 files changed, 1020 insertions(+), 4 deletions(-)
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 7486d7ef0826..bd116eb8c738 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -21,6 +21,7 @@
 #include <linux/seq_file.h>
 #include <linux/module.h>
 #include <linux/list.h>
+#include <linux/spinlock.h>
 #include <memory/jedec_ddr.h>
 #include "emif.h"
 
@@ -37,20 +38,595 @@
  * @node:		node in the device list
  * @base:		base address of memory-mapped IO registers.
  * @dev:		device pointer.
+ * @addressing:		table with addressing information from the spec
+ * @regs_cache:		An array of 'struct emif_regs' that stores
+ *			calculated register values for different
+ *			frequencies, to avoid re-calculating them on
+ *			each DVFS transition.
+ * @curr_regs:		The set of register values used in the last
+ *			frequency change (i.e. corresponding to the
+ *			frequency in effect at the moment)
  * @plat_data:		Pointer to saved platform data.
  */
 struct emif_data {
 	u8				duplicate;
 	u8				temperature_level;
+	u8				lpmode;
 	struct list_head		node;
+	unsigned long			irq_state;
 	void __iomem			*base;
 	struct device			*dev;
+	const struct lpddr2_addressing	*addressing;
+	struct emif_regs		*regs_cache[EMIF_MAX_NUM_FREQUENCIES];
+	struct emif_regs		*curr_regs;
 	struct emif_platform_data	*plat_data;
 };
 
 static struct emif_data		*emif1;
+static spinlock_t		emif_lock;
+static unsigned long		irq_state;
+static u32			t_ck; /* DDR clock period in ps */
 static LIST_HEAD(device_list);
 
+/*
+ * Calculate the period of DDR clock from frequency value
+ */
+static void set_ddr_clk_period(u32 freq)
+{
+	/* Divide 10^12 by frequency to get period in ps */
+	t_ck = (u32)DIV_ROUND_UP_ULL(1000000000000ull, freq);
+}
+
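/*
 * Worked example (illustrative, not part of the patch): for
 * freq = 400000000 (400 MHz),
 *	t_ck = DIV_ROUND_UP(10^12, 400000000) = 2500 ps,
 * and the helpers below convert timings from picoseconds to DDR
 * cycles by dividing against this period.
 */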
+/*
+ * Get the CL from SDRAM_CONFIG register
+ */
+static u32 get_cl(struct emif_data *emif)
+{
+	u32 cl;
+	void __iomem *base = emif->base;
+
+	cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT;
+
+	return cl;
+}
+
+static void set_lpmode(struct emif_data *emif, u8 lpmode)
+{
+	u32 temp;
+	void __iomem *base = emif->base;
+
+	temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
+	temp &= ~LP_MODE_MASK;
+	temp |= (lpmode << LP_MODE_SHIFT);
+	writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
+}
+
+static void do_freq_update(void)
+{
+	struct emif_data *emif;
+
+	/*
+	 * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
+	 *
+	 * i728 DESCRIPTION:
+	 * The EMIF automatically puts the SDRAM into self-refresh mode
+	 * after the EMIF has not performed accesses during
+	 * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
+	 * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
+	 * to 0x2. If during a small window the following three events
+	 * occur:
+	 * - The SR_TIMING counter expires
+	 * - And a frequency change is requested
+	 * - And an OCP access is requested
+	 * then it causes an unstable clock on the DDR interface.
+	 *
+	 * WORKAROUND
+	 * To avoid the occurrence of the three events, the workaround
+	 * is to disable self-refresh when requesting a frequency
+	 * change. Before requesting a frequency change the software must
+	 * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
+	 * frequency change is done, the software can reprogram
+	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
+	 */
+	list_for_each_entry(emif, &device_list, node) {
+		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
+			set_lpmode(emif, EMIF_LP_MODE_DISABLE);
+	}
+
+	/*
+	 * TODO: Do FREQ_UPDATE here when an API
+	 * is available for this as part of the new
+	 * clock framework
+	 */
+
+	list_for_each_entry(emif, &device_list, node) {
+		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
+			set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
+	}
+}
+
+/* Find addressing table entry based on the device's type and density */
+static const struct lpddr2_addressing *get_addressing_table(
+	const struct ddr_device_info *device_info)
+{
+	u32 index, type, density;
+
+	type = device_info->type;
+	density = device_info->density;
+
+	switch (type) {
+	case DDR_TYPE_LPDDR2_S4:
+		index = density - 1;
+		break;
+	case DDR_TYPE_LPDDR2_S2:
+		switch (density) {
+		case DDR_DENSITY_1Gb:
+		case DDR_DENSITY_2Gb:
+			index = density + 3;
+			break;
+		default:
+			index = density - 1;
+		}
+		break;
+	default:
+		return NULL;
+	}
+
+	return &lpddr2_jedec_addressing_table[index];
+}
+
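/*
 * Index example (illustrative, not part of the patch): for LPDDR2-S4
 * parts the table index is simply the density constant minus one;
 * 1Gb/2Gb LPDDR2-S2 parts are redirected four slots further down
 * (density + 3), presumably because their bank/row/column addressing
 * differs from S4 parts of the same density.
 */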
+/*
+ * Find the right timing table from the array of timing
+ * tables of the device using DDR clock frequency
+ */
+static const struct lpddr2_timings *get_timings_table(struct emif_data *emif,
+		u32 freq)
+{
+	u32 i, min, max, freq_nearest;
+	const struct lpddr2_timings *timings = NULL;
+	const struct lpddr2_timings *timings_arr = emif->plat_data->timings;
+	struct device *dev = emif->dev;
+
+	/* Start with a very high frequency - 1GHz */
+	freq_nearest = 1000000000;
+
+	/*
+	 * Find the timings table such that:
+	 *  1. the frequency range covers the required frequency (safe) AND
+	 *  2. the max_freq is closest to the required frequency (optimal)
+	 */
+	for (i = 0; i < emif->plat_data->timings_arr_size; i++) {
+		max = timings_arr[i].max_freq;
+		min = timings_arr[i].min_freq;
+		if ((freq >= min) && (freq <= max) && (max < freq_nearest)) {
+			freq_nearest = max;
+			timings = &timings_arr[i];
+		}
+	}
+
+	if (!timings)
+		dev_err(dev, "%s: couldn't find timings for - %dHz\n",
+			__func__, freq);
+
+	dev_dbg(dev, "%s: timings table: freq %d, speed bin freq %d\n",
+		__func__, freq, freq_nearest);
+
+	return timings;
+}
+
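/*
 * Selection example (illustrative, not part of the patch): with two
 * speed-bin tables, one valid up to 200 MHz and one up to 400 MHz,
 * a request for 166 MHz falls in both ranges; the 200 MHz table wins
 * because its max_freq is the closest bin at or above the requested
 * frequency.
 */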
+static u32 get_sdram_ref_ctrl_shdw(u32 freq,
+		const struct lpddr2_addressing *addressing)
+{
+	u32 ref_ctrl_shdw = 0, val = 0, freq_khz, t_refi;
+
+	/* Scale down frequency and t_refi to avoid overflow */
+	freq_khz = freq / 1000;
+	t_refi = addressing->tREFI_ns / 100;
+
+	/*
+	 * Refresh rate to be set is 'tREFI (in us) * freq (in MHz)';
+	 * division by 10000 accounts for the change in units
+	 */
+	val = t_refi * freq_khz / 10000;
+	ref_ctrl_shdw |= val << REFRESH_RATE_SHIFT;
+
+	return ref_ctrl_shdw;
+}
+
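/*
 * Worked example (illustrative, not part of the patch): for a device
 * with tREFI = 7800 ns (an assumed, typical LPDDR2 value; the real
 * number comes from the addressing table) running at 400 MHz:
 *	t_refi   = 7800 / 100        = 78
 *	freq_khz = 400000000 / 1000  = 400000
 *	val      = 78 * 400000 / 10000 = 3120 cycles,
 * i.e. one auto-refresh every 7.8 us expressed in DDR clocks.
 */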
+static u32 get_sdram_tim_1_shdw(const struct lpddr2_timings *timings,
+		const struct lpddr2_min_tck *min_tck,
+		const struct lpddr2_addressing *addressing)
+{
+	u32 tim1 = 0, val = 0;
+
+	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
+	tim1 |= val << T_WTR_SHIFT;
+
+	if (addressing->num_banks == B8)
+		val = DIV_ROUND_UP(timings->tFAW, t_ck*4);
+	else
+		val = max(min_tck->tRRD, DIV_ROUND_UP(timings->tRRD, t_ck));
+	tim1 |= (val - 1) << T_RRD_SHIFT;
+
+	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab, t_ck) - 1;
+	tim1 |= val << T_RC_SHIFT;
+
+	val = max(min_tck->tRASmin, DIV_ROUND_UP(timings->tRAS_min, t_ck));
+	tim1 |= (val - 1) << T_RAS_SHIFT;
+
+	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
+	tim1 |= val << T_WR_SHIFT;
+
+	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD, t_ck)) - 1;
+	tim1 |= val << T_RCD_SHIFT;
+
+	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab, t_ck)) - 1;
+	tim1 |= val << T_RP_SHIFT;
+
+	return tim1;
+}
+
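/*
 * Worked example (illustrative, not part of the patch): for the T_WTR
 * field with tWTR = 7500 ps, min_tck->tWTR = 2 (assumed) and
 * t_ck = 2500 ps (400 MHz):
 *	DIV_ROUND_UP(7500, 2500) = 3 cycles
 *	max(2, 3) - 1            = 2,
 * i.e. each field encodes "number of cycles - 1", floored at the
 * JEDEC minimum number of clocks from min_tck.
 */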
+static u32 get_sdram_tim_1_shdw_derated(const struct lpddr2_timings *timings,
+		const struct lpddr2_min_tck *min_tck,
+		const struct lpddr2_addressing *addressing)
+{
+	u32 tim1 = 0, val = 0;
+
+	val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
+	tim1 = val << T_WTR_SHIFT;
+
+	/*
+	 * tFAW is approximately 4 times tRRD. So add 1875*4 = 7500ps
+	 * to tFAW for de-rating
+	 */
+	if (addressing->num_banks == B8) {
+		val = DIV_ROUND_UP(timings->tFAW + 7500, 4 * t_ck) - 1;
+	} else {
+		val = DIV_ROUND_UP(timings->tRRD + 1875, t_ck);
+		val = max(min_tck->tRRD, val) - 1;
+	}
+	tim1 |= val << T_RRD_SHIFT;
+
+	val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab + 1875, t_ck);
+	tim1 |= (val - 1) << T_RC_SHIFT;
+
+	val = DIV_ROUND_UP(timings->tRAS_min + 1875, t_ck);
+	val = max(min_tck->tRASmin, val) - 1;
+	tim1 |= val << T_RAS_SHIFT;
+
+	val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
+	tim1 |= val << T_WR_SHIFT;
+
+	val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD + 1875, t_ck));
+	tim1 |= (val - 1) << T_RCD_SHIFT;
+
+	val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab + 1875, t_ck));
+	tim1 |= (val - 1) << T_RP_SHIFT;
+
+	return tim1;
+}
+
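/*
 * Derating example (illustrative, not part of the patch): the derated
 * path adds 1875 ps of margin to each core timing. With
 * tRCD = 18000 ps (assumed) and t_ck = 2500 ps:
 *	nominal: DIV_ROUND_UP(18000, 2500)        = 8 cycles
 *	derated: DIV_ROUND_UP(18000 + 1875, 2500) = 8 cycles,
 * while tRCD = 17500 ps would go from 7 to 8 cycles - the extra
 * margin only costs a cycle when the slack within the last clock is
 * below 1875 ps.
 */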
+static u32 get_sdram_tim_2_shdw(const struct lpddr2_timings *timings,
+		const struct lpddr2_min_tck *min_tck,
+		const struct lpddr2_addressing *addressing,
+		u32 type)
+{
+	u32 tim2 = 0, val = 0;
+
+	val = min_tck->tCKE - 1;
+	tim2 |= val << T_CKE_SHIFT;
+
+	val = max(min_tck->tRTP, DIV_ROUND_UP(timings->tRTP, t_ck)) - 1;
+	tim2 |= val << T_RTP_SHIFT;
+
+	/* For LPDDR2, tXSNR = tRFCab_ps + 10 ns */
+	val = DIV_ROUND_UP(addressing->tRFCab_ps + 10000, t_ck) - 1;
+	tim2 |= val << T_XSNR_SHIFT;
+
+	/* XSRD same as XSNR for LPDDR2 */
+	tim2 |= val << T_XSRD_SHIFT;
+
+	val = max(min_tck->tXP, DIV_ROUND_UP(timings->tXP, t_ck)) - 1;
+	tim2 |= val << T_XP_SHIFT;
+
+	return tim2;
+}
+
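/*
 * Worked example (illustrative, not part of the patch): with
 * tRFCab = 130000 ps (assumed; the real value comes from the
 * addressing table) and t_ck = 2500 ps:
 *	tXSNR = 130000 + 10000 = 140000 ps
 *	DIV_ROUND_UP(140000, 2500) - 1 = 56 - 1 = 55,
 * and the same value is reused for the T_XSRD field, which is the
 * same as tXSNR on LPDDR2.
 */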
+static u32 get_sdram_tim_3_shdw(const struct lpddr2_timings *timings,
+		const struct lpddr2_min_tck *min_tck,
+		const struct lpddr2_addressing *addressing,
+		u32 type, u32 ip_rev, u32 derated)
+{
+	u32 tim3 = 0, val = 0, t_dqsck;
+
+	val = timings->tRAS_max_ns / addressing->tREFI_ns - 1;
+	val = val > 0xF ? 0xF : val;
+	tim3 |= val << T_RAS_MAX_SHIFT;
+
+	val = DIV_ROUND_UP(addressing->tRFCab_ps, t_ck) - 1;
+	tim3 |= val << T_RFC_SHIFT;
+
+	t_dqsck = (derated == EMIF_DERATED_TIMINGS) ?
+		timings->tDQSCK_max_derated : timings->tDQSCK_max;
+	if (ip_rev == EMIF_4D5)
+		val = DIV_ROUND_UP(t_dqsck + 1000, t_ck) - 1;
+	else
+		val = DIV_ROUND_UP(t_dqsck, t_ck) - 1;
+
+	tim3 |= val << T_TDQSCKMAX_SHIFT;
+
+	val = DIV_ROUND_UP(timings->tZQCS, t_ck) - 1;
+	tim3 |= val << ZQ_ZQCS_SHIFT;
+
+	val = DIV_ROUND_UP(timings->tCKESR, t_ck);
+	val = max(min_tck->tCKESR, val) - 1;
+	tim3 |= val << T_CKESR_SHIFT;
+
+	if (ip_rev == EMIF_4D5) {
+		tim3 |= (EMIF_T_CSTA - 1) << T_CSTA_SHIFT;
+
+		val = DIV_ROUND_UP(EMIF_T_PDLL_UL, 128) - 1;
+		tim3 |= val << T_PDLL_UL_SHIFT;
+	}
+
+	return tim3;
+}
+
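/*
 * Worked example (illustrative, not part of the patch): with
 * tRAS_max_ns = 70000 and tREFI_ns = 7800 (assumed values):
 *	70000 / 7800 - 1 = 8 - 1 = 7,
 * well under the 0xF ceiling; T_RAS_MAX thus counts tRAS(max) in
 * units of refresh intervals rather than clock cycles.
 */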
+static u32 get_read_idle_ctrl_shdw(u8 volt_ramp)
+{
+	u32 idle = 0, val = 0;
+
+	/*
+	 * Maximum interval in normal conditions, and more frequent
+	 * forced read idles when voltage is ramping
+	 */
+	if (volt_ramp)
+		val = READ_IDLE_INTERVAL_DVFS / t_ck / 64 - 1;
+	else
+		val = 0x1FF;
+
+	/*
+	 * READ_IDLE_CTRL register in EMIF4D has the same offset and fields
+	 * as DLL_CALIB_CTRL in EMIF4D5, so use the same shifts
+	 */
+	idle |= val << DLL_CALIB_INTERVAL_SHIFT;
+	idle |= EMIF_READ_IDLE_LEN_VAL << ACK_WAIT_SHIFT;
+
+	return idle;
+}
+
+static u32 get_dll_calib_ctrl_shdw(u8 volt_ramp)
+{
+	u32 calib = 0, val = 0;
+
+	if (volt_ramp == DDR_VOLTAGE_RAMPING)
+		val = DLL_CALIB_INTERVAL_DVFS / t_ck / 16 - 1;
+	else
+		val = 0; /* Disabled when voltage is stable */
+
+	calib |= val << DLL_CALIB_INTERVAL_SHIFT;
+	calib |= DLL_CALIB_ACK_WAIT_VAL << ACK_WAIT_SHIFT;
+
+	return calib;
+}
+
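/*
 * Worked example (illustrative, not part of the patch): during DVFS
 * with DLL_CALIB_INTERVAL_DVFS = 1000000 (1 us in ps) and
 * t_ck = 2500 ps:
 *	1000000 / 2500 = 400 DDR cycles
 *	400 / 16 - 1   = 24,
 * since the DLL_CALIB_INTERVAL field counts in units of 16 cycles.
 */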
+static u32 get_ddr_phy_ctrl_1_attilaphy_4d(const struct lpddr2_timings *timings,
+		u32 freq, u8 RL)
+{
+	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY, val = 0;
+
+	val = RL + DIV_ROUND_UP(timings->tDQSCK_max, t_ck) - 1;
+	phy |= val << READ_LATENCY_SHIFT_4D;
+
+	if (freq <= 100000000)
+		val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY;
+	else if (freq <= 200000000)
+		val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY;
+	else
+		val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY;
+
+	phy |= val << DLL_SLAVE_DLY_CTRL_SHIFT_4D;
+
+	return phy;
+}
+
+static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl)
+{
+	u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY, half_delay;
+
+	/*
+	 * The DLL operates at 266 MHz. If the DDR frequency is near 266 MHz,
+	 * half-delay is not needed; otherwise set half-delay
+	 */
+	if (freq >= 265000000 && freq < 267000000)
+		half_delay = 0;
+	else
+		half_delay = 1;
+
+	phy |= half_delay << DLL_HALF_DELAY_SHIFT_4D5;
+	phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS,
+		t_ck) - 1) << READ_LATENCY_SHIFT_4D5);
+
+	return phy;
+}
+
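/*
 * Read-latency example (illustrative, not part of the patch): with
 * CL = 6 read back from SDRAM_CONFIG, a total PHY read latency budget
 * of EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS = 10000 ps and
 * t_ck = 2500 ps:
 *	6 + DIV_ROUND_UP(10000, 2500) - 1 = 6 + 4 - 1 = 9
 * goes into the READ_LATENCY field of DDR_PHY_CTRL_1.
 */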
+static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void)
+{
+	u32 fifo_we_slave_ratio;
+
+	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
+		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
+
+	return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
+		fifo_we_slave_ratio << 22;
+}
+
+static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void)
+{
+	u32 fifo_we_slave_ratio;
+
+	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
+		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
+
+	return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
+		fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
+}
+
+static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void)
+{
+	u32 fifo_we_slave_ratio;
+
+	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
+		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
+
+	return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
+		fifo_we_slave_ratio << 13;
+}
+
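/*
 * Worked example (illustrative, not part of the patch): with a DQS
 * gate opening delay of 1200 ps and t_ck = 2500 ps:
 *	fifo_we_slave_ratio = DIV_ROUND_CLOSEST(1200 * 256, 2500) = 123,
 * i.e. the delay expressed as a fraction of a clock in 1/256 steps.
 * EXT_PHY_CTRL_2/3/4 then pack the same 11-bit ratio once per byte
 * lane at the bit offsets seen above.
 */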
+static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
+{
+	u32 pwr_mgmt_ctrl = 0, timeout;
+	u32 lpmode = EMIF_LP_MODE_SELF_REFRESH;
+	u32 timeout_perf = EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
+	u32 timeout_pwr = EMIF_LP_MODE_TIMEOUT_POWER;
+	u32 freq_threshold = EMIF_LP_MODE_FREQ_THRESHOLD;
+
+	struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;
+
+	if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
+		lpmode = cust_cfgs->lpmode;
+		timeout_perf = cust_cfgs->lpmode_timeout_performance;
+		timeout_pwr = cust_cfgs->lpmode_timeout_power;
+		freq_threshold = cust_cfgs->lpmode_freq_threshold;
+	}
+
+	/* Timeout based on DDR frequency */
+	timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;
+
+	/* The value to be set in register is "log2(timeout) - 3" */
+	if (timeout < 16) {
+		timeout = 0;
+	} else {
+		timeout = __fls(timeout) - 3;
+		if (timeout & (timeout - 1))
+			timeout++;
+	}
+
+	switch (lpmode) {
+	case EMIF_LP_MODE_CLOCK_STOP:
+		pwr_mgmt_ctrl = (timeout << CS_TIM_SHIFT) |
+			SR_TIM_MASK | PD_TIM_MASK;
+		break;
+	case EMIF_LP_MODE_SELF_REFRESH:
+		/* Workaround for errata i735 */
+		if (timeout < 6)
+			timeout = 6;
+
+		pwr_mgmt_ctrl = (timeout << SR_TIM_SHIFT) |
+			CS_TIM_MASK | PD_TIM_MASK;
+		break;
+	case EMIF_LP_MODE_PWR_DN:
+		pwr_mgmt_ctrl = (timeout << PD_TIM_SHIFT) |
+			CS_TIM_MASK | SR_TIM_MASK;
+		break;
+	case EMIF_LP_MODE_DISABLE:
+	default:
+		pwr_mgmt_ctrl = CS_TIM_MASK |
+			PD_TIM_MASK | SR_TIM_MASK;
+	}
+
+	/* No CS_TIM in EMIF_4D5 */
+	if (ip_rev == EMIF_4D5)
+		pwr_mgmt_ctrl &= ~CS_TIM_MASK;
+
+	pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;
+
+	return pwr_mgmt_ctrl;
+}
+
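/*
 * Encoding example (illustrative, not part of the patch): the timeout
 * fields store "log2(cycles) - 3", so the default
 * EMIF_LP_MODE_TIMEOUT_PERFORMANCE of 2048 cycles becomes
 *	__fls(2048) - 3 = 11 - 3 = 8,
 * and the hardware expands a field value f back to 2^(f + 3) cycles.
 */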
+/*
+ * Program EMIF shadow registers that are not dependent on temperature
+ * or voltage
+ */
+static void setup_registers(struct emif_data *emif, struct emif_regs *regs)
+{
+	void __iomem *base = emif->base;
+
+	writel(regs->sdram_tim2_shdw, base + EMIF_SDRAM_TIMING_2_SHDW);
+	writel(regs->phy_ctrl_1_shdw, base + EMIF_DDR_PHY_CTRL_1_SHDW);
+
+	/* Settings specific for EMIF4D5 */
+	if (emif->plat_data->ip_rev != EMIF_4D5)
+		return;
+	writel(regs->ext_phy_ctrl_2_shdw, base + EMIF_EXT_PHY_CTRL_2_SHDW);
+	writel(regs->ext_phy_ctrl_3_shdw, base + EMIF_EXT_PHY_CTRL_3_SHDW);
+	writel(regs->ext_phy_ctrl_4_shdw, base + EMIF_EXT_PHY_CTRL_4_SHDW);
+}
+
+/*
+ * When voltage ramps, DLL calibration and forced read idle should
+ * happen more often
+ */
+static void setup_volt_sensitive_regs(struct emif_data *emif,
+		struct emif_regs *regs, u32 volt_state)
+{
+	u32 calib_ctrl;
+	void __iomem *base = emif->base;
+
+	/*
+	 * EMIF_READ_IDLE_CTRL in EMIF4D refers to the same register as
+	 * EMIF_DLL_CALIB_CTRL in EMIF4D5, and dll_calib_ctrl_shdw_*
+	 * is an alias of the respective read_idle_ctrl_shdw_* (members of
+	 * a union). So, the code below takes care of both cases
+	 */
+	if (volt_state == DDR_VOLTAGE_RAMPING)
+		calib_ctrl = regs->dll_calib_ctrl_shdw_volt_ramp;
+	else
+		calib_ctrl = regs->dll_calib_ctrl_shdw_normal;
+
+	writel(calib_ctrl, base + EMIF_DLL_CALIB_CTRL_SHDW);
+}
+
+/*
+ * setup_temperature_sensitive_regs() - set the timings for temperature
+ * sensitive registers. This happens once at initialisation time based
+ * on the temperature at boot time and subsequently based on the temperature
+ * alert interrupt. Temperature alert can happen when the temperature
+ * increases or drops. So this function can have the effect of either
+ * derating the timings or going back to nominal values.
+ */
+static void setup_temperature_sensitive_regs(struct emif_data *emif,
+		struct emif_regs *regs)
+{
+	u32 tim1, tim3, ref_ctrl, type;
+	void __iomem *base = emif->base;
+	u32 temperature;
+
+	type = emif->plat_data->device_info->type;
+
+	tim1 = regs->sdram_tim1_shdw;
+	tim3 = regs->sdram_tim3_shdw;
+	ref_ctrl = regs->ref_ctrl_shdw;
+
+	/* No de-rating for non-lpddr2 devices */
+	if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
+		goto out;
+
+	temperature = emif->temperature_level;
+	if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
+		ref_ctrl = regs->ref_ctrl_shdw_derated;
+	} else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
+		tim1 = regs->sdram_tim1_shdw_derated;
+		tim3 = regs->sdram_tim3_shdw_derated;
+		ref_ctrl = regs->ref_ctrl_shdw_derated;
+	}
+
+out:
+	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
+	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
+	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
+}
+
 static void get_default_timings(struct emif_data *emif)
 {
 	struct emif_platform_data *pd = emif->plat_data;
@@ -234,10 +810,8 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
 		goto error;
 	}
 
-	if (!emif1)
-		emif1 = emif;
-
 	list_add(&emif->node, &device_list);
+	emif->addressing = get_addressing_table(emif->plat_data->device_info);
 
 	/* Save pointers to each other in emif and device structures */
 	emif->dev = &pdev->dev;
@@ -257,6 +831,18 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
 		goto error;
 	}
 
+	/* One-time actions taken on probing the first device */
+	if (!emif1) {
+		emif1 = emif;
+		spin_lock_init(&emif_lock);
+
+		/*
+		 * TODO: register notifiers for frequency and voltage
+		 * change here once the respective frameworks are
+		 * available
+		 */
+	}
+
 	dev_info(&pdev->dev, "%s: device configured with addr = %p\n",
 		__func__, emif->base);
 
@@ -265,6 +851,308 @@ error:
 	return -ENODEV;
 }
 
+static int get_emif_reg_values(struct emif_data *emif, u32 freq,
+		struct emif_regs *regs)
+{
+	u32 cs1_used, ip_rev, phy_type;
+	u32 cl, type;
+	const struct lpddr2_timings *timings;
+	const struct lpddr2_min_tck *min_tck;
+	const struct ddr_device_info *device_info;
+	const struct lpddr2_addressing *addressing;
+	struct emif_data *emif_for_calc;
+	struct device *dev;
+	const struct emif_custom_configs *custom_configs;
+
+	dev = emif->dev;
+	/*
+	 * If the devices on this EMIF instance are a duplicate of those
+	 * on EMIF1, use EMIF1 details for the calculation
+	 */
+	emif_for_calc = emif->duplicate ? emif1 : emif;
+	timings = get_timings_table(emif_for_calc, freq);
+	addressing = emif_for_calc->addressing;
+	if (!timings || !addressing) {
+		dev_err(dev, "%s: not enough data available for %dHz",
+			__func__, freq);
+		return -1;
+	}
+
+	device_info = emif_for_calc->plat_data->device_info;
+	type = device_info->type;
+	cs1_used = device_info->cs1_used;
+	ip_rev = emif_for_calc->plat_data->ip_rev;
+	phy_type = emif_for_calc->plat_data->phy_type;
+
+	min_tck = emif_for_calc->plat_data->min_tck;
+	custom_configs = emif_for_calc->plat_data->custom_configs;
+
+	set_ddr_clk_period(freq);
+
+	regs->ref_ctrl_shdw = get_sdram_ref_ctrl_shdw(freq, addressing);
+	regs->sdram_tim1_shdw = get_sdram_tim_1_shdw(timings, min_tck,
+			addressing);
+	regs->sdram_tim2_shdw = get_sdram_tim_2_shdw(timings, min_tck,
+			addressing, type);
+	regs->sdram_tim3_shdw = get_sdram_tim_3_shdw(timings, min_tck,
+			addressing, type, ip_rev, EMIF_NORMAL_TIMINGS);
+
+	cl = get_cl(emif);
+
+	if (phy_type == EMIF_PHY_TYPE_ATTILAPHY && ip_rev == EMIF_4D) {
+		regs->phy_ctrl_1_shdw = get_ddr_phy_ctrl_1_attilaphy_4d(
+			timings, freq, cl);
+	} else if (phy_type == EMIF_PHY_TYPE_INTELLIPHY && ip_rev == EMIF_4D5) {
+		regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl);
+		regs->ext_phy_ctrl_2_shdw = get_ext_phy_ctrl_2_intelliphy_4d5();
+		regs->ext_phy_ctrl_3_shdw = get_ext_phy_ctrl_3_intelliphy_4d5();
+		regs->ext_phy_ctrl_4_shdw = get_ext_phy_ctrl_4_intelliphy_4d5();
+	} else {
+		return -1;
+	}
+
+	/* Only timeout values in pwr_mgmt_ctrl_shdw register */
+	regs->pwr_mgmt_ctrl_shdw =
+		get_pwr_mgmt_ctrl(freq, emif_for_calc, ip_rev) &
+		(CS_TIM_MASK | SR_TIM_MASK | PD_TIM_MASK);
+
+	if (ip_rev & EMIF_4D) {
+		regs->read_idle_ctrl_shdw_normal =
+			get_read_idle_ctrl_shdw(DDR_VOLTAGE_STABLE);
+
+		regs->read_idle_ctrl_shdw_volt_ramp =
+			get_read_idle_ctrl_shdw(DDR_VOLTAGE_RAMPING);
+	} else if (ip_rev & EMIF_4D5) {
+		regs->dll_calib_ctrl_shdw_normal =
+			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_STABLE);
+
+		regs->dll_calib_ctrl_shdw_volt_ramp =
+			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_RAMPING);
+	}
+
+	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
+		regs->ref_ctrl_shdw_derated = get_sdram_ref_ctrl_shdw(freq / 4,
+			addressing);
+
+		regs->sdram_tim1_shdw_derated =
+			get_sdram_tim_1_shdw_derated(timings, min_tck,
+				addressing);
+
+		regs->sdram_tim3_shdw_derated = get_sdram_tim_3_shdw(timings,
+			min_tck, addressing, type, ip_rev,
+			EMIF_DERATED_TIMINGS);
+	}
+
+	regs->freq = freq;
+
+	return 0;
+}
+
+/*
+ * get_regs() - gets the cached emif_regs structure for a given EMIF instance
+ * and frequency (freq):
+ *
+ * As an optimisation, every EMIF instance other than EMIF1 shares the
+ * register cache with EMIF1 if the devices connected to this instance
+ * are the same as those on EMIF1 (indicated by the duplicate flag)
+ *
+ * If we do not have an entry corresponding to the frequency given, we
+ * allocate a new entry and calculate the values
+ *
+ * Upon finding the right reg dump, save it in curr_regs. It can be
+ * directly used for thermal de-rating and voltage ramping changes.
+ */
+static struct emif_regs *get_regs(struct emif_data *emif, u32 freq)
+{
+	int i;
+	struct emif_regs **regs_cache;
+	struct emif_regs *regs = NULL;
+	struct device *dev;
+
+	dev = emif->dev;
+	if (emif->curr_regs && emif->curr_regs->freq == freq) {
+		dev_dbg(dev, "%s: using curr_regs - %u Hz", __func__, freq);
+		return emif->curr_regs;
+	}
+
+	if (emif->duplicate)
+		regs_cache = emif1->regs_cache;
+	else
+		regs_cache = emif->regs_cache;
+
+	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
+		if (regs_cache[i]->freq == freq) {
+			regs = regs_cache[i];
+			dev_dbg(dev,
+				"%s: reg dump found in reg cache for %u Hz\n",
+				__func__, freq);
+			break;
+		}
+	}
+
+	/*
+	 * If we don't have an entry for this frequency in the cache, create
+	 * one and calculate the values
+	 */
+	if (!regs) {
+		regs = devm_kzalloc(emif->dev, sizeof(*regs), GFP_ATOMIC);
+		if (!regs)
+			return NULL;
+
+		if (get_emif_reg_values(emif, freq, regs)) {
+			devm_kfree(emif->dev, regs);
+			return NULL;
+		}
+
+		/*
+		 * Now look for an unused entry in the cache and save the
+		 * newly created struct. If there are no free entries,
+		 * over-write the last entry
+		 */
+		for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++)
+			;
+
+		if (i >= EMIF_MAX_NUM_FREQUENCIES) {
+			dev_warn(dev, "%s: regs_cache full - reusing a slot!!\n",
+				__func__);
+			i = EMIF_MAX_NUM_FREQUENCIES - 1;
+			devm_kfree(emif->dev, regs_cache[i]);
+		}
+		regs_cache[i] = regs;
+	}
+
+	return regs;
+}
+
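/*
 * Usage sketch (illustrative, not part of the patch): the first
 * transition to, say, 200 MHz computes a full register set via
 * get_emif_reg_values() and parks it in regs_cache[]; any later
 * transition back to 200 MHz is served from the cache. This matters
 * because the DVFS notifiers run under emif_lock with interrupts
 * disabled, where GFP_ATOMIC is the only allocation option.
 */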
+static void do_volt_notify_handling(struct emif_data *emif, u32 volt_state)
+{
+	dev_dbg(emif->dev, "%s: voltage notification : %d", __func__,
+		volt_state);
+
+	if (!emif->curr_regs) {
+		dev_err(emif->dev,
+			"%s: volt-notify before registers are ready: %d\n",
+			__func__, volt_state);
+		return;
+	}
+
+	setup_volt_sensitive_regs(emif, emif->curr_regs, volt_state);
+}
+
+/*
+ * TODO: voltage notify handling should be hooked up to
+ * the regulator framework as soon as the necessary support
+ * is available in the mainline kernel. This function is unused
+ * right now.
+ */
+static void __attribute__((unused)) volt_notify_handling(u32 volt_state)
+{
+	struct emif_data *emif;
+
+	spin_lock_irqsave(&emif_lock, irq_state);
+
+	list_for_each_entry(emif, &device_list, node)
+		do_volt_notify_handling(emif, volt_state);
+	do_freq_update();
+
+	spin_unlock_irqrestore(&emif_lock, irq_state);
+}
+
+static void do_freq_pre_notify_handling(struct emif_data *emif, u32 new_freq)
+{
+	struct emif_regs *regs;
+
+	regs = get_regs(emif, new_freq);
+	if (!regs)
+		return;
+
+	emif->curr_regs = regs;
+
+	/*
+	 * Update the shadow registers:
+	 * Temperature and voltage-ramp sensitive settings are also configured
+	 * in terms of DDR cycles. So, we need to update them too when there
+	 * is a freq change
+	 */
+	dev_dbg(emif->dev, "%s: setting up shadow registers for %uHz",
+		__func__, new_freq);
+	setup_registers(emif, regs);
+	setup_temperature_sensitive_regs(emif, regs);
+	setup_volt_sensitive_regs(emif, regs, DDR_VOLTAGE_STABLE);
+
+	/*
+	 * Part of workaround for errata i728. See do_freq_update()
+	 * for more details
+	 */
+	if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
+		set_lpmode(emif, EMIF_LP_MODE_DISABLE);
+}
+
+/*
+ * TODO: frequency notify handling should be hooked up to
+ * the clock framework as soon as the necessary support is
+ * available in the mainline kernel. This function is unused
+ * right now.
+ */
+static void __attribute__((unused)) freq_pre_notify_handling(u32 new_freq)
+{
+	struct emif_data *emif;
+
+	/*
+	 * NOTE: we take the spin-lock here and release it
+	 * only in the post-notifier. This doesn't look good and
+	 * Sparse complains about it, but it seems
+	 * unavoidable. We need to lock a sequence of events
+	 * that is split between the EMIF and clock frameworks.
+	 *
+	 * 1. EMIF driver updates EMIF timings in shadow registers in the
+	 *    frequency pre-notify callback from the clock framework
+	 * 2. clock framework sets up the registers for the new frequency
+	 * 3. clock framework initiates a hw-sequence that updates
+	 *    the frequency and the EMIF timings synchronously.
+	 *
+	 * All these 3 steps should be performed as an atomic operation
+	 * vis-a-vis a similar sequence in the EMIF interrupt handler
+	 * for temperature events. Otherwise, there could be race
+	 * conditions that result in incorrect EMIF timings for
+	 * a given frequency
+	 */
+	spin_lock_irqsave(&emif_lock, irq_state);
+
+	list_for_each_entry(emif, &device_list, node)
+		do_freq_pre_notify_handling(emif, new_freq);
+}
+
+static void do_freq_post_notify_handling(struct emif_data *emif)
+{
+	/*
+	 * Part of workaround for errata i728. See do_freq_update()
+	 * for more details
+	 */
+	if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
+		set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
+}
+
+/*
+ * TODO: frequency notify handling should be hooked up to
+ * the clock framework as soon as the necessary support is
+ * available in the mainline kernel. This function is unused
+ * right now.
+ */
+static void __attribute__((unused)) freq_post_notify_handling(void)
+{
+	struct emif_data *emif;
+
+	list_for_each_entry(emif, &device_list, node)
+		do_freq_post_notify_handling(emif);
+
+	/*
+	 * Lock is taken in the pre-notify handler. See
+	 * freq_pre_notify_handling() for more details
+	 */
+	spin_unlock_irqrestore(&emif_lock, irq_state);
+}
+
 static struct platform_driver emif_driver = {
 	.driver = {
 		.name = "emif",
diff --git a/drivers/memory/emif.h b/drivers/memory/emif.h
index 692b2a864e7b..bfe08bae961a 100644
--- a/drivers/memory/emif.h
+++ b/drivers/memory/emif.h
@@ -19,6 +19,103 @@
  */
 #define EMIF_MAX_NUM_FREQUENCIES		6
 
+/* State of the core voltage */
+#define DDR_VOLTAGE_STABLE			0
+#define DDR_VOLTAGE_RAMPING			1
+
+/* Defines for timing de-rating */
+#define EMIF_NORMAL_TIMINGS			0
+#define EMIF_DERATED_TIMINGS			1
+
+/* Length of the forced read idle period in terms of cycles */
+#define EMIF_READ_IDLE_LEN_VAL			5
+
+/*
+ * Forced read idle interval to be used when voltage
+ * is changed as part of DVFS/DPS - 1us
+ */
+#define READ_IDLE_INTERVAL_DVFS			(1*1000000)
+
+/*
+ * Forced read idle interval to be used when voltage is stable -
+ * 50us, or the maximum value will do
+ */
+#define READ_IDLE_INTERVAL_NORMAL		(50*1000000)
+
+/* DLL calibration interval when voltage is NOT stable - 1us */
+#define DLL_CALIB_INTERVAL_DVFS			(1*1000000)
+
+#define DLL_CALIB_ACK_WAIT_VAL			5
+
+/* Interval between ZQCS commands - hw team recommended value */
+#define EMIF_ZQCS_INTERVAL_US			(50*1000)
+/* Enable ZQ Calibration on exiting self-refresh */
+#define ZQ_SFEXITEN_ENABLE			1
+/*
+ * ZQ Calibration simultaneously on both chip-selects:
+ * needs one calibration resistor per CS
+ */
+#define ZQ_DUALCALEN_DISABLE			0
+#define ZQ_DUALCALEN_ENABLE			1
+
+#define T_ZQCS_DEFAULT_NS			90
+#define T_ZQCL_DEFAULT_NS			360
+#define T_ZQINIT_DEFAULT_NS			1000
+
+/* DPD_EN */
+#define DPD_DISABLE				0
+#define DPD_ENABLE				1
+
+/*
+ * Default values for low-power mode entry, used if not provided by the
+ * user. OMAP4/5 has a hw bug (i735) due to which this value cannot be
+ * less than 512. Timeout values are in DDR clock 'cycles' and the
+ * frequency threshold is in Hz
+ */
+#define EMIF_LP_MODE_TIMEOUT_PERFORMANCE	2048
+#define EMIF_LP_MODE_TIMEOUT_POWER		512
+#define EMIF_LP_MODE_FREQ_THRESHOLD		400000000
+
+/* DDR_PHY_CTRL_1 values for EMIF4D - ATTILA PHY combination */
+#define EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY			0x049FF000
+#define EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY		0x41
+#define EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY		0x80
+#define EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY	0xFF
+
+/* DDR_PHY_CTRL_1 values for EMIF4D5 - INTELLIPHY combination */
+#define EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY		0x0E084200
+#define EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS	10000
+
+/* TEMP_ALERT_CONFIG - corresponding to temp gradient 5 C/s */
+#define TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS	360
+
+#define EMIF_T_CSTA				3
+#define EMIF_T_PDLL_UL				128
+
+/* External PHY control registers magic values */
+#define EMIF_EXT_PHY_CTRL_1_VAL			0x04020080
+#define EMIF_EXT_PHY_CTRL_5_VAL			0x04010040
+#define EMIF_EXT_PHY_CTRL_6_VAL			0x01004010
+#define EMIF_EXT_PHY_CTRL_7_VAL			0x00001004
+#define EMIF_EXT_PHY_CTRL_8_VAL			0x04010040
+#define EMIF_EXT_PHY_CTRL_9_VAL			0x01004010
+#define EMIF_EXT_PHY_CTRL_10_VAL		0x00001004
+#define EMIF_EXT_PHY_CTRL_11_VAL		0x00000000
+#define EMIF_EXT_PHY_CTRL_12_VAL		0x00000000
+#define EMIF_EXT_PHY_CTRL_13_VAL		0x00000000
+#define EMIF_EXT_PHY_CTRL_14_VAL		0x80080080
+#define EMIF_EXT_PHY_CTRL_15_VAL		0x00800800
+#define EMIF_EXT_PHY_CTRL_16_VAL		0x08102040
+#define EMIF_EXT_PHY_CTRL_17_VAL		0x00000001
+#define EMIF_EXT_PHY_CTRL_18_VAL		0x540A8150
+#define EMIF_EXT_PHY_CTRL_19_VAL		0xA81502A0
+#define EMIF_EXT_PHY_CTRL_20_VAL		0x002A0540
+#define EMIF_EXT_PHY_CTRL_21_VAL		0x00000000
+#define EMIF_EXT_PHY_CTRL_22_VAL		0x00000000
+#define EMIF_EXT_PHY_CTRL_23_VAL		0x00000000
+#define EMIF_EXT_PHY_CTRL_24_VAL		0x00000077
+
+#define EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS	1200
+
 /* Registers offset */
 #define EMIF_MODULE_ID_AND_REVISION		0x0000
 #define EMIF_STATUS				0x0004
@@ -458,4 +555,35 @@
 #define READ_LATENCY_SHDW_SHIFT			0
 #define READ_LATENCY_SHDW_MASK			(0x1f << 0)
 
-#endif
+#ifndef __ASSEMBLY__
+/*
+ * Structure containing shadow of important registers in EMIF
+ * The calculation function fills in this structure to be later used for
+ * initialisation and DVFS
+ */
+struct emif_regs {
+	u32 freq;
+	u32 ref_ctrl_shdw;
+	u32 ref_ctrl_shdw_derated;
+	u32 sdram_tim1_shdw;
+	u32 sdram_tim1_shdw_derated;
+	u32 sdram_tim2_shdw;
+	u32 sdram_tim3_shdw;
+	u32 sdram_tim3_shdw_derated;
+	u32 pwr_mgmt_ctrl_shdw;
+	union {
+		u32 read_idle_ctrl_shdw_normal;
+		u32 dll_calib_ctrl_shdw_normal;
+	};
+	union {
+		u32 read_idle_ctrl_shdw_volt_ramp;
+		u32 dll_calib_ctrl_shdw_volt_ramp;
+	};
+
+	u32 phy_ctrl_1_shdw;
+	u32 ext_phy_ctrl_2_shdw;
+	u32 ext_phy_ctrl_3_shdw;
+	u32 ext_phy_ctrl_4_shdw;
+};
+#endif /* __ASSEMBLY__ */
+#endif /* __EMIF_H */