author     Tomi Valkeinen <tomi.valkeinen@nokia.com>    2009-10-28 05:59:56 -0400
committer  Tomi Valkeinen <tomi.valkeinen@nokia.com>    2009-12-09 05:04:38 -0500
commit     3de7a1dc0c9d29b138713ecb85df4b6ca3af2ef3 (patch)
tree       c2002ddcd1030013abbab2b2712932ad1f2673cc /drivers/video
parent     23c0a7a6e810289998a713e943e42d64eb421516 (diff)
OMAP: DSS2: DSI driver
The DSI (Display Serial Interface) driver implements the MIPI DSI interface.

Signed-off-by: Tomi Valkeinen <tomi.valkeinen@nokia.com>
Diffstat (limited to 'drivers/video')
-rw-r--r--    drivers/video/omap2/dss/dsi.c    3710
1 file changed, 3710 insertions, 0 deletions
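For orientation, a display panel driver is expected to use the interface exported by this file roughly as sketched below (an illustrative sketch only, not part of the patch; the DCS command value and error handling are hypothetical):

	/* hypothetical panel code built on the API added by this patch */
	u8 cmd = 0x29;	/* DCS set_display_on */
	int r;

	dsi_bus_lock();				/* serialize access to the DSI bus */
	r = dsi_vc_dcs_write(0, &cmd, 1);	/* DCS write on virtual channel 0 */
	dsi_bus_unlock();
	if (r)
		pr_err("panel: DCS write failed: %d\n", r);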
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
new file mode 100644
index 000000000000..5936487b5def
--- /dev/null
+++ b/drivers/video/omap2/dss/dsi.c
@@ -0,0 +1,3710 @@
1/*
2 * linux/drivers/video/omap2/dss/dsi.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#define DSS_SUBSYS_NAME "DSI"
21
22#include <linux/kernel.h>
23#include <linux/io.h>
24#include <linux/clk.h>
25#include <linux/device.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <linux/mutex.h>
30#include <linux/seq_file.h>
31#include <linux/platform_device.h>
32#include <linux/regulator/consumer.h>
33#include <linux/kthread.h>
34#include <linux/wait.h>
35
36#include <plat/display.h>
37#include <plat/clock.h>
38
39#include "dss.h"
40
41/*#define VERBOSE_IRQ*/
42#define DSI_CATCH_MISSING_TE
43
44#define DSI_BASE 0x4804FC00
45
46struct dsi_reg { u16 idx; };
47
48#define DSI_REG(idx) ((const struct dsi_reg) { idx })
49
50#define DSI_SZ_REGS SZ_1K
51/* DSI Protocol Engine */
52
53#define DSI_REVISION DSI_REG(0x0000)
54#define DSI_SYSCONFIG DSI_REG(0x0010)
55#define DSI_SYSSTATUS DSI_REG(0x0014)
56#define DSI_IRQSTATUS DSI_REG(0x0018)
57#define DSI_IRQENABLE DSI_REG(0x001C)
58#define DSI_CTRL DSI_REG(0x0040)
59#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
60#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
61#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
62#define DSI_CLK_CTRL DSI_REG(0x0054)
63#define DSI_TIMING1 DSI_REG(0x0058)
64#define DSI_TIMING2 DSI_REG(0x005C)
65#define DSI_VM_TIMING1 DSI_REG(0x0060)
66#define DSI_VM_TIMING2 DSI_REG(0x0064)
67#define DSI_VM_TIMING3 DSI_REG(0x0068)
68#define DSI_CLK_TIMING DSI_REG(0x006C)
69#define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
70#define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
71#define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
72#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
73#define DSI_VM_TIMING4 DSI_REG(0x0080)
74#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
75#define DSI_VM_TIMING5 DSI_REG(0x0088)
76#define DSI_VM_TIMING6 DSI_REG(0x008C)
77#define DSI_VM_TIMING7 DSI_REG(0x0090)
78#define DSI_STOPCLK_TIMING DSI_REG(0x0094)
79#define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
80#define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
81#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
82#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20))
83#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20))
84#define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20))
85#define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20))
86
87/* DSIPHY_SCP */
88
89#define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
90#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
91#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
92#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
93
94/* DSI_PLL_CTRL_SCP */
95
96#define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
97#define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
98#define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
99#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
100#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
101
102#define REG_GET(idx, start, end) \
103 FLD_GET(dsi_read_reg(idx), start, end)
104
105#define REG_FLD_MOD(idx, val, start, end) \
106 dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end))
107
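/*
 * Usage sketch (register/field choices are illustrative): REG_GET(DSI_CLK_CTRL, 29, 28)
 * extracts the two-bit PLL_PWR_STATUS field, while REG_FLD_MOD(DSI_CTRL, 1, 0, 0)
 * performs a read-modify-write that sets only bit 0 (IF_EN) and leaves the rest
 * of DSI_CTRL untouched.
 */
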
108/* Global interrupts */
109#define DSI_IRQ_VC0 (1 << 0)
110#define DSI_IRQ_VC1 (1 << 1)
111#define DSI_IRQ_VC2 (1 << 2)
112#define DSI_IRQ_VC3 (1 << 3)
113#define DSI_IRQ_WAKEUP (1 << 4)
114#define DSI_IRQ_RESYNC (1 << 5)
115#define DSI_IRQ_PLL_LOCK (1 << 7)
116#define DSI_IRQ_PLL_UNLOCK (1 << 8)
117#define DSI_IRQ_PLL_RECALL (1 << 9)
118#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
119#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
120#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
121#define DSI_IRQ_TE_TRIGGER (1 << 16)
122#define DSI_IRQ_ACK_TRIGGER (1 << 17)
123#define DSI_IRQ_SYNC_LOST (1 << 18)
124#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
125#define DSI_IRQ_TA_TIMEOUT (1 << 20)
126#define DSI_IRQ_ERROR_MASK \
127 (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
128 DSI_IRQ_TA_TIMEOUT)
129#define DSI_IRQ_CHANNEL_MASK 0xf
130
131/* Virtual channel interrupts */
132#define DSI_VC_IRQ_CS (1 << 0)
133#define DSI_VC_IRQ_ECC_CORR (1 << 1)
134#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
135#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
136#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
137#define DSI_VC_IRQ_BTA (1 << 5)
138#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
139#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
140#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
141#define DSI_VC_IRQ_ERROR_MASK \
142 (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
143 DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
144 DSI_VC_IRQ_FIFO_TX_UDF)
145
146/* ComplexIO interrupts */
147#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
148#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
149#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
150#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
151#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
152#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
153#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
154#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
155#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
156#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
157#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
158#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
159#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
160#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
161#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
162#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
163#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
164#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
165#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
166#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
167
168#define DSI_DT_DCS_SHORT_WRITE_0 0x05
169#define DSI_DT_DCS_SHORT_WRITE_1 0x15
170#define DSI_DT_DCS_READ 0x06
171#define DSI_DT_SET_MAX_RET_PKG_SIZE 0x37
172#define DSI_DT_NULL_PACKET 0x09
173#define DSI_DT_DCS_LONG_WRITE 0x39
174
175#define DSI_DT_RX_ACK_WITH_ERR 0x02
176#define DSI_DT_RX_DCS_LONG_READ 0x1c
177#define DSI_DT_RX_SHORT_READ_1 0x21
178#define DSI_DT_RX_SHORT_READ_2 0x22
179
180#define FINT_MAX 2100000
181#define FINT_MIN 750000
182#define REGN_MAX (1 << 7)
183#define REGM_MAX ((1 << 11) - 1)
184#define REGM3_MAX (1 << 4)
185#define REGM4_MAX (1 << 4)
186#define LP_DIV_MAX ((1 << 13) - 1)
187
188enum fifo_size {
189 DSI_FIFO_SIZE_0 = 0,
190 DSI_FIFO_SIZE_32 = 1,
191 DSI_FIFO_SIZE_64 = 2,
192 DSI_FIFO_SIZE_96 = 3,
193 DSI_FIFO_SIZE_128 = 4,
194};
195
196enum dsi_vc_mode {
197 DSI_VC_MODE_L4 = 0,
198 DSI_VC_MODE_VP,
199};
200
201struct dsi_update_region {
202 bool dirty;
203 u16 x, y, w, h;
204 struct omap_dss_device *device;
205};
206
207static struct
208{
209 void __iomem *base;
210
211 struct dsi_clock_info current_cinfo;
212
213 struct regulator *vdds_dsi_reg;
214
215 struct {
216 enum dsi_vc_mode mode;
217 struct omap_dss_device *dssdev;
218 enum fifo_size fifo_size;
219 int dest_per; /* destination peripheral 0-3 */
220 } vc[4];
221
222 struct mutex lock;
223 struct mutex bus_lock;
224
225 unsigned pll_locked;
226
227 struct completion bta_completion;
228
229 struct task_struct *thread;
230 wait_queue_head_t waitqueue;
231
232 spinlock_t update_lock;
233 bool framedone_received;
234 struct dsi_update_region update_region;
235 struct dsi_update_region active_update_region;
236 struct completion update_completion;
237
238 enum omap_dss_update_mode user_update_mode;
239 enum omap_dss_update_mode update_mode;
240 bool te_enabled;
241 bool use_ext_te;
242
243#ifdef DSI_CATCH_MISSING_TE
244 struct timer_list te_timer;
245#endif
246
247 unsigned long cache_req_pck;
248 unsigned long cache_clk_freq;
249 struct dsi_clock_info cache_cinfo;
250
251 u32 errors;
252 spinlock_t errors_lock;
253#ifdef DEBUG
254 ktime_t perf_setup_time;
255 ktime_t perf_start_time;
256 ktime_t perf_start_time_auto;
257 int perf_measure_frames;
258#endif
259 int debug_read;
260 int debug_write;
261} dsi;
262
263#ifdef DEBUG
264static unsigned int dsi_perf;
265module_param_named(dsi_perf, dsi_perf, bool, 0644);
266#endif
267
268static inline void dsi_write_reg(const struct dsi_reg idx, u32 val)
269{
270 __raw_writel(val, dsi.base + idx.idx);
271}
272
273static inline u32 dsi_read_reg(const struct dsi_reg idx)
274{
275 return __raw_readl(dsi.base + idx.idx);
276}
277
278
279void dsi_save_context(void)
280{
281}
282
283void dsi_restore_context(void)
284{
285}
286
287void dsi_bus_lock(void)
288{
289 mutex_lock(&dsi.bus_lock);
290}
291EXPORT_SYMBOL(dsi_bus_lock);
292
293void dsi_bus_unlock(void)
294{
295 mutex_unlock(&dsi.bus_lock);
296}
297EXPORT_SYMBOL(dsi_bus_unlock);
298
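/*
 * Note: wait_for_bit_change() below busy-polls the given register field
 * without sleeping; it returns the requested value on success and the
 * opposite value if the field has not changed after ~100000 reads, so
 * callers compare the return value against 'value' to detect a timeout.
 */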
299static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
300 int value)
301{
302 int t = 100000;
303
304 while (REG_GET(idx, bitnum, bitnum) != value) {
305 if (--t == 0)
306 return !value;
307 }
308
309 return value;
310}
311
312#ifdef DEBUG
313static void dsi_perf_mark_setup(void)
314{
315 dsi.perf_setup_time = ktime_get();
316}
317
318static void dsi_perf_mark_start(void)
319{
320 dsi.perf_start_time = ktime_get();
321}
322
323static void dsi_perf_mark_start_auto(void)
324{
325 dsi.perf_measure_frames = 0;
326 dsi.perf_start_time_auto = ktime_get();
327}
328
329static void dsi_perf_show(const char *name)
330{
331 ktime_t t, setup_time, trans_time;
332 u32 total_bytes;
333 u32 setup_us, trans_us, total_us;
334
335 if (!dsi_perf)
336 return;
337
338 if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED)
339 return;
340
341 t = ktime_get();
342
343 setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time);
344 setup_us = (u32)ktime_to_us(setup_time);
345 if (setup_us == 0)
346 setup_us = 1;
347
348 trans_time = ktime_sub(t, dsi.perf_start_time);
349 trans_us = (u32)ktime_to_us(trans_time);
350 if (trans_us == 0)
351 trans_us = 1;
352
353 total_us = setup_us + trans_us;
354
355 total_bytes = dsi.active_update_region.w *
356 dsi.active_update_region.h *
357 dsi.active_update_region.device->ctrl.pixel_size / 8;
358
359 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
360 static u32 s_total_trans_us, s_total_setup_us;
361 static u32 s_min_trans_us = 0xffffffff, s_min_setup_us;
362 static u32 s_max_trans_us, s_max_setup_us;
363 const int numframes = 100;
364 ktime_t total_time_auto;
365 u32 total_time_auto_us;
366
367 dsi.perf_measure_frames++;
368
369 if (setup_us < s_min_setup_us)
370 s_min_setup_us = setup_us;
371
372 if (setup_us > s_max_setup_us)
373 s_max_setup_us = setup_us;
374
375 s_total_setup_us += setup_us;
376
377 if (trans_us < s_min_trans_us)
378 s_min_trans_us = trans_us;
379
380 if (trans_us > s_max_trans_us)
381 s_max_trans_us = trans_us;
382
383 s_total_trans_us += trans_us;
384
385 if (dsi.perf_measure_frames < numframes)
386 return;
387
388 total_time_auto = ktime_sub(t, dsi.perf_start_time_auto);
389 total_time_auto_us = (u32)ktime_to_us(total_time_auto);
390
391 printk(KERN_INFO "DSI(%s): %u fps, setup %u/%u/%u, "
392 "trans %u/%u/%u\n",
393 name,
394 1000 * 1000 * numframes / total_time_auto_us,
395 s_min_setup_us,
396 s_max_setup_us,
397 s_total_setup_us / numframes,
398 s_min_trans_us,
399 s_max_trans_us,
400 s_total_trans_us / numframes);
401
402 s_total_setup_us = 0;
403 s_min_setup_us = 0xffffffff;
404 s_max_setup_us = 0;
405 s_total_trans_us = 0;
406 s_min_trans_us = 0xffffffff;
407 s_max_trans_us = 0;
408 dsi_perf_mark_start_auto();
409 } else {
410 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
411 "%u bytes, %u kbytes/sec\n",
412 name,
413 setup_us,
414 trans_us,
415 total_us,
416 1000*1000 / total_us,
417 total_bytes,
418 total_bytes * 1000 / total_us);
419 }
420}
421#else
422#define dsi_perf_mark_setup()
423#define dsi_perf_mark_start()
424#define dsi_perf_mark_start_auto()
425#define dsi_perf_show(x)
426#endif
427
428static void print_irq_status(u32 status)
429{
430#ifndef VERBOSE_IRQ
431 if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
432 return;
433#endif
434 printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
435
436#define PIS(x) \
437 if (status & DSI_IRQ_##x) \
438 printk(#x " ");
439#ifdef VERBOSE_IRQ
440 PIS(VC0);
441 PIS(VC1);
442 PIS(VC2);
443 PIS(VC3);
444#endif
445 PIS(WAKEUP);
446 PIS(RESYNC);
447 PIS(PLL_LOCK);
448 PIS(PLL_UNLOCK);
449 PIS(PLL_RECALL);
450 PIS(COMPLEXIO_ERR);
451 PIS(HS_TX_TIMEOUT);
452 PIS(LP_RX_TIMEOUT);
453 PIS(TE_TRIGGER);
454 PIS(ACK_TRIGGER);
455 PIS(SYNC_LOST);
456 PIS(LDO_POWER_GOOD);
457 PIS(TA_TIMEOUT);
458#undef PIS
459
460 printk("\n");
461}
462
463static void print_irq_status_vc(int channel, u32 status)
464{
465#ifndef VERBOSE_IRQ
466 if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
467 return;
468#endif
469 printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
470
471#define PIS(x) \
472 if (status & DSI_VC_IRQ_##x) \
473 printk(#x " ");
474 PIS(CS);
475 PIS(ECC_CORR);
476#ifdef VERBOSE_IRQ
477 PIS(PACKET_SENT);
478#endif
479 PIS(FIFO_TX_OVF);
480 PIS(FIFO_RX_OVF);
481 PIS(BTA);
482 PIS(ECC_NO_CORR);
483 PIS(FIFO_TX_UDF);
484 PIS(PP_BUSY_CHANGE);
485#undef PIS
486 printk("\n");
487}
488
489static void print_irq_status_cio(u32 status)
490{
491 printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
492
493#define PIS(x) \
494 if (status & DSI_CIO_IRQ_##x) \
495 printk(#x " ");
496 PIS(ERRSYNCESC1);
497 PIS(ERRSYNCESC2);
498 PIS(ERRSYNCESC3);
499 PIS(ERRESC1);
500 PIS(ERRESC2);
501 PIS(ERRESC3);
502 PIS(ERRCONTROL1);
503 PIS(ERRCONTROL2);
504 PIS(ERRCONTROL3);
505 PIS(STATEULPS1);
506 PIS(STATEULPS2);
507 PIS(STATEULPS3);
508 PIS(ERRCONTENTIONLP0_1);
509 PIS(ERRCONTENTIONLP1_1);
510 PIS(ERRCONTENTIONLP0_2);
511 PIS(ERRCONTENTIONLP1_2);
512 PIS(ERRCONTENTIONLP0_3);
513 PIS(ERRCONTENTIONLP1_3);
514 PIS(ULPSACTIVENOT_ALL0);
515 PIS(ULPSACTIVENOT_ALL1);
516#undef PIS
517
518 printk("\n");
519}
520
521static int debug_irq;
522
523/* called from dss */
524void dsi_irq_handler(void)
525{
526 u32 irqstatus, vcstatus, ciostatus;
527 int i;
528
529 irqstatus = dsi_read_reg(DSI_IRQSTATUS);
530
531 if (irqstatus & DSI_IRQ_ERROR_MASK) {
532 DSSERR("DSI error, irqstatus %x\n", irqstatus);
533 print_irq_status(irqstatus);
534 spin_lock(&dsi.errors_lock);
535 dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK;
536 spin_unlock(&dsi.errors_lock);
537 } else if (debug_irq) {
538 print_irq_status(irqstatus);
539 }
540
541#ifdef DSI_CATCH_MISSING_TE
542 if (irqstatus & DSI_IRQ_TE_TRIGGER)
543 del_timer(&dsi.te_timer);
544#endif
545
546 for (i = 0; i < 4; ++i) {
547 if ((irqstatus & (1<<i)) == 0)
548 continue;
549
550 vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
551
552 if (vcstatus & DSI_VC_IRQ_BTA)
553 complete(&dsi.bta_completion);
554
555 if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
556 DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
557 i, vcstatus);
558 print_irq_status_vc(i, vcstatus);
559 } else if (debug_irq) {
560 print_irq_status_vc(i, vcstatus);
561 }
562
563 dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus);
564 /* flush posted write */
565 dsi_read_reg(DSI_VC_IRQSTATUS(i));
566 }
567
568 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
569 ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
570
571 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
572 /* flush posted write */
573 dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
574
575 DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
576 print_irq_status_cio(ciostatus);
577 }
578
579 dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
580 /* flush posted write */
581 dsi_read_reg(DSI_IRQSTATUS);
582}
583
584
585static void _dsi_initialize_irq(void)
586{
587 u32 l;
588 int i;
589
590 /* disable all interrupts */
591 dsi_write_reg(DSI_IRQENABLE, 0);
592 for (i = 0; i < 4; ++i)
593 dsi_write_reg(DSI_VC_IRQENABLE(i), 0);
594 dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, 0);
595
596 /* clear interrupt status */
597 l = dsi_read_reg(DSI_IRQSTATUS);
598 dsi_write_reg(DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
599
600 for (i = 0; i < 4; ++i) {
601 l = dsi_read_reg(DSI_VC_IRQSTATUS(i));
602 dsi_write_reg(DSI_VC_IRQSTATUS(i), l);
603 }
604
605 l = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
606 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, l);
607
608 /* enable error irqs */
609 l = DSI_IRQ_ERROR_MASK;
610#ifdef DSI_CATCH_MISSING_TE
611 l |= DSI_IRQ_TE_TRIGGER;
612#endif
613 dsi_write_reg(DSI_IRQENABLE, l);
614
615 l = DSI_VC_IRQ_ERROR_MASK;
616 for (i = 0; i < 4; ++i)
617 dsi_write_reg(DSI_VC_IRQENABLE(i), l);
618
619 /* XXX zonda responds incorrectly, causing control error:
620 Exit from LP-ESC mode to LP11 uses wrong transition states on the
621 data lines LP0 and LN0. */
622 dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE,
623 -1 & (~DSI_CIO_IRQ_ERRCONTROL2));
624}
625
626static u32 dsi_get_errors(void)
627{
628 unsigned long flags;
629 u32 e;
630 spin_lock_irqsave(&dsi.errors_lock, flags);
631 e = dsi.errors;
632 dsi.errors = 0;
633 spin_unlock_irqrestore(&dsi.errors_lock, flags);
634 return e;
635}
636
637static void dsi_vc_enable_bta_irq(int channel)
638{
639 u32 l;
640
641 dsi_write_reg(DSI_VC_IRQSTATUS(channel), DSI_VC_IRQ_BTA);
642
643 l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
644 l |= DSI_VC_IRQ_BTA;
645 dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
646}
647
648static void dsi_vc_disable_bta_irq(int channel)
649{
650 u32 l;
651
652 l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
653 l &= ~DSI_VC_IRQ_BTA;
654 dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
655}
656
657/* DSI func clock. this could also be DSI2_PLL_FCLK */
658static inline void enable_clocks(bool enable)
659{
660 if (enable)
661 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
662 else
663 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
664}
665
666/* source clock for DSI PLL. this could also be PCLKFREE */
667static inline void dsi_enable_pll_clock(bool enable)
668{
669 if (enable)
670 dss_clk_enable(DSS_CLK_FCK2);
671 else
672 dss_clk_disable(DSS_CLK_FCK2);
673
674 if (enable && dsi.pll_locked) {
675 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
676 DSSERR("cannot lock PLL when enabling clocks\n");
677 }
678}
679
680#ifdef DEBUG
681static void _dsi_print_reset_status(void)
682{
683 u32 l;
684
685 if (!dss_debug)
686 return;
687
688 /* A dummy read using the SCP interface to any DSIPHY register is
689 * required after DSIPHY reset to complete the reset of the DSI complex
690 * I/O. */
691 l = dsi_read_reg(DSI_DSIPHY_CFG5);
692
693 printk(KERN_DEBUG "DSI resets: ");
694
695 l = dsi_read_reg(DSI_PLL_STATUS);
696 printk("PLL (%d) ", FLD_GET(l, 0, 0));
697
698 l = dsi_read_reg(DSI_COMPLEXIO_CFG1);
699 printk("CIO (%d) ", FLD_GET(l, 29, 29));
700
701 l = dsi_read_reg(DSI_DSIPHY_CFG5);
702 printk("PHY (%x, %d, %d, %d)\n",
703 FLD_GET(l, 28, 26),
704 FLD_GET(l, 29, 29),
705 FLD_GET(l, 30, 30),
706 FLD_GET(l, 31, 31));
707}
708#else
709#define _dsi_print_reset_status()
710#endif
711
712static inline int dsi_if_enable(bool enable)
713{
714 DSSDBG("dsi_if_enable(%d)\n", enable);
715
716 enable = enable ? 1 : 0;
717 REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */
718
719 if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) {
720 DSSERR("Failed to set dsi_if_enable to %d\n", enable);
721 return -EIO;
722 }
723
724 return 0;
725}
726
727unsigned long dsi_get_dsi1_pll_rate(void)
728{
729 return dsi.current_cinfo.dsi1_pll_fclk;
730}
731
732static unsigned long dsi_get_dsi2_pll_rate(void)
733{
734 return dsi.current_cinfo.dsi2_pll_fclk;
735}
736
737static unsigned long dsi_get_txbyteclkhs(void)
738{
739 return dsi.current_cinfo.clkin4ddr / 16;
740}
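
/*
 * Worked example with a hypothetical PLL setting: if clkin4ddr = 864 MHz,
 * the per-lane bit rate is clkin4ddr / 2 = 432 Mbps and TxByteClkHS is
 * clkin4ddr / 16 = 54 MHz.
 */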
741
742static unsigned long dsi_fclk_rate(void)
743{
744 unsigned long r;
745
746 if (dss_get_dsi_clk_source() == 0) {
747 /* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
748 r = dss_clk_get_rate(DSS_CLK_FCK1);
749 } else {
750 /* DSI FCLK source is DSI2_PLL_FCLK */
751 r = dsi_get_dsi2_pll_rate();
752 }
753
754 return r;
755}
756
757static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
758{
759 unsigned long dsi_fclk;
760 unsigned lp_clk_div;
761 unsigned long lp_clk;
762
763 lp_clk_div = dssdev->phy.dsi.div.lp_clk_div;
764
765 if (lp_clk_div == 0 || lp_clk_div > LP_DIV_MAX)
766 return -EINVAL;
767
768 dsi_fclk = dsi_fclk_rate();
769
770 lp_clk = dsi_fclk / 2 / lp_clk_div;
771
772 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
773 dsi.current_cinfo.lp_clk = lp_clk;
774 dsi.current_cinfo.lp_clk_div = lp_clk_div;
775
776 REG_FLD_MOD(DSI_CLK_CTRL, lp_clk_div, 12, 0); /* LP_CLK_DIVISOR */
777
778 REG_FLD_MOD(DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0,
779 21, 21); /* LP_RX_SYNCHRO_ENABLE */
780
781 return 0;
782}
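
/*
 * Example with hypothetical numbers: dsi_fclk = 173 MHz and lp_clk_div = 10
 * give lp_clk = 173 MHz / 2 / 10 = 8.65 MHz; LP_RX_SYNCHRO_ENABLE is set
 * because dsi_fclk exceeds 30 MHz.
 */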
783
784
785enum dsi_pll_power_state {
786 DSI_PLL_POWER_OFF = 0x0,
787 DSI_PLL_POWER_ON_HSCLK = 0x1,
788 DSI_PLL_POWER_ON_ALL = 0x2,
789 DSI_PLL_POWER_ON_DIV = 0x3,
790};
791
792static int dsi_pll_power(enum dsi_pll_power_state state)
793{
794 int t = 0;
795
796 REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */
797
798 /* PLL_PWR_STATUS */
799 while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
800 udelay(1);
801 if (t++ > 1000) {
802 DSSERR("Failed to set DSI PLL power mode to %d\n",
803 state);
804 return -ENODEV;
805 }
806 }
807
808 return 0;
809}
810
811/* calculate clock rates using dividers in cinfo */
812static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
813{
814 if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
815 return -EINVAL;
816
817 if (cinfo->regm == 0 || cinfo->regm > REGM_MAX)
818 return -EINVAL;
819
820 if (cinfo->regm3 > REGM3_MAX)
821 return -EINVAL;
822
823 if (cinfo->regm4 > REGM4_MAX)
824 return -EINVAL;
825
826 if (cinfo->use_dss2_fck) {
827 cinfo->clkin = dss_clk_get_rate(DSS_CLK_FCK2);
828 /* XXX it is unclear if highfreq should be used
829 * with DSS2_FCK source also */
830 cinfo->highfreq = 0;
831 } else {
832 cinfo->clkin = dispc_pclk_rate();
833
834 if (cinfo->clkin < 32000000)
835 cinfo->highfreq = 0;
836 else
837 cinfo->highfreq = 1;
838 }
839
840 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
841
842 if (cinfo->fint > FINT_MAX || cinfo->fint < FINT_MIN)
843 return -EINVAL;
844
845 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
846
847 if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
848 return -EINVAL;
849
850 if (cinfo->regm3 > 0)
851 cinfo->dsi1_pll_fclk = cinfo->clkin4ddr / cinfo->regm3;
852 else
853 cinfo->dsi1_pll_fclk = 0;
854
855 if (cinfo->regm4 > 0)
856 cinfo->dsi2_pll_fclk = cinfo->clkin4ddr / cinfo->regm4;
857 else
858 cinfo->dsi2_pll_fclk = 0;
859
860 return 0;
861}
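
/*
 * Worked example (hypothetical dividers): clkin = 26 MHz, regn = 13,
 * highfreq = 0 gives fint = 2 MHz; regm = 216 gives
 * clkin4ddr = 2 * 216 * 2 MHz = 864 MHz; regm3 = 5 then yields
 * dsi1_pll_fclk = 864 MHz / 5 = 172.8 MHz, just under the 173 MHz
 * DISPC fclk limit noted in the regm3 loop below.
 */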
862
863int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
864 struct dsi_clock_info *dsi_cinfo,
865 struct dispc_clock_info *dispc_cinfo)
866{
867 struct dsi_clock_info cur, best;
868 struct dispc_clock_info best_dispc;
869 int min_fck_per_pck;
870 int match = 0;
871 unsigned long dss_clk_fck2;
872
873 dss_clk_fck2 = dss_clk_get_rate(DSS_CLK_FCK2);
874
875 if (req_pck == dsi.cache_req_pck &&
876 dsi.cache_cinfo.clkin == dss_clk_fck2) {
877 DSSDBG("DSI clock info found from cache\n");
878 *dsi_cinfo = dsi.cache_cinfo;
879 dispc_find_clk_divs(is_tft, req_pck, dsi_cinfo->dsi1_pll_fclk,
880 dispc_cinfo);
881 return 0;
882 }
883
884 min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
885
886 if (min_fck_per_pck &&
887 req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
888 DSSERR("Requested pixel clock not possible with the current "
889 "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
890 "the constraint off.\n");
891 min_fck_per_pck = 0;
892 }
893
894 DSSDBG("dsi_pll_calc\n");
895
896retry:
897 memset(&best, 0, sizeof(best));
898 memset(&best_dispc, 0, sizeof(best_dispc));
899
900 memset(&cur, 0, sizeof(cur));
901 cur.clkin = dss_clk_fck2;
902 cur.use_dss2_fck = 1;
903 cur.highfreq = 0;
904
905 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
906 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
907 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
908 for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) {
909 if (cur.highfreq == 0)
910 cur.fint = cur.clkin / cur.regn;
911 else
912 cur.fint = cur.clkin / (2 * cur.regn);
913
914 if (cur.fint > FINT_MAX || cur.fint < FINT_MIN)
915 continue;
916
917 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
918 for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) {
919 unsigned long a, b;
920
921 a = 2 * cur.regm * (cur.clkin/1000);
922 b = cur.regn * (cur.highfreq + 1);
923 cur.clkin4ddr = a / b * 1000;
924
925 if (cur.clkin4ddr > 1800 * 1000 * 1000)
926 break;
927
928 /* DSI1_PLL_FCLK(MHz) = DSIPHY(MHz) / regm3 < 173MHz */
929 for (cur.regm3 = 1; cur.regm3 < REGM3_MAX;
930 ++cur.regm3) {
931 struct dispc_clock_info cur_dispc;
932 cur.dsi1_pll_fclk = cur.clkin4ddr / cur.regm3;
933
934 /* this will narrow down the search a bit,
935 * but still give pixclocks below what was
936 * requested */
937 if (cur.dsi1_pll_fclk < req_pck)
938 break;
939
940 if (cur.dsi1_pll_fclk > DISPC_MAX_FCK)
941 continue;
942
943 if (min_fck_per_pck &&
944 cur.dsi1_pll_fclk <
945 req_pck * min_fck_per_pck)
946 continue;
947
948 match = 1;
949
950 dispc_find_clk_divs(is_tft, req_pck,
951 cur.dsi1_pll_fclk,
952 &cur_dispc);
953
954 if (abs(cur_dispc.pck - req_pck) <
955 abs(best_dispc.pck - req_pck)) {
956 best = cur;
957 best_dispc = cur_dispc;
958
959 if (cur_dispc.pck == req_pck)
960 goto found;
961 }
962 }
963 }
964 }
965found:
966 if (!match) {
967 if (min_fck_per_pck) {
968 DSSERR("Could not find suitable clock settings.\n"
969 "Turning FCK/PCK constraint off and"
970 "trying again.\n");
971 min_fck_per_pck = 0;
972 goto retry;
973 }
974
975 DSSERR("Could not find suitable clock settings.\n");
976
977 return -EINVAL;
978 }
979
980 /* DSI2_PLL_FCLK (regm4) is not used */
981 best.regm4 = 0;
982 best.dsi2_pll_fclk = 0;
983
984 if (dsi_cinfo)
985 *dsi_cinfo = best;
986 if (dispc_cinfo)
987 *dispc_cinfo = best_dispc;
988
989 dsi.cache_req_pck = req_pck;
990 dsi.cache_clk_freq = 0;
991 dsi.cache_cinfo = best;
992
993 return 0;
994}
995
996int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
997{
998 int r = 0;
999 u32 l;
1000 int f;
1001
1002 DSSDBGF();
1003
1004 dsi.current_cinfo.fint = cinfo->fint;
1005 dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1006 dsi.current_cinfo.dsi1_pll_fclk = cinfo->dsi1_pll_fclk;
1007 dsi.current_cinfo.dsi2_pll_fclk = cinfo->dsi2_pll_fclk;
1008
1009 dsi.current_cinfo.regn = cinfo->regn;
1010 dsi.current_cinfo.regm = cinfo->regm;
1011 dsi.current_cinfo.regm3 = cinfo->regm3;
1012 dsi.current_cinfo.regm4 = cinfo->regm4;
1013
1014 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1015
1016 DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
1017 cinfo->use_dss2_fck ? "dss2_fck" : "pclkfree",
1018 cinfo->clkin,
1019 cinfo->highfreq);
1020
1021 /* DSIPHY == CLKIN4DDR */
1022 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
1023 cinfo->regm,
1024 cinfo->regn,
1025 cinfo->clkin,
1026 cinfo->highfreq + 1,
1027 cinfo->clkin4ddr);
1028
1029 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
1030 cinfo->clkin4ddr / 1000 / 1000 / 2);
1031
1032 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
1033
1034 DSSDBG("regm3 = %d, dsi1_pll_fclk = %lu\n",
1035 cinfo->regm3, cinfo->dsi1_pll_fclk);
1036 DSSDBG("regm4 = %d, dsi2_pll_fclk = %lu\n",
1037 cinfo->regm4, cinfo->dsi2_pll_fclk);
1038
1039 REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */
1040
1041 l = dsi_read_reg(DSI_PLL_CONFIGURATION1);
1042 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1043 l = FLD_MOD(l, cinfo->regn - 1, 7, 1); /* DSI_PLL_REGN */
1044 l = FLD_MOD(l, cinfo->regm, 18, 8); /* DSI_PLL_REGM */
1045 l = FLD_MOD(l, cinfo->regm3 > 0 ? cinfo->regm3 - 1 : 0,
1046 22, 19); /* DSI_CLOCK_DIV */
1047 l = FLD_MOD(l, cinfo->regm4 > 0 ? cinfo->regm4 - 1 : 0,
1048 26, 23); /* DSIPROTO_CLOCK_DIV */
1049 dsi_write_reg(DSI_PLL_CONFIGURATION1, l);
1050
1051 BUG_ON(cinfo->fint < 750000 || cinfo->fint > 2100000);
1052 if (cinfo->fint < 1000000)
1053 f = 0x3;
1054 else if (cinfo->fint < 1250000)
1055 f = 0x4;
1056 else if (cinfo->fint < 1500000)
1057 f = 0x5;
1058 else if (cinfo->fint < 1750000)
1059 f = 0x6;
1060 else
1061 f = 0x7;
1062
1063 l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
1064 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1065 l = FLD_MOD(l, cinfo->use_dss2_fck ? 0 : 1,
1066 11, 11); /* DSI_PLL_CLKSEL */
1067 l = FLD_MOD(l, cinfo->highfreq,
1068 12, 12); /* DSI_PLL_HIGHFREQ */
1069 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1070 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1071 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1072 dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
1073
1074 REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1075
1076 if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) {
1077 DSSERR("dsi pll go bit not going down.\n");
1078 r = -EIO;
1079 goto err;
1080 }
1081
1082 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) {
1083 DSSERR("cannot lock PLL\n");
1084 r = -EIO;
1085 goto err;
1086 }
1087
1088 dsi.pll_locked = 1;
1089
1090 l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
1091 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1092 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1093 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
1094 l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
1095 l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
1096 l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
1097 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1098 l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
1099 l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
1100 l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
1101 l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
1102 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1103 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1104 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1105 dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
1106
1107 DSSDBG("PLL config done\n");
1108err:
1109 return r;
1110}
1111
1112int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
1113 bool enable_hsdiv)
1114{
1115 int r = 0;
1116 enum dsi_pll_power_state pwstate;
1117
1118 DSSDBG("PLL init\n");
1119
1120 enable_clocks(1);
1121 dsi_enable_pll_clock(1);
1122
1123 r = regulator_enable(dsi.vdds_dsi_reg);
1124 if (r)
1125 goto err0;
1126
1127 /* XXX PLL does not come out of reset without this... */
1128 dispc_pck_free_enable(1);
1129
1130 if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) {
1131 DSSERR("PLL not coming out of reset.\n");
1132 r = -ENODEV;
1133 goto err1;
1134 }
1135
1136 /* XXX ... but if left on, we get problems when planes do not
1137 * fill the whole display. No idea about this */
1138 dispc_pck_free_enable(0);
1139
1140 if (enable_hsclk && enable_hsdiv)
1141 pwstate = DSI_PLL_POWER_ON_ALL;
1142 else if (enable_hsclk)
1143 pwstate = DSI_PLL_POWER_ON_HSCLK;
1144 else if (enable_hsdiv)
1145 pwstate = DSI_PLL_POWER_ON_DIV;
1146 else
1147 pwstate = DSI_PLL_POWER_OFF;
1148
1149 r = dsi_pll_power(pwstate);
1150
1151 if (r)
1152 goto err1;
1153
1154 DSSDBG("PLL init done\n");
1155
1156 return 0;
1157err1:
1158 regulator_disable(dsi.vdds_dsi_reg);
1159err0:
1160 enable_clocks(0);
1161 dsi_enable_pll_clock(0);
1162 return r;
1163}
1164
1165void dsi_pll_uninit(void)
1166{
1167 enable_clocks(0);
1168 dsi_enable_pll_clock(0);
1169
1170 dsi.pll_locked = 0;
1171 dsi_pll_power(DSI_PLL_POWER_OFF);
1172 regulator_disable(dsi.vdds_dsi_reg);
1173 DSSDBG("PLL uninit done\n");
1174}
1175
1176void dsi_dump_clocks(struct seq_file *s)
1177{
1178 int clksel;
1179 struct dsi_clock_info *cinfo = &dsi.current_cinfo;
1180
1181 enable_clocks(1);
1182
1183 clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11);
1184
1185 seq_printf(s, "- DSI PLL -\n");
1186
1187 seq_printf(s, "dsi pll source = %s\n",
1188 clksel == 0 ?
1189 "dss2_alwon_fclk" : "pclkfree");
1190
1191 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1192
1193 seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
1194 cinfo->clkin4ddr, cinfo->regm);
1195
1196 seq_printf(s, "dsi1_pll_fck\t%-16luregm3 %u\t(%s)\n",
1197 cinfo->dsi1_pll_fclk,
1198 cinfo->regm3,
1199 dss_get_dispc_clk_source() == 0 ? "off" : "on");
1200
1201 seq_printf(s, "dsi2_pll_fck\t%-16luregm4 %u\t(%s)\n",
1202 cinfo->dsi2_pll_fclk,
1203 cinfo->regm4,
1204 dss_get_dsi_clk_source() == 0 ? "off" : "on");
1205
1206 seq_printf(s, "- DSI -\n");
1207
1208 seq_printf(s, "dsi fclk source = %s\n",
1209 dss_get_dsi_clk_source() == 0 ?
1210 "dss1_alwon_fclk" : "dsi2_pll_fclk");
1211
1212 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate());
1213
1214 seq_printf(s, "DDR_CLK\t\t%lu\n",
1215 cinfo->clkin4ddr / 4);
1216
1217 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs());
1218
1219 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1220
1221 seq_printf(s, "VP_CLK\t\t%lu\n"
1222 "VP_PCLK\t\t%lu\n",
1223 dispc_lclk_rate(),
1224 dispc_pclk_rate());
1225
1226 enable_clocks(0);
1227}
1228
1229void dsi_dump_regs(struct seq_file *s)
1230{
1231#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
1232
1233 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
1234
1235 DUMPREG(DSI_REVISION);
1236 DUMPREG(DSI_SYSCONFIG);
1237 DUMPREG(DSI_SYSSTATUS);
1238 DUMPREG(DSI_IRQSTATUS);
1239 DUMPREG(DSI_IRQENABLE);
1240 DUMPREG(DSI_CTRL);
1241 DUMPREG(DSI_COMPLEXIO_CFG1);
1242 DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
1243 DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
1244 DUMPREG(DSI_CLK_CTRL);
1245 DUMPREG(DSI_TIMING1);
1246 DUMPREG(DSI_TIMING2);
1247 DUMPREG(DSI_VM_TIMING1);
1248 DUMPREG(DSI_VM_TIMING2);
1249 DUMPREG(DSI_VM_TIMING3);
1250 DUMPREG(DSI_CLK_TIMING);
1251 DUMPREG(DSI_TX_FIFO_VC_SIZE);
1252 DUMPREG(DSI_RX_FIFO_VC_SIZE);
1253 DUMPREG(DSI_COMPLEXIO_CFG2);
1254 DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
1255 DUMPREG(DSI_VM_TIMING4);
1256 DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
1257 DUMPREG(DSI_VM_TIMING5);
1258 DUMPREG(DSI_VM_TIMING6);
1259 DUMPREG(DSI_VM_TIMING7);
1260 DUMPREG(DSI_STOPCLK_TIMING);
1261
1262 DUMPREG(DSI_VC_CTRL(0));
1263 DUMPREG(DSI_VC_TE(0));
1264 DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
1265 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
1266 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
1267 DUMPREG(DSI_VC_IRQSTATUS(0));
1268 DUMPREG(DSI_VC_IRQENABLE(0));
1269
1270 DUMPREG(DSI_VC_CTRL(1));
1271 DUMPREG(DSI_VC_TE(1));
1272 DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
1273 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
1274 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
1275 DUMPREG(DSI_VC_IRQSTATUS(1));
1276 DUMPREG(DSI_VC_IRQENABLE(1));
1277
1278 DUMPREG(DSI_VC_CTRL(2));
1279 DUMPREG(DSI_VC_TE(2));
1280 DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
1281 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
1282 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
1283 DUMPREG(DSI_VC_IRQSTATUS(2));
1284 DUMPREG(DSI_VC_IRQENABLE(2));
1285
1286 DUMPREG(DSI_VC_CTRL(3));
1287 DUMPREG(DSI_VC_TE(3));
1288 DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
1289 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
1290 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
1291 DUMPREG(DSI_VC_IRQSTATUS(3));
1292 DUMPREG(DSI_VC_IRQENABLE(3));
1293
1294 DUMPREG(DSI_DSIPHY_CFG0);
1295 DUMPREG(DSI_DSIPHY_CFG1);
1296 DUMPREG(DSI_DSIPHY_CFG2);
1297 DUMPREG(DSI_DSIPHY_CFG5);
1298
1299 DUMPREG(DSI_PLL_CONTROL);
1300 DUMPREG(DSI_PLL_STATUS);
1301 DUMPREG(DSI_PLL_GO);
1302 DUMPREG(DSI_PLL_CONFIGURATION1);
1303 DUMPREG(DSI_PLL_CONFIGURATION2);
1304
1305 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
1306#undef DUMPREG
1307}
1308
1309enum dsi_complexio_power_state {
1310 DSI_COMPLEXIO_POWER_OFF = 0x0,
1311 DSI_COMPLEXIO_POWER_ON = 0x1,
1312 DSI_COMPLEXIO_POWER_ULPS = 0x2,
1313};
1314
1315static int dsi_complexio_power(enum dsi_complexio_power_state state)
1316{
1317 int t = 0;
1318
1319 /* PWR_CMD */
1320 REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27);
1321
1322 /* PWR_STATUS */
1323 while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
1324 udelay(1);
1325 if (t++ > 1000) {
1326 DSSERR("failed to set complexio power state to "
1327 "%d\n", state);
1328 return -ENODEV;
1329 }
1330 }
1331
1332 return 0;
1333}
1334
1335static void dsi_complexio_config(struct omap_dss_device *dssdev)
1336{
1337 u32 r;
1338
1339 int clk_lane = dssdev->phy.dsi.clk_lane;
1340 int data1_lane = dssdev->phy.dsi.data1_lane;
1341 int data2_lane = dssdev->phy.dsi.data2_lane;
1342 int clk_pol = dssdev->phy.dsi.clk_pol;
1343 int data1_pol = dssdev->phy.dsi.data1_pol;
1344 int data2_pol = dssdev->phy.dsi.data2_pol;
1345
1346 r = dsi_read_reg(DSI_COMPLEXIO_CFG1);
1347 r = FLD_MOD(r, clk_lane, 2, 0);
1348 r = FLD_MOD(r, clk_pol, 3, 3);
1349 r = FLD_MOD(r, data1_lane, 6, 4);
1350 r = FLD_MOD(r, data1_pol, 7, 7);
1351 r = FLD_MOD(r, data2_lane, 10, 8);
1352 r = FLD_MOD(r, data2_pol, 11, 11);
1353 dsi_write_reg(DSI_COMPLEXIO_CFG1, r);
1354
1355 /* The configuration of the DSI complex I/O (number of data lanes,
1356 position, differential order) should not be changed while
 1357 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
1358 the hardware to take into account a new configuration of the complex
1359 I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
1360 follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
1361 then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
1362 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
1363 DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
1364 DSI complex I/O configuration is unknown. */
1365
1366 /*
1367 REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
1368 REG_FLD_MOD(DSI_CTRL, 0, 0, 0);
1369 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20);
1370 REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
1371 */
1372}
1373
1374static inline unsigned ns2ddr(unsigned ns)
1375{
1376 /* convert time in ns to ddr ticks, rounding up */
1377 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
1378 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
1379}
1380
1381static inline unsigned ddr2ns(unsigned ddr)
1382{
1383 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
1384 return ddr * 1000 * 1000 / (ddr_clk / 1000);
1385}
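
/*
 * Example with a hypothetical clkin4ddr of 864 MHz: ddr_clk = 216 MHz, so
 * ns2ddr(70) = (70 * 216 + 999) / 1000 = 16 ticks, and ddr2ns(16) is
 * roughly 74 ns.
 */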
1386
1387static void dsi_complexio_timings(void)
1388{
1389 u32 r;
1390 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
1391 u32 tlpx_half, tclk_trail, tclk_zero;
1392 u32 tclk_prepare;
1393
1394 /* calculate timings */
1395
1396 /* 1 * DDR_CLK = 2 * UI */
1397
1398 /* min 40ns + 4*UI max 85ns + 6*UI */
1399 ths_prepare = ns2ddr(70) + 2;
1400
1401 /* min 145ns + 10*UI */
1402 ths_prepare_ths_zero = ns2ddr(175) + 2;
1403
1404 /* min max(8*UI, 60ns+4*UI) */
1405 ths_trail = ns2ddr(60) + 5;
1406
1407 /* min 100ns */
1408 ths_exit = ns2ddr(145);
1409
 1410 /* tlpx min 50ns */
1411 tlpx_half = ns2ddr(25);
1412
1413 /* min 60ns */
1414 tclk_trail = ns2ddr(60) + 2;
1415
1416 /* min 38ns, max 95ns */
1417 tclk_prepare = ns2ddr(65);
1418
1419 /* min tclk-prepare + tclk-zero = 300ns */
1420 tclk_zero = ns2ddr(260);
1421
1422 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
1423 ths_prepare, ddr2ns(ths_prepare),
1424 ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero));
1425 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
1426 ths_trail, ddr2ns(ths_trail),
1427 ths_exit, ddr2ns(ths_exit));
1428
1429 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
1430 "tclk_zero %u (%uns)\n",
1431 tlpx_half, ddr2ns(tlpx_half),
1432 tclk_trail, ddr2ns(tclk_trail),
1433 tclk_zero, ddr2ns(tclk_zero));
1434 DSSDBG("tclk_prepare %u (%uns)\n",
1435 tclk_prepare, ddr2ns(tclk_prepare));
1436
1437 /* program timings */
1438
1439 r = dsi_read_reg(DSI_DSIPHY_CFG0);
1440 r = FLD_MOD(r, ths_prepare, 31, 24);
1441 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
1442 r = FLD_MOD(r, ths_trail, 15, 8);
1443 r = FLD_MOD(r, ths_exit, 7, 0);
1444 dsi_write_reg(DSI_DSIPHY_CFG0, r);
1445
1446 r = dsi_read_reg(DSI_DSIPHY_CFG1);
1447 r = FLD_MOD(r, tlpx_half, 22, 16);
1448 r = FLD_MOD(r, tclk_trail, 15, 8);
1449 r = FLD_MOD(r, tclk_zero, 7, 0);
1450 dsi_write_reg(DSI_DSIPHY_CFG1, r);
1451
1452 r = dsi_read_reg(DSI_DSIPHY_CFG2);
1453 r = FLD_MOD(r, tclk_prepare, 7, 0);
1454 dsi_write_reg(DSI_DSIPHY_CFG2, r);
1455}
1456
1457
1458static int dsi_complexio_init(struct omap_dss_device *dssdev)
1459{
1460 int r = 0;
1461
1462 DSSDBG("dsi_complexio_init\n");
1463
1464 /* CIO_CLK_ICG, enable L3 clk to CIO */
1465 REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14);
1466
1467 /* A dummy read using the SCP interface to any DSIPHY register is
1468 * required after DSIPHY reset to complete the reset of the DSI complex
1469 * I/O. */
1470 dsi_read_reg(DSI_DSIPHY_CFG5);
1471
1472 if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) {
1473 DSSERR("ComplexIO PHY not coming out of reset.\n");
1474 r = -ENODEV;
1475 goto err;
1476 }
1477
1478 dsi_complexio_config(dssdev);
1479
1480 r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON);
1481
1482 if (r)
1483 goto err;
1484
1485 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
1486 DSSERR("ComplexIO not coming out of reset.\n");
1487 r = -ENODEV;
1488 goto err;
1489 }
1490
1491 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) {
1492 DSSERR("ComplexIO LDO power down.\n");
1493 r = -ENODEV;
1494 goto err;
1495 }
1496
1497 dsi_complexio_timings();
1498
1499 /*
1500 The configuration of the DSI complex I/O (number of data lanes,
1501 position, differential order) should not be changed while
 1502 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE bit is set to 1. For the
1503 hardware to recognize a new configuration of the complex I/O (done
1504 in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to follow
1505 this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, next
1506 reset the DSS.DSI_CTRL[0] IF_EN to 0, then set DSS.DSI_CLK_CTRL[20]
1507 LP_CLK_ENABLE to 1, and finally, set again the DSS.DSI_CTRL[0] IF_EN
 1508 bit to 1. If the sequence is not followed, the DSI complex I/O
1509 configuration is undetermined.
1510 */
1511 dsi_if_enable(1);
1512 dsi_if_enable(0);
1513 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
1514 dsi_if_enable(1);
1515 dsi_if_enable(0);
1516
1517 DSSDBG("CIO init done\n");
1518err:
1519 return r;
1520}
1521
1522static void dsi_complexio_uninit(void)
1523{
1524 dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF);
1525}
1526
1527static int _dsi_wait_reset(void)
1528{
1529 int i = 0;
1530
1531 while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) {
1532 if (i++ > 5) {
1533 DSSERR("soft reset failed\n");
1534 return -ENODEV;
1535 }
1536 udelay(1);
1537 }
1538
1539 return 0;
1540}
1541
1542static int _dsi_reset(void)
1543{
1544 /* Soft reset */
1545 REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1);
1546 return _dsi_wait_reset();
1547}
1548
1549static void dsi_reset_tx_fifo(int channel)
1550{
1551 u32 mask;
1552 u32 l;
1553
1554 /* set fifosize of the channel to 0, then return the old size */
1555 l = dsi_read_reg(DSI_TX_FIFO_VC_SIZE);
1556
1557 mask = FLD_MASK((8 * channel) + 7, (8 * channel) + 4);
1558 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l & ~mask);
1559
1560 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l);
1561}
1562
1563static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
1564 enum fifo_size size3, enum fifo_size size4)
1565{
1566 u32 r = 0;
1567 int add = 0;
1568 int i;
1569
1570 dsi.vc[0].fifo_size = size1;
1571 dsi.vc[1].fifo_size = size2;
1572 dsi.vc[2].fifo_size = size3;
1573 dsi.vc[3].fifo_size = size4;
1574
1575 for (i = 0; i < 4; i++) {
1576 u8 v;
1577 int size = dsi.vc[i].fifo_size;
1578
1579 if (add + size > 4) {
1580 DSSERR("Illegal FIFO configuration\n");
1581 BUG();
1582 }
1583
1584 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
1585 r |= v << (8 * i);
1586 /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
1587 add += size;
1588 }
1589
1590 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r);
1591}
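
/*
 * Example: dsi_config_tx_fifo(DSI_FIFO_SIZE_128, DSI_FIFO_SIZE_0,
 * DSI_FIFO_SIZE_0, DSI_FIFO_SIZE_0) assigns the whole TX FIFO to VC0
 * (add = 0, size = 4 units); the units allocated to all four channels
 * must not exceed 4, or the BUG() above triggers.
 */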
1592
1593static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2,
1594 enum fifo_size size3, enum fifo_size size4)
1595{
1596 u32 r = 0;
1597 int add = 0;
1598 int i;
1599
1600 dsi.vc[0].fifo_size = size1;
1601 dsi.vc[1].fifo_size = size2;
1602 dsi.vc[2].fifo_size = size3;
1603 dsi.vc[3].fifo_size = size4;
1604
1605 for (i = 0; i < 4; i++) {
1606 u8 v;
1607 int size = dsi.vc[i].fifo_size;
1608
1609 if (add + size > 4) {
1610 DSSERR("Illegal FIFO configuration\n");
1611 BUG();
1612 }
1613
1614 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
1615 r |= v << (8 * i);
1616 /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
1617 add += size;
1618 }
1619
1620 dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r);
1621}
1622
1623static int dsi_force_tx_stop_mode_io(void)
1624{
1625 u32 r;
1626
1627 r = dsi_read_reg(DSI_TIMING1);
1628 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
1629 dsi_write_reg(DSI_TIMING1, r);
1630
1631 if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) {
1632 DSSERR("TX_STOP bit not going down\n");
1633 return -EIO;
1634 }
1635
1636 return 0;
1637}
1638
1639static void dsi_vc_print_status(int channel)
1640{
1641 u32 r;
1642
1643 r = dsi_read_reg(DSI_VC_CTRL(channel));
1644 DSSDBG("vc %d: TX_FIFO_NOT_EMPTY %d, BTA_EN %d, VC_BUSY %d, "
1645 "TX_FIFO_FULL %d, RX_FIFO_NOT_EMPTY %d, ",
1646 channel,
1647 FLD_GET(r, 5, 5),
1648 FLD_GET(r, 6, 6),
1649 FLD_GET(r, 15, 15),
1650 FLD_GET(r, 16, 16),
1651 FLD_GET(r, 20, 20));
1652
1653 r = dsi_read_reg(DSI_TX_FIFO_VC_EMPTINESS);
1654 DSSDBG("EMPTINESS %d\n", (r >> (8 * channel)) & 0xff);
1655}
1656
1657static int dsi_vc_enable(int channel, bool enable)
1658{
1659 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
1660 DSSDBG("dsi_vc_enable channel %d, enable %d\n",
1661 channel, enable);
1662
1663 enable = enable ? 1 : 0;
1664
1665 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0);
1666
1667 if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) {
1668 DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
1669 return -EIO;
1670 }
1671
1672 return 0;
1673}
1674
1675static void dsi_vc_initial_config(int channel)
1676{
1677 u32 r;
1678
1679 DSSDBGF("%d", channel);
1680
1681 r = dsi_read_reg(DSI_VC_CTRL(channel));
1682
1683 if (FLD_GET(r, 15, 15)) /* VC_BUSY */
1684 DSSERR("VC(%d) busy when trying to configure it!\n",
1685 channel);
1686
1687 r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
1688 r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
1689 r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
1690 r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
1691 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
1692 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
1693 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
1694
1695 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
1696 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
1697
1698 dsi_write_reg(DSI_VC_CTRL(channel), r);
1699
1700 dsi.vc[channel].mode = DSI_VC_MODE_L4;
1701}
1702
1703static void dsi_vc_config_l4(int channel)
1704{
1705 if (dsi.vc[channel].mode == DSI_VC_MODE_L4)
1706 return;
1707
1708 DSSDBGF("%d", channel);
1709
1710 dsi_vc_enable(channel, 0);
1711
1712 if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
1713 DSSERR("vc(%d) busy when trying to config for L4\n", channel);
1714
1715 REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
1716
1717 dsi_vc_enable(channel, 1);
1718
1719 dsi.vc[channel].mode = DSI_VC_MODE_L4;
1720}
1721
1722static void dsi_vc_config_vp(int channel)
1723{
1724 if (dsi.vc[channel].mode == DSI_VC_MODE_VP)
1725 return;
1726
1727 DSSDBGF("%d", channel);
1728
1729 dsi_vc_enable(channel, 0);
1730
1731 if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
1732 DSSERR("vc(%d) busy when trying to config for VP\n", channel);
1733
1734 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */
1735
1736 dsi_vc_enable(channel, 1);
1737
1738 dsi.vc[channel].mode = DSI_VC_MODE_VP;
1739}
1740
1741
1742static void dsi_vc_enable_hs(int channel, bool enable)
1743{
1744 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
1745
1746 dsi_vc_enable(channel, 0);
1747 dsi_if_enable(0);
1748
1749 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9);
1750
1751 dsi_vc_enable(channel, 1);
1752 dsi_if_enable(1);
1753
1754 dsi_force_tx_stop_mode_io();
1755}
1756
1757static void dsi_vc_flush_long_data(int channel)
1758{
1759 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
1760 u32 val;
1761 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
1762 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
1763 (val >> 0) & 0xff,
1764 (val >> 8) & 0xff,
1765 (val >> 16) & 0xff,
1766 (val >> 24) & 0xff);
1767 }
1768}
1769
1770static void dsi_show_rx_ack_with_err(u16 err)
1771{
1772 DSSERR("\tACK with ERROR (%#x):\n", err);
1773 if (err & (1 << 0))
1774 DSSERR("\t\tSoT Error\n");
1775 if (err & (1 << 1))
1776 DSSERR("\t\tSoT Sync Error\n");
1777 if (err & (1 << 2))
1778 DSSERR("\t\tEoT Sync Error\n");
1779 if (err & (1 << 3))
1780 DSSERR("\t\tEscape Mode Entry Command Error\n");
1781 if (err & (1 << 4))
1782 DSSERR("\t\tLP Transmit Sync Error\n");
1783 if (err & (1 << 5))
1784 DSSERR("\t\tHS Receive Timeout Error\n");
1785 if (err & (1 << 6))
1786 DSSERR("\t\tFalse Control Error\n");
1787 if (err & (1 << 7))
1788 DSSERR("\t\t(reserved7)\n");
1789 if (err & (1 << 8))
1790 DSSERR("\t\tECC Error, single-bit (corrected)\n");
1791 if (err & (1 << 9))
1792 DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
1793 if (err & (1 << 10))
1794 DSSERR("\t\tChecksum Error\n");
1795 if (err & (1 << 11))
1796 DSSERR("\t\tData type not recognized\n");
1797 if (err & (1 << 12))
1798 DSSERR("\t\tInvalid VC ID\n");
1799 if (err & (1 << 13))
1800 DSSERR("\t\tInvalid Transmission Length\n");
1801 if (err & (1 << 14))
1802 DSSERR("\t\t(reserved14)\n");
1803 if (err & (1 << 15))
1804 DSSERR("\t\tDSI Protocol Violation\n");
1805}
1806
1807static u16 dsi_vc_flush_receive_data(int channel)
1808{
1809 /* RX_FIFO_NOT_EMPTY */
1810 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
1811 u32 val;
1812 u8 dt;
1813 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
1814 DSSDBG("\trawval %#08x\n", val);
1815 dt = FLD_GET(val, 5, 0);
1816 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
1817 u16 err = FLD_GET(val, 23, 8);
1818 dsi_show_rx_ack_with_err(err);
1819 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
1820 DSSDBG("\tDCS short response, 1 byte: %#x\n",
1821 FLD_GET(val, 23, 8));
1822 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
1823 DSSDBG("\tDCS short response, 2 byte: %#x\n",
1824 FLD_GET(val, 23, 8));
1825 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
1826 DSSDBG("\tDCS long response, len %d\n",
1827 FLD_GET(val, 23, 8));
1828 dsi_vc_flush_long_data(channel);
1829 } else {
1830 DSSERR("\tunknown datatype 0x%02x\n", dt);
1831 }
1832 }
1833 return 0;
1834}
1835
1836static int dsi_vc_send_bta(int channel)
1837{
1838 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO &&
1839 (dsi.debug_write || dsi.debug_read))
1840 DSSDBG("dsi_vc_send_bta %d\n", channel);
1841
1842 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
1843
1844 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */
1845 DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
1846 dsi_vc_flush_receive_data(channel);
1847 }
1848
1849 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
1850
1851 return 0;
1852}
1853
1854int dsi_vc_send_bta_sync(int channel)
1855{
1856 int r = 0;
1857 u32 err;
1858
1859 INIT_COMPLETION(dsi.bta_completion);
1860
1861 dsi_vc_enable_bta_irq(channel);
1862
1863 r = dsi_vc_send_bta(channel);
1864 if (r)
1865 goto err;
1866
1867 if (wait_for_completion_timeout(&dsi.bta_completion,
1868 msecs_to_jiffies(500)) == 0) {
1869 DSSERR("Failed to receive BTA\n");
1870 r = -EIO;
1871 goto err;
1872 }
1873
1874 err = dsi_get_errors();
1875 if (err) {
1876 DSSERR("Error while sending BTA: %x\n", err);
1877 r = -EIO;
1878 goto err;
1879 }
1880err:
1881 dsi_vc_disable_bta_irq(channel);
1882
1883 return r;
1884}
1885EXPORT_SYMBOL(dsi_vc_send_bta_sync);
1886
1887static inline void dsi_vc_write_long_header(int channel, u8 data_type,
1888 u16 len, u8 ecc)
1889{
1890 u32 val;
1891 u8 data_id;
1892
1893 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
1894
1895 /*data_id = data_type | channel << 6; */
1896 data_id = data_type | dsi.vc[channel].dest_per << 6;
1897
1898 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
1899 FLD_VAL(ecc, 31, 24);
1900
1901 dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val);
1902}
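
/*
 * Example header (hypothetical): a 4-byte DCS long write (0x39) on a
 * channel whose dest_per is 0 packs data_id = 0x39, len = 4, ecc = 0
 * into the value 0x00000439.
 */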
1903
1904static inline void dsi_vc_write_long_payload(int channel,
1905 u8 b1, u8 b2, u8 b3, u8 b4)
1906{
1907 u32 val;
1908
1909 val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
1910
1911/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
1912 b1, b2, b3, b4, val); */
1913
1914 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
1915}
1916
1917static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
1918 u8 ecc)
1919{
1920 /*u32 val; */
1921 int i;
1922 u8 *p;
1923 int r = 0;
1924 u8 b1, b2, b3, b4;
1925
1926 if (dsi.debug_write)
1927 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
1928
1929 /* len + header */
1930 if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) {
1931 DSSERR("unable to send long packet: packet too long.\n");
1932 return -EINVAL;
1933 }
1934
1935 dsi_vc_config_l4(channel);
1936
1937 dsi_vc_write_long_header(channel, data_type, len, ecc);
1938
1939 /*dsi_vc_print_status(0); */
1940
1941 p = data;
1942 for (i = 0; i < len >> 2; i++) {
1943 if (dsi.debug_write)
1944 DSSDBG("\tsending full packet %d\n", i);
1945 /*dsi_vc_print_status(0); */
1946
1947 b1 = *p++;
1948 b2 = *p++;
1949 b3 = *p++;
1950 b4 = *p++;
1951
1952 dsi_vc_write_long_payload(channel, b1, b2, b3, b4);
1953 }
1954
1955 i = len % 4;
1956 if (i) {
1957 b1 = 0; b2 = 0; b3 = 0;
1958
1959 if (dsi.debug_write)
1960 DSSDBG("\tsending remainder bytes %d\n", i);
1961
1962 switch (i) {
1963 case 3:
1964 b1 = *p++;
1965 b2 = *p++;
1966 b3 = *p++;
1967 break;
1968 case 2:
1969 b1 = *p++;
1970 b2 = *p++;
1971 break;
1972 case 1:
1973 b1 = *p++;
1974 break;
1975 }
1976
1977 dsi_vc_write_long_payload(channel, b1, b2, b3, 0);
1978 }
1979
1980 return r;
1981}
1982
1983static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
1984{
1985 u32 r;
1986 u8 data_id;
1987
1988 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
1989
1990 if (dsi.debug_write)
1991 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
1992 channel,
1993 data_type, data & 0xff, (data >> 8) & 0xff);
1994
1995 dsi_vc_config_l4(channel);
1996
1997 if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) {
1998 DSSERR("ERROR FIFO FULL, aborting transfer\n");
1999 return -EINVAL;
2000 }
2001
2002 data_id = data_type | channel << 6;
2003
2004 r = (data_id << 0) | (data << 8) | (ecc << 24);
2005
2006 dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r);
2007
2008 return 0;
2009}
2010
2011int dsi_vc_send_null(int channel)
2012{
2013 u8 nullpkg[] = {0, 0, 0, 0};
 2014 return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
2015}
2016EXPORT_SYMBOL(dsi_vc_send_null);
2017
2018int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len)
2019{
2020 int r;
2021
2022 BUG_ON(len == 0);
2023
2024 if (len == 1) {
2025 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0,
2026 data[0], 0);
2027 } else if (len == 2) {
2028 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1,
2029 data[0] | (data[1] << 8), 0);
2030 } else {
2031 /* 0x39 = DCS Long Write */
2032 r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE,
2033 data, len, 0);
2034 }
2035
2036 return r;
2037}
2038EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
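
/*
 * For example (DCS command values are illustrative): a 1-byte buffer such
 * as { 0x28 } goes out as a DCS short write with no parameter (0x05), a
 * 2-byte buffer as a short write with one parameter (0x15), and anything
 * longer as a DCS long write (0x39).
 */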
2039
2040int dsi_vc_dcs_write(int channel, u8 *data, int len)
2041{
2042 int r;
2043
2044 r = dsi_vc_dcs_write_nosync(channel, data, len);
2045 if (r)
2046 return r;
2047
2048 r = dsi_vc_send_bta_sync(channel);
2049
2050 return r;
2051}
2052EXPORT_SYMBOL(dsi_vc_dcs_write);
2053
2054int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2055{
2056 u32 val;
2057 u8 dt;
2058 int r;
2059
2060 if (dsi.debug_read)
2061 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %u)\n", channel, dcs_cmd);
2062
2063 r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
2064 if (r)
2065 return r;
2066
2067 r = dsi_vc_send_bta_sync(channel);
2068 if (r)
2069 return r;
2070
2071 /* RX_FIFO_NOT_EMPTY */
2072 if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) {
2073 DSSERR("RX fifo empty when trying to read.\n");
2074 return -EIO;
2075 }
2076
2077 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
2078 if (dsi.debug_read)
2079 DSSDBG("\theader: %08x\n", val);
2080 dt = FLD_GET(val, 5, 0);
2081 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
2082 u16 err = FLD_GET(val, 23, 8);
2083 dsi_show_rx_ack_with_err(err);
2084 return -EIO;
2085
2086 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
2087 u8 data = FLD_GET(val, 15, 8);
2088 if (dsi.debug_read)
2089 DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
2090
2091 if (buflen < 1)
2092 return -EIO;
2093
2094 buf[0] = data;
2095
2096 return 1;
2097 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
2098 u16 data = FLD_GET(val, 23, 8);
2099 if (dsi.debug_read)
2100 DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
2101
2102 if (buflen < 2)
2103 return -EIO;
2104
2105 buf[0] = data & 0xff;
2106 buf[1] = (data >> 8) & 0xff;
2107
2108 return 2;
2109 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2110 int w;
2111 int len = FLD_GET(val, 23, 8);
2112 if (dsi.debug_read)
2113 DSSDBG("\tDCS long response, len %d\n", len);
2114
2115 if (len > buflen)
2116 return -EIO;
2117
2118 /* two byte checksum ends the packet, not included in len */
2119 for (w = 0; w < len + 2;) {
2120 int b;
2121 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
2122 if (dsi.debug_read)
2123 DSSDBG("\t\t%02x %02x %02x %02x\n",
2124 (val >> 0) & 0xff,
2125 (val >> 8) & 0xff,
2126 (val >> 16) & 0xff,
2127 (val >> 24) & 0xff);
2128
2129 for (b = 0; b < 4; ++b) {
2130 if (w < len)
2131 buf[w] = (val >> (b * 8)) & 0xff;
2132 /* we discard the 2 byte checksum */
2133 ++w;
2134 }
2135 }
2136
2137 return len;
2138
2139 } else {
2140 DSSERR("\tunknown datatype 0x%02x\n", dt);
2141 return -EIO;
2142 }
2143}
2144EXPORT_SYMBOL(dsi_vc_dcs_read);
2145
2146
2147int dsi_vc_set_max_rx_packet_size(int channel, u16 len)
2148{
2149 int r;
2150 r = dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
2151 len, 0);
2152
2153 if (r)
2154 return r;
2155
2156 r = dsi_vc_send_bta_sync(channel);
2157
2158 return r;
2159}
2160EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
2161
2162static void dsi_set_lp_rx_timeout(unsigned long ns)
2163{
2164 u32 r;
2165 unsigned x4, x16;
2166 unsigned long fck;
2167 unsigned long ticks;
2168
2169 /* ticks in DSI_FCK */
2170
2171 fck = dsi_fclk_rate();
2172 ticks = (fck / 1000 / 1000) * ns / 1000;
2173 x4 = 0;
2174 x16 = 0;
2175
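	/* if the tick count does not fit the 13-bit LP_RX_COUNTER field, retry
	 * with the x4 and/or x16 prescalers before clamping to the maximum */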
2176 if (ticks > 0x1fff) {
2177 ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
2178 x4 = 1;
2179 x16 = 0;
2180 }
2181
2182 if (ticks > 0x1fff) {
2183 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2184 x4 = 0;
2185 x16 = 1;
2186 }
2187
2188 if (ticks > 0x1fff) {
2189 ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
2190 x4 = 1;
2191 x16 = 1;
2192 }
2193
2194 if (ticks > 0x1fff) {
2195		DSSWARN("LP_RX_TO over limit, setting it to max\n");
2196 ticks = 0x1fff;
2197 x4 = 1;
2198 x16 = 1;
2199 }
2200
2201 r = dsi_read_reg(DSI_TIMING2);
2202 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
2203 r = FLD_MOD(r, x16, 14, 14); /* LP_RX_TO_X16 */
2204 r = FLD_MOD(r, x4, 13, 13); /* LP_RX_TO_X4 */
2205 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
2206 dsi_write_reg(DSI_TIMING2, r);
2207
2208 DSSDBG("LP_RX_TO %lu ns (%#lx ticks%s%s)\n",
2209 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2210 (fck / 1000 / 1000),
2211 ticks, x4 ? " x4" : "", x16 ? " x16" : "");
2212}
2213
2214static void dsi_set_ta_timeout(unsigned long ns)
2215{
2216 u32 r;
2217 unsigned x8, x16;
2218 unsigned long fck;
2219 unsigned long ticks;
2220
2221 /* ticks in DSI_FCK */
2222 fck = dsi_fclk_rate();
2223 ticks = (fck / 1000 / 1000) * ns / 1000;
2224 x8 = 0;
2225 x16 = 0;
2226
2227 if (ticks > 0x1fff) {
2228 ticks = (fck / 1000 / 1000) * ns / 1000 / 8;
2229 x8 = 1;
2230 x16 = 0;
2231 }
2232
2233 if (ticks > 0x1fff) {
2234 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2235 x8 = 0;
2236 x16 = 1;
2237 }
2238
2239 if (ticks > 0x1fff) {
2240 ticks = (fck / 1000 / 1000) * ns / 1000 / (8 * 16);
2241 x8 = 1;
2242 x16 = 1;
2243 }
2244
2245 if (ticks > 0x1fff) {
2246 DSSWARN("TA_TO over limit, setting it to max\n");
2247 ticks = 0x1fff;
2248 x8 = 1;
2249 x16 = 1;
2250 }
2251
2252 r = dsi_read_reg(DSI_TIMING1);
2253 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
2254 r = FLD_MOD(r, x16, 30, 30); /* TA_TO_X16 */
2255 r = FLD_MOD(r, x8, 29, 29); /* TA_TO_X8 */
2256 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
2257 dsi_write_reg(DSI_TIMING1, r);
2258
2259 DSSDBG("TA_TO %lu ns (%#lx ticks%s%s)\n",
2260 (ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) /
2261 (fck / 1000 / 1000),
2262 ticks, x8 ? " x8" : "", x16 ? " x16" : "");
2263}
2264
2265static void dsi_set_stop_state_counter(unsigned long ns)
2266{
2267 u32 r;
2268 unsigned x4, x16;
2269 unsigned long fck;
2270 unsigned long ticks;
2271
2272 /* ticks in DSI_FCK */
2273
2274 fck = dsi_fclk_rate();
2275 ticks = (fck / 1000 / 1000) * ns / 1000;
2276 x4 = 0;
2277 x16 = 0;
2278
2279 if (ticks > 0x1fff) {
2280 ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
2281 x4 = 1;
2282 x16 = 0;
2283 }
2284
2285 if (ticks > 0x1fff) {
2286 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2287 x4 = 0;
2288 x16 = 1;
2289 }
2290
2291 if (ticks > 0x1fff) {
2292 ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
2293 x4 = 1;
2294 x16 = 1;
2295 }
2296
2297 if (ticks > 0x1fff) {
2298 DSSWARN("STOP_STATE_COUNTER_IO over limit, "
2299 "setting it to max\n");
2300 ticks = 0x1fff;
2301 x4 = 1;
2302 x16 = 1;
2303 }
2304
2305 r = dsi_read_reg(DSI_TIMING1);
2306 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2307 r = FLD_MOD(r, x16, 14, 14); /* STOP_STATE_X16_IO */
2308 r = FLD_MOD(r, x4, 13, 13); /* STOP_STATE_X4_IO */
2309 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
2310 dsi_write_reg(DSI_TIMING1, r);
2311
2312 DSSDBG("STOP_STATE_COUNTER %lu ns (%#lx ticks%s%s)\n",
2313 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2314 (fck / 1000 / 1000),
2315 ticks, x4 ? " x4" : "", x16 ? " x16" : "");
2316}
2317
2318static void dsi_set_hs_tx_timeout(unsigned long ns)
2319{
2320 u32 r;
2321 unsigned x4, x16;
2322 unsigned long fck;
2323 unsigned long ticks;
2324
2325 /* ticks in TxByteClkHS */
2326
2327 fck = dsi_get_txbyteclkhs();
2328 ticks = (fck / 1000 / 1000) * ns / 1000;
2329 x4 = 0;
2330 x16 = 0;
2331
2332 if (ticks > 0x1fff) {
2333 ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
2334 x4 = 1;
2335 x16 = 0;
2336 }
2337
2338 if (ticks > 0x1fff) {
2339 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2340 x4 = 0;
2341 x16 = 1;
2342 }
2343
2344 if (ticks > 0x1fff) {
2345 ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
2346 x4 = 1;
2347 x16 = 1;
2348 }
2349
2350 if (ticks > 0x1fff) {
2351 DSSWARN("HS_TX_TO over limit, setting it to max\n");
2352 ticks = 0x1fff;
2353 x4 = 1;
2354 x16 = 1;
2355 }
2356
2357 r = dsi_read_reg(DSI_TIMING2);
2358 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
2359 r = FLD_MOD(r, x16, 30, 30); /* HS_TX_TO_X16 */
2360 r = FLD_MOD(r, x4, 29, 29); /* HS_TX_TO_X8 (4 really) */
2361 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
2362 dsi_write_reg(DSI_TIMING2, r);
2363
2364 DSSDBG("HS_TX_TO %lu ns (%#lx ticks%s%s)\n",
2365 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2366 (fck / 1000 / 1000),
2367 ticks, x4 ? " x4" : "", x16 ? " x16" : "");
2368}
2369static int dsi_proto_config(struct omap_dss_device *dssdev)
2370{
2371 u32 r;
2372 int buswidth = 0;
2373
2374 dsi_config_tx_fifo(DSI_FIFO_SIZE_128,
2375 DSI_FIFO_SIZE_0,
2376 DSI_FIFO_SIZE_0,
2377 DSI_FIFO_SIZE_0);
2378
2379 dsi_config_rx_fifo(DSI_FIFO_SIZE_128,
2380 DSI_FIFO_SIZE_0,
2381 DSI_FIFO_SIZE_0,
2382 DSI_FIFO_SIZE_0);
2383
2384 /* XXX what values for the timeouts? */
2385 dsi_set_stop_state_counter(1000);
2386 dsi_set_ta_timeout(6400000);
2387 dsi_set_lp_rx_timeout(48000);
2388 dsi_set_hs_tx_timeout(1000000);
2389
2390 switch (dssdev->ctrl.pixel_size) {
2391 case 16:
2392 buswidth = 0;
2393 break;
2394 case 18:
2395 buswidth = 1;
2396 break;
2397 case 24:
2398 buswidth = 2;
2399 break;
2400 default:
2401 BUG();
2402 }
2403
2404 r = dsi_read_reg(DSI_CTRL);
2405 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
2406 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
2407 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
2408 r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata*/
2409 r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
2410 r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
2411 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
2412 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
2413 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
2414 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
2415 r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */
2416
2417 dsi_write_reg(DSI_CTRL, r);
2418
2419 dsi_vc_initial_config(0);
2420
2421 /* set all vc targets to peripheral 0 */
2422 dsi.vc[0].dest_per = 0;
2423 dsi.vc[1].dest_per = 0;
2424 dsi.vc[2].dest_per = 0;
2425 dsi.vc[3].dest_per = 0;
2426
2427 return 0;
2428}
2429
2430static void dsi_proto_timings(struct omap_dss_device *dssdev)
2431{
2432 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
2433 unsigned tclk_pre, tclk_post;
2434 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
2435 unsigned ths_trail, ths_exit;
2436 unsigned ddr_clk_pre, ddr_clk_post;
2437 unsigned enter_hs_mode_lat, exit_hs_mode_lat;
2438 unsigned ths_eot;
2439 u32 r;
2440
2441 r = dsi_read_reg(DSI_DSIPHY_CFG0);
2442 ths_prepare = FLD_GET(r, 31, 24);
2443 ths_prepare_ths_zero = FLD_GET(r, 23, 16);
2444 ths_zero = ths_prepare_ths_zero - ths_prepare;
2445 ths_trail = FLD_GET(r, 15, 8);
2446 ths_exit = FLD_GET(r, 7, 0);
2447
2448 r = dsi_read_reg(DSI_DSIPHY_CFG1);
2449 tlpx = FLD_GET(r, 22, 16) * 2;
2450 tclk_trail = FLD_GET(r, 15, 8);
2451 tclk_zero = FLD_GET(r, 7, 0);
2452
2453 r = dsi_read_reg(DSI_DSIPHY_CFG2);
2454 tclk_prepare = FLD_GET(r, 7, 0);
2455
2456 /* min 8*UI */
2457 tclk_pre = 20;
2458 /* min 60ns + 52*UI */
2459 tclk_post = ns2ddr(60) + 26;
2460
2461 /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */
2462 if (dssdev->phy.dsi.data1_lane != 0 &&
2463 dssdev->phy.dsi.data2_lane != 0)
2464 ths_eot = 2;
2465 else
2466 ths_eot = 4;
2467
2468 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
2469 4);
2470 ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
2471
2472 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
2473 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
2474
2475 r = dsi_read_reg(DSI_CLK_TIMING);
2476 r = FLD_MOD(r, ddr_clk_pre, 15, 8);
2477 r = FLD_MOD(r, ddr_clk_post, 7, 0);
2478 dsi_write_reg(DSI_CLK_TIMING, r);
2479
2480 DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
2481 ddr_clk_pre,
2482 ddr_clk_post);
2483
2484 enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
2485 DIV_ROUND_UP(ths_prepare, 4) +
2486 DIV_ROUND_UP(ths_zero + 3, 4);
2487
2488 exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
2489
2490 r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
2491 FLD_VAL(exit_hs_mode_lat, 15, 0);
2492 dsi_write_reg(DSI_VM_TIMING7, r);
2493
2494 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
2495 enter_hs_mode_lat, exit_hs_mode_lat);
2496}
2497
2498
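/* helpers for dsi_update_screen_l4(): DSI_PUSH() packs bytes into a 32-bit
 * word, DSI_FLUSH() writes the word to the VC long packet payload register */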
2499#define DSI_DECL_VARS \
2500 int __dsi_cb = 0; u32 __dsi_cv = 0;
2501
2502#define DSI_FLUSH(ch) \
2503 if (__dsi_cb > 0) { \
2504 /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
2505 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
2506 __dsi_cb = __dsi_cv = 0; \
2507 }
2508
2509#define DSI_PUSH(ch, data) \
2510 do { \
2511 __dsi_cv |= (data) << (__dsi_cb * 8); \
2512 /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
2513 if (++__dsi_cb > 3) \
2514 DSI_FLUSH(ch); \
2515 } while (0)
2516
2517static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2518 int x, int y, int w, int h)
2519{
2520 /* Note: supports only 24bit colors in 32bit container */
2521 int first = 1;
2522 int fifo_stalls = 0;
2523 int max_dsi_packet_size;
2524 int max_data_per_packet;
2525 int max_pixels_per_packet;
2526 int pixels_left;
2527 int bytespp = dssdev->ctrl.pixel_size / 8;
2528 int scr_width;
2529 u32 __iomem *data;
2530 int start_offset;
2531 int horiz_inc;
2532 int current_x;
2533 struct omap_overlay *ovl;
2534
2535 debug_irq = 0;
2536
2537 DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n",
2538 x, y, w, h);
2539
2540 ovl = dssdev->manager->overlays[0];
2541
2542 if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U)
2543 return -EINVAL;
2544
2545 if (dssdev->ctrl.pixel_size != 24)
2546 return -EINVAL;
2547
2548 scr_width = ovl->info.screen_width;
2549 data = ovl->info.vaddr;
2550
2551 start_offset = scr_width * y + x;
2552 horiz_inc = scr_width - w;
2553 current_x = x;
2554
2555 /* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes
2556 * in fifo */
2557
2558 /* When using CPU, max long packet size is TX buffer size */
2559 max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4;
2560
2561	/* we seem to get better perf if we divide the tx fifo in half, and
2562	   fill one half while the other half is being sent:
2563	   max_dsi_packet_size /= 2; */
2564
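	/* 4 bytes for the long packet header and 1 byte for the DCS command */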
2565 max_data_per_packet = max_dsi_packet_size - 4 - 1;
2566
2567 max_pixels_per_packet = max_data_per_packet / bytespp;
2568
2569 DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet);
2570
2571 pixels_left = w * h;
2572
2573 DSSDBG("total pixels %d\n", pixels_left);
2574
2575 data += start_offset;
2576
2577 while (pixels_left > 0) {
2578 /* 0x2c = write_memory_start */
2579 /* 0x3c = write_memory_continue */
2580 u8 dcs_cmd = first ? 0x2c : 0x3c;
2581 int pixels;
2582 DSI_DECL_VARS;
2583 first = 0;
2584
2585#if 1
2586 /* using fifo not empty */
2587 /* TX_FIFO_NOT_EMPTY */
2588 while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
2589 udelay(1);
2590 fifo_stalls++;
2591 if (fifo_stalls > 0xfffff) {
2592 DSSERR("fifo stalls overflow, pixels left %d\n",
2593 pixels_left);
2594 dsi_if_enable(0);
2595 return -EIO;
2596 }
2597 }
2598#elif 1
2599 /* using fifo emptiness */
2600 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
2601 max_dsi_packet_size) {
2602 fifo_stalls++;
2603 if (fifo_stalls > 0xfffff) {
2604 DSSERR("fifo stalls overflow, pixels left %d\n",
2605 pixels_left);
2606 dsi_if_enable(0);
2607 return -EIO;
2608 }
2609 }
2610#else
2611 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) {
2612 fifo_stalls++;
2613 if (fifo_stalls > 0xfffff) {
2614 DSSERR("fifo stalls overflow, pixels left %d\n",
2615 pixels_left);
2616 dsi_if_enable(0);
2617 return -EIO;
2618 }
2619 }
2620#endif
2621 pixels = min(max_pixels_per_packet, pixels_left);
2622
2623 pixels_left -= pixels;
2624
2625 dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE,
2626 1 + pixels * bytespp, 0);
2627
2628 DSI_PUSH(0, dcs_cmd);
2629
2630 while (pixels-- > 0) {
2631 u32 pix = __raw_readl(data++);
2632
2633 DSI_PUSH(0, (pix >> 16) & 0xff);
2634 DSI_PUSH(0, (pix >> 8) & 0xff);
2635 DSI_PUSH(0, (pix >> 0) & 0xff);
2636
2637 current_x++;
2638 if (current_x == x+w) {
2639 current_x = x;
2640 data += horiz_inc;
2641 }
2642 }
2643
2644 DSI_FLUSH(0);
2645 }
2646
2647 return 0;
2648}
2649
2650static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
2651 u16 x, u16 y, u16 w, u16 h)
2652{
2653 unsigned bytespp;
2654 unsigned bytespl;
2655 unsigned bytespf;
2656 unsigned total_len;
2657 unsigned packet_payload;
2658 unsigned packet_len;
2659 u32 l;
2660 bool use_te_trigger;
2661 const unsigned channel = 0;
2662 /* line buffer is 1024 x 24bits */
2663	/* XXX: for some reason, using the full buffer size causes considerable
2664	 * TX slowdown with update sizes that fill the whole buffer */
2665 const unsigned line_buf_size = 1023 * 3;
2666
2667 use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
2668
2669 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
2670 DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
2671 x, y, w, h);
2672
2673 bytespp = dssdev->ctrl.pixel_size / 8;
2674 bytespl = w * bytespp;
2675 bytespf = bytespl * h;
2676
2677	/* NOTE: packet_payload has to be equal to N * bytespl, where N is
2678	 * the number of lines in a packet. See errata about VP_CLK_RATIO */
2679
2680 if (bytespf < line_buf_size)
2681 packet_payload = bytespf;
2682 else
2683 packet_payload = (line_buf_size) / bytespl * bytespl;
2684
2685 packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
2686 total_len = (bytespf / packet_payload) * packet_len;
2687
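	/* a final partial packet needs its own 1-byte DCS command as well */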
2688 if (bytespf % packet_payload)
2689 total_len += (bytespf % packet_payload) + 1;
2690
2691 if (0)
2692 dsi_vc_print_status(1);
2693
2694 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
2695 dsi_write_reg(DSI_VC_TE(channel), l);
2696
2697 dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0);
2698
2699 if (use_te_trigger)
2700 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
2701 else
2702 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
2703 dsi_write_reg(DSI_VC_TE(channel), l);
2704
2705 /* We put SIDLEMODE to no-idle for the duration of the transfer,
2706 * because DSS interrupts are not capable of waking up the CPU and the
2707 * framedone interrupt could be delayed for quite a long time. I think
2708 * the same goes for any DSS interrupts, but for some reason I have not
2709 * seen the problem anywhere else than here.
2710 */
2711 dispc_disable_sidle();
2712
2713 dss_start_update(dssdev);
2714
2715 if (use_te_trigger) {
2716 /* disable LP_RX_TO, so that we can receive TE. Time to wait
2717 * for TE is longer than the timer allows */
2718 REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
2719
2720 dsi_vc_send_bta(channel);
2721
2722#ifdef DSI_CATCH_MISSING_TE
2723 mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250));
2724#endif
2725 }
2726}
2727
2728#ifdef DSI_CATCH_MISSING_TE
2729static void dsi_te_timeout(unsigned long arg)
2730{
2731 DSSERR("TE not received for 250ms!\n");
2732}
2733#endif
2734
2735static void dsi_framedone_irq_callback(void *data, u32 mask)
2736{
2737 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
2738 * turns itself off. However, DSI still has the pixels in its buffers,
2739 * and is sending the data.
2740 */
2741
2742 /* SIDLEMODE back to smart-idle */
2743 dispc_enable_sidle();
2744
2745 dsi.framedone_received = true;
2746 wake_up(&dsi.waitqueue);
2747}
2748
2749static void dsi_set_update_region(struct omap_dss_device *dssdev,
2750 u16 x, u16 y, u16 w, u16 h)
2751{
2752 spin_lock(&dsi.update_lock);
2753 if (dsi.update_region.dirty) {
2754 dsi.update_region.x = min(x, dsi.update_region.x);
2755 dsi.update_region.y = min(y, dsi.update_region.y);
2756 dsi.update_region.w = max(w, dsi.update_region.w);
2757 dsi.update_region.h = max(h, dsi.update_region.h);
2758 } else {
2759 dsi.update_region.x = x;
2760 dsi.update_region.y = y;
2761 dsi.update_region.w = w;
2762 dsi.update_region.h = h;
2763 }
2764
2765 dsi.update_region.device = dssdev;
2766 dsi.update_region.dirty = true;
2767
2768 spin_unlock(&dsi.update_lock);
2769
2770}
2771
2772static int dsi_set_update_mode(struct omap_dss_device *dssdev,
2773 enum omap_dss_update_mode mode)
2774{
2775 int r = 0;
2776 int i;
2777
2778 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
2779
2780 if (dsi.update_mode != mode) {
2781 dsi.update_mode = mode;
2782
2783 /* Mark the overlays dirty, and do apply(), so that we get the
2784 * overlays configured properly after update mode change. */
2785 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
2786 struct omap_overlay *ovl;
2787 ovl = omap_dss_get_overlay(i);
2788 if (ovl->manager == dssdev->manager)
2789 ovl->info_dirty = true;
2790 }
2791
2792 r = dssdev->manager->apply(dssdev->manager);
2793
2794 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
2795 mode == OMAP_DSS_UPDATE_AUTO) {
2796 u16 w, h;
2797
2798 DSSDBG("starting auto update\n");
2799
2800 dssdev->get_resolution(dssdev, &w, &h);
2801
2802 dsi_set_update_region(dssdev, 0, 0, w, h);
2803
2804 dsi_perf_mark_start_auto();
2805
2806 wake_up(&dsi.waitqueue);
2807 }
2808 }
2809
2810 return r;
2811}
2812
2813static int dsi_set_te(struct omap_dss_device *dssdev, bool enable)
2814{
2815 int r;
2816 r = dssdev->driver->enable_te(dssdev, enable);
2817 /* XXX for some reason, DSI TE breaks if we don't wait here.
2818 * Panel bug? Needs more studying */
2819 msleep(100);
2820 return r;
2821}
2822
2823static void dsi_handle_framedone(void)
2824{
2825 int r;
2826 const int channel = 0;
2827 bool use_te_trigger;
2828
2829 use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
2830
2831 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
2832 DSSDBG("FRAMEDONE\n");
2833
2834 if (use_te_trigger) {
2835 /* enable LP_RX_TO again after the TE */
2836 REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
2837 }
2838
2839 /* Send BTA after the frame. We need this for the TE to work, as TE
2840 * trigger is only sent for BTAs without preceding packet. Thus we need
2841 * to BTA after the pixel packets so that next BTA will cause TE
2842 * trigger.
2843 *
2844 * This is not needed when TE is not in use, but we do it anyway to
2845 * make sure that the transfer has been completed. It would be more
2846 * optimal, but more complex, to wait only just before starting next
2847 * transfer. */
2848 r = dsi_vc_send_bta_sync(channel);
2849 if (r)
2850 DSSERR("BTA after framedone failed\n");
2851
2852 /* RX_FIFO_NOT_EMPTY */
2853 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
2854 DSSERR("Received error during frame transfer:\n");
2855 dsi_vc_flush_receive_data(0);
2856 }
2857
2858#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
2859 dispc_fake_vsync_irq();
2860#endif
2861}
2862
2863static int dsi_update_thread(void *data)
2864{
2865 unsigned long timeout;
2866 struct omap_dss_device *device;
2867 u16 x, y, w, h;
2868
2869 while (1) {
2870 bool sched;
2871
2872 wait_event_interruptible(dsi.waitqueue,
2873 dsi.update_mode == OMAP_DSS_UPDATE_AUTO ||
2874 (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
2875 dsi.update_region.dirty == true) ||
2876 kthread_should_stop());
2877
2878 if (kthread_should_stop())
2879 break;
2880
2881 dsi_bus_lock();
2882
2883 if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED ||
2884 kthread_should_stop()) {
2885 dsi_bus_unlock();
2886 break;
2887 }
2888
2889 dsi_perf_mark_setup();
2890
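		/* take a snapshot of the pending update region under the lock */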
2891 if (dsi.update_region.dirty) {
2892 spin_lock(&dsi.update_lock);
2893 dsi.active_update_region = dsi.update_region;
2894 dsi.update_region.dirty = false;
2895 spin_unlock(&dsi.update_lock);
2896 }
2897
2898 device = dsi.active_update_region.device;
2899 x = dsi.active_update_region.x;
2900 y = dsi.active_update_region.y;
2901 w = dsi.active_update_region.w;
2902 h = dsi.active_update_region.h;
2903
2904 if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
2905
2906 if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
2907 dss_setup_partial_planes(device,
2908 &x, &y, &w, &h);
2909
2910 dispc_set_lcd_size(w, h);
2911 }
2912
2913 if (dsi.active_update_region.dirty) {
2914 dsi.active_update_region.dirty = false;
2915			/* XXX TODO: we don't need to send the coords if they
2916			 * are the same as those already programmed into the
2917			 * panel. That should speed up manual update a bit */
2918 device->driver->setup_update(device, x, y, w, h);
2919 }
2920
2921 dsi_perf_mark_start();
2922
2923 if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
2924 dsi_vc_config_vp(0);
2925
2926 if (dsi.te_enabled && dsi.use_ext_te)
2927 device->driver->wait_for_te(device);
2928
2929 dsi.framedone_received = false;
2930
2931 dsi_update_screen_dispc(device, x, y, w, h);
2932
2933 /* wait for framedone */
2934 timeout = msecs_to_jiffies(1000);
2935 wait_event_timeout(dsi.waitqueue,
2936 dsi.framedone_received == true,
2937 timeout);
2938
2939 if (!dsi.framedone_received) {
2940 DSSERR("framedone timeout\n");
2941 DSSERR("failed update %d,%d %dx%d\n",
2942 x, y, w, h);
2943
2944 dispc_enable_sidle();
2945 dispc_enable_lcd_out(0);
2946
2947 dsi_reset_tx_fifo(0);
2948 } else {
2949 dsi_handle_framedone();
2950 dsi_perf_show("DISPC");
2951 }
2952 } else {
2953 dsi_update_screen_l4(device, x, y, w, h);
2954 dsi_perf_show("L4");
2955 }
2956
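		/* a negative mutex count means someone is waiting for the bus lock */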
2957 sched = atomic_read(&dsi.bus_lock.count) < 0;
2958
2959 complete_all(&dsi.update_completion);
2960
2961 dsi_bus_unlock();
2962
2963		/* XXX We need to give others a chance to get the bus lock.
2964		 * Is there a better way to do this? */
2965 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO && sched)
2966 schedule_timeout_interruptible(1);
2967 }
2968
2969 DSSDBG("update thread exiting\n");
2970
2971 return 0;
2972}
2973
2974
2975
2976/* Display funcs */
2977
2978static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
2979{
2980 int r;
2981
2982 r = omap_dispc_register_isr(dsi_framedone_irq_callback, NULL,
2983 DISPC_IRQ_FRAMEDONE);
2984 if (r) {
2985 DSSERR("can't get FRAMEDONE irq\n");
2986 return r;
2987 }
2988
2989 dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
2990
2991 dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI);
2992 dispc_enable_fifohandcheck(1);
2993
2994 dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
2995
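	/* minimal placeholder timings for the DISPC LCD manager */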
2996 {
2997 struct omap_video_timings timings = {
2998 .hsw = 1,
2999 .hfp = 1,
3000 .hbp = 1,
3001 .vsw = 1,
3002 .vfp = 0,
3003 .vbp = 0,
3004 };
3005
3006 dispc_set_lcd_timings(&timings);
3007 }
3008
3009 return 0;
3010}
3011
3012static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
3013{
3014 omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL,
3015 DISPC_IRQ_FRAMEDONE);
3016}
3017
3018static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
3019{
3020 struct dsi_clock_info cinfo;
3021 int r;
3022
3023 /* we always use DSS2_FCK as input clock */
3024 cinfo.use_dss2_fck = true;
3025 cinfo.regn = dssdev->phy.dsi.div.regn;
3026 cinfo.regm = dssdev->phy.dsi.div.regm;
3027 cinfo.regm3 = dssdev->phy.dsi.div.regm3;
3028 cinfo.regm4 = dssdev->phy.dsi.div.regm4;
3029 r = dsi_calc_clock_rates(&cinfo);
3030 if (r)
3031 return r;
3032
3033 r = dsi_pll_set_clock_div(&cinfo);
3034 if (r) {
3035 DSSERR("Failed to set dsi clocks\n");
3036 return r;
3037 }
3038
3039 return 0;
3040}
3041
3042static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
3043{
3044 struct dispc_clock_info dispc_cinfo;
3045 int r;
3046 unsigned long long fck;
3047
3048 fck = dsi_get_dsi1_pll_rate();
3049
3050 dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div;
3051 dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div;
3052
3053 r = dispc_calc_clock_rates(fck, &dispc_cinfo);
3054 if (r) {
3055 DSSERR("Failed to calc dispc clocks\n");
3056 return r;
3057 }
3058
3059 r = dispc_set_clock_div(&dispc_cinfo);
3060 if (r) {
3061 DSSERR("Failed to set dispc clocks\n");
3062 return r;
3063 }
3064
3065 return 0;
3066}
3067
3068static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
3069{
3070 int r;
3071
3072 _dsi_print_reset_status();
3073
3074 r = dsi_pll_init(dssdev, true, true);
3075 if (r)
3076 goto err0;
3077
3078 r = dsi_configure_dsi_clocks(dssdev);
3079 if (r)
3080 goto err1;
3081
3082 dss_select_clk_source(true, true);
3083
3084 DSSDBG("PLL OK\n");
3085
3086 r = dsi_configure_dispc_clocks(dssdev);
3087 if (r)
3088 goto err2;
3089
3090 r = dsi_complexio_init(dssdev);
3091 if (r)
3092 goto err2;
3093
3094 _dsi_print_reset_status();
3095
3096 dsi_proto_timings(dssdev);
3097 dsi_set_lp_clk_divisor(dssdev);
3098
3099 if (1)
3100 _dsi_print_reset_status();
3101
3102 r = dsi_proto_config(dssdev);
3103 if (r)
3104 goto err3;
3105
3106 /* enable interface */
3107 dsi_vc_enable(0, 1);
3108 dsi_if_enable(1);
3109 dsi_force_tx_stop_mode_io();
3110
3111 if (dssdev->driver->enable) {
3112 r = dssdev->driver->enable(dssdev);
3113 if (r)
3114 goto err4;
3115 }
3116
3117 /* enable high-speed after initial config */
3118 dsi_vc_enable_hs(0, 1);
3119
3120 return 0;
3121err4:
3122 dsi_if_enable(0);
3123err3:
3124 dsi_complexio_uninit();
3125err2:
3126 dss_select_clk_source(false, false);
3127err1:
3128 dsi_pll_uninit();
3129err0:
3130 return r;
3131}
3132
3133static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
3134{
3135 if (dssdev->driver->disable)
3136 dssdev->driver->disable(dssdev);
3137
3138 dss_select_clk_source(false, false);
3139 dsi_complexio_uninit();
3140 dsi_pll_uninit();
3141}
3142
3143static int dsi_core_init(void)
3144{
3145 /* Autoidle */
3146 REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0);
3147
3148 /* ENWAKEUP */
3149 REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2);
3150
3151 /* SIDLEMODE smart-idle */
3152 REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3);
3153
3154 _dsi_initialize_irq();
3155
3156 return 0;
3157}
3158
3159static int dsi_display_enable(struct omap_dss_device *dssdev)
3160{
3161 int r = 0;
3162
3163 DSSDBG("dsi_display_enable\n");
3164
3165 mutex_lock(&dsi.lock);
3166 dsi_bus_lock();
3167
3168 r = omap_dss_start_device(dssdev);
3169 if (r) {
3170 DSSERR("failed to start device\n");
3171 goto err0;
3172 }
3173
3174 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
3175 DSSERR("dssdev already enabled\n");
3176 r = -EINVAL;
3177 goto err1;
3178 }
3179
3180 enable_clocks(1);
3181 dsi_enable_pll_clock(1);
3182
3183 r = _dsi_reset();
3184 if (r)
3185 goto err2;
3186
3187 dsi_core_init();
3188
3189 r = dsi_display_init_dispc(dssdev);
3190 if (r)
3191 goto err2;
3192
3193 r = dsi_display_init_dsi(dssdev);
3194 if (r)
3195 goto err3;
3196
3197 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
3198
3199 dsi.use_ext_te = dssdev->phy.dsi.ext_te;
3200 r = dsi_set_te(dssdev, dsi.te_enabled);
3201 if (r)
3202 goto err4;
3203
3204 dsi_set_update_mode(dssdev, dsi.user_update_mode);
3205
3206 dsi_bus_unlock();
3207 mutex_unlock(&dsi.lock);
3208
3209 return 0;
3210
3211err4:
3212
3213 dsi_display_uninit_dsi(dssdev);
3214err3:
3215 dsi_display_uninit_dispc(dssdev);
3216err2:
3217 enable_clocks(0);
3218 dsi_enable_pll_clock(0);
3219err1:
3220 omap_dss_stop_device(dssdev);
3221err0:
3222 dsi_bus_unlock();
3223 mutex_unlock(&dsi.lock);
3224 DSSDBG("dsi_display_enable FAILED\n");
3225 return r;
3226}
3227
3228static void dsi_display_disable(struct omap_dss_device *dssdev)
3229{
3230 DSSDBG("dsi_display_disable\n");
3231
3232 mutex_lock(&dsi.lock);
3233 dsi_bus_lock();
3234
3235 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
3236 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
3237 goto end;
3238
3239 dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
3240 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
3241
3242 dsi_display_uninit_dispc(dssdev);
3243
3244 dsi_display_uninit_dsi(dssdev);
3245
3246 enable_clocks(0);
3247 dsi_enable_pll_clock(0);
3248
3249 omap_dss_stop_device(dssdev);
3250end:
3251 dsi_bus_unlock();
3252 mutex_unlock(&dsi.lock);
3253}
3254
3255static int dsi_display_suspend(struct omap_dss_device *dssdev)
3256{
3257 DSSDBG("dsi_display_suspend\n");
3258
3259 mutex_lock(&dsi.lock);
3260 dsi_bus_lock();
3261
3262 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
3263 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
3264 goto end;
3265
3266 dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
3267 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
3268
3269 dsi_display_uninit_dispc(dssdev);
3270
3271 dsi_display_uninit_dsi(dssdev);
3272
3273 enable_clocks(0);
3274 dsi_enable_pll_clock(0);
3275end:
3276 dsi_bus_unlock();
3277 mutex_unlock(&dsi.lock);
3278
3279 return 0;
3280}
3281
3282static int dsi_display_resume(struct omap_dss_device *dssdev)
3283{
3284 int r;
3285
3286 DSSDBG("dsi_display_resume\n");
3287
3288 mutex_lock(&dsi.lock);
3289 dsi_bus_lock();
3290
3291 if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
3292 DSSERR("dssdev not suspended\n");
3293 r = -EINVAL;
3294 goto err0;
3295 }
3296
3297 enable_clocks(1);
3298 dsi_enable_pll_clock(1);
3299
3300 r = _dsi_reset();
3301 if (r)
3302 goto err1;
3303
3304 dsi_core_init();
3305
3306 r = dsi_display_init_dispc(dssdev);
3307 if (r)
3308 goto err1;
3309
3310 r = dsi_display_init_dsi(dssdev);
3311 if (r)
3312 goto err2;
3313
3314 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
3315
3316 r = dsi_set_te(dssdev, dsi.te_enabled);
3317 if (r)
3318 goto err2;
3319
3320 dsi_set_update_mode(dssdev, dsi.user_update_mode);
3321
3322 dsi_bus_unlock();
3323 mutex_unlock(&dsi.lock);
3324
3325 return 0;
3326
3327err2:
3328 dsi_display_uninit_dispc(dssdev);
3329err1:
3330 enable_clocks(0);
3331 dsi_enable_pll_clock(0);
3332err0:
3333 dsi_bus_unlock();
3334 mutex_unlock(&dsi.lock);
3335 DSSDBG("dsi_display_resume FAILED\n");
3336 return r;
3337}
3338
3339static int dsi_display_update(struct omap_dss_device *dssdev,
3340 u16 x, u16 y, u16 w, u16 h)
3341{
3342 int r = 0;
3343 u16 dw, dh;
3344
3345 DSSDBG("dsi_display_update(%d,%d %dx%d)\n", x, y, w, h);
3346
3347 mutex_lock(&dsi.lock);
3348
3349 if (dsi.update_mode != OMAP_DSS_UPDATE_MANUAL)
3350 goto end;
3351
3352 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3353 goto end;
3354
3355 dssdev->get_resolution(dssdev, &dw, &dh);
3356
3357 if (x > dw || y > dh)
3358 goto end;
3359
3360 if (x + w > dw)
3361 w = dw - x;
3362
3363 if (y + h > dh)
3364 h = dh - y;
3365
3366 if (w == 0 || h == 0)
3367 goto end;
3368
3369 if (w == 1) {
3370 r = -EINVAL;
3371 goto end;
3372 }
3373
3374 dsi_set_update_region(dssdev, x, y, w, h);
3375
3376 wake_up(&dsi.waitqueue);
3377
3378end:
3379 mutex_unlock(&dsi.lock);
3380
3381 return r;
3382}
3383
3384static int dsi_display_sync(struct omap_dss_device *dssdev)
3385{
3386 bool wait;
3387
3388 DSSDBG("dsi_display_sync()\n");
3389
3390 mutex_lock(&dsi.lock);
3391 dsi_bus_lock();
3392
3393 if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
3394 dsi.update_region.dirty) {
3395 INIT_COMPLETION(dsi.update_completion);
3396 wait = true;
3397 } else {
3398 wait = false;
3399 }
3400
3401 dsi_bus_unlock();
3402 mutex_unlock(&dsi.lock);
3403
3404 if (wait)
3405 wait_for_completion_interruptible(&dsi.update_completion);
3406
3407 DSSDBG("dsi_display_sync() done\n");
3408 return 0;
3409}
3410
3411static int dsi_display_set_update_mode(struct omap_dss_device *dssdev,
3412 enum omap_dss_update_mode mode)
3413{
3414 int r = 0;
3415
3416 DSSDBGF("%d", mode);
3417
3418 mutex_lock(&dsi.lock);
3419 dsi_bus_lock();
3420
3421 dsi.user_update_mode = mode;
3422 r = dsi_set_update_mode(dssdev, mode);
3423
3424 dsi_bus_unlock();
3425 mutex_unlock(&dsi.lock);
3426
3427 return r;
3428}
3429
3430static enum omap_dss_update_mode dsi_display_get_update_mode(
3431 struct omap_dss_device *dssdev)
3432{
3433 return dsi.update_mode;
3434}
3435
3436
3437static int dsi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
3438{
3439 int r = 0;
3440
3441 DSSDBGF("%d", enable);
3442
3443 if (!dssdev->driver->enable_te)
3444 return -ENOENT;
3445
3446 dsi_bus_lock();
3447
3448 dsi.te_enabled = enable;
3449
3450 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3451 goto end;
3452
3453 r = dsi_set_te(dssdev, enable);
3454end:
3455 dsi_bus_unlock();
3456
3457 return r;
3458}
3459
3460static int dsi_display_get_te(struct omap_dss_device *dssdev)
3461{
3462 return dsi.te_enabled;
3463}
3464
3465static int dsi_display_set_rotate(struct omap_dss_device *dssdev, u8 rotate)
3466{
3467
3468 DSSDBGF("%d", rotate);
3469
3470 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
3471 return -EINVAL;
3472
3473 dsi_bus_lock();
3474 dssdev->driver->set_rotate(dssdev, rotate);
3475 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
3476 u16 w, h;
3477 /* the display dimensions may have changed, so set a new
3478 * update region */
3479 dssdev->get_resolution(dssdev, &w, &h);
3480 dsi_set_update_region(dssdev, 0, 0, w, h);
3481 }
3482 dsi_bus_unlock();
3483
3484 return 0;
3485}
3486
3487static u8 dsi_display_get_rotate(struct omap_dss_device *dssdev)
3488{
3489 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
3490 return 0;
3491
3492 return dssdev->driver->get_rotate(dssdev);
3493}
3494
3495static int dsi_display_set_mirror(struct omap_dss_device *dssdev, bool mirror)
3496{
3497 DSSDBGF("%d", mirror);
3498
3499 if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
3500 return -EINVAL;
3501
3502 dsi_bus_lock();
3503 dssdev->driver->set_mirror(dssdev, mirror);
3504 dsi_bus_unlock();
3505
3506 return 0;
3507}
3508
3509static bool dsi_display_get_mirror(struct omap_dss_device *dssdev)
3510{
3511 if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
3512 return 0;
3513
3514 return dssdev->driver->get_mirror(dssdev);
3515}
3516
3517static int dsi_display_run_test(struct omap_dss_device *dssdev, int test_num)
3518{
3519	int r = 0;
3520
3521 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3522 return -EIO;
3523
3524 DSSDBGF("%d", test_num);
3525
3526 dsi_bus_lock();
3527
3528 /* run test first in low speed mode */
3529 dsi_vc_enable_hs(0, 0);
3530
3531 if (dssdev->driver->run_test) {
3532 r = dssdev->driver->run_test(dssdev, test_num);
3533 if (r)
3534 goto end;
3535 }
3536
3537 /* then in high speed */
3538 dsi_vc_enable_hs(0, 1);
3539
3540 if (dssdev->driver->run_test) {
3541 r = dssdev->driver->run_test(dssdev, test_num);
3542 if (r)
3543 goto end;
3544 }
3545
3546end:
3547 dsi_vc_enable_hs(0, 1);
3548
3549 dsi_bus_unlock();
3550
3551 return r;
3552}
3553
3554static int dsi_display_memory_read(struct omap_dss_device *dssdev,
3555 void *buf, size_t size,
3556 u16 x, u16 y, u16 w, u16 h)
3557{
3558 int r;
3559
3560 DSSDBGF("");
3561
3562 if (!dssdev->driver->memory_read)
3563 return -EINVAL;
3564
3565 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3566 return -EIO;
3567
3568 dsi_bus_lock();
3569
3570 r = dssdev->driver->memory_read(dssdev, buf, size,
3571 x, y, w, h);
3572
3573 /* Memory read usually changes the update area. This will
3574 * force the next update to re-set the update area */
3575 dsi.active_update_region.dirty = true;
3576
3577 dsi_bus_unlock();
3578
3579 return r;
3580}
3581
3582void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
3583 u32 fifo_size, enum omap_burst_size *burst_size,
3584 u32 *fifo_low, u32 *fifo_high)
3585{
3586 unsigned burst_size_bytes;
3587
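	/* 16 x 32-bit bursts = 64 bytes per burst */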
3588 *burst_size = OMAP_DSS_BURST_16x32;
3589 burst_size_bytes = 16 * 32 / 8;
3590
3591 *fifo_high = fifo_size - burst_size_bytes;
3592 *fifo_low = fifo_size - burst_size_bytes * 8;
3593}
3594
3595int dsi_init_display(struct omap_dss_device *dssdev)
3596{
3597 DSSDBG("DSI init\n");
3598
3599 dssdev->enable = dsi_display_enable;
3600 dssdev->disable = dsi_display_disable;
3601 dssdev->suspend = dsi_display_suspend;
3602 dssdev->resume = dsi_display_resume;
3603 dssdev->update = dsi_display_update;
3604 dssdev->sync = dsi_display_sync;
3605 dssdev->set_update_mode = dsi_display_set_update_mode;
3606 dssdev->get_update_mode = dsi_display_get_update_mode;
3607 dssdev->enable_te = dsi_display_enable_te;
3608 dssdev->get_te = dsi_display_get_te;
3609
3610 dssdev->get_rotate = dsi_display_get_rotate;
3611 dssdev->set_rotate = dsi_display_set_rotate;
3612
3613 dssdev->get_mirror = dsi_display_get_mirror;
3614 dssdev->set_mirror = dsi_display_set_mirror;
3615
3616 dssdev->run_test = dsi_display_run_test;
3617 dssdev->memory_read = dsi_display_memory_read;
3618
3619 /* XXX these should be figured out dynamically */
3620 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
3621 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
3622
3623 dsi.vc[0].dssdev = dssdev;
3624 dsi.vc[1].dssdev = dssdev;
3625
3626 return 0;
3627}
3628
3629int dsi_init(struct platform_device *pdev)
3630{
3631 u32 rev;
3632 int r;
3633 struct sched_param param = {
3634 .sched_priority = MAX_USER_RT_PRIO-1
3635 };
3636
3637 spin_lock_init(&dsi.errors_lock);
3638 dsi.errors = 0;
3639
3640 init_completion(&dsi.bta_completion);
3641 init_completion(&dsi.update_completion);
3642
3643 dsi.thread = kthread_create(dsi_update_thread, NULL, "dsi");
3644 if (IS_ERR(dsi.thread)) {
3645 DSSERR("cannot create kthread\n");
3646 r = PTR_ERR(dsi.thread);
3647 goto err0;
3648 }
3649 sched_setscheduler(dsi.thread, SCHED_FIFO, &param);
3650
3651 init_waitqueue_head(&dsi.waitqueue);
3652 spin_lock_init(&dsi.update_lock);
3653
3654 mutex_init(&dsi.lock);
3655 mutex_init(&dsi.bus_lock);
3656
3657#ifdef DSI_CATCH_MISSING_TE
3658 init_timer(&dsi.te_timer);
3659 dsi.te_timer.function = dsi_te_timeout;
3660 dsi.te_timer.data = 0;
3661#endif
3662
3663 dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
3664 dsi.user_update_mode = OMAP_DSS_UPDATE_DISABLED;
3665
3666 dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
3667 if (!dsi.base) {
3668 DSSERR("can't ioremap DSI\n");
3669 r = -ENOMEM;
3670 goto err1;
3671 }
3672
3673 dsi.vdds_dsi_reg = regulator_get(&pdev->dev, "vdds_dsi");
3674 if (IS_ERR(dsi.vdds_dsi_reg)) {
3676 DSSERR("can't get VDDS_DSI regulator\n");
3677 r = PTR_ERR(dsi.vdds_dsi_reg);
3678 goto err2;
3679 }
3680
3681 enable_clocks(1);
3682
3683 rev = dsi_read_reg(DSI_REVISION);
3684 printk(KERN_INFO "OMAP DSI rev %d.%d\n",
3685 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
3686
3687 enable_clocks(0);
3688
3689 wake_up_process(dsi.thread);
3690
3691 return 0;
3692err2:
3693 iounmap(dsi.base);
3694err1:
3695 kthread_stop(dsi.thread);
3696err0:
3697 return r;
3698}
3699
3700void dsi_exit(void)
3701{
3702 kthread_stop(dsi.thread);
3703
3704 regulator_put(dsi.vdds_dsi_reg);
3705
3706 iounmap(dsi.base);
3707
3708 DSSDBG("omap_dsi_exit\n");
3709}
3710