Diffstat (limited to 'drivers/video/fbdev/omap2/dss/dsi.c')
-rw-r--r--  drivers/video/fbdev/omap2/dss/dsi.c  5751
1 file changed, 5751 insertions(+), 0 deletions(-)
diff --git a/drivers/video/fbdev/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c
new file mode 100644
index 000000000000..8be9b04d8849
--- /dev/null
+++ b/drivers/video/fbdev/omap2/dss/dsi.c
@@ -0,0 +1,5751 @@
1/*
2 * linux/drivers/video/fbdev/omap2/dss/dsi.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#define DSS_SUBSYS_NAME "DSI"
21
22#include <linux/kernel.h>
23#include <linux/io.h>
24#include <linux/clk.h>
25#include <linux/device.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <linux/mutex.h>
30#include <linux/module.h>
31#include <linux/semaphore.h>
32#include <linux/seq_file.h>
33#include <linux/platform_device.h>
34#include <linux/regulator/consumer.h>
35#include <linux/wait.h>
36#include <linux/workqueue.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/debugfs.h>
40#include <linux/pm_runtime.h>
41#include <linux/of.h>
42#include <linux/of_platform.h>
43
44#include <video/omapdss.h>
45#include <video/mipi_display.h>
46
47#include "dss.h"
48#include "dss_features.h"
49
50#define DSI_CATCH_MISSING_TE
51
52struct dsi_reg { u16 module; u16 idx; };
53
54#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx })
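/*
 * DSI_REG() builds a { module, offset } pair as a C99 compound literal.
 * Illustrative expansion: DSI_VC_CTRL(2) becomes
 * ((const struct dsi_reg) { DSI_PROTO, 0x0100 + 2 * 0x20 }), i.e. offset
 * 0x0140 in the protocol engine block; dsi_read_reg()/dsi_write_reg()
 * below use .module to pick the matching ioremapped base address.
 */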
55
56/* DSI Protocol Engine */
57
58#define DSI_PROTO 0
59#define DSI_PROTO_SZ 0x200
60
61#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000)
62#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010)
63#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014)
64#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018)
65#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C)
66#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040)
67#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044)
68#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048)
69#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C)
70#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050)
71#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054)
72#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058)
73#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C)
74#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060)
75#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064)
76#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068)
77#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C)
78#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070)
79#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074)
80#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078)
81#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C)
82#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080)
83#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084)
84#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088)
85#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C)
86#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090)
87#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094)
88#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + ((n) * 0x20))
89#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + ((n) * 0x20))
90#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + ((n) * 0x20))
91#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + ((n) * 0x20))
92#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + ((n) * 0x20))
93#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + ((n) * 0x20))
94#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + ((n) * 0x20))
95
96/* DSIPHY_SCP */
97
98#define DSI_PHY 1
99#define DSI_PHY_OFFSET 0x200
100#define DSI_PHY_SZ 0x40
101
102#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000)
103#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004)
104#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008)
105#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014)
106#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028)
107
108/* DSI_PLL_CTRL_SCP */
109
110#define DSI_PLL 2
111#define DSI_PLL_OFFSET 0x300
112#define DSI_PLL_SZ 0x20
113
114#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000)
115#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004)
116#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008)
117#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
118#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
119
120#define REG_GET(dsidev, idx, start, end) \
121 FLD_GET(dsi_read_reg(dsidev, idx), start, end)
122
123#define REG_FLD_MOD(dsidev, idx, val, start, end) \
124 dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
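/*
 * FLD_GET()/FLD_MOD() (from dss.h) extract or replace the bit field
 * [start:end] of a register word. Worked example (illustrative):
 * REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14) reads DSI_CLK_CTRL, sets
 * bit 14 (CIO_CLK_ICG) while preserving every other bit, and writes the
 * result back - a single-field read-modify-write.
 */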
125
126/* Global interrupts */
127#define DSI_IRQ_VC0 (1 << 0)
128#define DSI_IRQ_VC1 (1 << 1)
129#define DSI_IRQ_VC2 (1 << 2)
130#define DSI_IRQ_VC3 (1 << 3)
131#define DSI_IRQ_WAKEUP (1 << 4)
132#define DSI_IRQ_RESYNC (1 << 5)
133#define DSI_IRQ_PLL_LOCK (1 << 7)
134#define DSI_IRQ_PLL_UNLOCK (1 << 8)
135#define DSI_IRQ_PLL_RECALL (1 << 9)
136#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
137#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
138#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
139#define DSI_IRQ_TE_TRIGGER (1 << 16)
140#define DSI_IRQ_ACK_TRIGGER (1 << 17)
141#define DSI_IRQ_SYNC_LOST (1 << 18)
142#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
143#define DSI_IRQ_TA_TIMEOUT (1 << 20)
144#define DSI_IRQ_ERROR_MASK \
145	(DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
146	DSI_IRQ_TA_TIMEOUT)
147#define DSI_IRQ_CHANNEL_MASK 0xf
148
149/* Virtual channel interrupts */
150#define DSI_VC_IRQ_CS (1 << 0)
151#define DSI_VC_IRQ_ECC_CORR (1 << 1)
152#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
153#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
154#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
155#define DSI_VC_IRQ_BTA (1 << 5)
156#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
157#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
158#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
159#define DSI_VC_IRQ_ERROR_MASK \
160 (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
161 DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
162 DSI_VC_IRQ_FIFO_TX_UDF)
163
164/* ComplexIO interrupts */
165#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
166#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
167#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
168#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
169#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
170#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
171#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
172#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
173#define DSI_CIO_IRQ_ERRESC4 (1 << 8)
174#define DSI_CIO_IRQ_ERRESC5 (1 << 9)
175#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
176#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
177#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
178#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
179#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
180#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
181#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
182#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
183#define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
184#define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
185#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
186#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
187#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
188#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
189#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
190#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
191#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
192#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
193#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
194#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
195#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
196#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
197#define DSI_CIO_IRQ_ERROR_MASK \
198 (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
199 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
200 DSI_CIO_IRQ_ERRSYNCESC5 | \
201 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
202 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
203 DSI_CIO_IRQ_ERRESC5 | \
204 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
205 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
206 DSI_CIO_IRQ_ERRCONTROL5 | \
207 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
208 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
209 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
210 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
211 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
212
213typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
214
215static int dsi_display_init_dispc(struct platform_device *dsidev,
216 struct omap_overlay_manager *mgr);
217static void dsi_display_uninit_dispc(struct platform_device *dsidev,
218 struct omap_overlay_manager *mgr);
219
220static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
221
222#define DSI_MAX_NR_ISRS 2
223#define DSI_MAX_NR_LANES 5
224
225enum dsi_lane_function {
226 DSI_LANE_UNUSED = 0,
227 DSI_LANE_CLK,
228 DSI_LANE_DATA1,
229 DSI_LANE_DATA2,
230 DSI_LANE_DATA3,
231 DSI_LANE_DATA4,
232};
233
234struct dsi_lane_config {
235 enum dsi_lane_function function;
236 u8 polarity;
237};
238
239struct dsi_isr_data {
240 omap_dsi_isr_t isr;
241 void *arg;
242 u32 mask;
243};
244
245enum fifo_size {
246 DSI_FIFO_SIZE_0 = 0,
247 DSI_FIFO_SIZE_32 = 1,
248 DSI_FIFO_SIZE_64 = 2,
249 DSI_FIFO_SIZE_96 = 3,
250 DSI_FIFO_SIZE_128 = 4,
251};
252
253enum dsi_vc_source {
254 DSI_VC_SOURCE_L4 = 0,
255 DSI_VC_SOURCE_VP,
256};
257
258struct dsi_irq_stats {
259 unsigned long last_reset;
260 unsigned irq_count;
261 unsigned dsi_irqs[32];
262 unsigned vc_irqs[4][32];
263 unsigned cio_irqs[32];
264};
265
266struct dsi_isr_tables {
267 struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
268 struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
269 struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
270};
271
272struct dsi_clk_calc_ctx {
273 struct platform_device *dsidev;
274
275 /* inputs */
276
277 const struct omap_dss_dsi_config *config;
278
279 unsigned long req_pck_min, req_pck_nom, req_pck_max;
280
281 /* outputs */
282
283 struct dsi_clock_info dsi_cinfo;
284 struct dispc_clock_info dispc_cinfo;
285
286 struct omap_video_timings dispc_vm;
287 struct omap_dss_dsi_videomode_timings dsi_vm;
288};
289
290struct dsi_data {
291 struct platform_device *pdev;
292 void __iomem *proto_base;
293 void __iomem *phy_base;
294 void __iomem *pll_base;
295
296 int module_id;
297
298 int irq;
299
300 bool is_enabled;
301
302 struct clk *dss_clk;
303 struct clk *sys_clk;
304
305 struct dispc_clock_info user_dispc_cinfo;
306 struct dsi_clock_info user_dsi_cinfo;
307
308 struct dsi_clock_info current_cinfo;
309
310 bool vdds_dsi_enabled;
311 struct regulator *vdds_dsi_reg;
312
313 struct {
314 enum dsi_vc_source source;
315 struct omap_dss_device *dssdev;
316 enum fifo_size tx_fifo_size;
317 enum fifo_size rx_fifo_size;
318 int vc_id;
319 } vc[4];
320
321 struct mutex lock;
322 struct semaphore bus_lock;
323
324 unsigned pll_locked;
325
326 spinlock_t irq_lock;
327 struct dsi_isr_tables isr_tables;
328 /* space for a copy used by the interrupt handler */
329 struct dsi_isr_tables isr_tables_copy;
330
331 int update_channel;
332#ifdef DSI_PERF_MEASURE
333 unsigned update_bytes;
334#endif
335
336 bool te_enabled;
337 bool ulps_enabled;
338
339 void (*framedone_callback)(int, void *);
340 void *framedone_data;
341
342 struct delayed_work framedone_timeout_work;
343
344#ifdef DSI_CATCH_MISSING_TE
345 struct timer_list te_timer;
346#endif
347
348 unsigned long cache_req_pck;
349 unsigned long cache_clk_freq;
350 struct dsi_clock_info cache_cinfo;
351
352 u32 errors;
353 spinlock_t errors_lock;
354#ifdef DSI_PERF_MEASURE
355 ktime_t perf_setup_time;
356 ktime_t perf_start_time;
357#endif
358 int debug_read;
359 int debug_write;
360
361#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
362 spinlock_t irq_stats_lock;
363 struct dsi_irq_stats irq_stats;
364#endif
365 /* DSI PLL Parameter Ranges */
366 unsigned long regm_max, regn_max;
367 unsigned long regm_dispc_max, regm_dsi_max;
368 unsigned long fint_min, fint_max;
369 unsigned long lpdiv_max;
370
371 unsigned num_lanes_supported;
372 unsigned line_buffer_size;
373
374 struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
375 unsigned num_lanes_used;
376
377 unsigned scp_clk_refcount;
378
379 struct dss_lcd_mgr_config mgr_config;
380 struct omap_video_timings timings;
381 enum omap_dss_dsi_pixel_format pix_fmt;
382 enum omap_dss_dsi_mode mode;
383 struct omap_dss_dsi_videomode_timings vm_timings;
384
385 struct omap_dss_device output;
386};
387
388struct dsi_packet_sent_handler_data {
389 struct platform_device *dsidev;
390 struct completion *completion;
391};
392
393struct dsi_module_id_data {
394 u32 address;
395 int id;
396};
397
398static const struct of_device_id dsi_of_match[];
399
400#ifdef DSI_PERF_MEASURE
401static bool dsi_perf;
402module_param(dsi_perf, bool, 0644);
403#endif
404
405static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
406{
407 return dev_get_drvdata(&dsidev->dev);
408}
409
410static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
411{
412 return to_platform_device(dssdev->dev);
413}
414
415struct platform_device *dsi_get_dsidev_from_id(int module)
416{
417 struct omap_dss_device *out;
418 enum omap_dss_output_id id;
419
420 switch (module) {
421 case 0:
422 id = OMAP_DSS_OUTPUT_DSI1;
423 break;
424 case 1:
425 id = OMAP_DSS_OUTPUT_DSI2;
426 break;
427 default:
428 return NULL;
429 }
430
431 out = omap_dss_get_output(id);
432
433 return out ? to_platform_device(out->dev) : NULL;
434}
435
436static inline void dsi_write_reg(struct platform_device *dsidev,
437 const struct dsi_reg idx, u32 val)
438{
439 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
440 void __iomem *base;
441
442	switch (idx.module) {
443 case DSI_PROTO: base = dsi->proto_base; break;
444 case DSI_PHY: base = dsi->phy_base; break;
445 case DSI_PLL: base = dsi->pll_base; break;
446 default: return;
447 }
448
449 __raw_writel(val, base + idx.idx);
450}
451
452static inline u32 dsi_read_reg(struct platform_device *dsidev,
453 const struct dsi_reg idx)
454{
455 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
456 void __iomem *base;
457
458	switch (idx.module) {
459 case DSI_PROTO: base = dsi->proto_base; break;
460 case DSI_PHY: base = dsi->phy_base; break;
461 case DSI_PLL: base = dsi->pll_base; break;
462 default: return 0;
463 }
464
465 return __raw_readl(base + idx.idx);
466}
467
468static void dsi_bus_lock(struct omap_dss_device *dssdev)
469{
470 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
471 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
472
473 down(&dsi->bus_lock);
474}
475
476static void dsi_bus_unlock(struct omap_dss_device *dssdev)
477{
478 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
479 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
480
481 up(&dsi->bus_lock);
482}
483
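/*
 * NOTE: peeking at the semaphore's internal count below is racy and is
 * not a synchronization primitive; it is only good enough for
 * WARN-style "caller must hold the bus lock" sanity checks.
 */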
484static bool dsi_bus_is_locked(struct platform_device *dsidev)
485{
486 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
487
488 return dsi->bus_lock.count == 0;
489}
490
491static void dsi_completion_handler(void *data, u32 mask)
492{
493 complete((struct completion *)data);
494}
495
496static inline int wait_for_bit_change(struct platform_device *dsidev,
497 const struct dsi_reg idx, int bitnum, int value)
498{
499 unsigned long timeout;
500 ktime_t wait;
501 int t;
502
503 /* first busyloop to see if the bit changes right away */
504 t = 100;
505 while (t-- > 0) {
506 if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
507 return value;
508 }
509
510 /* then loop for 500ms, sleeping for 1ms in between */
511 timeout = jiffies + msecs_to_jiffies(500);
512 while (time_before(jiffies, timeout)) {
513 if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
514 return value;
515
516 wait = ns_to_ktime(1000 * 1000);
517 set_current_state(TASK_UNINTERRUPTIBLE);
518 schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
519 }
520
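	/* timed out: return the complement of 'value' so callers, which
	 * compare the return value against 'value', see the failure */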
521 return !value;
522}
523
524u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
525{
526 switch (fmt) {
527 case OMAP_DSS_DSI_FMT_RGB888:
528 case OMAP_DSS_DSI_FMT_RGB666:
529 return 24;
530 case OMAP_DSS_DSI_FMT_RGB666_PACKED:
531 return 18;
532 case OMAP_DSS_DSI_FMT_RGB565:
533 return 16;
534 default:
535 BUG();
536 return 0;
537 }
538}
539
540#ifdef DSI_PERF_MEASURE
541static void dsi_perf_mark_setup(struct platform_device *dsidev)
542{
543 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
544 dsi->perf_setup_time = ktime_get();
545}
546
547static void dsi_perf_mark_start(struct platform_device *dsidev)
548{
549 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
550 dsi->perf_start_time = ktime_get();
551}
552
553static void dsi_perf_show(struct platform_device *dsidev, const char *name)
554{
555 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
556 ktime_t t, setup_time, trans_time;
557 u32 total_bytes;
558 u32 setup_us, trans_us, total_us;
559
560 if (!dsi_perf)
561 return;
562
563 t = ktime_get();
564
565 setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
566 setup_us = (u32)ktime_to_us(setup_time);
567 if (setup_us == 0)
568 setup_us = 1;
569
570 trans_time = ktime_sub(t, dsi->perf_start_time);
571 trans_us = (u32)ktime_to_us(trans_time);
572 if (trans_us == 0)
573 trans_us = 1;
574
575 total_us = setup_us + trans_us;
576
577 total_bytes = dsi->update_bytes;
578
579 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
580 "%u bytes, %u kbytes/sec\n",
581 name,
582 setup_us,
583 trans_us,
584 total_us,
585 1000*1000 / total_us,
586 total_bytes,
587 total_bytes * 1000 / total_us);
588}
589#else
590static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
591{
592}
593
594static inline void dsi_perf_mark_start(struct platform_device *dsidev)
595{
596}
597
598static inline void dsi_perf_show(struct platform_device *dsidev,
599 const char *name)
600{
601}
602#endif
603
604static int verbose_irq;
605
606static void print_irq_status(u32 status)
607{
608 if (status == 0)
609 return;
610
611 if (!verbose_irq && (status & ~DSI_IRQ_CHANNEL_MASK) == 0)
612 return;
613
614#define PIS(x) (status & DSI_IRQ_##x) ? (#x " ") : ""
615
616 pr_debug("DSI IRQ: 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
617 status,
618 verbose_irq ? PIS(VC0) : "",
619 verbose_irq ? PIS(VC1) : "",
620 verbose_irq ? PIS(VC2) : "",
621 verbose_irq ? PIS(VC3) : "",
622 PIS(WAKEUP),
623 PIS(RESYNC),
624 PIS(PLL_LOCK),
625 PIS(PLL_UNLOCK),
626 PIS(PLL_RECALL),
627 PIS(COMPLEXIO_ERR),
628 PIS(HS_TX_TIMEOUT),
629 PIS(LP_RX_TIMEOUT),
630 PIS(TE_TRIGGER),
631 PIS(ACK_TRIGGER),
632 PIS(SYNC_LOST),
633 PIS(LDO_POWER_GOOD),
634 PIS(TA_TIMEOUT));
635#undef PIS
636}
637
638static void print_irq_status_vc(int channel, u32 status)
639{
640 if (status == 0)
641 return;
642
643 if (!verbose_irq && (status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
644 return;
645
646#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""
647
648 pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
649 channel,
650 status,
651 PIS(CS),
652 PIS(ECC_CORR),
653 PIS(ECC_NO_CORR),
654 verbose_irq ? PIS(PACKET_SENT) : "",
655 PIS(BTA),
656 PIS(FIFO_TX_OVF),
657 PIS(FIFO_RX_OVF),
658 PIS(FIFO_TX_UDF),
659 PIS(PP_BUSY_CHANGE));
660#undef PIS
661}
662
663static void print_irq_status_cio(u32 status)
664{
665 if (status == 0)
666 return;
667
668#define PIS(x) (status & DSI_CIO_IRQ_##x) ? (#x " ") : ""
669
670 pr_debug("DSI CIO IRQ 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
671 status,
672 PIS(ERRSYNCESC1),
673 PIS(ERRSYNCESC2),
674 PIS(ERRSYNCESC3),
675 PIS(ERRESC1),
676 PIS(ERRESC2),
677 PIS(ERRESC3),
678 PIS(ERRCONTROL1),
679 PIS(ERRCONTROL2),
680 PIS(ERRCONTROL3),
681 PIS(STATEULPS1),
682 PIS(STATEULPS2),
683 PIS(STATEULPS3),
684 PIS(ERRCONTENTIONLP0_1),
685 PIS(ERRCONTENTIONLP1_1),
686 PIS(ERRCONTENTIONLP0_2),
687 PIS(ERRCONTENTIONLP1_2),
688 PIS(ERRCONTENTIONLP0_3),
689 PIS(ERRCONTENTIONLP1_3),
690 PIS(ULPSACTIVENOT_ALL0),
691 PIS(ULPSACTIVENOT_ALL1));
692#undef PIS
693}
694
695#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
696static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
697 u32 *vcstatus, u32 ciostatus)
698{
699 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
700 int i;
701
702 spin_lock(&dsi->irq_stats_lock);
703
704 dsi->irq_stats.irq_count++;
705 dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);
706
707 for (i = 0; i < 4; ++i)
708 dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);
709
710 dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);
711
712 spin_unlock(&dsi->irq_stats_lock);
713}
714#else
715#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
716#endif
717
718static int debug_irq;
719
720static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
721 u32 *vcstatus, u32 ciostatus)
722{
723 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
724 int i;
725
726 if (irqstatus & DSI_IRQ_ERROR_MASK) {
727 DSSERR("DSI error, irqstatus %x\n", irqstatus);
728 print_irq_status(irqstatus);
729 spin_lock(&dsi->errors_lock);
730 dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
731 spin_unlock(&dsi->errors_lock);
732 } else if (debug_irq) {
733 print_irq_status(irqstatus);
734 }
735
736 for (i = 0; i < 4; ++i) {
737 if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
738 DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
739 i, vcstatus[i]);
740 print_irq_status_vc(i, vcstatus[i]);
741 } else if (debug_irq) {
742 print_irq_status_vc(i, vcstatus[i]);
743 }
744 }
745
746 if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
747 DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
748 print_irq_status_cio(ciostatus);
749 } else if (debug_irq) {
750 print_irq_status_cio(ciostatus);
751 }
752}
753
754static void dsi_call_isrs(struct dsi_isr_data *isr_array,
755 unsigned isr_array_size, u32 irqstatus)
756{
757 struct dsi_isr_data *isr_data;
758 int i;
759
760 for (i = 0; i < isr_array_size; i++) {
761 isr_data = &isr_array[i];
762 if (isr_data->isr && isr_data->mask & irqstatus)
763 isr_data->isr(isr_data->arg, irqstatus);
764 }
765}
766
767static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
768 u32 irqstatus, u32 *vcstatus, u32 ciostatus)
769{
770 int i;
771
772 dsi_call_isrs(isr_tables->isr_table,
773 ARRAY_SIZE(isr_tables->isr_table),
774 irqstatus);
775
776 for (i = 0; i < 4; ++i) {
777 if (vcstatus[i] == 0)
778 continue;
779 dsi_call_isrs(isr_tables->isr_table_vc[i],
780 ARRAY_SIZE(isr_tables->isr_table_vc[i]),
781 vcstatus[i]);
782 }
783
784 if (ciostatus != 0)
785 dsi_call_isrs(isr_tables->isr_table_cio,
786 ARRAY_SIZE(isr_tables->isr_table_cio),
787 ciostatus);
788}
789
790static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
791{
792 struct platform_device *dsidev;
793 struct dsi_data *dsi;
794 u32 irqstatus, vcstatus[4], ciostatus;
795 int i;
796
797 dsidev = (struct platform_device *) arg;
798 dsi = dsi_get_dsidrv_data(dsidev);
799
800 if (!dsi->is_enabled)
801 return IRQ_NONE;
802
803 spin_lock(&dsi->irq_lock);
804
805 irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
806
807 /* IRQ is not for us */
808 if (!irqstatus) {
809 spin_unlock(&dsi->irq_lock);
810 return IRQ_NONE;
811 }
812
813 dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
814 /* flush posted write */
815 dsi_read_reg(dsidev, DSI_IRQSTATUS);
816
817 for (i = 0; i < 4; ++i) {
818 if ((irqstatus & (1 << i)) == 0) {
819 vcstatus[i] = 0;
820 continue;
821 }
822
823 vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
824
825 dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
826 /* flush posted write */
827 dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
828 }
829
830 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
831 ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
832
833 dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
834 /* flush posted write */
835 dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
836 } else {
837 ciostatus = 0;
838 }
839
840#ifdef DSI_CATCH_MISSING_TE
841 if (irqstatus & DSI_IRQ_TE_TRIGGER)
842 del_timer(&dsi->te_timer);
843#endif
844
845 /* make a copy and unlock, so that isrs can unregister
846 * themselves */
847 memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
848 sizeof(dsi->isr_tables));
849
850 spin_unlock(&dsi->irq_lock);
851
852 dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
853
854 dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
855
856 dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);
857
858 return IRQ_HANDLED;
859}
860
861/* dsi->irq_lock has to be locked by the caller */
862static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
863 struct dsi_isr_data *isr_array,
864 unsigned isr_array_size, u32 default_mask,
865 const struct dsi_reg enable_reg,
866 const struct dsi_reg status_reg)
867{
868 struct dsi_isr_data *isr_data;
869 u32 mask;
870 u32 old_mask;
871 int i;
872
873 mask = default_mask;
874
875 for (i = 0; i < isr_array_size; i++) {
876 isr_data = &isr_array[i];
877
878 if (isr_data->isr == NULL)
879 continue;
880
881 mask |= isr_data->mask;
882 }
883
884 old_mask = dsi_read_reg(dsidev, enable_reg);
885 /* clear the irqstatus for newly enabled irqs */
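	/* e.g. (illustrative) old_mask = 0b0011, mask = 0b0110:
	 * (mask ^ old_mask) & mask = 0b0100, so only the newly enabled
	 * bit has its stale status cleared before being unmasked */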
886 dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
887 dsi_write_reg(dsidev, enable_reg, mask);
888
889 /* flush posted writes */
890 dsi_read_reg(dsidev, enable_reg);
891 dsi_read_reg(dsidev, status_reg);
892}
893
894/* dsi->irq_lock has to be locked by the caller */
895static void _omap_dsi_set_irqs(struct platform_device *dsidev)
896{
897 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
898 u32 mask = DSI_IRQ_ERROR_MASK;
899#ifdef DSI_CATCH_MISSING_TE
900 mask |= DSI_IRQ_TE_TRIGGER;
901#endif
902 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
903 ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
904 DSI_IRQENABLE, DSI_IRQSTATUS);
905}
906
907/* dsi->irq_lock has to be locked by the caller */
908static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
909{
910 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
911
912 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
913 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
914 DSI_VC_IRQ_ERROR_MASK,
915 DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
916}
917
918/* dsi->irq_lock has to be locked by the caller */
919static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
920{
921 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
922
923 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
924 ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
925 DSI_CIO_IRQ_ERROR_MASK,
926 DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
927}
928
929static void _dsi_initialize_irq(struct platform_device *dsidev)
930{
931 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
932 unsigned long flags;
933 int vc;
934
935 spin_lock_irqsave(&dsi->irq_lock, flags);
936
937 memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
938
939 _omap_dsi_set_irqs(dsidev);
940 for (vc = 0; vc < 4; ++vc)
941 _omap_dsi_set_irqs_vc(dsidev, vc);
942 _omap_dsi_set_irqs_cio(dsidev);
943
944 spin_unlock_irqrestore(&dsi->irq_lock, flags);
945}
946
947static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
948 struct dsi_isr_data *isr_array, unsigned isr_array_size)
949{
950 struct dsi_isr_data *isr_data;
951 int free_idx;
952 int i;
953
954 BUG_ON(isr == NULL);
955
956 /* check for duplicate entry and find a free slot */
957 free_idx = -1;
958 for (i = 0; i < isr_array_size; i++) {
959 isr_data = &isr_array[i];
960
961 if (isr_data->isr == isr && isr_data->arg == arg &&
962 isr_data->mask == mask) {
963 return -EINVAL;
964 }
965
966 if (isr_data->isr == NULL && free_idx == -1)
967 free_idx = i;
968 }
969
970 if (free_idx == -1)
971 return -EBUSY;
972
973 isr_data = &isr_array[free_idx];
974 isr_data->isr = isr;
975 isr_data->arg = arg;
976 isr_data->mask = mask;
977
978 return 0;
979}
980
981static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
982 struct dsi_isr_data *isr_array, unsigned isr_array_size)
983{
984 struct dsi_isr_data *isr_data;
985 int i;
986
987 for (i = 0; i < isr_array_size; i++) {
988 isr_data = &isr_array[i];
989 if (isr_data->isr != isr || isr_data->arg != arg ||
990 isr_data->mask != mask)
991 continue;
992
993 isr_data->isr = NULL;
994 isr_data->arg = NULL;
995 isr_data->mask = 0;
996
997 return 0;
998 }
999
1000 return -EINVAL;
1001}
1002
1003static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
1004 void *arg, u32 mask)
1005{
1006 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1007 unsigned long flags;
1008 int r;
1009
1010 spin_lock_irqsave(&dsi->irq_lock, flags);
1011
1012 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
1013 ARRAY_SIZE(dsi->isr_tables.isr_table));
1014
1015 if (r == 0)
1016 _omap_dsi_set_irqs(dsidev);
1017
1018 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1019
1020 return r;
1021}
1022
1023static int dsi_unregister_isr(struct platform_device *dsidev,
1024 omap_dsi_isr_t isr, void *arg, u32 mask)
1025{
1026 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1027 unsigned long flags;
1028 int r;
1029
1030 spin_lock_irqsave(&dsi->irq_lock, flags);
1031
1032 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
1033 ARRAY_SIZE(dsi->isr_tables.isr_table));
1034
1035 if (r == 0)
1036 _omap_dsi_set_irqs(dsidev);
1037
1038 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1039
1040 return r;
1041}
1042
1043static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
1044 omap_dsi_isr_t isr, void *arg, u32 mask)
1045{
1046 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1047 unsigned long flags;
1048 int r;
1049
1050 spin_lock_irqsave(&dsi->irq_lock, flags);
1051
1052 r = _dsi_register_isr(isr, arg, mask,
1053 dsi->isr_tables.isr_table_vc[channel],
1054 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
1055
1056 if (r == 0)
1057 _omap_dsi_set_irqs_vc(dsidev, channel);
1058
1059 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1060
1061 return r;
1062}
1063
1064static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
1065 omap_dsi_isr_t isr, void *arg, u32 mask)
1066{
1067 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1068 unsigned long flags;
1069 int r;
1070
1071 spin_lock_irqsave(&dsi->irq_lock, flags);
1072
1073 r = _dsi_unregister_isr(isr, arg, mask,
1074 dsi->isr_tables.isr_table_vc[channel],
1075 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
1076
1077 if (r == 0)
1078 _omap_dsi_set_irqs_vc(dsidev, channel);
1079
1080 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1081
1082 return r;
1083}
1084
1085static int dsi_register_isr_cio(struct platform_device *dsidev,
1086 omap_dsi_isr_t isr, void *arg, u32 mask)
1087{
1088 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1089 unsigned long flags;
1090 int r;
1091
1092 spin_lock_irqsave(&dsi->irq_lock, flags);
1093
1094 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1095 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1096
1097 if (r == 0)
1098 _omap_dsi_set_irqs_cio(dsidev);
1099
1100 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1101
1102 return r;
1103}
1104
1105static int dsi_unregister_isr_cio(struct platform_device *dsidev,
1106 omap_dsi_isr_t isr, void *arg, u32 mask)
1107{
1108 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1109 unsigned long flags;
1110 int r;
1111
1112 spin_lock_irqsave(&dsi->irq_lock, flags);
1113
1114 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1115 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1116
1117 if (r == 0)
1118 _omap_dsi_set_irqs_cio(dsidev);
1119
1120 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1121
1122 return r;
1123}
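/*
 * Minimal usage sketch of the ISR API above (illustrative, mirroring
 * how this driver's own BTA/sync paths use it):
 *
 *	DECLARE_COMPLETION_ONSTACK(completion);
 *
 *	r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
 *			&completion, DSI_VC_IRQ_BTA);
 *	if (r)
 *		goto err;
 *	... trigger the event, then ...
 *	if (wait_for_completion_timeout(&completion,
 *			msecs_to_jiffies(10)) == 0)
 *		r = -ETIMEDOUT;
 *	dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
 *			&completion, DSI_VC_IRQ_BTA);
 */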
1124
1125static u32 dsi_get_errors(struct platform_device *dsidev)
1126{
1127 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1128 unsigned long flags;
1129 u32 e;
1130 spin_lock_irqsave(&dsi->errors_lock, flags);
1131 e = dsi->errors;
1132 dsi->errors = 0;
1133 spin_unlock_irqrestore(&dsi->errors_lock, flags);
1134 return e;
1135}
1136
1137int dsi_runtime_get(struct platform_device *dsidev)
1138{
1139 int r;
1140 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1141
1142 DSSDBG("dsi_runtime_get\n");
1143
1144 r = pm_runtime_get_sync(&dsi->pdev->dev);
1145 WARN_ON(r < 0);
1146 return r < 0 ? r : 0;
1147}
1148
1149void dsi_runtime_put(struct platform_device *dsidev)
1150{
1151 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1152 int r;
1153
1154 DSSDBG("dsi_runtime_put\n");
1155
1156 r = pm_runtime_put_sync(&dsi->pdev->dev);
1157 WARN_ON(r < 0 && r != -ENOSYS);
1158}
1159
1160static int dsi_regulator_init(struct platform_device *dsidev)
1161{
1162 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1163 struct regulator *vdds_dsi;
1164
1165 if (dsi->vdds_dsi_reg != NULL)
1166 return 0;
1167
1168 vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "vdd");
1169
1170 if (IS_ERR(vdds_dsi)) {
1171 if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
1172 DSSERR("can't get DSI VDD regulator\n");
1173 return PTR_ERR(vdds_dsi);
1174 }
1175
1176 dsi->vdds_dsi_reg = vdds_dsi;
1177
1178 return 0;
1179}
1180
1181/* source clock for DSI PLL. this could also be PCLKFREE */
1182static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
1183 bool enable)
1184{
1185 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1186
1187 if (enable)
1188 clk_prepare_enable(dsi->sys_clk);
1189 else
1190 clk_disable_unprepare(dsi->sys_clk);
1191
1192 if (enable && dsi->pll_locked) {
1193 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
1194 DSSERR("cannot lock PLL when enabling clocks\n");
1195 }
1196}
1197
1198static void _dsi_print_reset_status(struct platform_device *dsidev)
1199{
1200 u32 l;
1201 int b0, b1, b2;
1202
1203 /* A dummy read using the SCP interface to any DSIPHY register is
1204 * required after DSIPHY reset to complete the reset of the DSI complex
1205 * I/O. */
1206 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1207
1208 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
1209 b0 = 28;
1210 b1 = 27;
1211 b2 = 26;
1212 } else {
1213 b0 = 24;
1214 b1 = 25;
1215 b2 = 26;
1216 }
1217
1218#define DSI_FLD_GET(fld, start, end)\
1219 FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end)
1220
1221 pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
1222 DSI_FLD_GET(PLL_STATUS, 0, 0),
1223 DSI_FLD_GET(COMPLEXIO_CFG1, 29, 29),
1224 DSI_FLD_GET(DSIPHY_CFG5, b0, b0),
1225 DSI_FLD_GET(DSIPHY_CFG5, b1, b1),
1226 DSI_FLD_GET(DSIPHY_CFG5, b2, b2),
1227 DSI_FLD_GET(DSIPHY_CFG5, 29, 29),
1228 DSI_FLD_GET(DSIPHY_CFG5, 30, 30),
1229 DSI_FLD_GET(DSIPHY_CFG5, 31, 31));
1230
1231#undef DSI_FLD_GET
1232}
1233
1234static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
1235{
1236 DSSDBG("dsi_if_enable(%d)\n", enable);
1237
1238 enable = enable ? 1 : 0;
1239 REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
1240
1241 if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
1242 DSSERR("Failed to set dsi_if_enable to %d\n", enable);
1243 return -EIO;
1244 }
1245
1246 return 0;
1247}
1248
1249unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
1250{
1251 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1252
1253 return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
1254}
1255
1256static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
1257{
1258 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1259
1260 return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
1261}
1262
1263static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1264{
1265 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1266
1267 return dsi->current_cinfo.clkin4ddr / 16;
1268}
1269
1270static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1271{
1272 unsigned long r;
1273 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1274
1275 if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
1276 /* DSI FCLK source is DSS_CLK_FCK */
1277 r = clk_get_rate(dsi->dss_clk);
1278 } else {
1279 /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1280 r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
1281 }
1282
1283 return r;
1284}
1285
1286static int dsi_lp_clock_calc(struct dsi_clock_info *cinfo,
1287 unsigned long lp_clk_min, unsigned long lp_clk_max)
1288{
1289 unsigned long dsi_fclk = cinfo->dsi_pll_hsdiv_dsi_clk;
1290 unsigned lp_clk_div;
1291 unsigned long lp_clk;
1292
1293 lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2);
1294 lp_clk = dsi_fclk / 2 / lp_clk_div;
1295
1296 if (lp_clk < lp_clk_min || lp_clk > lp_clk_max)
1297 return -EINVAL;
1298
1299 cinfo->lp_clk_div = lp_clk_div;
1300 cinfo->lp_clk = lp_clk;
1301
1302 return 0;
1303}
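/*
 * Worked example (illustrative): with dsi_fclk = 173 MHz and
 * lp_clk_max = 10 MHz, lp_clk_div = DIV_ROUND_UP(173M, 20M) = 9 and
 * lp_clk = 173M / 2 / 9 ~= 9.6 MHz - the largest LP clock that still
 * fits under the requested maximum.
 */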
1304
1305static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
1306{
1307 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1308 unsigned long dsi_fclk;
1309 unsigned lp_clk_div;
1310 unsigned long lp_clk;
1311
1312 lp_clk_div = dsi->user_dsi_cinfo.lp_clk_div;
1313
1314 if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
1315 return -EINVAL;
1316
1317 dsi_fclk = dsi_fclk_rate(dsidev);
1318
1319 lp_clk = dsi_fclk / 2 / lp_clk_div;
1320
1321 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
1322 dsi->current_cinfo.lp_clk = lp_clk;
1323 dsi->current_cinfo.lp_clk_div = lp_clk_div;
1324
1325 /* LP_CLK_DIVISOR */
1326 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
1327
1328 /* LP_RX_SYNCHRO_ENABLE */
1329 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
1330
1331 return 0;
1332}
1333
1334static void dsi_enable_scp_clk(struct platform_device *dsidev)
1335{
1336 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1337
1338 if (dsi->scp_clk_refcount++ == 0)
1339 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
1340}
1341
1342static void dsi_disable_scp_clk(struct platform_device *dsidev)
1343{
1344 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1345
1346 WARN_ON(dsi->scp_clk_refcount == 0);
1347 if (--dsi->scp_clk_refcount == 0)
1348 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
1349}
1350
1351enum dsi_pll_power_state {
1352 DSI_PLL_POWER_OFF = 0x0,
1353 DSI_PLL_POWER_ON_HSCLK = 0x1,
1354 DSI_PLL_POWER_ON_ALL = 0x2,
1355 DSI_PLL_POWER_ON_DIV = 0x3,
1356};
1357
1358static int dsi_pll_power(struct platform_device *dsidev,
1359 enum dsi_pll_power_state state)
1360{
1361 int t = 0;
1362
1363 /* DSI-PLL power command 0x3 is not working */
1364 if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
1365 state == DSI_PLL_POWER_ON_DIV)
1366 state = DSI_PLL_POWER_ON_ALL;
1367
1368 /* PLL_PWR_CMD */
1369 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
1370
1371 /* PLL_PWR_STATUS */
1372 while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
1373 if (++t > 1000) {
1374 DSSERR("Failed to set DSI PLL power mode to %d\n",
1375 state);
1376 return -ENODEV;
1377 }
1378 udelay(1);
1379 }
1380
1381 return 0;
1382}
1383
1384unsigned long dsi_get_pll_clkin(struct platform_device *dsidev)
1385{
1386 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1387 return clk_get_rate(dsi->sys_clk);
1388}
1389
1390bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
1391 unsigned long out_min, dsi_hsdiv_calc_func func, void *data)
1392{
1393 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1394 int regm, regm_start, regm_stop;
1395 unsigned long out_max;
1396 unsigned long out;
1397
1398 out_min = out_min ? out_min : 1;
1399 out_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1400
1401 regm_start = max(DIV_ROUND_UP(pll, out_max), 1ul);
1402 regm_stop = min(pll / out_min, dsi->regm_dispc_max);
1403
1404 for (regm = regm_start; regm <= regm_stop; ++regm) {
1405 out = pll / regm;
1406
1407 if (func(regm, out, data))
1408 return true;
1409 }
1410
1411 return false;
1412}
1413
1414bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
1415 unsigned long pll_min, unsigned long pll_max,
1416 dsi_pll_calc_func func, void *data)
1417{
1418 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1419 int regn, regn_start, regn_stop;
1420 int regm, regm_start, regm_stop;
1421 unsigned long fint, pll;
1422 const unsigned long pll_hw_max = 1800000000;
1423 unsigned long fint_hw_min, fint_hw_max;
1424
1425 fint_hw_min = dsi->fint_min;
1426 fint_hw_max = dsi->fint_max;
1427
1428 regn_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
1429 regn_stop = min(clkin / fint_hw_min, dsi->regn_max);
1430
1431 pll_max = pll_max ? pll_max : ULONG_MAX;
1432
1433 for (regn = regn_start; regn <= regn_stop; ++regn) {
1434 fint = clkin / regn;
1435
1436 regm_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
1437 1ul);
1438 regm_stop = min3(pll_max / fint / 2,
1439 pll_hw_max / fint / 2,
1440 dsi->regm_max);
1441
1442 for (regm = regm_start; regm <= regm_stop; ++regm) {
1443 pll = 2 * regm * fint;
1444
1445 if (func(regn, regm, fint, pll, data))
1446 return true;
1447 }
1448 }
1449
1450 return false;
1451}
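/*
 * Worked example (illustrative): clkin = 19.2 MHz with regn = 10 gives
 * fint = 1.92 MHz; regm = 250 then yields pll = 2 * 250 * 1.92 MHz =
 * 960 MHz (CLKIN4DDR), well under the 1.8 GHz hardware limit, for a
 * 480 Mbps data rate per lane.
 */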
1452
1453/* calculate clock rates using dividers in cinfo */
1454static int dsi_calc_clock_rates(struct platform_device *dsidev,
1455 struct dsi_clock_info *cinfo)
1456{
1457 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1458
1459 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
1460 return -EINVAL;
1461
1462 if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
1463 return -EINVAL;
1464
1465 if (cinfo->regm_dispc > dsi->regm_dispc_max)
1466 return -EINVAL;
1467
1468 if (cinfo->regm_dsi > dsi->regm_dsi_max)
1469 return -EINVAL;
1470
1471 cinfo->clkin = clk_get_rate(dsi->sys_clk);
1472 cinfo->fint = cinfo->clkin / cinfo->regn;
1473
1474 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1475 return -EINVAL;
1476
1477 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
1478
1479 if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
1480 return -EINVAL;
1481
1482 if (cinfo->regm_dispc > 0)
1483 cinfo->dsi_pll_hsdiv_dispc_clk =
1484 cinfo->clkin4ddr / cinfo->regm_dispc;
1485 else
1486 cinfo->dsi_pll_hsdiv_dispc_clk = 0;
1487
1488 if (cinfo->regm_dsi > 0)
1489 cinfo->dsi_pll_hsdiv_dsi_clk =
1490 cinfo->clkin4ddr / cinfo->regm_dsi;
1491 else
1492 cinfo->dsi_pll_hsdiv_dsi_clk = 0;
1493
1494 return 0;
1495}
1496
1497static void dsi_pll_calc_dsi_fck(struct dsi_clock_info *cinfo)
1498{
1499 unsigned long max_dsi_fck;
1500
1501 max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK);
1502
1503 cinfo->regm_dsi = DIV_ROUND_UP(cinfo->clkin4ddr, max_dsi_fck);
1504 cinfo->dsi_pll_hsdiv_dsi_clk = cinfo->clkin4ddr / cinfo->regm_dsi;
1505}
1506
1507int dsi_pll_set_clock_div(struct platform_device *dsidev,
1508 struct dsi_clock_info *cinfo)
1509{
1510 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1511 int r = 0;
1512 u32 l;
1513 int f = 0;
1514 u8 regn_start, regn_end, regm_start, regm_end;
1515 u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
1516
1517 DSSDBG("DSI PLL clock config starts");
1518
1519 dsi->current_cinfo.clkin = cinfo->clkin;
1520 dsi->current_cinfo.fint = cinfo->fint;
1521 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1522 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
1523 cinfo->dsi_pll_hsdiv_dispc_clk;
1524 dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
1525 cinfo->dsi_pll_hsdiv_dsi_clk;
1526
1527 dsi->current_cinfo.regn = cinfo->regn;
1528 dsi->current_cinfo.regm = cinfo->regm;
1529 dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
1530 dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;
1531
1532 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1533
1534 DSSDBG("clkin rate %ld\n", cinfo->clkin);
1535
1536 /* DSIPHY == CLKIN4DDR */
1537 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
1538 cinfo->regm,
1539 cinfo->regn,
1540 cinfo->clkin,
1541 cinfo->clkin4ddr);
1542
1543 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
1544 cinfo->clkin4ddr / 1000 / 1000 / 2);
1545
1546 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
1547
1548 DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
1549 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1550 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1551 cinfo->dsi_pll_hsdiv_dispc_clk);
1552 DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
1553 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1554 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1555 cinfo->dsi_pll_hsdiv_dsi_clk);
1556
1557 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
1558 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
1559 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
1560 &regm_dispc_end);
1561 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
1562 &regm_dsi_end);
1563
1564 /* DSI_PLL_AUTOMODE = manual */
1565 REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);
1566
1567 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
1568 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1569 /* DSI_PLL_REGN */
1570 l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
1571 /* DSI_PLL_REGM */
1572 l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
1573 /* DSI_CLOCK_DIV */
1574 l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
1575 regm_dispc_start, regm_dispc_end);
1576 /* DSIPROTO_CLOCK_DIV */
1577 l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
1578 regm_dsi_start, regm_dsi_end);
1579 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);
1580
1581 BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
1582
1583 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1584
1585 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
1586 f = cinfo->fint < 1000000 ? 0x3 :
1587 cinfo->fint < 1250000 ? 0x4 :
1588 cinfo->fint < 1500000 ? 0x5 :
1589 cinfo->fint < 1750000 ? 0x6 :
1590 0x7;
1591
1592 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1593 } else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) {
1594 f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4;
1595
1596 l = FLD_MOD(l, f, 4, 1); /* PLL_SELFREQDCO */
1597 }
1598
1599 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1600 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1601 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1602 if (dss_has_feature(FEAT_DSI_PLL_REFSEL))
1603 l = FLD_MOD(l, 3, 22, 21); /* REF_SYSCLK = sysclk */
1604 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1605
1606 REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1607
1608 if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
1609 DSSERR("dsi pll go bit not going down.\n");
1610 r = -EIO;
1611 goto err;
1612 }
1613
1614 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
1615 DSSERR("cannot lock PLL\n");
1616 r = -EIO;
1617 goto err;
1618 }
1619
1620 dsi->pll_locked = 1;
1621
1622 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1623 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1624 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1625 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
1626 l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
1627 l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
1628 l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
1629 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1630 l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
1631 l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
1632 l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
1633 l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
1634 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1635 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1636 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1637 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1638
1639 DSSDBG("PLL config done\n");
1640err:
1641 return r;
1642}
1643
1644int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1645 bool enable_hsdiv)
1646{
1647 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1648 int r = 0;
1649 enum dsi_pll_power_state pwstate;
1650
1651 DSSDBG("PLL init\n");
1652
1653 /*
1654 * It seems that on many OMAPs we need to enable both to have a
1655 * functional HSDivider.
1656 */
1657 enable_hsclk = enable_hsdiv = true;
1658
1659 r = dsi_regulator_init(dsidev);
1660 if (r)
1661 return r;
1662
1663 dsi_enable_pll_clock(dsidev, 1);
1664 /*
1665 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
1666 */
1667 dsi_enable_scp_clk(dsidev);
1668
1669 if (!dsi->vdds_dsi_enabled) {
1670 r = regulator_enable(dsi->vdds_dsi_reg);
1671 if (r)
1672 goto err0;
1673 dsi->vdds_dsi_enabled = true;
1674 }
1675
1676 /* XXX PLL does not come out of reset without this... */
1677 dispc_pck_free_enable(1);
1678
1679 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
1680 DSSERR("PLL not coming out of reset.\n");
1681 r = -ENODEV;
1682 dispc_pck_free_enable(0);
1683 goto err1;
1684 }
1685
1686 /* XXX ... but if left on, we get problems when planes do not
1687 * fill the whole display. No idea about this */
1688 dispc_pck_free_enable(0);
1689
1690 if (enable_hsclk && enable_hsdiv)
1691 pwstate = DSI_PLL_POWER_ON_ALL;
1692 else if (enable_hsclk)
1693 pwstate = DSI_PLL_POWER_ON_HSCLK;
1694 else if (enable_hsdiv)
1695 pwstate = DSI_PLL_POWER_ON_DIV;
1696 else
1697 pwstate = DSI_PLL_POWER_OFF;
1698
1699 r = dsi_pll_power(dsidev, pwstate);
1700
1701 if (r)
1702 goto err1;
1703
1704 DSSDBG("PLL init done\n");
1705
1706 return 0;
1707err1:
1708 if (dsi->vdds_dsi_enabled) {
1709 regulator_disable(dsi->vdds_dsi_reg);
1710 dsi->vdds_dsi_enabled = false;
1711 }
1712err0:
1713 dsi_disable_scp_clk(dsidev);
1714 dsi_enable_pll_clock(dsidev, 0);
1715 return r;
1716}
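/*
 * Typical call order in this driver (illustrative): dsi_pll_init()
 * powers up the PLL block, dsi_calc_clock_rates() validates a divider
 * set, dsi_pll_set_clock_div() programs and locks the PLL, and
 * dsi_pll_uninit() below tears it all down again.
 */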
1717
1718void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1719{
1720 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1721
1722 dsi->pll_locked = 0;
1723 dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
1724 if (disconnect_lanes) {
1725 WARN_ON(!dsi->vdds_dsi_enabled);
1726 regulator_disable(dsi->vdds_dsi_reg);
1727 dsi->vdds_dsi_enabled = false;
1728 }
1729
1730 dsi_disable_scp_clk(dsidev);
1731 dsi_enable_pll_clock(dsidev, 0);
1732
1733 DSSDBG("PLL uninit done\n");
1734}
1735
1736static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1737 struct seq_file *s)
1738{
1739 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1740 struct dsi_clock_info *cinfo = &dsi->current_cinfo;
1741 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1742 int dsi_module = dsi->module_id;
1743
1744 dispc_clk_src = dss_get_dispc_clk_source();
1745 dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
1746
1747 if (dsi_runtime_get(dsidev))
1748 return;
1749
1750 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1751
1752 seq_printf(s, "dsi pll clkin\t%lu\n", cinfo->clkin);
1753
1754 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1755
1756 seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
1757 cinfo->clkin4ddr, cinfo->regm);
1758
1759 seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16luregm_dispc %u\t(%s)\n",
1760 dss_feat_get_clk_source_name(dsi_module == 0 ?
1761 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
1762 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
1763 cinfo->dsi_pll_hsdiv_dispc_clk,
1764 cinfo->regm_dispc,
1765 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1766 "off" : "on");
1767
1768 seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16luregm_dsi %u\t(%s)\n",
1769 dss_feat_get_clk_source_name(dsi_module == 0 ?
1770 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
1771 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
1772 cinfo->dsi_pll_hsdiv_dsi_clk,
1773 cinfo->regm_dsi,
1774 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1775 "off" : "on");
1776
1777 seq_printf(s, "- DSI%d -\n", dsi_module + 1);
1778
1779 seq_printf(s, "dsi fclk source = %s (%s)\n",
1780 dss_get_generic_clk_source_name(dsi_clk_src),
1781 dss_feat_get_clk_source_name(dsi_clk_src));
1782
1783 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1784
1785 seq_printf(s, "DDR_CLK\t\t%lu\n",
1786 cinfo->clkin4ddr / 4);
1787
1788 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
1789
1790 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1791
1792 dsi_runtime_put(dsidev);
1793}
1794
1795void dsi_dump_clocks(struct seq_file *s)
1796{
1797 struct platform_device *dsidev;
1798 int i;
1799
1800 for (i = 0; i < MAX_NUM_DSI; i++) {
1801 dsidev = dsi_get_dsidev_from_id(i);
1802 if (dsidev)
1803 dsi_dump_dsidev_clocks(dsidev, s);
1804 }
1805}
1806
1807#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1808static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1809 struct seq_file *s)
1810{
1811 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1812 unsigned long flags;
1813 struct dsi_irq_stats stats;
1814
1815 spin_lock_irqsave(&dsi->irq_stats_lock, flags);
1816
1817 stats = dsi->irq_stats;
1818 memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
1819 dsi->irq_stats.last_reset = jiffies;
1820
1821 spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
1822
1823 seq_printf(s, "period %u ms\n",
1824 jiffies_to_msecs(jiffies - stats.last_reset));
1825
1826 seq_printf(s, "irqs %d\n", stats.irq_count);
1827#define PIS(x) \
1828 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1829
1830 seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
1831 PIS(VC0);
1832 PIS(VC1);
1833 PIS(VC2);
1834 PIS(VC3);
1835 PIS(WAKEUP);
1836 PIS(RESYNC);
1837 PIS(PLL_LOCK);
1838 PIS(PLL_UNLOCK);
1839 PIS(PLL_RECALL);
1840 PIS(COMPLEXIO_ERR);
1841 PIS(HS_TX_TIMEOUT);
1842 PIS(LP_RX_TIMEOUT);
1843 PIS(TE_TRIGGER);
1844 PIS(ACK_TRIGGER);
1845 PIS(SYNC_LOST);
1846 PIS(LDO_POWER_GOOD);
1847 PIS(TA_TIMEOUT);
1848#undef PIS
1849
1850#define PIS(x) \
1851 seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
1852 stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
1853 stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
1854 stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
1855 stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
1856
1857 seq_printf(s, "-- VC interrupts --\n");
1858 PIS(CS);
1859 PIS(ECC_CORR);
1860 PIS(PACKET_SENT);
1861 PIS(FIFO_TX_OVF);
1862 PIS(FIFO_RX_OVF);
1863 PIS(BTA);
1864 PIS(ECC_NO_CORR);
1865 PIS(FIFO_TX_UDF);
1866 PIS(PP_BUSY_CHANGE);
1867#undef PIS
1868
1869#define PIS(x) \
1870 seq_printf(s, "%-20s %10d\n", #x, \
1871 stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
1872
1873 seq_printf(s, "-- CIO interrupts --\n");
1874 PIS(ERRSYNCESC1);
1875 PIS(ERRSYNCESC2);
1876 PIS(ERRSYNCESC3);
1877 PIS(ERRESC1);
1878 PIS(ERRESC2);
1879 PIS(ERRESC3);
1880 PIS(ERRCONTROL1);
1881 PIS(ERRCONTROL2);
1882 PIS(ERRCONTROL3);
1883 PIS(STATEULPS1);
1884 PIS(STATEULPS2);
1885 PIS(STATEULPS3);
1886 PIS(ERRCONTENTIONLP0_1);
1887 PIS(ERRCONTENTIONLP1_1);
1888 PIS(ERRCONTENTIONLP0_2);
1889 PIS(ERRCONTENTIONLP1_2);
1890 PIS(ERRCONTENTIONLP0_3);
1891 PIS(ERRCONTENTIONLP1_3);
1892 PIS(ULPSACTIVENOT_ALL0);
1893 PIS(ULPSACTIVENOT_ALL1);
1894#undef PIS
1895}
1896
1897static void dsi1_dump_irqs(struct seq_file *s)
1898{
1899 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1900
1901 dsi_dump_dsidev_irqs(dsidev, s);
1902}
1903
1904static void dsi2_dump_irqs(struct seq_file *s)
1905{
1906 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1907
1908 dsi_dump_dsidev_irqs(dsidev, s);
1909}
1910#endif
1911
1912static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
1913 struct seq_file *s)
1914{
1915#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
1916
1917 if (dsi_runtime_get(dsidev))
1918 return;
1919 dsi_enable_scp_clk(dsidev);
1920
1921 DUMPREG(DSI_REVISION);
1922 DUMPREG(DSI_SYSCONFIG);
1923 DUMPREG(DSI_SYSSTATUS);
1924 DUMPREG(DSI_IRQSTATUS);
1925 DUMPREG(DSI_IRQENABLE);
1926 DUMPREG(DSI_CTRL);
1927 DUMPREG(DSI_COMPLEXIO_CFG1);
1928 DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
1929 DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
1930 DUMPREG(DSI_CLK_CTRL);
1931 DUMPREG(DSI_TIMING1);
1932 DUMPREG(DSI_TIMING2);
1933 DUMPREG(DSI_VM_TIMING1);
1934 DUMPREG(DSI_VM_TIMING2);
1935 DUMPREG(DSI_VM_TIMING3);
1936 DUMPREG(DSI_CLK_TIMING);
1937 DUMPREG(DSI_TX_FIFO_VC_SIZE);
1938 DUMPREG(DSI_RX_FIFO_VC_SIZE);
1939 DUMPREG(DSI_COMPLEXIO_CFG2);
1940 DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
1941 DUMPREG(DSI_VM_TIMING4);
1942 DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
1943 DUMPREG(DSI_VM_TIMING5);
1944 DUMPREG(DSI_VM_TIMING6);
1945 DUMPREG(DSI_VM_TIMING7);
1946 DUMPREG(DSI_STOPCLK_TIMING);
1947
1948 DUMPREG(DSI_VC_CTRL(0));
1949 DUMPREG(DSI_VC_TE(0));
1950 DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
1951 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
1952 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
1953 DUMPREG(DSI_VC_IRQSTATUS(0));
1954 DUMPREG(DSI_VC_IRQENABLE(0));
1955
1956 DUMPREG(DSI_VC_CTRL(1));
1957 DUMPREG(DSI_VC_TE(1));
1958 DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
1959 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
1960 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
1961 DUMPREG(DSI_VC_IRQSTATUS(1));
1962 DUMPREG(DSI_VC_IRQENABLE(1));
1963
1964 DUMPREG(DSI_VC_CTRL(2));
1965 DUMPREG(DSI_VC_TE(2));
1966 DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
1967 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
1968 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
1969 DUMPREG(DSI_VC_IRQSTATUS(2));
1970 DUMPREG(DSI_VC_IRQENABLE(2));
1971
1972 DUMPREG(DSI_VC_CTRL(3));
1973 DUMPREG(DSI_VC_TE(3));
1974 DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
1975 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
1976 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
1977 DUMPREG(DSI_VC_IRQSTATUS(3));
1978 DUMPREG(DSI_VC_IRQENABLE(3));
1979
1980 DUMPREG(DSI_DSIPHY_CFG0);
1981 DUMPREG(DSI_DSIPHY_CFG1);
1982 DUMPREG(DSI_DSIPHY_CFG2);
1983 DUMPREG(DSI_DSIPHY_CFG5);
1984
1985 DUMPREG(DSI_PLL_CONTROL);
1986 DUMPREG(DSI_PLL_STATUS);
1987 DUMPREG(DSI_PLL_GO);
1988 DUMPREG(DSI_PLL_CONFIGURATION1);
1989 DUMPREG(DSI_PLL_CONFIGURATION2);
1990
1991 dsi_disable_scp_clk(dsidev);
1992 dsi_runtime_put(dsidev);
1993#undef DUMPREG
1994}
1995
1996static void dsi1_dump_regs(struct seq_file *s)
1997{
1998 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1999
2000 dsi_dump_dsidev_regs(dsidev, s);
2001}
2002
2003static void dsi2_dump_regs(struct seq_file *s)
2004{
2005 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
2006
2007 dsi_dump_dsidev_regs(dsidev, s);
2008}
2009
2010enum dsi_cio_power_state {
2011 DSI_COMPLEXIO_POWER_OFF = 0x0,
2012 DSI_COMPLEXIO_POWER_ON = 0x1,
2013 DSI_COMPLEXIO_POWER_ULPS = 0x2,
2014};
2015
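/*
 * Request a new complex I/O power state and busy-wait (up to ~1ms) until
 * PWR_STATUS in DSI_COMPLEXIO_CFG1 reflects the requested state.
 */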
2016static int dsi_cio_power(struct platform_device *dsidev,
2017 enum dsi_cio_power_state state)
2018{
2019 int t = 0;
2020
2021 /* PWR_CMD */
2022 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
2023
2024 /* PWR_STATUS */
2025 while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
2026 26, 25) != state) {
2027 if (++t > 1000) {
2028			DSSERR("failed to set complexio power state to %d\n",
2029					state);
2030 return -ENODEV;
2031 }
2032 udelay(1);
2033 }
2034
2035 return 0;
2036}
2037
2038static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
2039{
2040 int val;
2041
2042 /* line buffer on OMAP3 is 1024 x 24bits */
2043 /* XXX: for some reason using full buffer size causes
2044 * considerable TX slowdown with update sizes that fill the
2045 * whole buffer */
2046 if (!dss_has_feature(FEAT_DSI_GNQ))
2047 return 1023 * 3;
2048
2049 val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
2050
2051 switch (val) {
2052 case 1:
2053 return 512 * 3; /* 512x24 bits */
2054 case 2:
2055 return 682 * 3; /* 682x24 bits */
2056 case 3:
2057 return 853 * 3; /* 853x24 bits */
2058 case 4:
2059 return 1024 * 3; /* 1024x24 bits */
2060 case 5:
2061 return 1194 * 3; /* 1194x24 bits */
2062 case 6:
2063 return 1365 * 3; /* 1365x24 bits */
2064 case 7:
2065 return 1920 * 3; /* 1920x24 bits */
2066 default:
2067 BUG();
2068 return 0;
2069 }
2070}
2071
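/*
 * Program the lane mapping in DSI_COMPLEXIO_CFG1: for each logical
 * function (clock, data1..data4) find the physical lane assigned to it
 * and write its 1-based position and polarity into the corresponding
 * 4-bit field. Fields for unused lanes are cleared.
 */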
2072static int dsi_set_lane_config(struct platform_device *dsidev)
2073{
2074 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2075 static const u8 offsets[] = { 0, 4, 8, 12, 16 };
2076 static const enum dsi_lane_function functions[] = {
2077 DSI_LANE_CLK,
2078 DSI_LANE_DATA1,
2079 DSI_LANE_DATA2,
2080 DSI_LANE_DATA3,
2081 DSI_LANE_DATA4,
2082 };
2083 u32 r;
2084 int i;
2085
2086 r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
2087
2088 for (i = 0; i < dsi->num_lanes_used; ++i) {
2089 unsigned offset = offsets[i];
2090 unsigned polarity, lane_number;
2091 unsigned t;
2092
2093 for (t = 0; t < dsi->num_lanes_supported; ++t)
2094 if (dsi->lanes[t].function == functions[i])
2095 break;
2096
2097 if (t == dsi->num_lanes_supported)
2098 return -EINVAL;
2099
2100 lane_number = t;
2101 polarity = dsi->lanes[t].polarity;
2102
2103 r = FLD_MOD(r, lane_number + 1, offset + 2, offset);
2104 r = FLD_MOD(r, polarity, offset + 3, offset + 3);
2105 }
2106
2107 /* clear the unused lanes */
2108 for (; i < dsi->num_lanes_supported; ++i) {
2109 unsigned offset = offsets[i];
2110
2111 r = FLD_MOD(r, 0, offset + 2, offset);
2112 r = FLD_MOD(r, 0, offset + 3, offset + 3);
2113 }
2114
2115 dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
2116
2117 return 0;
2118}
2119
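/*
 * Helpers for converting between nanoseconds and DDR clock ticks.
 * Illustrative example (actual rates depend on the PLL configuration):
 * with clkin4ddr = 432 MHz the DDR clock is 108 MHz, so one tick is
 * ~9.26 ns and ns2ddr(70) rounds up to 8 ticks.
 */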
2120static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
2121{
2122 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2123
2124 /* convert time in ns to ddr ticks, rounding up */
2125 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2126 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
2127}
2128
2129static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
2130{
2131 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2132
2133 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2134 return ddr * 1000 * 1000 / (ddr_clk / 1000);
2135}
2136
2137static void dsi_cio_timings(struct platform_device *dsidev)
2138{
2139 u32 r;
2140 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
2141 u32 tlpx_half, tclk_trail, tclk_zero;
2142 u32 tclk_prepare;
2143
2144 /* calculate timings */
2145
2146 /* 1 * DDR_CLK = 2 * UI */
2147
2148 /* min 40ns + 4*UI max 85ns + 6*UI */
2149 ths_prepare = ns2ddr(dsidev, 70) + 2;
2150
2151 /* min 145ns + 10*UI */
2152 ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
2153
2154 /* min max(8*UI, 60ns+4*UI) */
2155 ths_trail = ns2ddr(dsidev, 60) + 5;
2156
2157 /* min 100ns */
2158 ths_exit = ns2ddr(dsidev, 145);
2159
2160	/* tlpx min 50ns */
2161 tlpx_half = ns2ddr(dsidev, 25);
2162
2163 /* min 60ns */
2164 tclk_trail = ns2ddr(dsidev, 60) + 2;
2165
2166 /* min 38ns, max 95ns */
2167 tclk_prepare = ns2ddr(dsidev, 65);
2168
2169	/* tclk-prepare + tclk-zero: min 300ns */
2170 tclk_zero = ns2ddr(dsidev, 260);
2171
2172 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
2173 ths_prepare, ddr2ns(dsidev, ths_prepare),
2174 ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
2175 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
2176 ths_trail, ddr2ns(dsidev, ths_trail),
2177 ths_exit, ddr2ns(dsidev, ths_exit));
2178
2179 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
2180 "tclk_zero %u (%uns)\n",
2181 tlpx_half, ddr2ns(dsidev, tlpx_half),
2182 tclk_trail, ddr2ns(dsidev, tclk_trail),
2183 tclk_zero, ddr2ns(dsidev, tclk_zero));
2184 DSSDBG("tclk_prepare %u (%uns)\n",
2185 tclk_prepare, ddr2ns(dsidev, tclk_prepare));
2186
2187 /* program timings */
2188
2189 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
2190 r = FLD_MOD(r, ths_prepare, 31, 24);
2191 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
2192 r = FLD_MOD(r, ths_trail, 15, 8);
2193 r = FLD_MOD(r, ths_exit, 7, 0);
2194 dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
2195
2196 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
2197 r = FLD_MOD(r, tlpx_half, 20, 16);
2198 r = FLD_MOD(r, tclk_trail, 15, 8);
2199 r = FLD_MOD(r, tclk_zero, 7, 0);
2200
2201 if (dss_has_feature(FEAT_DSI_PHY_DCC)) {
2202 r = FLD_MOD(r, 0, 21, 21); /* DCCEN = disable */
2203 r = FLD_MOD(r, 1, 22, 22); /* CLKINP_DIVBY2EN = enable */
2204 r = FLD_MOD(r, 1, 23, 23); /* CLKINP_SEL = enable */
2205 }
2206
2207 dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
2208
2209 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
2210 r = FLD_MOD(r, tclk_prepare, 7, 0);
2211 dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
2212}
2213
2214/* lane masks have lane 0 at the lsb; mask_p selects positive lines, mask_n negative */
2215static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
2216 unsigned mask_p, unsigned mask_n)
2217{
2218 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2219 int i;
2220 u32 l;
2221 u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
2222
2223 l = 0;
2224
2225 for (i = 0; i < dsi->num_lanes_supported; ++i) {
2226 unsigned p = dsi->lanes[i].polarity;
2227
2228 if (mask_p & (1 << i))
2229 l |= 1 << (i * 2 + (p ? 0 : 1));
2230
2231 if (mask_n & (1 << i))
2232 l |= 1 << (i * 2 + (p ? 1 : 0));
2233 }
2234
2235 /*
2236 * Bits in REGLPTXSCPDAT4TO0DXDY:
2237 * 17: DY0 18: DX0
2238 * 19: DY1 20: DX1
2239 * 21: DY2 22: DX2
2240 * 23: DY3 24: DX3
2241 * 25: DY4 26: DX4
2242 */
2243
2244 /* Set the lane override configuration */
2245
2246 /* REGLPTXSCPDAT4TO0DXDY */
2247 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
2248
2249 /* Enable lane override */
2250
2251 /* ENLPTXSCPDAT */
2252 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
2253}
2254
2255static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
2256{
2257 /* Disable lane override */
2258 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
2259 /* Reset the lane override configuration */
2260 /* REGLPTXSCPDAT4TO0DXDY */
2261 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
2262}
2263
2264static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
2265{
2266 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2267 int t, i;
2268 bool in_use[DSI_MAX_NR_LANES];
2269 static const u8 offsets_old[] = { 28, 27, 26 };
2270 static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
2271 const u8 *offsets;
2272
2273 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC))
2274 offsets = offsets_old;
2275 else
2276 offsets = offsets_new;
2277
2278 for (i = 0; i < dsi->num_lanes_supported; ++i)
2279 in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED;
2280
2281 t = 100000;
2282 while (true) {
2283 u32 l;
2284 int ok;
2285
2286 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2287
2288 ok = 0;
2289 for (i = 0; i < dsi->num_lanes_supported; ++i) {
2290 if (!in_use[i] || (l & (1 << offsets[i])))
2291 ok++;
2292 }
2293
2294 if (ok == dsi->num_lanes_supported)
2295 break;
2296
2297 if (--t == 0) {
2298 for (i = 0; i < dsi->num_lanes_supported; ++i) {
2299 if (!in_use[i] || (l & (1 << offsets[i])))
2300 continue;
2301
2302				DSSERR("CIO TXCLKESC%d domain not coming out of reset\n",
2303						i);
2304 }
2305 return -EIO;
2306 }
2307 }
2308
2309 return 0;
2310}
2311
2312/* return bitmask of enabled lanes, lane0 being the lsb */
2313static unsigned dsi_get_lane_mask(struct platform_device *dsidev)
2314{
2315 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2316 unsigned mask = 0;
2317 int i;
2318
2319 for (i = 0; i < dsi->num_lanes_supported; ++i) {
2320 if (dsi->lanes[i].function != DSI_LANE_UNUSED)
2321 mask |= 1 << i;
2322 }
2323
2324 return mask;
2325}
2326
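/*
 * Bring up the DSI complex I/O: enable the pads and the SCP clock,
 * program the lane configuration, perform a manual ULPS exit if needed,
 * switch the CIO power state on and program the DSIPHY timings.
 */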
2327static int dsi_cio_init(struct platform_device *dsidev)
2328{
2329 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2330 int r;
2331 u32 l;
2332
2333	DSSDBG("DSI CIO init starts\n");
2334
2335 r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2336 if (r)
2337 return r;
2338
2339 dsi_enable_scp_clk(dsidev);
2340
2341 /* A dummy read using the SCP interface to any DSIPHY register is
2342 * required after DSIPHY reset to complete the reset of the DSI complex
2343 * I/O. */
2344 dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2345
2346 if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
2347 DSSERR("CIO SCP Clock domain not coming out of reset.\n");
2348 r = -EIO;
2349 goto err_scp_clk_dom;
2350 }
2351
2352 r = dsi_set_lane_config(dsidev);
2353 if (r)
2354 goto err_scp_clk_dom;
2355
2356 /* set TX STOP MODE timer to maximum for this operation */
2357 l = dsi_read_reg(dsidev, DSI_TIMING1);
2358 l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2359 l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
2360 l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
2361 l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
2362 dsi_write_reg(dsidev, DSI_TIMING1, l);
2363
2364 if (dsi->ulps_enabled) {
2365 unsigned mask_p;
2366 int i;
2367
2368 DSSDBG("manual ulps exit\n");
2369
2370 /* ULPS is exited by Mark-1 state for 1ms, followed by
2371 * stop state. DSS HW cannot do this via the normal
2372 * ULPS exit sequence, as after reset the DSS HW thinks
2373 * that we are not in ULPS mode, and refuses to send the
2374 * sequence. So we need to send the ULPS exit sequence
2375 * manually by setting positive lines high and negative lines
2376 * low for 1ms.
2377 */
2378
2379 mask_p = 0;
2380
2381 for (i = 0; i < dsi->num_lanes_supported; ++i) {
2382 if (dsi->lanes[i].function == DSI_LANE_UNUSED)
2383 continue;
2384 mask_p |= 1 << i;
2385 }
2386
2387 dsi_cio_enable_lane_override(dsidev, mask_p, 0);
2388 }
2389
2390 r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
2391 if (r)
2392 goto err_cio_pwr;
2393
2394 if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
2395 DSSERR("CIO PWR clock domain not coming out of reset.\n");
2396 r = -ENODEV;
2397 goto err_cio_pwr_dom;
2398 }
2399
2400 dsi_if_enable(dsidev, true);
2401 dsi_if_enable(dsidev, false);
2402 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
2403
2404 r = dsi_cio_wait_tx_clk_esc_reset(dsidev);
2405 if (r)
2406 goto err_tx_clk_esc_rst;
2407
2408 if (dsi->ulps_enabled) {
2409 /* Keep Mark-1 state for 1ms (as per DSI spec) */
2410 ktime_t wait = ns_to_ktime(1000 * 1000);
2411 set_current_state(TASK_UNINTERRUPTIBLE);
2412 schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
2413
2414		/* Disable the override. The lanes should settle to the stop
2415		 * (LP-11) state, driven by the HW */
2416 dsi_cio_disable_lane_override(dsidev);
2417 }
2418
2419 /* FORCE_TX_STOP_MODE_IO */
2420 REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
2421
2422 dsi_cio_timings(dsidev);
2423
2424 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
2425 /* DDR_CLK_ALWAYS_ON */
2426 REG_FLD_MOD(dsidev, DSI_CLK_CTRL,
2427 dsi->vm_timings.ddr_clk_always_on, 13, 13);
2428 }
2429
2430 dsi->ulps_enabled = false;
2431
2432 DSSDBG("CIO init done\n");
2433
2434 return 0;
2435
2436err_tx_clk_esc_rst:
2437 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
2438err_cio_pwr_dom:
2439 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2440err_cio_pwr:
2441 if (dsi->ulps_enabled)
2442 dsi_cio_disable_lane_override(dsidev);
2443err_scp_clk_dom:
2444 dsi_disable_scp_clk(dsidev);
2445 dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2446 return r;
2447}
2448
2449static void dsi_cio_uninit(struct platform_device *dsidev)
2450{
2451 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2452
2453 /* DDR_CLK_ALWAYS_ON */
2454 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
2455
2456 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2457 dsi_disable_scp_clk(dsidev);
2458 dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2459}
2460
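/*
 * Partition the shared TX FIFO between the four virtual channels. Sizes
 * are given in FIFO allocation units; the four sizes together must not
 * exceed the 4 units available. Each VC's start address ("add") and size
 * are programmed into DSI_TX_FIFO_VC_SIZE.
 */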
2461static void dsi_config_tx_fifo(struct platform_device *dsidev,
2462 enum fifo_size size1, enum fifo_size size2,
2463 enum fifo_size size3, enum fifo_size size4)
2464{
2465 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2466 u32 r = 0;
2467 int add = 0;
2468 int i;
2469
2470 dsi->vc[0].tx_fifo_size = size1;
2471 dsi->vc[1].tx_fifo_size = size2;
2472 dsi->vc[2].tx_fifo_size = size3;
2473 dsi->vc[3].tx_fifo_size = size4;
2474
2475 for (i = 0; i < 4; i++) {
2476 u8 v;
2477 int size = dsi->vc[i].tx_fifo_size;
2478
2479 if (add + size > 4) {
2480 DSSERR("Illegal FIFO configuration\n");
2481 BUG();
2482 return;
2483 }
2484
2485 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2486 r |= v << (8 * i);
2487 /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
2488 add += size;
2489 }
2490
2491 dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
2492}
2493
2494static void dsi_config_rx_fifo(struct platform_device *dsidev,
2495 enum fifo_size size1, enum fifo_size size2,
2496 enum fifo_size size3, enum fifo_size size4)
2497{
2498 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2499 u32 r = 0;
2500 int add = 0;
2501 int i;
2502
2503 dsi->vc[0].rx_fifo_size = size1;
2504 dsi->vc[1].rx_fifo_size = size2;
2505 dsi->vc[2].rx_fifo_size = size3;
2506 dsi->vc[3].rx_fifo_size = size4;
2507
2508 for (i = 0; i < 4; i++) {
2509 u8 v;
2510 int size = dsi->vc[i].rx_fifo_size;
2511
2512 if (add + size > 4) {
2513 DSSERR("Illegal FIFO configuration\n");
2514 BUG();
2515 return;
2516 }
2517
2518 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2519 r |= v << (8 * i);
2520 /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
2521 add += size;
2522 }
2523
2524 dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
2525}
2526
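/*
 * Force the lanes into the stop state and wait for the HW to acknowledge
 * by clearing FORCE_TX_STOP_MODE_IO.
 */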
2527static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
2528{
2529 u32 r;
2530
2531 r = dsi_read_reg(dsidev, DSI_TIMING1);
2532 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2533 dsi_write_reg(dsidev, DSI_TIMING1, r);
2534
2535 if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
2536		DSSERR("FORCE_TX_STOP_MODE_IO bit not clearing\n");
2537 return -EIO;
2538 }
2539
2540 return 0;
2541}
2542
2543static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
2544{
2545 return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
2546}
2547
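/*
 * PACKET_SENT ISR for video port transfers: completes the waiter once the
 * TE bit being watched (TE_EN or TE_START, depending on whether TE is in
 * use) has cleared, i.e. the frame transfer has finished.
 */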
2548static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2549{
2550 struct dsi_packet_sent_handler_data *vp_data =
2551 (struct dsi_packet_sent_handler_data *) data;
2552 struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
2553 const int channel = dsi->update_channel;
2554 u8 bit = dsi->te_enabled ? 30 : 31;
2555
2556 if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
2557 complete(vp_data->completion);
2558}
2559
2560static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2561{
2562 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2563 DECLARE_COMPLETION_ONSTACK(completion);
2564 struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion };
2565 int r = 0;
2566 u8 bit;
2567
2568 bit = dsi->te_enabled ? 30 : 31;
2569
2570 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2571 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2572 if (r)
2573 goto err0;
2574
2575 /* Wait for completion only if TE_EN/TE_START is still set */
2576 if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
2577 if (wait_for_completion_timeout(&completion,
2578 msecs_to_jiffies(10)) == 0) {
2579 DSSERR("Failed to complete previous frame transfer\n");
2580 r = -EIO;
2581 goto err1;
2582 }
2583 }
2584
2585 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2586 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2587
2588 return 0;
2589err1:
2590 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2591 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2592err0:
2593 return r;
2594}
2595
2596static void dsi_packet_sent_handler_l4(void *data, u32 mask)
2597{
2598 struct dsi_packet_sent_handler_data *l4_data =
2599 (struct dsi_packet_sent_handler_data *) data;
2600 struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
2601 const int channel = dsi->update_channel;
2602
2603 if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
2604 complete(l4_data->completion);
2605}
2606
2607static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
2608{
2609 DECLARE_COMPLETION_ONSTACK(completion);
2610 struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion };
2611 int r = 0;
2612
2613 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2614 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2615 if (r)
2616 goto err0;
2617
2618 /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
2619 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
2620 if (wait_for_completion_timeout(&completion,
2621 msecs_to_jiffies(10)) == 0) {
2622 DSSERR("Failed to complete previous l4 transfer\n");
2623 r = -EIO;
2624 goto err1;
2625 }
2626 }
2627
2628 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2629 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2630
2631 return 0;
2632err1:
2633 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2634 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2635err0:
2636 return r;
2637}
2638
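/*
 * Wait until any transfer in progress on the VC has completed, using the
 * handler matching the VC's current source (video port or L4).
 */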
2639static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2640{
2641 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2642
2643 WARN_ON(!dsi_bus_is_locked(dsidev));
2644
2645 WARN_ON(in_interrupt());
2646
2647 if (!dsi_vc_is_enabled(dsidev, channel))
2648 return 0;
2649
2650 switch (dsi->vc[channel].source) {
2651 case DSI_VC_SOURCE_VP:
2652 return dsi_sync_vc_vp(dsidev, channel);
2653 case DSI_VC_SOURCE_L4:
2654 return dsi_sync_vc_l4(dsidev, channel);
2655 default:
2656 BUG();
2657 return -EINVAL;
2658 }
2659}
2660
2661static int dsi_vc_enable(struct platform_device *dsidev, int channel,
2662 bool enable)
2663{
2664 DSSDBG("dsi_vc_enable channel %d, enable %d\n",
2665 channel, enable);
2666
2667 enable = enable ? 1 : 0;
2668
2669 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
2670
2671 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
2672 0, enable) != enable) {
2673 DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
2674 return -EIO;
2675 }
2676
2677 return 0;
2678}
2679
2680static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
2681{
2682 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2683 u32 r;
2684
2685	DSSDBG("Initial config of virtual channel %d\n", channel);
2686
2687 r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2688
2689 if (FLD_GET(r, 15, 15)) /* VC_BUSY */
2690 DSSERR("VC(%d) busy when trying to configure it!\n",
2691 channel);
2692
2693 r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
2694 r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
2695 r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
2696 r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
2697 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
2698 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
2699 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
2700 if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
2701 r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
2702
2703 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
2704 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
2705
2706 dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
2707
2708 dsi->vc[channel].source = DSI_VC_SOURCE_L4;
2709}
2710
2711static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
2712 enum dsi_vc_source source)
2713{
2714 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2715
2716 if (dsi->vc[channel].source == source)
2717 return 0;
2718
2719	DSSDBG("Source config of virtual channel %d\n", channel);
2720
2721 dsi_sync_vc(dsidev, channel);
2722
2723 dsi_vc_enable(dsidev, channel, 0);
2724
2725 /* VC_BUSY */
2726 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2727		DSSERR("vc(%d) busy when trying to change source\n", channel);
2728 return -EIO;
2729 }
2730
2731 /* SOURCE, 0 = L4, 1 = video port */
2732 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
2733
2734 /* DCS_CMD_ENABLE */
2735 if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
2736 bool enable = source == DSI_VC_SOURCE_VP;
2737 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
2738 }
2739
2740 dsi_vc_enable(dsidev, channel, 1);
2741
2742 dsi->vc[channel].source = source;
2743
2744 return 0;
2745}
2746
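/*
 * Select LP or HS mode for command transmission on the VC (MODE_SPEED).
 * The VC and the interface are briefly disabled around the change.
 */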
2747static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2748 bool enable)
2749{
2750 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2751 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2752
2753 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
2754
2755 WARN_ON(!dsi_bus_is_locked(dsidev));
2756
2757 dsi_vc_enable(dsidev, channel, 0);
2758 dsi_if_enable(dsidev, 0);
2759
2760 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
2761
2762 dsi_vc_enable(dsidev, channel, 1);
2763 dsi_if_enable(dsidev, 1);
2764
2765 dsi_force_tx_stop_mode_io(dsidev);
2766
2767 /* start the DDR clock by sending a NULL packet */
2768 if (dsi->vm_timings.ddr_clk_always_on && enable)
2769 dsi_vc_send_null(dssdev, channel);
2770}
2771
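/*
 * Drain the payload of a long response from the RX FIFO, logging each
 * 32-bit word as it is popped.
 */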
2772static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
2773{
2774 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2775 u32 val;
2776 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2777 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
2778 (val >> 0) & 0xff,
2779 (val >> 8) & 0xff,
2780 (val >> 16) & 0xff,
2781 (val >> 24) & 0xff);
2782 }
2783}
2784
2785static void dsi_show_rx_ack_with_err(u16 err)
2786{
2787 DSSERR("\tACK with ERROR (%#x):\n", err);
2788 if (err & (1 << 0))
2789 DSSERR("\t\tSoT Error\n");
2790 if (err & (1 << 1))
2791 DSSERR("\t\tSoT Sync Error\n");
2792 if (err & (1 << 2))
2793 DSSERR("\t\tEoT Sync Error\n");
2794 if (err & (1 << 3))
2795 DSSERR("\t\tEscape Mode Entry Command Error\n");
2796 if (err & (1 << 4))
2797 DSSERR("\t\tLP Transmit Sync Error\n");
2798 if (err & (1 << 5))
2799 DSSERR("\t\tHS Receive Timeout Error\n");
2800 if (err & (1 << 6))
2801 DSSERR("\t\tFalse Control Error\n");
2802 if (err & (1 << 7))
2803 DSSERR("\t\t(reserved7)\n");
2804 if (err & (1 << 8))
2805 DSSERR("\t\tECC Error, single-bit (corrected)\n");
2806 if (err & (1 << 9))
2807 DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
2808 if (err & (1 << 10))
2809 DSSERR("\t\tChecksum Error\n");
2810 if (err & (1 << 11))
2811 DSSERR("\t\tData type not recognized\n");
2812 if (err & (1 << 12))
2813 DSSERR("\t\tInvalid VC ID\n");
2814 if (err & (1 << 13))
2815 DSSERR("\t\tInvalid Transmission Length\n");
2816 if (err & (1 << 14))
2817 DSSERR("\t\t(reserved14)\n");
2818 if (err & (1 << 15))
2819 DSSERR("\t\tDSI Protocol Violation\n");
2820}
2821
2822static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
2823 int channel)
2824{
2825 /* RX_FIFO_NOT_EMPTY */
2826 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2827 u32 val;
2828 u8 dt;
2829 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2830 DSSERR("\trawval %#08x\n", val);
2831 dt = FLD_GET(val, 5, 0);
2832 if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
2833 u16 err = FLD_GET(val, 23, 8);
2834 dsi_show_rx_ack_with_err(err);
2835 } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
2836 DSSERR("\tDCS short response, 1 byte: %#x\n",
2837 FLD_GET(val, 23, 8));
2838 } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
2839 DSSERR("\tDCS short response, 2 byte: %#x\n",
2840 FLD_GET(val, 23, 8));
2841 } else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
2842 DSSERR("\tDCS long response, len %d\n",
2843 FLD_GET(val, 23, 8));
2844 dsi_vc_flush_long_data(dsidev, channel);
2845 } else {
2846 DSSERR("\tunknown datatype 0x%02x\n", dt);
2847 }
2848 }
2849 return 0;
2850}
2851
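/*
 * Trigger a bus turnaround (BTA) on the VC so that the peripheral may
 * transmit. Stale RX data, if any, is dumped first.
 */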
2852static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
2853{
2854 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2855
2856 if (dsi->debug_write || dsi->debug_read)
2857 DSSDBG("dsi_vc_send_bta %d\n", channel);
2858
2859 WARN_ON(!dsi_bus_is_locked(dsidev));
2860
2861 /* RX_FIFO_NOT_EMPTY */
2862 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2863 DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
2864 dsi_vc_flush_receive_data(dsidev, channel);
2865 }
2866
2867 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
2868
2869 /* flush posted write */
2870 dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2871
2872 return 0;
2873}
2874
2875static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
2876{
2877 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2878 DECLARE_COMPLETION_ONSTACK(completion);
2879 int r = 0;
2880 u32 err;
2881
2882 r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
2883 &completion, DSI_VC_IRQ_BTA);
2884 if (r)
2885 goto err0;
2886
2887 r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
2888 DSI_IRQ_ERROR_MASK);
2889 if (r)
2890 goto err1;
2891
2892 r = dsi_vc_send_bta(dsidev, channel);
2893 if (r)
2894 goto err2;
2895
2896 if (wait_for_completion_timeout(&completion,
2897 msecs_to_jiffies(500)) == 0) {
2898 DSSERR("Failed to receive BTA\n");
2899 r = -EIO;
2900 goto err2;
2901 }
2902
2903 err = dsi_get_errors(dsidev);
2904 if (err) {
2905 DSSERR("Error while sending BTA: %x\n", err);
2906 r = -EIO;
2907 goto err2;
2908 }
2909err2:
2910 dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
2911 DSI_IRQ_ERROR_MASK);
2912err1:
2913 dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
2914 &completion, DSI_VC_IRQ_BTA);
2915err0:
2916 return r;
2917}
2918
2919static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
2920 int channel, u8 data_type, u16 len, u8 ecc)
2921{
2922 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2923 u32 val;
2924 u8 data_id;
2925
2926 WARN_ON(!dsi_bus_is_locked(dsidev));
2927
2928 data_id = data_type | dsi->vc[channel].vc_id << 6;
2929
2930 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
2931 FLD_VAL(ecc, 31, 24);
2932
2933 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
2934}
2935
2936static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
2937 int channel, u8 b1, u8 b2, u8 b3, u8 b4)
2938{
2939 u32 val;
2940
2941 val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
2942
2943/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
2944 b1, b2, b3, b4, val); */
2945
2946 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
2947}
2948
2949static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2950 u8 data_type, u8 *data, u16 len, u8 ecc)
2951{
2953 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2954 int i;
2955 u8 *p;
2956 int r = 0;
2957 u8 b1, b2, b3, b4;
2958
2959 if (dsi->debug_write)
2960 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
2961
2962 /* len + header */
2963 if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) {
2964 DSSERR("unable to send long packet: packet too long.\n");
2965 return -EINVAL;
2966 }
2967
2968 dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
2969
2970 dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
2971
2972 p = data;
2973 for (i = 0; i < len >> 2; i++) {
2974 if (dsi->debug_write)
2975 DSSDBG("\tsending full packet %d\n", i);
2976
2977 b1 = *p++;
2978 b2 = *p++;
2979 b3 = *p++;
2980 b4 = *p++;
2981
2982 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
2983 }
2984
2985 i = len % 4;
2986 if (i) {
2987 b1 = 0; b2 = 0; b3 = 0;
2988
2989 if (dsi->debug_write)
2990 DSSDBG("\tsending remainder bytes %d\n", i);
2991
2992 switch (i) {
2993 case 3:
2994 b1 = *p++;
2995 b2 = *p++;
2996 b3 = *p++;
2997 break;
2998 case 2:
2999 b1 = *p++;
3000 b2 = *p++;
3001 break;
3002 case 1:
3003 b1 = *p++;
3004 break;
3005 }
3006
3007 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
3008 }
3009
3010 return r;
3011}
3012
3013static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
3014 u8 data_type, u16 data, u8 ecc)
3015{
3016 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3017 u32 r;
3018 u8 data_id;
3019
3020 WARN_ON(!dsi_bus_is_locked(dsidev));
3021
3022 if (dsi->debug_write)
3023 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
3024 channel,
3025 data_type, data & 0xff, (data >> 8) & 0xff);
3026
3027 dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
3028
3029 if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
3030		DSSERR("TX FIFO full, aborting transfer\n");
3031 return -EINVAL;
3032 }
3033
3034 data_id = data_type | dsi->vc[channel].vc_id << 6;
3035
3036 r = (data_id << 0) | (data << 8) | (ecc << 24);
3037
3038 dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
3039
3040 return 0;
3041}
3042
3043static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
3044{
3045 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3046
3047 return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL,
3048 0, 0);
3049}
3050
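/*
 * Pick the packet type from the payload length: 0, 1 or 2 bytes go out
 * as short packets, anything longer as a long write.
 */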
3051static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
3052 int channel, u8 *data, int len, enum dss_dsi_content_type type)
3053{
3054 int r;
3055
3056 if (len == 0) {
3057 BUG_ON(type == DSS_DSI_CONTENT_DCS);
3058 r = dsi_vc_send_short(dsidev, channel,
3059 MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
3060 } else if (len == 1) {
3061 r = dsi_vc_send_short(dsidev, channel,
3062 type == DSS_DSI_CONTENT_GENERIC ?
3063 MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
3064 MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
3065 } else if (len == 2) {
3066 r = dsi_vc_send_short(dsidev, channel,
3067 type == DSS_DSI_CONTENT_GENERIC ?
3068 MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
3069 MIPI_DSI_DCS_SHORT_WRITE_PARAM,
3070 data[0] | (data[1] << 8), 0);
3071 } else {
3072 r = dsi_vc_send_long(dsidev, channel,
3073 type == DSS_DSI_CONTENT_GENERIC ?
3074 MIPI_DSI_GENERIC_LONG_WRITE :
3075 MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
3076 }
3077
3078 return r;
3079}
3080
3081static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
3082 u8 *data, int len)
3083{
3084 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3085
3086 return dsi_vc_write_nosync_common(dsidev, channel, data, len,
3087 DSS_DSI_CONTENT_DCS);
3088}
3089
3090static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
3091 u8 *data, int len)
3092{
3093 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3094
3095 return dsi_vc_write_nosync_common(dsidev, channel, data, len,
3096 DSS_DSI_CONTENT_GENERIC);
3097}
3098
3099static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
3100 u8 *data, int len, enum dss_dsi_content_type type)
3101{
3102 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3103 int r;
3104
3105 r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type);
3106 if (r)
3107 goto err;
3108
3109 r = dsi_vc_send_bta_sync(dssdev, channel);
3110 if (r)
3111 goto err;
3112
3113 /* RX_FIFO_NOT_EMPTY */
3114 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
3115 DSSERR("rx fifo not empty after write, dumping data:\n");
3116 dsi_vc_flush_receive_data(dsidev, channel);
3117 r = -EIO;
3118 goto err;
3119 }
3120
3121 return 0;
3122err:
3123 DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
3124 channel, data[0], len);
3125 return r;
3126}
3127
3128static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
3129 int len)
3130{
3131 return dsi_vc_write_common(dssdev, channel, data, len,
3132 DSS_DSI_CONTENT_DCS);
3133}
3134
3135static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
3136 int len)
3137{
3138 return dsi_vc_write_common(dssdev, channel, data, len,
3139 DSS_DSI_CONTENT_GENERIC);
3140}
3141
3142static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
3143 int channel, u8 dcs_cmd)
3144{
3145 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3146 int r;
3147
3148 if (dsi->debug_read)
3149 DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
3150 channel, dcs_cmd);
3151
3152 r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
3153 if (r) {
3154		DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x) failed\n",
3155				channel, dcs_cmd);
3156 return r;
3157 }
3158
3159 return 0;
3160}
3161
3162static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
3163 int channel, u8 *reqdata, int reqlen)
3164{
3165 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3166 u16 data;
3167 u8 data_type;
3168 int r;
3169
3170 if (dsi->debug_read)
3171 DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
3172 channel, reqlen);
3173
3174 if (reqlen == 0) {
3175 data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
3176 data = 0;
3177 } else if (reqlen == 1) {
3178 data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
3179 data = reqdata[0];
3180 } else if (reqlen == 2) {
3181 data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
3182 data = reqdata[0] | (reqdata[1] << 8);
3183 } else {
3184 BUG();
3185 return -EINVAL;
3186 }
3187
3188 r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
3189 if (r) {
3190		DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d) failed\n",
3191				channel, reqlen);
3192 return r;
3193 }
3194
3195 return 0;
3196}
3197
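/*
 * Parse one read response from the RX FIFO into buf. Handles ACK with
 * error reports and 1-byte, 2-byte and long responses; the two checksum
 * bytes ending a long response are consumed but discarded. Returns the
 * number of bytes stored, or a negative error.
 */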
3198static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
3199 u8 *buf, int buflen, enum dss_dsi_content_type type)
3200{
3201 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3202 u32 val;
3203 u8 dt;
3204 int r;
3205
3206 /* RX_FIFO_NOT_EMPTY */
3207 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
3208 DSSERR("RX fifo empty when trying to read.\n");
3209 r = -EIO;
3210 goto err;
3211 }
3212
3213 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
3214 if (dsi->debug_read)
3215 DSSDBG("\theader: %08x\n", val);
3216 dt = FLD_GET(val, 5, 0);
3217 if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
3218 u16 err = FLD_GET(val, 23, 8);
3219 dsi_show_rx_ack_with_err(err);
3220 r = -EIO;
3221 goto err;
3222
3223 } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
3224 MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE :
3225 MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE)) {
3226 u8 data = FLD_GET(val, 15, 8);
3227 if (dsi->debug_read)
3228 DSSDBG("\t%s short response, 1 byte: %02x\n",
3229 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3230 "DCS", data);
3231
3232 if (buflen < 1) {
3233 r = -EIO;
3234 goto err;
3235 }
3236
3237 buf[0] = data;
3238
3239 return 1;
3240 } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
3241 MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE :
3242 MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE)) {
3243 u16 data = FLD_GET(val, 23, 8);
3244 if (dsi->debug_read)
3245 DSSDBG("\t%s short response, 2 byte: %04x\n",
3246 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3247 "DCS", data);
3248
3249 if (buflen < 2) {
3250 r = -EIO;
3251 goto err;
3252 }
3253
3254 buf[0] = data & 0xff;
3255 buf[1] = (data >> 8) & 0xff;
3256
3257 return 2;
3258 } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
3259 MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE :
3260 MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)) {
3261 int w;
3262 int len = FLD_GET(val, 23, 8);
3263 if (dsi->debug_read)
3264 DSSDBG("\t%s long response, len %d\n",
3265 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3266 "DCS", len);
3267
3268 if (len > buflen) {
3269 r = -EIO;
3270 goto err;
3271 }
3272
3273 /* two byte checksum ends the packet, not included in len */
3274 for (w = 0; w < len + 2;) {
3275 int b;
3276 val = dsi_read_reg(dsidev,
3277 DSI_VC_SHORT_PACKET_HEADER(channel));
3278 if (dsi->debug_read)
3279 DSSDBG("\t\t%02x %02x %02x %02x\n",
3280 (val >> 0) & 0xff,
3281 (val >> 8) & 0xff,
3282 (val >> 16) & 0xff,
3283 (val >> 24) & 0xff);
3284
3285 for (b = 0; b < 4; ++b) {
3286 if (w < len)
3287 buf[w] = (val >> (b * 8)) & 0xff;
3288 /* we discard the 2 byte checksum */
3289 ++w;
3290 }
3291 }
3292
3293 return len;
3294 } else {
3295 DSSERR("\tunknown datatype 0x%02x\n", dt);
3296 r = -EIO;
3297 goto err;
3298 }
3299
3300err:
3301 DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
3302 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
3303
3304 return r;
3305}
3306
3307static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3308 u8 *buf, int buflen)
3309{
3310 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3311 int r;
3312
3313 r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd);
3314 if (r)
3315 goto err;
3316
3317 r = dsi_vc_send_bta_sync(dssdev, channel);
3318 if (r)
3319 goto err;
3320
3321 r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
3322 DSS_DSI_CONTENT_DCS);
3323 if (r < 0)
3324 goto err;
3325
3326 if (r != buflen) {
3327 r = -EIO;
3328 goto err;
3329 }
3330
3331 return 0;
3332err:
3333 DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
3334 return r;
3335}
3336
3337static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
3338 u8 *reqdata, int reqlen, u8 *buf, int buflen)
3339{
3340 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3341 int r;
3342
3343 r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen);
3344 if (r)
3345 return r;
3346
3347 r = dsi_vc_send_bta_sync(dssdev, channel);
3348 if (r)
3349 return r;
3350
3351 r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
3352 DSS_DSI_CONTENT_GENERIC);
3353 if (r < 0)
3354 return r;
3355
3356	if (r != buflen)
3357		return -EIO;
3360
3361 return 0;
3362}
3363
3364static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
3365 u16 len)
3366{
3367 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3368
3369 return dsi_vc_send_short(dsidev, channel,
3370 MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
3371}
3372
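/*
 * Enter the ultra low power state: quiesce and disable all VCs, assert
 * the ULPS request signals, wait for ULPSACTIVENOT to confirm, then
 * switch the complex I/O to ULPS power mode and disable the interface.
 */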
3373static int dsi_enter_ulps(struct platform_device *dsidev)
3374{
3375 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3376 DECLARE_COMPLETION_ONSTACK(completion);
3377 int r, i;
3378 unsigned mask;
3379
3380	DSSDBG("Entering ULPS\n");
3381
3382 WARN_ON(!dsi_bus_is_locked(dsidev));
3383
3384 WARN_ON(dsi->ulps_enabled);
3385
3386 if (dsi->ulps_enabled)
3387 return 0;
3388
3389 /* DDR_CLK_ALWAYS_ON */
3390 if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
3391 dsi_if_enable(dsidev, 0);
3392 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
3393 dsi_if_enable(dsidev, 1);
3394 }
3395
3396 dsi_sync_vc(dsidev, 0);
3397 dsi_sync_vc(dsidev, 1);
3398 dsi_sync_vc(dsidev, 2);
3399 dsi_sync_vc(dsidev, 3);
3400
3401 dsi_force_tx_stop_mode_io(dsidev);
3402
3403 dsi_vc_enable(dsidev, 0, false);
3404 dsi_vc_enable(dsidev, 1, false);
3405 dsi_vc_enable(dsidev, 2, false);
3406 dsi_vc_enable(dsidev, 3, false);
3407
3408 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
3409 DSSERR("HS busy when enabling ULPS\n");
3410 return -EIO;
3411 }
3412
3413 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
3414 DSSERR("LP busy when enabling ULPS\n");
3415 return -EIO;
3416 }
3417
3418 r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
3419 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3420 if (r)
3421 return r;
3422
3423 mask = 0;
3424
3425 for (i = 0; i < dsi->num_lanes_supported; ++i) {
3426 if (dsi->lanes[i].function == DSI_LANE_UNUSED)
3427 continue;
3428 mask |= 1 << i;
3429 }
3430 /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
3431 /* LANEx_ULPS_SIG2 */
3432 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);
3433
3434 /* flush posted write and wait for SCP interface to finish the write */
3435 dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
3436
3437 if (wait_for_completion_timeout(&completion,
3438 msecs_to_jiffies(1000)) == 0) {
3439 DSSERR("ULPS enable timeout\n");
3440 r = -EIO;
3441 goto err;
3442 }
3443
3444 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3445 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3446
3447 /* Reset LANEx_ULPS_SIG2 */
3448 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);
3449
3450 /* flush posted write and wait for SCP interface to finish the write */
3451 dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
3452
3453 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
3454
3455 dsi_if_enable(dsidev, false);
3456
3457 dsi->ulps_enabled = true;
3458
3459 return 0;
3460
3461err:
3462 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3463 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3464 return r;
3465}
3466
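/*
 * The timeout setters below program a 13-bit tick count plus x4/x8/x16
 * prescaler enables, giving ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) clock
 * cycles in total. Illustrative example (the clock rate is board
 * dependent): 0x1fff ticks with both x4 and x16 set is 524224 cycles,
 * ~13.7 ms at a 38.4 MHz DSI_FCK.
 */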
3467static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
3468 unsigned ticks, bool x4, bool x16)
3469{
3470 unsigned long fck;
3471 unsigned long total_ticks;
3472 u32 r;
3473
3474 BUG_ON(ticks > 0x1fff);
3475
3476 /* ticks in DSI_FCK */
3477 fck = dsi_fclk_rate(dsidev);
3478
3479 r = dsi_read_reg(dsidev, DSI_TIMING2);
3480 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
3481 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
3482 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
3483 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
3484 dsi_write_reg(dsidev, DSI_TIMING2, r);
3485
3486 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3487
3488 DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
3489 total_ticks,
3490 ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3491 (total_ticks * 1000) / (fck / 1000 / 1000));
3492}
3493
3494static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
3495 bool x8, bool x16)
3496{
3497 unsigned long fck;
3498 unsigned long total_ticks;
3499 u32 r;
3500
3501 BUG_ON(ticks > 0x1fff);
3502
3503 /* ticks in DSI_FCK */
3504 fck = dsi_fclk_rate(dsidev);
3505
3506 r = dsi_read_reg(dsidev, DSI_TIMING1);
3507 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
3508 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
3509 r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
3510 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
3511 dsi_write_reg(dsidev, DSI_TIMING1, r);
3512
3513 total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
3514
3515 DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
3516 total_ticks,
3517 ticks, x8 ? " x8" : "", x16 ? " x16" : "",
3518 (total_ticks * 1000) / (fck / 1000 / 1000));
3519}
3520
3521static void dsi_set_stop_state_counter(struct platform_device *dsidev,
3522 unsigned ticks, bool x4, bool x16)
3523{
3524 unsigned long fck;
3525 unsigned long total_ticks;
3526 u32 r;
3527
3528 BUG_ON(ticks > 0x1fff);
3529
3530 /* ticks in DSI_FCK */
3531 fck = dsi_fclk_rate(dsidev);
3532
3533 r = dsi_read_reg(dsidev, DSI_TIMING1);
3534 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
3535 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
3536 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
3537 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
3538 dsi_write_reg(dsidev, DSI_TIMING1, r);
3539
3540 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3541
3542 DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
3543 total_ticks,
3544 ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3545 (total_ticks * 1000) / (fck / 1000 / 1000));
3546}
3547
3548static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
3549 unsigned ticks, bool x4, bool x16)
3550{
3551 unsigned long fck;
3552 unsigned long total_ticks;
3553 u32 r;
3554
3555 BUG_ON(ticks > 0x1fff);
3556
3557 /* ticks in TxByteClkHS */
3558 fck = dsi_get_txbyteclkhs(dsidev);
3559
3560 r = dsi_read_reg(dsidev, DSI_TIMING2);
3561 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
3562 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
3563 r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
3564 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
3565 dsi_write_reg(dsidev, DSI_TIMING2, r);
3566
3567 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3568
3569 DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
3570 total_ticks,
3571 ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3572 (total_ticks * 1000) / (fck / 1000 / 1000));
3573}
3574
3575static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
3576{
3577 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3578 int num_line_buffers;
3579
3580 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3581 int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3582 struct omap_video_timings *timings = &dsi->timings;
3583 /*
3584 * Don't use line buffers if width is greater than the video
3585 * port's line buffer size
3586 */
3587 if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
3588 num_line_buffers = 0;
3589 else
3590 num_line_buffers = 2;
3591 } else {
3592 /* Use maximum number of line buffers in command mode */
3593 num_line_buffers = 2;
3594 }
3595
3596 /* LINE_BUFFER */
3597 REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12);
3598}
3599
3600static void dsi_config_vp_sync_events(struct platform_device *dsidev)
3601{
3602 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3603 bool sync_end;
3604 u32 r;
3605
3606 if (dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE)
3607 sync_end = true;
3608 else
3609 sync_end = false;
3610
3611 r = dsi_read_reg(dsidev, DSI_CTRL);
3612 r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */
3613 r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */
3614 r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */
3615 r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */
3616 r = FLD_MOD(r, sync_end, 16, 16); /* VP_VSYNC_END */
3617 r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */
3618 r = FLD_MOD(r, sync_end, 18, 18); /* VP_HSYNC_END */
3619 dsi_write_reg(dsidev, DSI_CTRL, r);
3620}
3621
3622static void dsi_config_blanking_modes(struct platform_device *dsidev)
3623{
3624 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3625 int blanking_mode = dsi->vm_timings.blanking_mode;
3626 int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
3627 int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
3628 int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode;
3629 u32 r;
3630
3631 /*
3632 * 0 = TX FIFO packets sent or LPS in corresponding blanking periods
3633 * 1 = Long blanking packets are sent in corresponding blanking periods
3634 */
3635 r = dsi_read_reg(dsidev, DSI_CTRL);
3636 r = FLD_MOD(r, blanking_mode, 20, 20); /* BLANKING_MODE */
3637 r = FLD_MOD(r, hfp_blanking_mode, 21, 21); /* HFP_BLANKING */
3638 r = FLD_MOD(r, hbp_blanking_mode, 22, 22); /* HBP_BLANKING */
3639 r = FLD_MOD(r, hsa_blanking_mode, 23, 23); /* HSA_BLANKING */
3640 dsi_write_reg(dsidev, DSI_CTRL, r);
3641}
3642
3643/*
3644 * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
3645 * results in maximum transition time for data and clock lanes to enter and
3646 * exit HS mode. Hence, this is the scenario where the least amount of command
3647 * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
3648 * clock cycles that can be used to interleave command mode data in HS so that
3649 * all scenarios are satisfied.
3650 */
3651static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
3652 int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
3653{
3654 int transition;
3655
3656 /*
3657 * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
3658 * time of data lanes only, if it isn't set, we need to consider HS
3659 * transition time of both data and clock lanes. HS transition time
3660 * of Scenario 3 is considered.
3661 */
3662 if (ddr_alwon) {
3663 transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
3664 } else {
3665 int trans1, trans2;
3666 trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
3667 trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
3668 enter_hs + 1;
3669 transition = max(trans1, trans2);
3670 }
3671
3672 return blank > transition ? blank - transition : 0;
3673}
3674
3675/*
3676 * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
3677 * results in maximum transition time for data lanes to enter and exit LP mode.
3678 * Hence, this is the scenario where the least amount of command mode data can
3679 * be interleaved. We program the minimum amount of bytes that can be
3680 * interleaved in LP so that all scenarios are satisfied.
3681 */
3682static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
3683 int lp_clk_div, int tdsi_fclk)
3684{
3685 int trans_lp; /* time required for a LP transition, in TXBYTECLKHS */
3686 int tlp_avail; /* time left for interleaving commands, in CLKIN4DDR */
3687 int ttxclkesc; /* period of LP transmit escape clock, in CLKIN4DDR */
3688 int thsbyte_clk = 16; /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
3689 int lp_inter; /* cmd mode data that can be interleaved, in bytes */
3690
3691 /* maximum LP transition time according to Scenario 1 */
3692 trans_lp = exit_hs + max(enter_hs, 2) + 1;
3693
3694 /* CLKIN4DDR = 16 * TXBYTECLKHS */
3695 tlp_avail = thsbyte_clk * (blank - trans_lp);
3696
3697 ttxclkesc = tdsi_fclk * lp_clk_div;
3698
3699 lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
3700 26) / 16;
3701
3702 return max(lp_inter, 0);
3703}
3704
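/*
 * Read back the already-programmed video and PHY timings, compute how
 * many TXBYTECLKHS cycles (HS) and bytes (LP) of command mode traffic
 * fit into each blanking period, and program DSI_VM_TIMING4..6.
 */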
3705static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
3706{
3707 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3708 int blanking_mode;
3709 int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
3710 int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
3711 int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
3712 int tclk_trail, ths_exit, exiths_clk;
3713 bool ddr_alwon;
3714 struct omap_video_timings *timings = &dsi->timings;
3715 int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3716 int ndl = dsi->num_lanes_used - 1;
3717 int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.regm_dsi + 1;
3718 int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
3719 int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
3720 int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
3721 int bl_interleave_hs = 0, bl_interleave_lp = 0;
3722 u32 r;
3723
3724 r = dsi_read_reg(dsidev, DSI_CTRL);
3725 blanking_mode = FLD_GET(r, 20, 20);
3726 hfp_blanking_mode = FLD_GET(r, 21, 21);
3727 hbp_blanking_mode = FLD_GET(r, 22, 22);
3728 hsa_blanking_mode = FLD_GET(r, 23, 23);
3729
3730 r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
3731 hbp = FLD_GET(r, 11, 0);
3732 hfp = FLD_GET(r, 23, 12);
3733 hsa = FLD_GET(r, 31, 24);
3734
3735 r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
3736 ddr_clk_post = FLD_GET(r, 7, 0);
3737 ddr_clk_pre = FLD_GET(r, 15, 8);
3738
3739 r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
3740 exit_hs_mode_lat = FLD_GET(r, 15, 0);
3741 enter_hs_mode_lat = FLD_GET(r, 31, 16);
3742
3743 r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
3744 lp_clk_div = FLD_GET(r, 12, 0);
3745 ddr_alwon = FLD_GET(r, 13, 13);
3746
3747 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
3748 ths_exit = FLD_GET(r, 7, 0);
3749
3750 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
3751 tclk_trail = FLD_GET(r, 15, 8);
3752
3753 exiths_clk = ths_exit + tclk_trail;
3754
3755 width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
3756 bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
3757
3758 if (!hsa_blanking_mode) {
3759 hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
3760 enter_hs_mode_lat, exit_hs_mode_lat,
3761 exiths_clk, ddr_clk_pre, ddr_clk_post);
3762 hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
3763 enter_hs_mode_lat, exit_hs_mode_lat,
3764 lp_clk_div, dsi_fclk_hsdiv);
3765 }
3766
3767 if (!hfp_blanking_mode) {
3768 hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
3769 enter_hs_mode_lat, exit_hs_mode_lat,
3770 exiths_clk, ddr_clk_pre, ddr_clk_post);
3771 hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
3772 enter_hs_mode_lat, exit_hs_mode_lat,
3773 lp_clk_div, dsi_fclk_hsdiv);
3774 }
3775
3776 if (!hbp_blanking_mode) {
3777 hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
3778 enter_hs_mode_lat, exit_hs_mode_lat,
3779 exiths_clk, ddr_clk_pre, ddr_clk_post);
3780
3781 hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
3782 enter_hs_mode_lat, exit_hs_mode_lat,
3783 lp_clk_div, dsi_fclk_hsdiv);
3784 }
3785
3786 if (!blanking_mode) {
3787 bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
3788 enter_hs_mode_lat, exit_hs_mode_lat,
3789 exiths_clk, ddr_clk_pre, ddr_clk_post);
3790
3791 bl_interleave_lp = dsi_compute_interleave_lp(bllp,
3792 enter_hs_mode_lat, exit_hs_mode_lat,
3793 lp_clk_div, dsi_fclk_hsdiv);
3794 }
3795
3796 DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
3797 hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
3798 bl_interleave_hs);
3799
3800 DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
3801 hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
3802 bl_interleave_lp);
3803
3804 r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
3805 r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
3806 r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
3807 r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
3808 dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
3809
3810 r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
3811 r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
3812 r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
3813 r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
3814 dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
3815
3816 r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
3817	r = FLD_MOD(r, bl_interleave_hs, 31, 16);	/* BL_HS_INTERLEAVING */
3818	r = FLD_MOD(r, bl_interleave_lp, 15, 0);	/* BL_LP_INTERLEAVING */
3819 dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
3820}
3821
3822static int dsi_proto_config(struct platform_device *dsidev)
3823{
3824 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3825 u32 r;
3826 int buswidth = 0;
3827
3828 dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
3829 DSI_FIFO_SIZE_32,
3830 DSI_FIFO_SIZE_32,
3831 DSI_FIFO_SIZE_32);
3832
3833 dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
3834 DSI_FIFO_SIZE_32,
3835 DSI_FIFO_SIZE_32,
3836 DSI_FIFO_SIZE_32);
3837
3838 /* XXX what values for the timeouts? */
3839 dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
3840 dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
3841 dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
3842 dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
3843
3844 switch (dsi_get_pixel_size(dsi->pix_fmt)) {
3845 case 16:
3846 buswidth = 0;
3847 break;
3848 case 18:
3849 buswidth = 1;
3850 break;
3851 case 24:
3852 buswidth = 2;
3853 break;
3854 default:
3855 BUG();
3856 return -EINVAL;
3857 }
3858
3859 r = dsi_read_reg(dsidev, DSI_CTRL);
3860 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
3861 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
3862 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
3863	r = FLD_MOD(r, 1, 4, 4);	/* VP_CLK_RATIO, always 1, see errata */
3864 r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
3865 r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
3866 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
3867 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
3868 if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
3869 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
3870 /* DCS_CMD_CODE, 1=start, 0=continue */
3871 r = FLD_MOD(r, 0, 25, 25);
3872 }
3873
3874 dsi_write_reg(dsidev, DSI_CTRL, r);
3875
3876 dsi_config_vp_num_line_buffers(dsidev);
3877
3878 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3879 dsi_config_vp_sync_events(dsidev);
3880 dsi_config_blanking_modes(dsidev);
3881 dsi_config_cmd_mode_interleaving(dsidev);
3882 }
3883
3884 dsi_vc_initial_config(dsidev, 0);
3885 dsi_vc_initial_config(dsidev, 1);
3886 dsi_vc_initial_config(dsidev, 2);
3887 dsi_vc_initial_config(dsidev, 3);
3888
3889 return 0;
3890}
3891
3892static void dsi_proto_timings(struct platform_device *dsidev)
3893{
3894 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3895 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
3896 unsigned tclk_pre, tclk_post;
3897 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
3898 unsigned ths_trail, ths_exit;
3899 unsigned ddr_clk_pre, ddr_clk_post;
3900 unsigned enter_hs_mode_lat, exit_hs_mode_lat;
3901 unsigned ths_eot;
3902 int ndl = dsi->num_lanes_used - 1;
3903 u32 r;
3904
3905 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
3906 ths_prepare = FLD_GET(r, 31, 24);
3907 ths_prepare_ths_zero = FLD_GET(r, 23, 16);
3908 ths_zero = ths_prepare_ths_zero - ths_prepare;
3909 ths_trail = FLD_GET(r, 15, 8);
3910 ths_exit = FLD_GET(r, 7, 0);
3911
3912 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
3913 tlpx = FLD_GET(r, 20, 16) * 2;
3914 tclk_trail = FLD_GET(r, 15, 8);
3915 tclk_zero = FLD_GET(r, 7, 0);
3916
3917 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
3918 tclk_prepare = FLD_GET(r, 7, 0);
3919
3920 /* min 8*UI */
3921 tclk_pre = 20;
3922 /* min 60ns + 52*UI */
3923 tclk_post = ns2ddr(dsidev, 60) + 26;
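	/* assuming ns2ddr() returns DDR clock cycles, the "+ 26" is the
	 * 52*UI part: one DDR clock period is 2 UI, so 52 UI = 26 cycles */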
3924
3925 ths_eot = DIV_ROUND_UP(4, ndl);
3926
3927 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
3928 4);
3929 ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
3930
3931 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
3932 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
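	/*
	 * Worked example with illustrative PHY values (not from any real
	 * panel): tlpx = 12, tclk_zero = 35, tclk_prepare = 13 plus the
	 * fixed tclk_pre = 20 sum to 80, so ddr_clk_pre = ceil(80 / 4) = 20.
	 * With tclk_post = 55, ths_trail = 15 and ths_eot = 2 (ndl = 3),
	 * ddr_clk_post = ceil(70 / 4) + 2 = 20. Both fit the 8-bit fields
	 * checked above.
	 */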
3933
3934 r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
3935 r = FLD_MOD(r, ddr_clk_pre, 15, 8);
3936 r = FLD_MOD(r, ddr_clk_post, 7, 0);
3937 dsi_write_reg(dsidev, DSI_CLK_TIMING, r);
3938
3939 DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
3940 ddr_clk_pre,
3941 ddr_clk_post);
3942
3943 enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
3944 DIV_ROUND_UP(ths_prepare, 4) +
3945 DIV_ROUND_UP(ths_zero + 3, 4);
3946
3947 exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
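	/*
	 * Continuing the illustrative numbers: tlpx = 12, ths_prepare = 26,
	 * ths_zero = 48 give enter_hs_mode_lat = 1 + 3 + 7 + 13 = 24, and
	 * ths_trail = 15, ths_exit = 18, ths_eot = 2 give
	 * exit_hs_mode_lat = ceil(33 / 4) + 1 + 2 = 12.
	 */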
3948
3949 r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
3950 FLD_VAL(exit_hs_mode_lat, 15, 0);
3951 dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
3952
3953 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
3954 enter_hs_mode_lat, exit_hs_mode_lat);
3955
3956 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3957 /* TODO: Implement a video mode check_timings function */
3958 int hsa = dsi->vm_timings.hsa;
3959 int hfp = dsi->vm_timings.hfp;
3960 int hbp = dsi->vm_timings.hbp;
3961 int vsa = dsi->vm_timings.vsa;
3962 int vfp = dsi->vm_timings.vfp;
3963 int vbp = dsi->vm_timings.vbp;
3964 int window_sync = dsi->vm_timings.window_sync;
3965 bool hsync_end;
3966 struct omap_video_timings *timings = &dsi->timings;
3967 int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3968 int tl, t_he, width_bytes;
3969
3970 hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
3971 t_he = hsync_end ?
3972 ((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
3973
3974 width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
3975
3976 /* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
3977 tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
3978 DIV_ROUND_UP(width_bytes + 6, ndl) + hbp;
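		/*
		 * Worked example (hypothetical panel): x_res = 864 at
		 * bpp = 24 gives width_bytes = 2592; with ndl = 3 the pixel
		 * packet takes ceil((2592 + 6) / 3) = 866 TXBYTECLKHS. In
		 * pulse mode with hsa = 2, hfp = 102, hbp = 96 this yields
		 * tl = 2 + 2 + 2 + 102 + 866 + 96 = 1070 TXBYTECLKHS.
		 */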
3979
3980 DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
3981 hfp, hsync_end ? hsa : 0, tl);
3982 DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
3983 vsa, timings->y_res);
3984
3985 r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
3986 r = FLD_MOD(r, hbp, 11, 0); /* HBP */
3987 r = FLD_MOD(r, hfp, 23, 12); /* HFP */
3988 r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24); /* HSA */
3989 dsi_write_reg(dsidev, DSI_VM_TIMING1, r);
3990
3991 r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
3992 r = FLD_MOD(r, vbp, 7, 0); /* VBP */
3993 r = FLD_MOD(r, vfp, 15, 8); /* VFP */
3994 r = FLD_MOD(r, vsa, 23, 16); /* VSA */
3995 r = FLD_MOD(r, window_sync, 27, 24); /* WINDOW_SYNC */
3996 dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
3997
3998 r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
3999 r = FLD_MOD(r, timings->y_res, 14, 0); /* VACT */
4000 r = FLD_MOD(r, tl, 31, 16); /* TL */
4001 dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
4002 }
4003}
4004
4005static int dsi_configure_pins(struct omap_dss_device *dssdev,
4006 const struct omap_dsi_pin_config *pin_cfg)
4007{
4008 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4009 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4010 int num_pins;
4011 const int *pins;
4012 struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
4013 int num_lanes;
4014 int i;
4015
4016 static const enum dsi_lane_function functions[] = {
4017 DSI_LANE_CLK,
4018 DSI_LANE_DATA1,
4019 DSI_LANE_DATA2,
4020 DSI_LANE_DATA3,
4021 DSI_LANE_DATA4,
4022 };
4023
4024 num_pins = pin_cfg->num_pins;
4025 pins = pin_cfg->pins;
4026
4027 if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2
4028 || num_pins % 2 != 0)
4029 return -EINVAL;
4030
4031 for (i = 0; i < DSI_MAX_NR_LANES; ++i)
4032 lanes[i].function = DSI_LANE_UNUSED;
4033
4034 num_lanes = 0;
4035
4036 for (i = 0; i < num_pins; i += 2) {
4037 u8 lane, pol;
4038 int dx, dy;
4039
4040 dx = pins[i];
4041 dy = pins[i + 1];
4042
4043 if (dx < 0 || dx >= dsi->num_lanes_supported * 2)
4044 return -EINVAL;
4045
4046 if (dy < 0 || dy >= dsi->num_lanes_supported * 2)
4047 return -EINVAL;
4048
4049 if (dx & 1) {
4050 if (dy != dx - 1)
4051 return -EINVAL;
4052 pol = 1;
4053 } else {
4054 if (dy != dx + 1)
4055 return -EINVAL;
4056 pol = 0;
4057 }
4058
4059 lane = dx / 2;
4060
4061 lanes[lane].function = functions[i / 2];
4062 lanes[lane].polarity = pol;
4063 num_lanes++;
4064 }
4065
4066 memcpy(dsi->lanes, lanes, sizeof(dsi->lanes));
4067 dsi->num_lanes_used = num_lanes;
4068
4069 return 0;
4070}
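/*
 * Illustrative mapping (hypothetical pin list): pins = { 0, 1, 2, 3, 7, 6 }
 * describes three lane modules. Pair (0, 1): dx is even and dy == dx + 1,
 * so polarity 0, and lane = 0 / 2 = 0 gets functions[0] = DSI_LANE_CLK.
 * Pair (2, 3): lane 1 gets DSI_LANE_DATA1, polarity 0. Pair (7, 6): dx is
 * odd and dy == dx - 1, so lane 3 gets DSI_LANE_DATA2 with polarity 1
 * (wires swapped). The pair index selects the function; the pin numbers
 * select the physical lane module.
 */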
4071
4072static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
4073{
4074 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4075 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4076 struct omap_overlay_manager *mgr = dsi->output.manager;
4077 int bpp = dsi_get_pixel_size(dsi->pix_fmt);
4078 struct omap_dss_device *out = &dsi->output;
4079 u8 data_type;
4080 u16 word_count;
4081 int r;
4082
4083	if (out->manager == NULL) {
4084 DSSERR("failed to enable display: no output/manager\n");
4085 return -ENODEV;
4086 }
4087
4088 r = dsi_display_init_dispc(dsidev, mgr);
4089 if (r)
4090 goto err_init_dispc;
4091
4092 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
4093 switch (dsi->pix_fmt) {
4094 case OMAP_DSS_DSI_FMT_RGB888:
4095 data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
4096 break;
4097 case OMAP_DSS_DSI_FMT_RGB666:
4098 data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
4099 break;
4100 case OMAP_DSS_DSI_FMT_RGB666_PACKED:
4101 data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
4102 break;
4103 case OMAP_DSS_DSI_FMT_RGB565:
4104 data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
4105 break;
4106 default:
4107 r = -EINVAL;
4108 goto err_pix_fmt;
4109 }
4110
4111 dsi_if_enable(dsidev, false);
4112 dsi_vc_enable(dsidev, channel, false);
4113
4114 /* MODE, 1 = video mode */
4115 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
4116
4117 word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);
4118
4119 dsi_vc_write_long_header(dsidev, channel, data_type,
4120 word_count, 0);
4121
4122 dsi_vc_enable(dsidev, channel, true);
4123 dsi_if_enable(dsidev, true);
4124 }
4125
4126 r = dss_mgr_enable(mgr);
4127 if (r)
4128 goto err_mgr_enable;
4129
4130 return 0;
4131
4132err_mgr_enable:
4133 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
4134 dsi_if_enable(dsidev, false);
4135 dsi_vc_enable(dsidev, channel, false);
4136 }
4137err_pix_fmt:
4138 dsi_display_uninit_dispc(dsidev, mgr);
4139err_init_dispc:
4140 return r;
4141}
4142
4143static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
4144{
4145 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4146 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4147 struct omap_overlay_manager *mgr = dsi->output.manager;
4148
4149 if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
4150 dsi_if_enable(dsidev, false);
4151 dsi_vc_enable(dsidev, channel, false);
4152
4153 /* MODE, 0 = command mode */
4154 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);
4155
4156 dsi_vc_enable(dsidev, channel, true);
4157 dsi_if_enable(dsidev, true);
4158 }
4159
4160 dss_mgr_disable(mgr);
4161
4162 dsi_display_uninit_dispc(dsidev, mgr);
4163}
4164
4165static void dsi_update_screen_dispc(struct platform_device *dsidev)
4166{
4167 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4168 struct omap_overlay_manager *mgr = dsi->output.manager;
4169 unsigned bytespp;
4170 unsigned bytespl;
4171 unsigned bytespf;
4172 unsigned total_len;
4173 unsigned packet_payload;
4174 unsigned packet_len;
4175 u32 l;
4176 int r;
4177 const unsigned channel = dsi->update_channel;
4178 const unsigned line_buf_size = dsi->line_buffer_size;
4179 u16 w = dsi->timings.x_res;
4180 u16 h = dsi->timings.y_res;
4181
4182 DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
4183
4184 dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
4185
4186 bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
4187 bytespl = w * bytespp;
4188 bytespf = bytespl * h;
4189
4190 /* NOTE: packet_payload has to be equal to N * bytespl, where N is
4191 * number of lines in a packet. See errata about VP_CLK_RATIO */
4192
4193 if (bytespf < line_buf_size)
4194 packet_payload = bytespf;
4195 else
4196		packet_payload = line_buf_size / bytespl * bytespl;
4197
4198 packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
4199 total_len = (bytespf / packet_payload) * packet_len;
4200
4201 if (bytespf % packet_payload)
4202 total_len += (bytespf % packet_payload) + 1;
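	/*
	 * Worked example (illustrative 864x480 RGB888 frame, line_buf_size
	 * assumed to be 4096): bytespl = 864 * 3 = 2592 and
	 * bytespf = 1244160. packet_payload = 4096 / 2592 * 2592 = 2592
	 * (one full line), packet_len = 2593 and
	 * total_len = 480 * 2593 = 1244640, with no partial trailing packet.
	 */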
4203
4204 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
4205 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
4206
4207 dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
4208 packet_len, 0);
4209
4210 if (dsi->te_enabled)
4211 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
4212 else
4213 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
4214 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
4215
4216	/* We set SIDLEMODE to no-idle for the duration of the transfer,
4217	 * because DSS interrupts are not capable of waking up the CPU and the
4218	 * framedone interrupt could be delayed for quite a long time. I think
4219	 * the same goes for all DSS interrupts, but for some reason I have not
4220	 * seen the problem anywhere other than here.
4221	 */
4222 dispc_disable_sidle();
4223
4224 dsi_perf_mark_start(dsidev);
4225
4226 r = schedule_delayed_work(&dsi->framedone_timeout_work,
4227 msecs_to_jiffies(250));
4228 BUG_ON(r == 0);
4229
4230 dss_mgr_set_timings(mgr, &dsi->timings);
4231
4232 dss_mgr_start_update(mgr);
4233
4234 if (dsi->te_enabled) {
4235		/* Disable LP_RX_TO so that we can receive TE; the time to
4236		 * wait for TE is longer than the timer allows */
4237 REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
4238
4239 dsi_vc_send_bta(dsidev, channel);
4240
4241#ifdef DSI_CATCH_MISSING_TE
4242 mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
4243#endif
4244 }
4245}
4246
4247#ifdef DSI_CATCH_MISSING_TE
4248static void dsi_te_timeout(unsigned long arg)
4249{
4250 DSSERR("TE not received for 250ms!\n");
4251}
4252#endif
4253
4254static void dsi_handle_framedone(struct platform_device *dsidev, int error)
4255{
4256 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4257
4258 /* SIDLEMODE back to smart-idle */
4259 dispc_enable_sidle();
4260
4261 if (dsi->te_enabled) {
4262 /* enable LP_RX_TO again after the TE */
4263 REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
4264 }
4265
4266 dsi->framedone_callback(error, dsi->framedone_data);
4267
4268 if (!error)
4269 dsi_perf_show(dsidev, "DISPC");
4270}
4271
4272static void dsi_framedone_timeout_work_callback(struct work_struct *work)
4273{
4274 struct dsi_data *dsi = container_of(work, struct dsi_data,
4275 framedone_timeout_work.work);
4276	/* XXX While extremely unlikely, we could get a FRAMEDONE interrupt
4277	 * after 250ms, which would conflict with this timeout work. What
4278	 * should be done is to first cancel the transfer on the HW and then
4279	 * cancel the possibly scheduled framedone work. However, cancelling
4280	 * the transfer on the HW is buggy and would probably require
4281	 * resetting the whole DSI block. */
4282
4283 DSSERR("Framedone not received for 250ms!\n");
4284
4285 dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
4286}
4287
4288static void dsi_framedone_irq_callback(void *data)
4289{
4290 struct platform_device *dsidev = (struct platform_device *) data;
4291 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4292
4293 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
4294 * turns itself off. However, DSI still has the pixels in its buffers,
4295 * and is sending the data.
4296 */
4297
4298 cancel_delayed_work(&dsi->framedone_timeout_work);
4299
4300 dsi_handle_framedone(dsidev, 0);
4301}
4302
4303static int dsi_update(struct omap_dss_device *dssdev, int channel,
4304 void (*callback)(int, void *), void *data)
4305{
4306 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4307 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4308 u16 dw, dh;
4309
4310 dsi_perf_mark_setup(dsidev);
4311
4312 dsi->update_channel = channel;
4313
4314 dsi->framedone_callback = callback;
4315 dsi->framedone_data = data;
4316
4317 dw = dsi->timings.x_res;
4318 dh = dsi->timings.y_res;
4319
4320#ifdef DSI_PERF_MEASURE
4321 dsi->update_bytes = dw * dh *
4322 dsi_get_pixel_size(dsi->pix_fmt) / 8;
4323#endif
4324 dsi_update_screen_dispc(dsidev);
4325
4326 return 0;
4327}
4328
4329/* Display funcs */
4330
4331static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
4332{
4333 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4334 struct dispc_clock_info dispc_cinfo;
4335 int r;
4336 unsigned long fck;
4337
4338 fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
4339
4340 dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
4341 dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;
4342
4343 r = dispc_calc_clock_rates(fck, &dispc_cinfo);
4344 if (r) {
4345 DSSERR("Failed to calc dispc clocks\n");
4346 return r;
4347 }
4348
4349 dsi->mgr_config.clock_info = dispc_cinfo;
4350
4351 return 0;
4352}
4353
4354static int dsi_display_init_dispc(struct platform_device *dsidev,
4355 struct omap_overlay_manager *mgr)
4356{
4357 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4358 int r;
4359
4360 dss_select_lcd_clk_source(mgr->id, dsi->module_id == 0 ?
4361 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
4362 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);
4363
4364 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
4365 r = dss_mgr_register_framedone_handler(mgr,
4366 dsi_framedone_irq_callback, dsidev);
4367 if (r) {
4368 DSSERR("can't register FRAMEDONE handler\n");
4369 goto err;
4370 }
4371
4372 dsi->mgr_config.stallmode = true;
4373 dsi->mgr_config.fifohandcheck = true;
4374 } else {
4375 dsi->mgr_config.stallmode = false;
4376 dsi->mgr_config.fifohandcheck = false;
4377 }
4378
4379 /*
4380 * override interlace, logic level and edge related parameters in
4381 * omap_video_timings with default values
4382 */
4383 dsi->timings.interlace = false;
4384 dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
4385 dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
4386 dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
4387 dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
4388 dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
4389
4390 dss_mgr_set_timings(mgr, &dsi->timings);
4391
4392 r = dsi_configure_dispc_clocks(dsidev);
4393 if (r)
4394 goto err1;
4395
4396 dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
4397 dsi->mgr_config.video_port_width =
4398 dsi_get_pixel_size(dsi->pix_fmt);
4399 dsi->mgr_config.lcden_sig_polarity = 0;
4400
4401 dss_mgr_set_lcd_config(mgr, &dsi->mgr_config);
4402
4403 return 0;
4404err1:
4405 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
4406 dss_mgr_unregister_framedone_handler(mgr,
4407 dsi_framedone_irq_callback, dsidev);
4408err:
4409 dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
4410 return r;
4411}
4412
4413static void dsi_display_uninit_dispc(struct platform_device *dsidev,
4414 struct omap_overlay_manager *mgr)
4415{
4416 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4417
4418 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
4419 dss_mgr_unregister_framedone_handler(mgr,
4420 dsi_framedone_irq_callback, dsidev);
4421
4422 dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
4423}
4424
4425static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
4426{
4427 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4428 struct dsi_clock_info cinfo;
4429 int r;
4430
4431 cinfo = dsi->user_dsi_cinfo;
4432
4433 r = dsi_calc_clock_rates(dsidev, &cinfo);
4434 if (r) {
4435 DSSERR("Failed to calc dsi clocks\n");
4436 return r;
4437 }
4438
4439 r = dsi_pll_set_clock_div(dsidev, &cinfo);
4440 if (r) {
4441 DSSERR("Failed to set dsi clocks\n");
4442 return r;
4443 }
4444
4445 return 0;
4446}
4447
4448static int dsi_display_init_dsi(struct platform_device *dsidev)
4449{
4450 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4451 int r;
4452
4453 r = dsi_pll_init(dsidev, true, true);
4454 if (r)
4455 goto err0;
4456
4457 r = dsi_configure_dsi_clocks(dsidev);
4458 if (r)
4459 goto err1;
4460
4461 dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
4462 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
4463 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI);
4464
4465 DSSDBG("PLL OK\n");
4466
4467 r = dsi_cio_init(dsidev);
4468 if (r)
4469 goto err2;
4470
4471 _dsi_print_reset_status(dsidev);
4472
4473 dsi_proto_timings(dsidev);
4474 dsi_set_lp_clk_divisor(dsidev);
4475
4476	_dsi_print_reset_status(dsidev);
4478
4479 r = dsi_proto_config(dsidev);
4480 if (r)
4481 goto err3;
4482
4483 /* enable interface */
4484 dsi_vc_enable(dsidev, 0, 1);
4485 dsi_vc_enable(dsidev, 1, 1);
4486 dsi_vc_enable(dsidev, 2, 1);
4487 dsi_vc_enable(dsidev, 3, 1);
4488 dsi_if_enable(dsidev, 1);
4489 dsi_force_tx_stop_mode_io(dsidev);
4490
4491 return 0;
4492err3:
4493 dsi_cio_uninit(dsidev);
4494err2:
4495 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4496err1:
4497 dsi_pll_uninit(dsidev, true);
4498err0:
4499 return r;
4500}
4501
4502static void dsi_display_uninit_dsi(struct platform_device *dsidev,
4503 bool disconnect_lanes, bool enter_ulps)
4504{
4505 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4506
4507 if (enter_ulps && !dsi->ulps_enabled)
4508 dsi_enter_ulps(dsidev);
4509
4510 /* disable interface */
4511 dsi_if_enable(dsidev, 0);
4512 dsi_vc_enable(dsidev, 0, 0);
4513 dsi_vc_enable(dsidev, 1, 0);
4514 dsi_vc_enable(dsidev, 2, 0);
4515 dsi_vc_enable(dsidev, 3, 0);
4516
4517 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4518 dsi_cio_uninit(dsidev);
4519 dsi_pll_uninit(dsidev, disconnect_lanes);
4520}
4521
4522static int dsi_display_enable(struct omap_dss_device *dssdev)
4523{
4524 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4525 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4526 int r = 0;
4527
4528 DSSDBG("dsi_display_enable\n");
4529
4530 WARN_ON(!dsi_bus_is_locked(dsidev));
4531
4532 mutex_lock(&dsi->lock);
4533
4534 r = dsi_runtime_get(dsidev);
4535 if (r)
4536 goto err_get_dsi;
4537
4538 dsi_enable_pll_clock(dsidev, 1);
4539
4540 _dsi_initialize_irq(dsidev);
4541
4542 r = dsi_display_init_dsi(dsidev);
4543 if (r)
4544 goto err_init_dsi;
4545
4546 mutex_unlock(&dsi->lock);
4547
4548 return 0;
4549
4550err_init_dsi:
4551 dsi_enable_pll_clock(dsidev, 0);
4552 dsi_runtime_put(dsidev);
4553err_get_dsi:
4554 mutex_unlock(&dsi->lock);
4555 DSSDBG("dsi_display_enable FAILED\n");
4556 return r;
4557}
4558
4559static void dsi_display_disable(struct omap_dss_device *dssdev,
4560 bool disconnect_lanes, bool enter_ulps)
4561{
4562 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4563 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4564
4565 DSSDBG("dsi_display_disable\n");
4566
4567 WARN_ON(!dsi_bus_is_locked(dsidev));
4568
4569 mutex_lock(&dsi->lock);
4570
4571 dsi_sync_vc(dsidev, 0);
4572 dsi_sync_vc(dsidev, 1);
4573 dsi_sync_vc(dsidev, 2);
4574 dsi_sync_vc(dsidev, 3);
4575
4576 dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);
4577
4578 dsi_runtime_put(dsidev);
4579 dsi_enable_pll_clock(dsidev, 0);
4580
4581 mutex_unlock(&dsi->lock);
4582}
4583
4584static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4585{
4586 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4587 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4588
4589 dsi->te_enabled = enable;
4590 return 0;
4591}
4592
4593#ifdef PRINT_VERBOSE_VM_TIMINGS
4594static void print_dsi_vm(const char *str,
4595 const struct omap_dss_dsi_videomode_timings *t)
4596{
4597 unsigned long byteclk = t->hsclk / 4;
4598 int bl, wc, pps, tot;
4599
4600 wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
4601 pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
4602 bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
4603 tot = bl + pps;
4604
4605#define TO_DSI_T(x) ((u32)div64_u64((u64)(x) * 1000000000llu, byteclk))
4606
4607 pr_debug("%s bck %lu, %u/%u/%u/%u/%u/%u = %u+%u = %u, "
4608 "%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
4609 str,
4610 byteclk,
4611 t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
4612 bl, pps, tot,
4613 TO_DSI_T(t->hss),
4614 TO_DSI_T(t->hsa),
4615 TO_DSI_T(t->hse),
4616 TO_DSI_T(t->hbp),
4617 TO_DSI_T(pps),
4618 TO_DSI_T(t->hfp),
4619
4620 TO_DSI_T(bl),
4621 TO_DSI_T(pps),
4622
4623 TO_DSI_T(tot));
4624#undef TO_DSI_T
4625}
4626
4627static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
4628{
4629 unsigned long pck = t->pixelclock;
4630 int hact, bl, tot;
4631
4632 hact = t->x_res;
4633 bl = t->hsw + t->hbp + t->hfp;
4634 tot = hact + bl;
4635
4636#define TO_DISPC_T(x) ((u32)div64_u64((u64)(x) * 1000000000llu, pck))
4637
4638 pr_debug("%s pck %lu, %u/%u/%u/%u = %u+%u = %u, "
4639 "%u/%u/%u/%u = %u + %u = %u\n",
4640 str,
4641 pck,
4642 t->hsw, t->hbp, hact, t->hfp,
4643 bl, hact, tot,
4644 TO_DISPC_T(t->hsw),
4645 TO_DISPC_T(t->hbp),
4646 TO_DISPC_T(hact),
4647 TO_DISPC_T(t->hfp),
4648 TO_DISPC_T(bl),
4649 TO_DISPC_T(hact),
4650 TO_DISPC_T(tot));
4651#undef TO_DISPC_T
4652}
4653
4654/* note: this is not quite accurate */
4655static void print_dsi_dispc_vm(const char *str,
4656 const struct omap_dss_dsi_videomode_timings *t)
4657{
4658 struct omap_video_timings vm = { 0 };
4659 unsigned long byteclk = t->hsclk / 4;
4660 unsigned long pck;
4661 u64 dsi_tput;
4662 int dsi_hact, dsi_htot;
4663
4664 dsi_tput = (u64)byteclk * t->ndl * 8;
4665 pck = (u32)div64_u64(dsi_tput, t->bitspp);
4666 dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
4667 dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
4668
4669 vm.pixelclock = pck;
4670 vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
4671 vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
4672 vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
4673 vm.x_res = t->hact;
4674
4675 print_dispc_vm(str, &vm);
4676}
4677#endif /* PRINT_VERBOSE_VM_TIMINGS */
4678
4679static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
4680 unsigned long pck, void *data)
4681{
4682 struct dsi_clk_calc_ctx *ctx = data;
4683 struct omap_video_timings *t = &ctx->dispc_vm;
4684
4685 ctx->dispc_cinfo.lck_div = lckd;
4686 ctx->dispc_cinfo.pck_div = pckd;
4687 ctx->dispc_cinfo.lck = lck;
4688 ctx->dispc_cinfo.pck = pck;
4689
4690 *t = *ctx->config->timings;
4691 t->pixelclock = pck;
4692 t->x_res = ctx->config->timings->x_res;
4693 t->y_res = ctx->config->timings->y_res;
4694 t->hsw = t->hfp = t->hbp = t->vsw = 1;
4695 t->vfp = t->vbp = 0;
4696
4697 return true;
4698}
4699
4700static bool dsi_cm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
4701 void *data)
4702{
4703 struct dsi_clk_calc_ctx *ctx = data;
4704
4705 ctx->dsi_cinfo.regm_dispc = regm_dispc;
4706 ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
4707
4708 return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
4709 dsi_cm_calc_dispc_cb, ctx);
4710}
4711
4712static bool dsi_cm_calc_pll_cb(int regn, int regm, unsigned long fint,
4713 unsigned long pll, void *data)
4714{
4715 struct dsi_clk_calc_ctx *ctx = data;
4716
4717 ctx->dsi_cinfo.regn = regn;
4718 ctx->dsi_cinfo.regm = regm;
4719 ctx->dsi_cinfo.fint = fint;
4720 ctx->dsi_cinfo.clkin4ddr = pll;
4721
4722 return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min,
4723 dsi_cm_calc_hsdiv_cb, ctx);
4724}
4725
4726static bool dsi_cm_calc(struct dsi_data *dsi,
4727 const struct omap_dss_dsi_config *cfg,
4728 struct dsi_clk_calc_ctx *ctx)
4729{
4730 unsigned long clkin;
4731 int bitspp, ndl;
4732 unsigned long pll_min, pll_max;
4733 unsigned long pck, txbyteclk;
4734
4735 clkin = clk_get_rate(dsi->sys_clk);
4736 bitspp = dsi_get_pixel_size(cfg->pixel_format);
4737 ndl = dsi->num_lanes_used - 1;
4738
4739 /*
4740 * Here we should calculate minimum txbyteclk to be able to send the
4741 * frame in time, and also to handle TE. That's not very simple, though,
4742 * especially as we go to LP between each pixel packet due to HW
4743 * "feature". So let's just estimate very roughly and multiply by 1.5.
4744 */
4745 pck = cfg->timings->pixelclock;
4746 pck = pck * 3 / 2;
4747 txbyteclk = pck * bitspp / 8 / ndl;
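	/*
	 * Worked example (illustrative): a 25 MHz nominal pixel clock is
	 * padded to pck = 37.5 MHz; with bitspp = 24 and ndl = 2,
	 * txbyteclk = 37.5 MHz * 24 / 8 / 2 = 56.25 MHz, so pll_min below
	 * becomes at least 56.25 MHz * 16 = 900 MHz (clkin4ddr).
	 */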
4748
4749 memset(ctx, 0, sizeof(*ctx));
4750 ctx->dsidev = dsi->pdev;
4751 ctx->config = cfg;
4752 ctx->req_pck_min = pck;
4753 ctx->req_pck_nom = pck;
4754 ctx->req_pck_max = pck * 3 / 2;
4755 ctx->dsi_cinfo.clkin = clkin;
4756
4757 pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
4758 pll_max = cfg->hs_clk_max * 4;
4759
4760 return dsi_pll_calc(dsi->pdev, clkin,
4761 pll_min, pll_max,
4762 dsi_cm_calc_pll_cb, ctx);
4763}
4764
4765static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
4766{
4767 struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
4768 const struct omap_dss_dsi_config *cfg = ctx->config;
4769 int bitspp = dsi_get_pixel_size(cfg->pixel_format);
4770 int ndl = dsi->num_lanes_used - 1;
4771 unsigned long hsclk = ctx->dsi_cinfo.clkin4ddr / 4;
4772 unsigned long byteclk = hsclk / 4;
4773
4774 unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max;
4775 int xres;
4776 int panel_htot, panel_hbl; /* pixels */
4777 int dispc_htot, dispc_hbl; /* pixels */
4778 int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
4779 int hfp, hsa, hbp;
4780 const struct omap_video_timings *req_vm;
4781 struct omap_video_timings *dispc_vm;
4782 struct omap_dss_dsi_videomode_timings *dsi_vm;
4783 u64 dsi_tput, dispc_tput;
4784
4785 dsi_tput = (u64)byteclk * ndl * 8;
4786
4787 req_vm = cfg->timings;
4788 req_pck_min = ctx->req_pck_min;
4789 req_pck_max = ctx->req_pck_max;
4790 req_pck_nom = ctx->req_pck_nom;
4791
4792 dispc_pck = ctx->dispc_cinfo.pck;
4793 dispc_tput = (u64)dispc_pck * bitspp;
4794
4795 xres = req_vm->x_res;
4796
4797 panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw;
4798 panel_htot = xres + panel_hbl;
4799
4800 dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
4801
4802 /*
4803 * When there are no line buffers, DISPC and DSI must have the
4804 * same tput. Otherwise DISPC tput needs to be higher than DSI's.
4805 */
4806 if (dsi->line_buffer_size < xres * bitspp / 8) {
4807 if (dispc_tput != dsi_tput)
4808 return false;
4809 } else {
4810 if (dispc_tput < dsi_tput)
4811 return false;
4812 }
4813
4814 /* DSI tput must be over the min requirement */
4815 if (dsi_tput < (u64)bitspp * req_pck_min)
4816 return false;
4817
4818 /* When non-burst mode, DSI tput must be below max requirement. */
4819 if (cfg->trans_mode != OMAP_DSS_DSI_BURST_MODE) {
4820 if (dsi_tput > (u64)bitspp * req_pck_max)
4821 return false;
4822 }
4823
4824 hss = DIV_ROUND_UP(4, ndl);
4825
4826 if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
4827 if (ndl == 3 && req_vm->hsw == 0)
4828 hse = 1;
4829 else
4830 hse = DIV_ROUND_UP(4, ndl);
4831 } else {
4832 hse = 0;
4833 }
4834
4835 /* DSI htot to match the panel's nominal pck */
4836 dsi_htot = div64_u64((u64)panel_htot * byteclk, req_pck_nom);
4837
4838 /* fail if there would be no time for blanking */
4839 if (dsi_htot < hss + hse + dsi_hact)
4840 return false;
4841
4842 /* total DSI blanking needed to achieve panel's TL */
4843 dsi_hbl = dsi_htot - dsi_hact;
4844
4845 /* DISPC htot to match the DSI TL */
4846 dispc_htot = div64_u64((u64)dsi_htot * dispc_pck, byteclk);
4847
4848 /* verify that the DSI and DISPC TLs are the same */
4849 if ((u64)dsi_htot * dispc_pck != (u64)dispc_htot * byteclk)
4850 return false;
4851
4852 dispc_hbl = dispc_htot - xres;
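	/*
	 * Worked example (illustrative numbers): clkin4ddr = 900 MHz gives
	 * hsclk = 225 MHz and byteclk = 56.25 MHz. For xres = 864 at
	 * bitspp = 24 on ndl = 2 lanes, dsi_hact = ceil(2598 / 2) = 1299.
	 * With panel_htot = 1000 pixels at req_pck_nom = 25 MHz,
	 * dsi_htot = 1000 * 56.25 / 25 = 2250 byteclks, so dsi_hbl = 951.
	 * A dispc_pck of 25 MHz then gives dispc_htot = 1000 exactly and the
	 * TL equality check above passes.
	 */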
4853
4854 /* setup DSI videomode */
4855
4856 dsi_vm = &ctx->dsi_vm;
4857 memset(dsi_vm, 0, sizeof(*dsi_vm));
4858
4859 dsi_vm->hsclk = hsclk;
4860
4861 dsi_vm->ndl = ndl;
4862 dsi_vm->bitspp = bitspp;
4863
4864 if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
4865 hsa = 0;
4866 } else if (ndl == 3 && req_vm->hsw == 0) {
4867 hsa = 0;
4868 } else {
4869 hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom);
4870 hsa = max(hsa - hse, 1);
4871 }
4872
4873 hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom);
4874 hbp = max(hbp, 1);
4875
4876 hfp = dsi_hbl - (hss + hsa + hse + hbp);
4877 if (hfp < 1) {
4878 int t;
4879 /* we need to take cycles from hbp */
4880
4881 t = 1 - hfp;
4882 hbp = max(hbp - t, 1);
4883 hfp = dsi_hbl - (hss + hsa + hse + hbp);
4884
4885 if (hfp < 1 && hsa > 0) {
4886 /* we need to take cycles from hsa */
4887 t = 1 - hfp;
4888 hsa = max(hsa - t, 1);
4889 hfp = dsi_hbl - (hss + hsa + hse + hbp);
4890 }
4891 }
4892
4893 if (hfp < 1)
4894 return false;
4895
4896 dsi_vm->hss = hss;
4897 dsi_vm->hsa = hsa;
4898 dsi_vm->hse = hse;
4899 dsi_vm->hbp = hbp;
4900 dsi_vm->hact = xres;
4901 dsi_vm->hfp = hfp;
4902
4903 dsi_vm->vsa = req_vm->vsw;
4904 dsi_vm->vbp = req_vm->vbp;
4905 dsi_vm->vact = req_vm->y_res;
4906 dsi_vm->vfp = req_vm->vfp;
4907
4908 dsi_vm->trans_mode = cfg->trans_mode;
4909
4910 dsi_vm->blanking_mode = 0;
4911 dsi_vm->hsa_blanking_mode = 1;
4912 dsi_vm->hfp_blanking_mode = 1;
4913 dsi_vm->hbp_blanking_mode = 1;
4914
4915 dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on;
4916 dsi_vm->window_sync = 4;
4917
4918 /* setup DISPC videomode */
4919
4920 dispc_vm = &ctx->dispc_vm;
4921 *dispc_vm = *req_vm;
4922 dispc_vm->pixelclock = dispc_pck;
4923
4924 if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
4925 hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
4926 req_pck_nom);
4927 hsa = max(hsa, 1);
4928 } else {
4929 hsa = 1;
4930 }
4931
4932 hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom);
4933 hbp = max(hbp, 1);
4934
4935 hfp = dispc_hbl - hsa - hbp;
4936 if (hfp < 1) {
4937 int t;
4938 /* we need to take cycles from hbp */
4939
4940 t = 1 - hfp;
4941 hbp = max(hbp - t, 1);
4942 hfp = dispc_hbl - hsa - hbp;
4943
4944 if (hfp < 1) {
4945 /* we need to take cycles from hsa */
4946 t = 1 - hfp;
4947 hsa = max(hsa - t, 1);
4948 hfp = dispc_hbl - hsa - hbp;
4949 }
4950 }
4951
4952 if (hfp < 1)
4953 return false;
4954
4955 dispc_vm->hfp = hfp;
4956 dispc_vm->hsw = hsa;
4957 dispc_vm->hbp = hbp;
4958
4959 return true;
4960}
4961
4962
4963static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
4964 unsigned long pck, void *data)
4965{
4966 struct dsi_clk_calc_ctx *ctx = data;
4967
4968 ctx->dispc_cinfo.lck_div = lckd;
4969 ctx->dispc_cinfo.pck_div = pckd;
4970 ctx->dispc_cinfo.lck = lck;
4971 ctx->dispc_cinfo.pck = pck;
4972
4973	if (!dsi_vm_calc_blanking(ctx))
4974 return false;
4975
4976#ifdef PRINT_VERBOSE_VM_TIMINGS
4977 print_dispc_vm("dispc", &ctx->dispc_vm);
4978 print_dsi_vm("dsi ", &ctx->dsi_vm);
4979 print_dispc_vm("req ", ctx->config->timings);
4980 print_dsi_dispc_vm("act ", &ctx->dsi_vm);
4981#endif
4982
4983 return true;
4984}
4985
4986static bool dsi_vm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
4987 void *data)
4988{
4989 struct dsi_clk_calc_ctx *ctx = data;
4990 unsigned long pck_max;
4991
4992 ctx->dsi_cinfo.regm_dispc = regm_dispc;
4993 ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
4994
4995 /*
4996 * In burst mode we can let the dispc pck be arbitrarily high, but it
4997 * limits our scaling abilities. So for now, don't aim too high.
4998 */
4999
5000 if (ctx->config->trans_mode == OMAP_DSS_DSI_BURST_MODE)
5001 pck_max = ctx->req_pck_max + 10000000;
5002 else
5003 pck_max = ctx->req_pck_max;
5004
5005 return dispc_div_calc(dispc, ctx->req_pck_min, pck_max,
5006 dsi_vm_calc_dispc_cb, ctx);
5007}
5008
5009static bool dsi_vm_calc_pll_cb(int regn, int regm, unsigned long fint,
5010 unsigned long pll, void *data)
5011{
5012 struct dsi_clk_calc_ctx *ctx = data;
5013
5014 ctx->dsi_cinfo.regn = regn;
5015 ctx->dsi_cinfo.regm = regm;
5016 ctx->dsi_cinfo.fint = fint;
5017 ctx->dsi_cinfo.clkin4ddr = pll;
5018
5019 return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min,
5020 dsi_vm_calc_hsdiv_cb, ctx);
5021}
5022
5023static bool dsi_vm_calc(struct dsi_data *dsi,
5024 const struct omap_dss_dsi_config *cfg,
5025 struct dsi_clk_calc_ctx *ctx)
5026{
5027 const struct omap_video_timings *t = cfg->timings;
5028 unsigned long clkin;
5029 unsigned long pll_min;
5030 unsigned long pll_max;
5031 int ndl = dsi->num_lanes_used - 1;
5032 int bitspp = dsi_get_pixel_size(cfg->pixel_format);
5033 unsigned long byteclk_min;
5034
5035 clkin = clk_get_rate(dsi->sys_clk);
5036
5037 memset(ctx, 0, sizeof(*ctx));
5038 ctx->dsidev = dsi->pdev;
5039 ctx->config = cfg;
5040
5041 ctx->dsi_cinfo.clkin = clkin;
5042
5043 /* these limits should come from the panel driver */
5044 ctx->req_pck_min = t->pixelclock - 1000;
5045 ctx->req_pck_nom = t->pixelclock;
5046 ctx->req_pck_max = t->pixelclock + 1000;
5047
5048 byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
5049 pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
5050
5051 if (cfg->trans_mode == OMAP_DSS_DSI_BURST_MODE) {
5052 pll_max = cfg->hs_clk_max * 4;
5053 } else {
5054 unsigned long byteclk_max;
5055 byteclk_max = div64_u64((u64)ctx->req_pck_max * bitspp,
5056 ndl * 8);
5057
5058 pll_max = byteclk_max * 4 * 4;
5059 }
5060
5061 return dsi_pll_calc(dsi->pdev, clkin,
5062 pll_min, pll_max,
5063 dsi_vm_calc_pll_cb, ctx);
5064}
5065
5066static int dsi_set_config(struct omap_dss_device *dssdev,
5067 const struct omap_dss_dsi_config *config)
5068{
5069 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
5070 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5071 struct dsi_clk_calc_ctx ctx;
5072 bool ok;
5073 int r;
5074
5075 mutex_lock(&dsi->lock);
5076
5077 dsi->pix_fmt = config->pixel_format;
5078 dsi->mode = config->mode;
5079
5080 if (config->mode == OMAP_DSS_DSI_VIDEO_MODE)
5081 ok = dsi_vm_calc(dsi, config, &ctx);
5082 else
5083 ok = dsi_cm_calc(dsi, config, &ctx);
5084
5085 if (!ok) {
5086 DSSERR("failed to find suitable DSI clock settings\n");
5087 r = -EINVAL;
5088 goto err;
5089 }
5090
5091 dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo);
5092
5093 r = dsi_lp_clock_calc(&ctx.dsi_cinfo, config->lp_clk_min,
5094 config->lp_clk_max);
5095 if (r) {
5096 DSSERR("failed to find suitable DSI LP clock settings\n");
5097 goto err;
5098 }
5099
5100 dsi->user_dsi_cinfo = ctx.dsi_cinfo;
5101 dsi->user_dispc_cinfo = ctx.dispc_cinfo;
5102
5103 dsi->timings = ctx.dispc_vm;
5104 dsi->vm_timings = ctx.dsi_vm;
5105
5106 mutex_unlock(&dsi->lock);
5107
5108 return 0;
5109err:
5110 mutex_unlock(&dsi->lock);
5111
5112 return r;
5113}
5114
5115/*
5116 * Return a hardcoded channel for the DSI output. This should work for
5117 * current use cases, but it can later be expanded to either resolve the
5118 * channel in a more dynamic manner or to take the channel as a user
5119 * parameter.
5120 */
5121static enum omap_channel dsi_get_channel(int module_id)
5122{
5123 switch (omapdss_get_version()) {
5124 case OMAPDSS_VER_OMAP24xx:
5125 DSSWARN("DSI not supported\n");
5126 return OMAP_DSS_CHANNEL_LCD;
5127
5128 case OMAPDSS_VER_OMAP34xx_ES1:
5129 case OMAPDSS_VER_OMAP34xx_ES3:
5130 case OMAPDSS_VER_OMAP3630:
5131 case OMAPDSS_VER_AM35xx:
5132 return OMAP_DSS_CHANNEL_LCD;
5133
5134 case OMAPDSS_VER_OMAP4430_ES1:
5135 case OMAPDSS_VER_OMAP4430_ES2:
5136 case OMAPDSS_VER_OMAP4:
5137 switch (module_id) {
5138 case 0:
5139 return OMAP_DSS_CHANNEL_LCD;
5140 case 1:
5141 return OMAP_DSS_CHANNEL_LCD2;
5142 default:
5143 DSSWARN("unsupported module id\n");
5144 return OMAP_DSS_CHANNEL_LCD;
5145 }
5146
5147 case OMAPDSS_VER_OMAP5:
5148 switch (module_id) {
5149 case 0:
5150 return OMAP_DSS_CHANNEL_LCD;
5151 case 1:
5152 return OMAP_DSS_CHANNEL_LCD3;
5153 default:
5154 DSSWARN("unsupported module id\n");
5155 return OMAP_DSS_CHANNEL_LCD;
5156 }
5157
5158 default:
5159 DSSWARN("unsupported DSS version\n");
5160 return OMAP_DSS_CHANNEL_LCD;
5161 }
5162}
5163
5164static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
5165{
5166 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
5167 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5168 int i;
5169
5170 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
5171 if (!dsi->vc[i].dssdev) {
5172 dsi->vc[i].dssdev = dssdev;
5173 *channel = i;
5174 return 0;
5175 }
5176 }
5177
5178	DSSERR("cannot get VC for display %s\n", dssdev->name);
5179 return -ENOSPC;
5180}
5181
5182static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
5183{
5184 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
5185 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5186
5187 if (vc_id < 0 || vc_id > 3) {
5188 DSSERR("VC ID out of range\n");
5189 return -EINVAL;
5190 }
5191
5192 if (channel < 0 || channel > 3) {
5193 DSSERR("Virtual Channel out of range\n");
5194 return -EINVAL;
5195 }
5196
5197 if (dsi->vc[channel].dssdev != dssdev) {
5198 DSSERR("Virtual Channel not allocated to display %s\n",
5199 dssdev->name);
5200 return -EINVAL;
5201 }
5202
5203 dsi->vc[channel].vc_id = vc_id;
5204
5205 return 0;
5206}
5207
5208static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
5209{
5210 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
5211 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5212
5213 if ((channel >= 0 && channel <= 3) &&
5214 dsi->vc[channel].dssdev == dssdev) {
5215 dsi->vc[channel].dssdev = NULL;
5216 dsi->vc[channel].vc_id = 0;
5217 }
5218}
5219
5220void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
5221{
5222 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
5223 DSSERR("%s (%s) not active\n",
5224 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
5225 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
5226}
5227
5228void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
5229{
5230 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
5231 DSSERR("%s (%s) not active\n",
5232 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
5233 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
5234}
5235
5236static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
5237{
5238 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5239
5240 dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
5241 dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
5242 dsi->regm_dispc_max =
5243 dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
5244 dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
5245 dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
5246 dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
5247 dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
5248}
5249
5250static int dsi_get_clocks(struct platform_device *dsidev)
5251{
5252 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5253 struct clk *clk;
5254
5255 clk = devm_clk_get(&dsidev->dev, "fck");
5256 if (IS_ERR(clk)) {
5257 DSSERR("can't get fck\n");
5258 return PTR_ERR(clk);
5259 }
5260
5261 dsi->dss_clk = clk;
5262
5263 clk = devm_clk_get(&dsidev->dev, "sys_clk");
5264 if (IS_ERR(clk)) {
5265 DSSERR("can't get sys_clk\n");
5266 return PTR_ERR(clk);
5267 }
5268
5269 dsi->sys_clk = clk;
5270
5271 return 0;
5272}
5273
5274static int dsi_connect(struct omap_dss_device *dssdev,
5275 struct omap_dss_device *dst)
5276{
5277 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
5278 struct omap_overlay_manager *mgr;
5279 int r;
5280
5281 r = dsi_regulator_init(dsidev);
5282 if (r)
5283 return r;
5284
5285 mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
5286 if (!mgr)
5287 return -ENODEV;
5288
5289 r = dss_mgr_connect(mgr, dssdev);
5290 if (r)
5291 return r;
5292
5293 r = omapdss_output_set_device(dssdev, dst);
5294 if (r) {
5295 DSSERR("failed to connect output to new device: %s\n",
5296 dssdev->name);
5297 dss_mgr_disconnect(mgr, dssdev);
5298 return r;
5299 }
5300
5301 return 0;
5302}
5303
5304static void dsi_disconnect(struct omap_dss_device *dssdev,
5305 struct omap_dss_device *dst)
5306{
5307 WARN_ON(dst != dssdev->dst);
5308
5309 if (dst != dssdev->dst)
5310 return;
5311
5312 omapdss_output_unset_device(dssdev);
5313
5314 if (dssdev->manager)
5315 dss_mgr_disconnect(dssdev->manager, dssdev);
5316}
5317
5318static const struct omapdss_dsi_ops dsi_ops = {
5319 .connect = dsi_connect,
5320 .disconnect = dsi_disconnect,
5321
5322 .bus_lock = dsi_bus_lock,
5323 .bus_unlock = dsi_bus_unlock,
5324
5325 .enable = dsi_display_enable,
5326 .disable = dsi_display_disable,
5327
5328 .enable_hs = dsi_vc_enable_hs,
5329
5330 .configure_pins = dsi_configure_pins,
5331 .set_config = dsi_set_config,
5332
5333 .enable_video_output = dsi_enable_video_output,
5334 .disable_video_output = dsi_disable_video_output,
5335
5336 .update = dsi_update,
5337
5338 .enable_te = dsi_enable_te,
5339
5340 .request_vc = dsi_request_vc,
5341 .set_vc_id = dsi_set_vc_id,
5342 .release_vc = dsi_release_vc,
5343
5344 .dcs_write = dsi_vc_dcs_write,
5345 .dcs_write_nosync = dsi_vc_dcs_write_nosync,
5346 .dcs_read = dsi_vc_dcs_read,
5347
5348 .gen_write = dsi_vc_generic_write,
5349 .gen_write_nosync = dsi_vc_generic_write_nosync,
5350 .gen_read = dsi_vc_generic_read,
5351
5352 .bta_sync = dsi_vc_send_bta_sync,
5353
5354 .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
5355};
5356
5357static void dsi_init_output(struct platform_device *dsidev)
5358{
5359 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5360 struct omap_dss_device *out = &dsi->output;
5361
5362 out->dev = &dsidev->dev;
5363 out->id = dsi->module_id == 0 ?
5364 OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
5365
5366 out->output_type = OMAP_DISPLAY_TYPE_DSI;
5367 out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
5368 out->dispc_channel = dsi_get_channel(dsi->module_id);
5369 out->ops.dsi = &dsi_ops;
5370 out->owner = THIS_MODULE;
5371
5372 omapdss_register_output(out);
5373}
5374
5375static void dsi_uninit_output(struct platform_device *dsidev)
5376{
5377 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5378 struct omap_dss_device *out = &dsi->output;
5379
5380 omapdss_unregister_output(out);
5381}
5382
5383static int dsi_probe_of(struct platform_device *pdev)
5384{
5385 struct device_node *node = pdev->dev.of_node;
5386 struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
5387 struct property *prop;
5388 u32 lane_arr[10];
5389 int len, num_pins;
5390 int r, i;
5391 struct device_node *ep;
5392 struct omap_dsi_pin_config pin_cfg;
5393
5394 ep = omapdss_of_get_first_endpoint(node);
5395 if (!ep)
5396 return 0;
5397
5398 prop = of_find_property(ep, "lanes", &len);
5399 if (prop == NULL) {
5400 dev_err(&pdev->dev, "failed to find lane data\n");
5401 r = -EINVAL;
5402 goto err;
5403 }
5404
5405 num_pins = len / sizeof(u32);
5406
5407 if (num_pins < 4 || num_pins % 2 != 0 ||
5408 num_pins > dsi->num_lanes_supported * 2) {
5409 dev_err(&pdev->dev, "bad number of lanes\n");
5410 r = -EINVAL;
5411 goto err;
5412 }
5413
5414 r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
5415 if (r) {
5416 dev_err(&pdev->dev, "failed to read lane data\n");
5417 goto err;
5418 }
5419
5420 pin_cfg.num_pins = num_pins;
5421 for (i = 0; i < num_pins; ++i)
5422 pin_cfg.pins[i] = (int)lane_arr[i];
5423
5424 r = dsi_configure_pins(&dsi->output, &pin_cfg);
5425 if (r) {
5426		dev_err(&pdev->dev, "failed to configure pins\n");
5427 goto err;
5428 }
5429
5430 of_node_put(ep);
5431
5432 return 0;
5433
5434err:
5435 of_node_put(ep);
5436 return r;
5437}
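/*
 * Illustrative DT fragment (node and label names hypothetical) in the
 * shape dsi_probe_of() parses: the "lanes" property lists DX/DY pin
 * pairs, here the clock lane on pins 0/1 followed by two data lanes:
 *
 *	port {
 *		endpoint {
 *			remote-endpoint = <&some_panel_in>;
 *			lanes = <0 1 2 3 4 5>;
 *		};
 *	};
 */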
5438
5439/* DSI HW IP initialisation */
5440static int omap_dsihw_probe(struct platform_device *dsidev)
5441{
5442 u32 rev;
5443 int r, i;
5444 struct dsi_data *dsi;
5445 struct resource *dsi_mem;
5446 struct resource *res;
5447 struct resource temp_res;
5448
5449 dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL);
5450 if (!dsi)
5451 return -ENOMEM;
5452
5453 dsi->pdev = dsidev;
5454 dev_set_drvdata(&dsidev->dev, dsi);
5455
5456 spin_lock_init(&dsi->irq_lock);
5457 spin_lock_init(&dsi->errors_lock);
5458 dsi->errors = 0;
5459
5460#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
5461 spin_lock_init(&dsi->irq_stats_lock);
5462 dsi->irq_stats.last_reset = jiffies;
5463#endif
5464
5465 mutex_init(&dsi->lock);
5466 sema_init(&dsi->bus_lock, 1);
5467
5468 INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
5469 dsi_framedone_timeout_work_callback);
5470
5471#ifdef DSI_CATCH_MISSING_TE
5472 init_timer(&dsi->te_timer);
5473 dsi->te_timer.function = dsi_te_timeout;
5474 dsi->te_timer.data = 0;
5475#endif
5476
5477 res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
5478 if (!res) {
5479 res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
5480 if (!res) {
5481 DSSERR("can't get IORESOURCE_MEM DSI\n");
5482 return -EINVAL;
5483 }
5484
5485 temp_res.start = res->start;
5486 temp_res.end = temp_res.start + DSI_PROTO_SZ - 1;
5487 res = &temp_res;
5488 }
5489
5490 dsi_mem = res;
5491
5492 dsi->proto_base = devm_ioremap(&dsidev->dev, res->start,
5493 resource_size(res));
5494 if (!dsi->proto_base) {
5495 DSSERR("can't ioremap DSI protocol engine\n");
5496 return -ENOMEM;
5497 }
5498
5499 res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "phy");
5500 if (!res) {
5501 res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
5502 if (!res) {
5503 DSSERR("can't get IORESOURCE_MEM DSI\n");
5504 return -EINVAL;
5505 }
5506
5507 temp_res.start = res->start + DSI_PHY_OFFSET;
5508 temp_res.end = temp_res.start + DSI_PHY_SZ - 1;
5509 res = &temp_res;
5510 }
5511
5512 dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
5513 resource_size(res));
5514	if (!dsi->phy_base) {
5515 DSSERR("can't ioremap DSI PHY\n");
5516 return -ENOMEM;
5517 }
5518
5519 res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "pll");
5520 if (!res) {
5521 res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
5522 if (!res) {
5523 DSSERR("can't get IORESOURCE_MEM DSI\n");
5524 return -EINVAL;
5525 }
5526
5527 temp_res.start = res->start + DSI_PLL_OFFSET;
5528 temp_res.end = temp_res.start + DSI_PLL_SZ - 1;
5529 res = &temp_res;
5530 }
5531
5532 dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
5533 resource_size(res));
5534	if (!dsi->pll_base) {
5535 DSSERR("can't ioremap DSI PLL\n");
5536 return -ENOMEM;
5537 }
5538
5539 dsi->irq = platform_get_irq(dsi->pdev, 0);
5540 if (dsi->irq < 0) {
5541 DSSERR("platform_get_irq failed\n");
5542 return -ENODEV;
5543 }
5544
5545 r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler,
5546 IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev);
5547 if (r < 0) {
5548 DSSERR("request_irq failed\n");
5549 return r;
5550 }
5551
5552 if (dsidev->dev.of_node) {
5553 const struct of_device_id *match;
5554 const struct dsi_module_id_data *d;
5555
5556 match = of_match_node(dsi_of_match, dsidev->dev.of_node);
5557 if (!match) {
5558 DSSERR("unsupported DSI module\n");
5559 return -ENODEV;
5560 }
5561
5562 d = match->data;
5563
5564 while (d->address != 0 && d->address != dsi_mem->start)
5565 d++;
5566
5567 if (d->address == 0) {
5568 DSSERR("unsupported DSI module\n");
5569 return -ENODEV;
5570 }
5571
5572 dsi->module_id = d->id;
5573 } else {
5574 dsi->module_id = dsidev->id;
5575 }
5576
5577 /* DSI VCs initialization */
5578 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
5579 dsi->vc[i].source = DSI_VC_SOURCE_L4;
5580 dsi->vc[i].dssdev = NULL;
5581 dsi->vc[i].vc_id = 0;
5582 }
5583
5584 dsi_calc_clock_param_ranges(dsidev);
5585
5586 r = dsi_get_clocks(dsidev);
5587 if (r)
5588 return r;
5589
5590 pm_runtime_enable(&dsidev->dev);
5591
5592 r = dsi_runtime_get(dsidev);
5593 if (r)
5594 goto err_runtime_get;
5595
5596 rev = dsi_read_reg(dsidev, DSI_REVISION);
5597 dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
5598 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
5599
5600	/* DSI on OMAP3 doesn't have the DSI_GNQ register; default to
5601	 * 3 lanes (the clock lane plus two data lanes) */
5602 if (dss_has_feature(FEAT_DSI_GNQ))
5603 /* NB_DATA_LANES */
5604 dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
5605 else
5606 dsi->num_lanes_supported = 3;
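	/* e.g. an NB_DATA_LANES field value of 4 would yield
	 * num_lanes_supported = 5: the clock lane plus four data lanes */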
5607
5608 dsi->line_buffer_size = dsi_get_line_buf_size(dsidev);
5609
5610 dsi_init_output(dsidev);
5611
5612 if (dsidev->dev.of_node) {
5613 r = dsi_probe_of(dsidev);
5614 if (r) {
5615 DSSERR("Invalid DSI DT data\n");
5616 goto err_probe_of;
5617 }
5618
5619 r = of_platform_populate(dsidev->dev.of_node, NULL, NULL,
5620 &dsidev->dev);
5621 if (r)
5622 DSSERR("Failed to populate DSI child devices: %d\n", r);
5623 }
5624
5625 dsi_runtime_put(dsidev);
5626
5627 if (dsi->module_id == 0)
5628 dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
5629 else if (dsi->module_id == 1)
5630 dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
5631
5632#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
5633 if (dsi->module_id == 0)
5634 dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
5635 else if (dsi->module_id == 1)
5636 dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
5637#endif
5638
5639 return 0;
5640
5641err_probe_of:
5642 dsi_uninit_output(dsidev);
5643 dsi_runtime_put(dsidev);
5644
5645err_runtime_get:
5646 pm_runtime_disable(&dsidev->dev);
5647 return r;
5648}
5649
5650static int dsi_unregister_child(struct device *dev, void *data)
5651{
5652 struct platform_device *pdev = to_platform_device(dev);
5653 platform_device_unregister(pdev);
5654 return 0;
5655}
5656
5657static int __exit omap_dsihw_remove(struct platform_device *dsidev)
5658{
5659 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5660
5661 device_for_each_child(&dsidev->dev, NULL, dsi_unregister_child);
5662
5663 WARN_ON(dsi->scp_clk_refcount > 0);
5664
5665 dsi_uninit_output(dsidev);
5666
5667 pm_runtime_disable(&dsidev->dev);
5668
5669 if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
5670 regulator_disable(dsi->vdds_dsi_reg);
5671 dsi->vdds_dsi_enabled = false;
5672 }
5673
5674 return 0;
5675}
5676
5677static int dsi_runtime_suspend(struct device *dev)
5678{
5679 struct platform_device *pdev = to_platform_device(dev);
5680 struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
5681
5682 dsi->is_enabled = false;
5683 /* ensure the irq handler sees the is_enabled value */
5684 smp_wmb();
5685 /* wait for current handler to finish before turning the DSI off */
5686 synchronize_irq(dsi->irq);
5687
5688 dispc_runtime_put();
5689
5690 return 0;
5691}
5692
5693static int dsi_runtime_resume(struct device *dev)
5694{
5695 struct platform_device *pdev = to_platform_device(dev);
5696 struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
5697 int r;
5698
5699 r = dispc_runtime_get();
5700 if (r)
5701 return r;
5702
5703 dsi->is_enabled = true;
5704 /* ensure the irq handler sees the is_enabled value */
5705 smp_wmb();
5706
5707 return 0;
5708}
5709
5710static const struct dev_pm_ops dsi_pm_ops = {
5711 .runtime_suspend = dsi_runtime_suspend,
5712 .runtime_resume = dsi_runtime_resume,
5713};
5714
5715static const struct dsi_module_id_data dsi_of_data_omap3[] = {
5716 { .address = 0x4804fc00, .id = 0, },
5717 { },
5718};
5719
5720static const struct dsi_module_id_data dsi_of_data_omap4[] = {
5721 { .address = 0x58004000, .id = 0, },
5722 { .address = 0x58005000, .id = 1, },
5723 { },
5724};
5725
5726static const struct of_device_id dsi_of_match[] = {
5727 { .compatible = "ti,omap3-dsi", .data = dsi_of_data_omap3, },
5728 { .compatible = "ti,omap4-dsi", .data = dsi_of_data_omap4, },
5729 {},
5730};
5731
5732static struct platform_driver omap_dsihw_driver = {
5733 .probe = omap_dsihw_probe,
5734 .remove = __exit_p(omap_dsihw_remove),
5735 .driver = {
5736 .name = "omapdss_dsi",
5737 .owner = THIS_MODULE,
5738 .pm = &dsi_pm_ops,
5739 .of_match_table = dsi_of_match,
5740 },
5741};
5742
5743int __init dsi_init_platform_driver(void)
5744{
5745 return platform_driver_register(&omap_dsihw_driver);
5746}
5747
5748void __exit dsi_uninit_platform_driver(void)
5749{
5750 platform_driver_unregister(&omap_dsihw_driver);
5751}