Diffstat (limited to 'drivers/video')
-rw-r--r--  drivers/video/omap2/dss/dispc.c          |   8
-rw-r--r--  drivers/video/omap2/dss/dpi.c            |  30
-rw-r--r--  drivers/video/omap2/dss/dsi.c            | 938
-rw-r--r--  drivers/video/omap2/dss/dss.c            |  12
-rw-r--r--  drivers/video/omap2/dss/dss.h            |  31
-rw-r--r--  drivers/video/omap2/dss/dss_features.h   |   1
6 files changed, 575 insertions(+), 445 deletions(-)
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 10e9e8c16dbf..df8c9921763b 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -2239,6 +2239,7 @@ static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
2239 | 2239 | ||
2240 | unsigned long dispc_fclk_rate(void) | 2240 | unsigned long dispc_fclk_rate(void) |
2241 | { | 2241 | { |
2242 | struct platform_device *dsidev; | ||
2242 | unsigned long r = 0; | 2243 | unsigned long r = 0; |
2243 | 2244 | ||
2244 | switch (dss_get_dispc_clk_source()) { | 2245 | switch (dss_get_dispc_clk_source()) { |
@@ -2246,7 +2247,8 @@ unsigned long dispc_fclk_rate(void)
2246 | r = dss_clk_get_rate(DSS_CLK_FCK); | 2247 | r = dss_clk_get_rate(DSS_CLK_FCK); |
2247 | break; | 2248 | break; |
2248 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: | 2249 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: |
2249 | r = dsi_get_pll_hsdiv_dispc_rate(); | 2250 | dsidev = dsi_get_dsidev_from_id(0); |
2251 | r = dsi_get_pll_hsdiv_dispc_rate(dsidev); | ||
2250 | break; | 2252 | break; |
2251 | default: | 2253 | default: |
2252 | BUG(); | 2254 | BUG(); |
@@ -2257,6 +2259,7 @@ unsigned long dispc_fclk_rate(void)
2257 | 2259 | ||
2258 | unsigned long dispc_lclk_rate(enum omap_channel channel) | 2260 | unsigned long dispc_lclk_rate(enum omap_channel channel) |
2259 | { | 2261 | { |
2262 | struct platform_device *dsidev; | ||
2260 | int lcd; | 2263 | int lcd; |
2261 | unsigned long r; | 2264 | unsigned long r; |
2262 | u32 l; | 2265 | u32 l; |
@@ -2270,7 +2273,8 @@ unsigned long dispc_lclk_rate(enum omap_channel channel)
2270 | r = dss_clk_get_rate(DSS_CLK_FCK); | 2273 | r = dss_clk_get_rate(DSS_CLK_FCK); |
2271 | break; | 2274 | break; |
2272 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: | 2275 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: |
2273 | r = dsi_get_pll_hsdiv_dispc_rate(); | 2276 | dsidev = dsi_get_dsidev_from_id(0); |
2277 | r = dsi_get_pll_hsdiv_dispc_rate(dsidev); | ||
2274 | break; | 2278 | break; |
2275 | default: | 2279 | default: |
2276 | BUG(); | 2280 | BUG(); |
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 3c988b65ca15..4d661a949b89 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -37,8 +37,18 @@
37 | 37 | ||
38 | static struct { | 38 | static struct { |
39 | struct regulator *vdds_dsi_reg; | 39 | struct regulator *vdds_dsi_reg; |
40 | struct platform_device *dsidev; | ||
40 | } dpi; | 41 | } dpi; |
41 | 42 | ||
43 | static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk) | ||
44 | { | ||
45 | int dsi_module; | ||
46 | |||
47 | dsi_module = clk == OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ? 0 : 1; | ||
48 | |||
49 | return dsi_get_dsidev_from_id(dsi_module); | ||
50 | } | ||
51 | |||
42 | static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev) | 52 | static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev) |
43 | { | 53 | { |
44 | if (dssdev->clocks.dispc.dispc_fclk_src == | 54 | if (dssdev->clocks.dispc.dispc_fclk_src == |
@@ -58,12 +68,12 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
58 | struct dispc_clock_info dispc_cinfo; | 68 | struct dispc_clock_info dispc_cinfo; |
59 | int r; | 69 | int r; |
60 | 70 | ||
61 | r = dsi_pll_calc_clock_div_pck(is_tft, pck_req, &dsi_cinfo, | 71 | r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, pck_req, |
62 | &dispc_cinfo); | 72 | &dsi_cinfo, &dispc_cinfo); |
63 | if (r) | 73 | if (r) |
64 | return r; | 74 | return r; |
65 | 75 | ||
66 | r = dsi_pll_set_clock_div(&dsi_cinfo); | 76 | r = dsi_pll_set_clock_div(dpi.dsidev, &dsi_cinfo); |
67 | if (r) | 77 | if (r) |
68 | return r; | 78 | return r; |
69 | 79 | ||
@@ -189,7 +199,7 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
189 | 199 | ||
190 | if (dpi_use_dsi_pll(dssdev)) { | 200 | if (dpi_use_dsi_pll(dssdev)) { |
191 | dss_clk_enable(DSS_CLK_SYSCK); | 201 | dss_clk_enable(DSS_CLK_SYSCK); |
192 | r = dsi_pll_init(0, 1); | 202 | r = dsi_pll_init(dpi.dsidev, 0, 1); |
193 | if (r) | 203 | if (r) |
194 | goto err3; | 204 | goto err3; |
195 | } | 205 | } |
@@ -206,7 +216,7 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
206 | 216 | ||
207 | err4: | 217 | err4: |
208 | if (dpi_use_dsi_pll(dssdev)) | 218 | if (dpi_use_dsi_pll(dssdev)) |
209 | dsi_pll_uninit(true); | 219 | dsi_pll_uninit(dpi.dsidev, true); |
210 | err3: | 220 | err3: |
211 | if (dpi_use_dsi_pll(dssdev)) | 221 | if (dpi_use_dsi_pll(dssdev)) |
212 | dss_clk_disable(DSS_CLK_SYSCK); | 222 | dss_clk_disable(DSS_CLK_SYSCK); |
@@ -227,7 +237,7 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
227 | 237 | ||
228 | if (dpi_use_dsi_pll(dssdev)) { | 238 | if (dpi_use_dsi_pll(dssdev)) { |
229 | dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); | 239 | dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); |
230 | dsi_pll_uninit(true); | 240 | dsi_pll_uninit(dpi.dsidev, true); |
231 | dss_clk_disable(DSS_CLK_SYSCK); | 241 | dss_clk_disable(DSS_CLK_SYSCK); |
232 | } | 242 | } |
233 | 243 | ||
@@ -272,7 +282,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
272 | 282 | ||
273 | if (dpi_use_dsi_pll(dssdev)) { | 283 | if (dpi_use_dsi_pll(dssdev)) { |
274 | struct dsi_clock_info dsi_cinfo; | 284 | struct dsi_clock_info dsi_cinfo; |
275 | r = dsi_pll_calc_clock_div_pck(is_tft, | 285 | r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, |
276 | timings->pixel_clock * 1000, | 286 | timings->pixel_clock * 1000, |
277 | &dsi_cinfo, &dispc_cinfo); | 287 | &dsi_cinfo, &dispc_cinfo); |
278 | 288 | ||
@@ -319,6 +329,12 @@ int dpi_init_display(struct omap_dss_device *dssdev)
319 | dpi.vdds_dsi_reg = vdds_dsi; | 329 | dpi.vdds_dsi_reg = vdds_dsi; |
320 | } | 330 | } |
321 | 331 | ||
332 | if (dpi_use_dsi_pll(dssdev)) { | ||
333 | enum omap_dss_clk_source dispc_fclk_src = | ||
334 | dssdev->clocks.dispc.dispc_fclk_src; | ||
335 | dpi.dsidev = dpi_get_dsidev(dispc_fclk_src); | ||
336 | } | ||
337 | |||
322 | return 0; | 338 | return 0; |
323 | } | 339 | } |
324 | 340 | ||
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index b2945fed7bab..8d03eb6adcfd 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -101,11 +101,11 @@ struct dsi_reg { u16 idx; };
101 | #define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C) | 101 | #define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C) |
102 | #define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010) | 102 | #define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010) |
103 | 103 | ||
104 | #define REG_GET(idx, start, end) \ | 104 | #define REG_GET(dsidev, idx, start, end) \ |
105 | FLD_GET(dsi_read_reg(idx), start, end) | 105 | FLD_GET(dsi_read_reg(dsidev, idx), start, end) |
106 | 106 | ||
107 | #define REG_FLD_MOD(idx, val, start, end) \ | 107 | #define REG_FLD_MOD(dsidev, idx, val, start, end) \ |
108 | dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end)) | 108 | dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end)) |
109 | 109 | ||
110 | /* Global interrupts */ | 110 | /* Global interrupts */ |
111 | #define DSI_IRQ_VC0 (1 << 0) | 111 | #define DSI_IRQ_VC0 (1 << 0) |
@@ -257,8 +257,7 @@ struct dsi_isr_tables {
257 | struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS]; | 257 | struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS]; |
258 | }; | 258 | }; |
259 | 259 | ||
260 | static struct | 260 | static struct dsi_data { |
261 | { | ||
262 | struct platform_device *pdev; | 261 | struct platform_device *pdev; |
263 | void __iomem *base; | 262 | void __iomem *base; |
264 | int irq; | 263 | int irq; |
@@ -330,17 +329,31 @@ static struct
330 | unsigned scp_clk_refcount; | 329 | unsigned scp_clk_refcount; |
331 | } dsi; | 330 | } dsi; |
332 | 331 | ||
332 | static struct platform_device *dsi_pdev_map[MAX_NUM_DSI]; | ||
333 | |||
333 | #ifdef DEBUG | 334 | #ifdef DEBUG |
334 | static unsigned int dsi_perf; | 335 | static unsigned int dsi_perf; |
335 | module_param_named(dsi_perf, dsi_perf, bool, 0644); | 336 | module_param_named(dsi_perf, dsi_perf, bool, 0644); |
336 | #endif | 337 | #endif |
337 | 338 | ||
338 | static inline void dsi_write_reg(const struct dsi_reg idx, u32 val) | 339 | static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev) |
340 | { | ||
341 | return dsi_pdev_map[dssdev->phy.dsi.module]; | ||
342 | } | ||
343 | |||
344 | struct platform_device *dsi_get_dsidev_from_id(int module) | ||
345 | { | ||
346 | return dsi_pdev_map[module]; | ||
347 | } | ||
348 | |||
349 | static inline void dsi_write_reg(struct platform_device *dsidev, | ||
350 | const struct dsi_reg idx, u32 val) | ||
339 | { | 351 | { |
340 | __raw_writel(val, dsi.base + idx.idx); | 352 | __raw_writel(val, dsi.base + idx.idx); |
341 | } | 353 | } |
342 | 354 | ||
343 | static inline u32 dsi_read_reg(const struct dsi_reg idx) | 355 | static inline u32 dsi_read_reg(struct platform_device *dsidev, |
356 | const struct dsi_reg idx) | ||
344 | { | 357 | { |
345 | return __raw_readl(dsi.base + idx.idx); | 358 | return __raw_readl(dsi.base + idx.idx); |
346 | } | 359 | } |
@@ -366,7 +379,7 @@ void dsi_bus_unlock(struct omap_dss_device *dssdev)
366 | } | 379 | } |
367 | EXPORT_SYMBOL(dsi_bus_unlock); | 380 | EXPORT_SYMBOL(dsi_bus_unlock); |
368 | 381 | ||
369 | static bool dsi_bus_is_locked(void) | 382 | static bool dsi_bus_is_locked(struct platform_device *dsidev) |
370 | { | 383 | { |
371 | return dsi.bus_lock.count == 0; | 384 | return dsi.bus_lock.count == 0; |
372 | } | 385 | } |
@@ -376,12 +389,12 @@ static void dsi_completion_handler(void *data, u32 mask)
376 | complete((struct completion *)data); | 389 | complete((struct completion *)data); |
377 | } | 390 | } |
378 | 391 | ||
379 | static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum, | 392 | static inline int wait_for_bit_change(struct platform_device *dsidev, |
380 | int value) | 393 | const struct dsi_reg idx, int bitnum, int value) |
381 | { | 394 | { |
382 | int t = 100000; | 395 | int t = 100000; |
383 | 396 | ||
384 | while (REG_GET(idx, bitnum, bitnum) != value) { | 397 | while (REG_GET(dsidev, idx, bitnum, bitnum) != value) { |
385 | if (--t == 0) | 398 | if (--t == 0) |
386 | return !value; | 399 | return !value; |
387 | } | 400 | } |
@@ -390,17 +403,17 @@ static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
390 | } | 403 | } |
391 | 404 | ||
392 | #ifdef DEBUG | 405 | #ifdef DEBUG |
393 | static void dsi_perf_mark_setup(void) | 406 | static void dsi_perf_mark_setup(struct platform_device *dsidev) |
394 | { | 407 | { |
395 | dsi.perf_setup_time = ktime_get(); | 408 | dsi.perf_setup_time = ktime_get(); |
396 | } | 409 | } |
397 | 410 | ||
398 | static void dsi_perf_mark_start(void) | 411 | static void dsi_perf_mark_start(struct platform_device *dsidev) |
399 | { | 412 | { |
400 | dsi.perf_start_time = ktime_get(); | 413 | dsi.perf_start_time = ktime_get(); |
401 | } | 414 | } |
402 | 415 | ||
403 | static void dsi_perf_show(const char *name) | 416 | static void dsi_perf_show(struct platform_device *dsidev, const char *name) |
404 | { | 417 | { |
405 | ktime_t t, setup_time, trans_time; | 418 | ktime_t t, setup_time, trans_time; |
406 | u32 total_bytes; | 419 | u32 total_bytes; |
@@ -438,9 +451,9 @@ static void dsi_perf_show(const char *name)
438 | total_bytes * 1000 / total_us); | 451 | total_bytes * 1000 / total_us); |
439 | } | 452 | } |
440 | #else | 453 | #else |
441 | #define dsi_perf_mark_setup() | 454 | #define dsi_perf_mark_setup(x) |
442 | #define dsi_perf_mark_start() | 455 | #define dsi_perf_mark_start(x) |
443 | #define dsi_perf_show(x) | 456 | #define dsi_perf_show(x, y) |
444 | #endif | 457 | #endif |
445 | 458 | ||
446 | static void print_irq_status(u32 status) | 459 | static void print_irq_status(u32 status) |
@@ -546,7 +559,8 @@ static void print_irq_status_cio(u32 status)
546 | } | 559 | } |
547 | 560 | ||
548 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | 561 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS |
549 | static void dsi_collect_irq_stats(u32 irqstatus, u32 *vcstatus, u32 ciostatus) | 562 | static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus, |
563 | u32 *vcstatus, u32 ciostatus) | ||
550 | { | 564 | { |
551 | int i; | 565 | int i; |
552 | 566 | ||
@@ -563,12 +577,13 @@ static void dsi_collect_irq_stats(u32 irqstatus, u32 *vcstatus, u32 ciostatus)
563 | spin_unlock(&dsi.irq_stats_lock); | 577 | spin_unlock(&dsi.irq_stats_lock); |
564 | } | 578 | } |
565 | #else | 579 | #else |
566 | #define dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus) | 580 | #define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus) |
567 | #endif | 581 | #endif |
568 | 582 | ||
569 | static int debug_irq; | 583 | static int debug_irq; |
570 | 584 | ||
571 | static void dsi_handle_irq_errors(u32 irqstatus, u32 *vcstatus, u32 ciostatus) | 585 | static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus, |
586 | u32 *vcstatus, u32 ciostatus) | ||
572 | { | 587 | { |
573 | int i; | 588 | int i; |
574 | 589 | ||
@@ -638,12 +653,15 @@ static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
638 | 653 | ||
639 | static irqreturn_t omap_dsi_irq_handler(int irq, void *arg) | 654 | static irqreturn_t omap_dsi_irq_handler(int irq, void *arg) |
640 | { | 655 | { |
656 | struct platform_device *dsidev; | ||
641 | u32 irqstatus, vcstatus[4], ciostatus; | 657 | u32 irqstatus, vcstatus[4], ciostatus; |
642 | int i; | 658 | int i; |
643 | 659 | ||
660 | dsidev = (struct platform_device *) arg; | ||
661 | |||
644 | spin_lock(&dsi.irq_lock); | 662 | spin_lock(&dsi.irq_lock); |
645 | 663 | ||
646 | irqstatus = dsi_read_reg(DSI_IRQSTATUS); | 664 | irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS); |
647 | 665 | ||
648 | /* IRQ is not for us */ | 666 | /* IRQ is not for us */ |
649 | if (!irqstatus) { | 667 | if (!irqstatus) { |
@@ -651,9 +669,9 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
651 | return IRQ_NONE; | 669 | return IRQ_NONE; |
652 | } | 670 | } |
653 | 671 | ||
654 | dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK); | 672 | dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK); |
655 | /* flush posted write */ | 673 | /* flush posted write */ |
656 | dsi_read_reg(DSI_IRQSTATUS); | 674 | dsi_read_reg(dsidev, DSI_IRQSTATUS); |
657 | 675 | ||
658 | for (i = 0; i < 4; ++i) { | 676 | for (i = 0; i < 4; ++i) { |
659 | if ((irqstatus & (1 << i)) == 0) { | 677 | if ((irqstatus & (1 << i)) == 0) { |
@@ -661,19 +679,19 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
661 | continue; | 679 | continue; |
662 | } | 680 | } |
663 | 681 | ||
664 | vcstatus[i] = dsi_read_reg(DSI_VC_IRQSTATUS(i)); | 682 | vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i)); |
665 | 683 | ||
666 | dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus[i]); | 684 | dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]); |
667 | /* flush posted write */ | 685 | /* flush posted write */ |
668 | dsi_read_reg(DSI_VC_IRQSTATUS(i)); | 686 | dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i)); |
669 | } | 687 | } |
670 | 688 | ||
671 | if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) { | 689 | if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) { |
672 | ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); | 690 | ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS); |
673 | 691 | ||
674 | dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus); | 692 | dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus); |
675 | /* flush posted write */ | 693 | /* flush posted write */ |
676 | dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); | 694 | dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS); |
677 | } else { | 695 | } else { |
678 | ciostatus = 0; | 696 | ciostatus = 0; |
679 | } | 697 | } |
@@ -691,15 +709,16 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
691 | 709 | ||
692 | dsi_handle_isrs(&dsi.isr_tables_copy, irqstatus, vcstatus, ciostatus); | 710 | dsi_handle_isrs(&dsi.isr_tables_copy, irqstatus, vcstatus, ciostatus); |
693 | 711 | ||
694 | dsi_handle_irq_errors(irqstatus, vcstatus, ciostatus); | 712 | dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus); |
695 | 713 | ||
696 | dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus); | 714 | dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus); |
697 | 715 | ||
698 | return IRQ_HANDLED; | 716 | return IRQ_HANDLED; |
699 | } | 717 | } |
700 | 718 | ||
701 | /* dsi.irq_lock has to be locked by the caller */ | 719 | /* dsi.irq_lock has to be locked by the caller */ |
702 | static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array, | 720 | static void _omap_dsi_configure_irqs(struct platform_device *dsidev, |
721 | struct dsi_isr_data *isr_array, | ||
703 | unsigned isr_array_size, u32 default_mask, | 722 | unsigned isr_array_size, u32 default_mask, |
704 | const struct dsi_reg enable_reg, | 723 | const struct dsi_reg enable_reg, |
705 | const struct dsi_reg status_reg) | 724 | const struct dsi_reg status_reg) |
@@ -720,47 +739,47 @@ static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array,
720 | mask |= isr_data->mask; | 739 | mask |= isr_data->mask; |
721 | } | 740 | } |
722 | 741 | ||
723 | old_mask = dsi_read_reg(enable_reg); | 742 | old_mask = dsi_read_reg(dsidev, enable_reg); |
724 | /* clear the irqstatus for newly enabled irqs */ | 743 | /* clear the irqstatus for newly enabled irqs */ |
725 | dsi_write_reg(status_reg, (mask ^ old_mask) & mask); | 744 | dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask); |
726 | dsi_write_reg(enable_reg, mask); | 745 | dsi_write_reg(dsidev, enable_reg, mask); |
727 | 746 | ||
728 | /* flush posted writes */ | 747 | /* flush posted writes */ |
729 | dsi_read_reg(enable_reg); | 748 | dsi_read_reg(dsidev, enable_reg); |
730 | dsi_read_reg(status_reg); | 749 | dsi_read_reg(dsidev, status_reg); |
731 | } | 750 | } |
732 | 751 | ||
733 | /* dsi.irq_lock has to be locked by the caller */ | 752 | /* dsi.irq_lock has to be locked by the caller */ |
734 | static void _omap_dsi_set_irqs(void) | 753 | static void _omap_dsi_set_irqs(struct platform_device *dsidev) |
735 | { | 754 | { |
736 | u32 mask = DSI_IRQ_ERROR_MASK; | 755 | u32 mask = DSI_IRQ_ERROR_MASK; |
737 | #ifdef DSI_CATCH_MISSING_TE | 756 | #ifdef DSI_CATCH_MISSING_TE |
738 | mask |= DSI_IRQ_TE_TRIGGER; | 757 | mask |= DSI_IRQ_TE_TRIGGER; |
739 | #endif | 758 | #endif |
740 | _omap_dsi_configure_irqs(dsi.isr_tables.isr_table, | 759 | _omap_dsi_configure_irqs(dsidev, dsi.isr_tables.isr_table, |
741 | ARRAY_SIZE(dsi.isr_tables.isr_table), mask, | 760 | ARRAY_SIZE(dsi.isr_tables.isr_table), mask, |
742 | DSI_IRQENABLE, DSI_IRQSTATUS); | 761 | DSI_IRQENABLE, DSI_IRQSTATUS); |
743 | } | 762 | } |
744 | 763 | ||
745 | /* dsi.irq_lock has to be locked by the caller */ | 764 | /* dsi.irq_lock has to be locked by the caller */ |
746 | static void _omap_dsi_set_irqs_vc(int vc) | 765 | static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc) |
747 | { | 766 | { |
748 | _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_vc[vc], | 767 | _omap_dsi_configure_irqs(dsidev, dsi.isr_tables.isr_table_vc[vc], |
749 | ARRAY_SIZE(dsi.isr_tables.isr_table_vc[vc]), | 768 | ARRAY_SIZE(dsi.isr_tables.isr_table_vc[vc]), |
750 | DSI_VC_IRQ_ERROR_MASK, | 769 | DSI_VC_IRQ_ERROR_MASK, |
751 | DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc)); | 770 | DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc)); |
752 | } | 771 | } |
753 | 772 | ||
754 | /* dsi.irq_lock has to be locked by the caller */ | 773 | /* dsi.irq_lock has to be locked by the caller */ |
755 | static void _omap_dsi_set_irqs_cio(void) | 774 | static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev) |
756 | { | 775 | { |
757 | _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_cio, | 776 | _omap_dsi_configure_irqs(dsidev, dsi.isr_tables.isr_table_cio, |
758 | ARRAY_SIZE(dsi.isr_tables.isr_table_cio), | 777 | ARRAY_SIZE(dsi.isr_tables.isr_table_cio), |
759 | DSI_CIO_IRQ_ERROR_MASK, | 778 | DSI_CIO_IRQ_ERROR_MASK, |
760 | DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS); | 779 | DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS); |
761 | } | 780 | } |
762 | 781 | ||
763 | static void _dsi_initialize_irq(void) | 782 | static void _dsi_initialize_irq(struct platform_device *dsidev) |
764 | { | 783 | { |
765 | unsigned long flags; | 784 | unsigned long flags; |
766 | int vc; | 785 | int vc; |
@@ -769,10 +788,10 @@ static void _dsi_initialize_irq(void)
769 | 788 | ||
770 | memset(&dsi.isr_tables, 0, sizeof(dsi.isr_tables)); | 789 | memset(&dsi.isr_tables, 0, sizeof(dsi.isr_tables)); |
771 | 790 | ||
772 | _omap_dsi_set_irqs(); | 791 | _omap_dsi_set_irqs(dsidev); |
773 | for (vc = 0; vc < 4; ++vc) | 792 | for (vc = 0; vc < 4; ++vc) |
774 | _omap_dsi_set_irqs_vc(vc); | 793 | _omap_dsi_set_irqs_vc(dsidev, vc); |
775 | _omap_dsi_set_irqs_cio(); | 794 | _omap_dsi_set_irqs_cio(dsidev); |
776 | 795 | ||
777 | spin_unlock_irqrestore(&dsi.irq_lock, flags); | 796 | spin_unlock_irqrestore(&dsi.irq_lock, flags); |
778 | } | 797 | } |
@@ -833,7 +852,8 @@ static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
833 | return -EINVAL; | 852 | return -EINVAL; |
834 | } | 853 | } |
835 | 854 | ||
836 | static int dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask) | 855 | static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr, |
856 | void *arg, u32 mask) | ||
837 | { | 857 | { |
838 | unsigned long flags; | 858 | unsigned long flags; |
839 | int r; | 859 | int r; |
@@ -844,14 +864,15 @@ static int dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask)
844 | ARRAY_SIZE(dsi.isr_tables.isr_table)); | 864 | ARRAY_SIZE(dsi.isr_tables.isr_table)); |
845 | 865 | ||
846 | if (r == 0) | 866 | if (r == 0) |
847 | _omap_dsi_set_irqs(); | 867 | _omap_dsi_set_irqs(dsidev); |
848 | 868 | ||
849 | spin_unlock_irqrestore(&dsi.irq_lock, flags); | 869 | spin_unlock_irqrestore(&dsi.irq_lock, flags); |
850 | 870 | ||
851 | return r; | 871 | return r; |
852 | } | 872 | } |
853 | 873 | ||
854 | static int dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask) | 874 | static int dsi_unregister_isr(struct platform_device *dsidev, |
875 | omap_dsi_isr_t isr, void *arg, u32 mask) | ||
855 | { | 876 | { |
856 | unsigned long flags; | 877 | unsigned long flags; |
857 | int r; | 878 | int r; |
@@ -862,15 +883,15 @@ static int dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask)
862 | ARRAY_SIZE(dsi.isr_tables.isr_table)); | 883 | ARRAY_SIZE(dsi.isr_tables.isr_table)); |
863 | 884 | ||
864 | if (r == 0) | 885 | if (r == 0) |
865 | _omap_dsi_set_irqs(); | 886 | _omap_dsi_set_irqs(dsidev); |
866 | 887 | ||
867 | spin_unlock_irqrestore(&dsi.irq_lock, flags); | 888 | spin_unlock_irqrestore(&dsi.irq_lock, flags); |
868 | 889 | ||
869 | return r; | 890 | return r; |
870 | } | 891 | } |
871 | 892 | ||
872 | static int dsi_register_isr_vc(int channel, omap_dsi_isr_t isr, void *arg, | 893 | static int dsi_register_isr_vc(struct platform_device *dsidev, int channel, |
873 | u32 mask) | 894 | omap_dsi_isr_t isr, void *arg, u32 mask) |
874 | { | 895 | { |
875 | unsigned long flags; | 896 | unsigned long flags; |
876 | int r; | 897 | int r; |
@@ -882,15 +903,15 @@ static int dsi_register_isr_vc(int channel, omap_dsi_isr_t isr, void *arg,
882 | ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); | 903 | ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); |
883 | 904 | ||
884 | if (r == 0) | 905 | if (r == 0) |
885 | _omap_dsi_set_irqs_vc(channel); | 906 | _omap_dsi_set_irqs_vc(dsidev, channel); |
886 | 907 | ||
887 | spin_unlock_irqrestore(&dsi.irq_lock, flags); | 908 | spin_unlock_irqrestore(&dsi.irq_lock, flags); |
888 | 909 | ||
889 | return r; | 910 | return r; |
890 | } | 911 | } |
891 | 912 | ||
892 | static int dsi_unregister_isr_vc(int channel, omap_dsi_isr_t isr, void *arg, | 913 | static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel, |
893 | u32 mask) | 914 | omap_dsi_isr_t isr, void *arg, u32 mask) |
894 | { | 915 | { |
895 | unsigned long flags; | 916 | unsigned long flags; |
896 | int r; | 917 | int r; |
@@ -902,14 +923,15 @@ static int dsi_unregister_isr_vc(int channel, omap_dsi_isr_t isr, void *arg,
902 | ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); | 923 | ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); |
903 | 924 | ||
904 | if (r == 0) | 925 | if (r == 0) |
905 | _omap_dsi_set_irqs_vc(channel); | 926 | _omap_dsi_set_irqs_vc(dsidev, channel); |
906 | 927 | ||
907 | spin_unlock_irqrestore(&dsi.irq_lock, flags); | 928 | spin_unlock_irqrestore(&dsi.irq_lock, flags); |
908 | 929 | ||
909 | return r; | 930 | return r; |
910 | } | 931 | } |
911 | 932 | ||
912 | static int dsi_register_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask) | 933 | static int dsi_register_isr_cio(struct platform_device *dsidev, |
934 | omap_dsi_isr_t isr, void *arg, u32 mask) | ||
913 | { | 935 | { |
914 | unsigned long flags; | 936 | unsigned long flags; |
915 | int r; | 937 | int r; |
@@ -920,14 +942,15 @@ static int dsi_register_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask)
920 | ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); | 942 | ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); |
921 | 943 | ||
922 | if (r == 0) | 944 | if (r == 0) |
923 | _omap_dsi_set_irqs_cio(); | 945 | _omap_dsi_set_irqs_cio(dsidev); |
924 | 946 | ||
925 | spin_unlock_irqrestore(&dsi.irq_lock, flags); | 947 | spin_unlock_irqrestore(&dsi.irq_lock, flags); |
926 | 948 | ||
927 | return r; | 949 | return r; |
928 | } | 950 | } |
929 | 951 | ||
930 | static int dsi_unregister_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask) | 952 | static int dsi_unregister_isr_cio(struct platform_device *dsidev, |
953 | omap_dsi_isr_t isr, void *arg, u32 mask) | ||
931 | { | 954 | { |
932 | unsigned long flags; | 955 | unsigned long flags; |
933 | int r; | 956 | int r; |
@@ -938,14 +961,14 @@ static int dsi_unregister_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask)
938 | ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); | 961 | ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); |
939 | 962 | ||
940 | if (r == 0) | 963 | if (r == 0) |
941 | _omap_dsi_set_irqs_cio(); | 964 | _omap_dsi_set_irqs_cio(dsidev); |
942 | 965 | ||
943 | spin_unlock_irqrestore(&dsi.irq_lock, flags); | 966 | spin_unlock_irqrestore(&dsi.irq_lock, flags); |
944 | 967 | ||
945 | return r; | 968 | return r; |
946 | } | 969 | } |
947 | 970 | ||
948 | static u32 dsi_get_errors(void) | 971 | static u32 dsi_get_errors(struct platform_device *dsidev) |
949 | { | 972 | { |
950 | unsigned long flags; | 973 | unsigned long flags; |
951 | u32 e; | 974 | u32 e; |
@@ -966,7 +989,8 @@ static inline void enable_clocks(bool enable)
966 | } | 989 | } |
967 | 990 | ||
968 | /* source clock for DSI PLL. this could also be PCLKFREE */ | 991 | /* source clock for DSI PLL. this could also be PCLKFREE */ |
969 | static inline void dsi_enable_pll_clock(bool enable) | 992 | static inline void dsi_enable_pll_clock(struct platform_device *dsidev, |
993 | bool enable) | ||
970 | { | 994 | { |
971 | if (enable) | 995 | if (enable) |
972 | dss_clk_enable(DSS_CLK_SYSCK); | 996 | dss_clk_enable(DSS_CLK_SYSCK); |
@@ -974,13 +998,13 @@ static inline void dsi_enable_pll_clock(bool enable)
974 | dss_clk_disable(DSS_CLK_SYSCK); | 998 | dss_clk_disable(DSS_CLK_SYSCK); |
975 | 999 | ||
976 | if (enable && dsi.pll_locked) { | 1000 | if (enable && dsi.pll_locked) { |
977 | if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) | 1001 | if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) |
978 | DSSERR("cannot lock PLL when enabling clocks\n"); | 1002 | DSSERR("cannot lock PLL when enabling clocks\n"); |
979 | } | 1003 | } |
980 | } | 1004 | } |
981 | 1005 | ||
982 | #ifdef DEBUG | 1006 | #ifdef DEBUG |
983 | static void _dsi_print_reset_status(void) | 1007 | static void _dsi_print_reset_status(struct platform_device *dsidev) |
984 | { | 1008 | { |
985 | u32 l; | 1009 | u32 l; |
986 | int b0, b1, b2; | 1010 | int b0, b1, b2; |
@@ -991,14 +1015,14 @@ static void _dsi_print_reset_status(void)
991 | /* A dummy read using the SCP interface to any DSIPHY register is | 1015 | /* A dummy read using the SCP interface to any DSIPHY register is |
992 | * required after DSIPHY reset to complete the reset of the DSI complex | 1016 | * required after DSIPHY reset to complete the reset of the DSI complex |
993 | * I/O. */ | 1017 | * I/O. */ |
994 | l = dsi_read_reg(DSI_DSIPHY_CFG5); | 1018 | l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); |
995 | 1019 | ||
996 | printk(KERN_DEBUG "DSI resets: "); | 1020 | printk(KERN_DEBUG "DSI resets: "); |
997 | 1021 | ||
998 | l = dsi_read_reg(DSI_PLL_STATUS); | 1022 | l = dsi_read_reg(dsidev, DSI_PLL_STATUS); |
999 | printk("PLL (%d) ", FLD_GET(l, 0, 0)); | 1023 | printk("PLL (%d) ", FLD_GET(l, 0, 0)); |
1000 | 1024 | ||
1001 | l = dsi_read_reg(DSI_COMPLEXIO_CFG1); | 1025 | l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1); |
1002 | printk("CIO (%d) ", FLD_GET(l, 29, 29)); | 1026 | printk("CIO (%d) ", FLD_GET(l, 29, 29)); |
1003 | 1027 | ||
1004 | if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) { | 1028 | if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) { |
@@ -1011,7 +1035,7 @@ static void _dsi_print_reset_status(void)
1011 | b2 = 26; | 1035 | b2 = 26; |
1012 | } | 1036 | } |
1013 | 1037 | ||
1014 | l = dsi_read_reg(DSI_DSIPHY_CFG5); | 1038 | l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); |
1015 | printk("PHY (%x%x%x, %d, %d, %d)\n", | 1039 | printk("PHY (%x%x%x, %d, %d, %d)\n", |
1016 | FLD_GET(l, b0, b0), | 1040 | FLD_GET(l, b0, b0), |
1017 | FLD_GET(l, b1, b1), | 1041 | FLD_GET(l, b1, b1), |
@@ -1021,17 +1045,17 @@ static void _dsi_print_reset_status(void)
1021 | FLD_GET(l, 31, 31)); | 1045 | FLD_GET(l, 31, 31)); |
1022 | } | 1046 | } |
1023 | #else | 1047 | #else |
1024 | #define _dsi_print_reset_status() | 1048 | #define _dsi_print_reset_status(x) |
1025 | #endif | 1049 | #endif |
1026 | 1050 | ||
1027 | static inline int dsi_if_enable(bool enable) | 1051 | static inline int dsi_if_enable(struct platform_device *dsidev, bool enable) |
1028 | { | 1052 | { |
1029 | DSSDBG("dsi_if_enable(%d)\n", enable); | 1053 | DSSDBG("dsi_if_enable(%d)\n", enable); |
1030 | 1054 | ||
1031 | enable = enable ? 1 : 0; | 1055 | enable = enable ? 1 : 0; |
1032 | REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */ | 1056 | REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */ |
1033 | 1057 | ||
1034 | if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) { | 1058 | if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) { |
1035 | DSSERR("Failed to set dsi_if_enable to %d\n", enable); | 1059 | DSSERR("Failed to set dsi_if_enable to %d\n", enable); |
1036 | return -EIO; | 1060 | return -EIO; |
1037 | } | 1061 | } |
@@ -1039,22 +1063,22 @@ static inline int dsi_if_enable(bool enable)
1039 | return 0; | 1063 | return 0; |
1040 | } | 1064 | } |
1041 | 1065 | ||
1042 | unsigned long dsi_get_pll_hsdiv_dispc_rate(void) | 1066 | unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) |
1043 | { | 1067 | { |
1044 | return dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk; | 1068 | return dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk; |
1045 | } | 1069 | } |
1046 | 1070 | ||
1047 | static unsigned long dsi_get_pll_hsdiv_dsi_rate(void) | 1071 | static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev) |
1048 | { | 1072 | { |
1049 | return dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk; | 1073 | return dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk; |
1050 | } | 1074 | } |
1051 | 1075 | ||
1052 | static unsigned long dsi_get_txbyteclkhs(void) | 1076 | static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev) |
1053 | { | 1077 | { |
1054 | return dsi.current_cinfo.clkin4ddr / 16; | 1078 | return dsi.current_cinfo.clkin4ddr / 16; |
1055 | } | 1079 | } |
1056 | 1080 | ||
1057 | static unsigned long dsi_fclk_rate(void) | 1081 | static unsigned long dsi_fclk_rate(struct platform_device *dsidev) |
1058 | { | 1082 | { |
1059 | unsigned long r; | 1083 | unsigned long r; |
1060 | 1084 | ||
@@ -1063,7 +1087,7 @@ static unsigned long dsi_fclk_rate(void)
1063 | r = dss_clk_get_rate(DSS_CLK_FCK); | 1087 | r = dss_clk_get_rate(DSS_CLK_FCK); |
1064 | } else { | 1088 | } else { |
1065 | /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ | 1089 | /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ |
1066 | r = dsi_get_pll_hsdiv_dsi_rate(); | 1090 | r = dsi_get_pll_hsdiv_dsi_rate(dsidev); |
1067 | } | 1091 | } |
1068 | 1092 | ||
1069 | return r; | 1093 | return r; |
@@ -1071,6 +1095,7 @@ static unsigned long dsi_fclk_rate(void)
1071 | 1095 | ||
1072 | static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev) | 1096 | static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev) |
1073 | { | 1097 | { |
1098 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
1074 | unsigned long dsi_fclk; | 1099 | unsigned long dsi_fclk; |
1075 | unsigned lp_clk_div; | 1100 | unsigned lp_clk_div; |
1076 | unsigned long lp_clk; | 1101 | unsigned long lp_clk; |
@@ -1080,7 +1105,7 @@ static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1080 | if (lp_clk_div == 0 || lp_clk_div > dsi.lpdiv_max) | 1105 | if (lp_clk_div == 0 || lp_clk_div > dsi.lpdiv_max) |
1081 | return -EINVAL; | 1106 | return -EINVAL; |
1082 | 1107 | ||
1083 | dsi_fclk = dsi_fclk_rate(); | 1108 | dsi_fclk = dsi_fclk_rate(dsidev); |
1084 | 1109 | ||
1085 | lp_clk = dsi_fclk / 2 / lp_clk_div; | 1110 | lp_clk = dsi_fclk / 2 / lp_clk_div; |
1086 | 1111 | ||
@@ -1088,25 +1113,26 @@ static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1088 | dsi.current_cinfo.lp_clk = lp_clk; | 1113 | dsi.current_cinfo.lp_clk = lp_clk; |
1089 | dsi.current_cinfo.lp_clk_div = lp_clk_div; | 1114 | dsi.current_cinfo.lp_clk_div = lp_clk_div; |
1090 | 1115 | ||
1091 | REG_FLD_MOD(DSI_CLK_CTRL, lp_clk_div, 12, 0); /* LP_CLK_DIVISOR */ | 1116 | /* LP_CLK_DIVISOR */ |
1117 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0); | ||
1092 | 1118 | ||
1093 | REG_FLD_MOD(DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, | 1119 | /* LP_RX_SYNCHRO_ENABLE */ |
1094 | 21, 21); /* LP_RX_SYNCHRO_ENABLE */ | 1120 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21); |
1095 | 1121 | ||
1096 | return 0; | 1122 | return 0; |
1097 | } | 1123 | } |
1098 | 1124 | ||
1099 | static void dsi_enable_scp_clk(void) | 1125 | static void dsi_enable_scp_clk(struct platform_device *dsidev) |
1100 | { | 1126 | { |
1101 | if (dsi.scp_clk_refcount++ == 0) | 1127 | if (dsi.scp_clk_refcount++ == 0) |
1102 | REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */ | 1128 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */ |
1103 | } | 1129 | } |
1104 | 1130 | ||
1105 | static void dsi_disable_scp_clk(void) | 1131 | static void dsi_disable_scp_clk(struct platform_device *dsidev) |
1106 | { | 1132 | { |
1107 | WARN_ON(dsi.scp_clk_refcount == 0); | 1133 | WARN_ON(dsi.scp_clk_refcount == 0); |
1108 | if (--dsi.scp_clk_refcount == 0) | 1134 | if (--dsi.scp_clk_refcount == 0) |
1109 | REG_FLD_MOD(DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */ | 1135 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */ |
1110 | } | 1136 | } |
1111 | 1137 | ||
1112 | enum dsi_pll_power_state { | 1138 | enum dsi_pll_power_state { |
@@ -1116,7 +1142,8 @@ enum dsi_pll_power_state {
1116 | DSI_PLL_POWER_ON_DIV = 0x3, | 1142 | DSI_PLL_POWER_ON_DIV = 0x3, |
1117 | }; | 1143 | }; |
1118 | 1144 | ||
1119 | static int dsi_pll_power(enum dsi_pll_power_state state) | 1145 | static int dsi_pll_power(struct platform_device *dsidev, |
1146 | enum dsi_pll_power_state state) | ||
1120 | { | 1147 | { |
1121 | int t = 0; | 1148 | int t = 0; |
1122 | 1149 | ||
@@ -1125,10 +1152,11 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
1125 | state == DSI_PLL_POWER_ON_DIV) | 1152 | state == DSI_PLL_POWER_ON_DIV) |
1126 | state = DSI_PLL_POWER_ON_ALL; | 1153 | state = DSI_PLL_POWER_ON_ALL; |
1127 | 1154 | ||
1128 | REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */ | 1155 | /* PLL_PWR_CMD */ |
1156 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30); | ||
1129 | 1157 | ||
1130 | /* PLL_PWR_STATUS */ | 1158 | /* PLL_PWR_STATUS */ |
1131 | while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) { | 1159 | while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) { |
1132 | if (++t > 1000) { | 1160 | if (++t > 1000) { |
1133 | DSSERR("Failed to set DSI PLL power mode to %d\n", | 1161 | DSSERR("Failed to set DSI PLL power mode to %d\n", |
1134 | state); | 1162 | state); |
@@ -1195,8 +1223,8 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1195 | return 0; | 1223 | return 0; |
1196 | } | 1224 | } |
1197 | 1225 | ||
1198 | int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck, | 1226 | int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, |
1199 | struct dsi_clock_info *dsi_cinfo, | 1227 | unsigned long req_pck, struct dsi_clock_info *dsi_cinfo, |
1200 | struct dispc_clock_info *dispc_cinfo) | 1228 | struct dispc_clock_info *dispc_cinfo) |
1201 | { | 1229 | { |
1202 | struct dsi_clock_info cur, best; | 1230 | struct dsi_clock_info cur, best; |
@@ -1332,7 +1360,8 @@ found:
1332 | return 0; | 1360 | return 0; |
1333 | } | 1361 | } |
1334 | 1362 | ||
1335 | int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo) | 1363 | int dsi_pll_set_clock_div(struct platform_device *dsidev, |
1364 | struct dsi_clock_info *cinfo) | ||
1336 | { | 1365 | { |
1337 | int r = 0; | 1366 | int r = 0; |
1338 | u32 l; | 1367 | u32 l; |
@@ -1393,9 +1422,10 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1393 | dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, ®m_dsi_start, | 1422 | dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, ®m_dsi_start, |
1394 | ®m_dsi_end); | 1423 | ®m_dsi_end); |
1395 | 1424 | ||
1396 | REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */ | 1425 | /* DSI_PLL_AUTOMODE = manual */ |
1426 | REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0); | ||
1397 | 1427 | ||
1398 | l = dsi_read_reg(DSI_PLL_CONFIGURATION1); | 1428 | l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1); |
1399 | l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */ | 1429 | l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */ |
1400 | /* DSI_PLL_REGN */ | 1430 | /* DSI_PLL_REGN */ |
1401 | l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end); | 1431 | l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end); |
@@ -1407,7 +1437,7 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1407 | /* DSIPROTO_CLOCK_DIV */ | 1437 | /* DSIPROTO_CLOCK_DIV */ |
1408 | l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0, | 1438 | l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0, |
1409 | regm_dsi_start, regm_dsi_end); | 1439 | regm_dsi_start, regm_dsi_end); |
1410 | dsi_write_reg(DSI_PLL_CONFIGURATION1, l); | 1440 | dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l); |
1411 | 1441 | ||
1412 | BUG_ON(cinfo->fint < dsi.fint_min || cinfo->fint > dsi.fint_max); | 1442 | BUG_ON(cinfo->fint < dsi.fint_min || cinfo->fint > dsi.fint_max); |
1413 | 1443 | ||
@@ -1419,7 +1449,7 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1419 | 0x7; | 1449 | 0x7; |
1420 | } | 1450 | } |
1421 | 1451 | ||
1422 | l = dsi_read_reg(DSI_PLL_CONFIGURATION2); | 1452 | l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2); |
1423 | 1453 | ||
1424 | if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) | 1454 | if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) |
1425 | l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */ | 1455 | l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */ |
@@ -1430,17 +1460,17 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1430 | l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ | 1460 | l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ |
1431 | l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */ | 1461 | l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */ |
1432 | l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */ | 1462 | l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */ |
1433 | dsi_write_reg(DSI_PLL_CONFIGURATION2, l); | 1463 | dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l); |
1434 | 1464 | ||
1435 | REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */ | 1465 | REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */ |
1436 | 1466 | ||
1437 | if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) { | 1467 | if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) { |
1438 | DSSERR("dsi pll go bit not going down.\n"); | 1468 | DSSERR("dsi pll go bit not going down.\n"); |
1439 | r = -EIO; | 1469 | r = -EIO; |
1440 | goto err; | 1470 | goto err; |
1441 | } | 1471 | } |
1442 | 1472 | ||
1443 | if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) { | 1473 | if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) { |
1444 | DSSERR("cannot lock PLL\n"); | 1474 | DSSERR("cannot lock PLL\n"); |
1445 | r = -EIO; | 1475 | r = -EIO; |
1446 | goto err; | 1476 | goto err; |
@@ -1448,7 +1478,7 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1448 | 1478 | ||
1449 | dsi.pll_locked = 1; | 1479 | dsi.pll_locked = 1; |
1450 | 1480 | ||
1451 | l = dsi_read_reg(DSI_PLL_CONFIGURATION2); | 1481 | l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2); |
1452 | l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */ | 1482 | l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */ |
1453 | l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */ | 1483 | l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */ |
1454 | l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */ | 1484 | l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */ |
@@ -1463,14 +1493,15 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1463 | l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */ | 1493 | l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */ |
1464 | l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */ | 1494 | l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */ |
1465 | l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */ | 1495 | l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */ |
1466 | dsi_write_reg(DSI_PLL_CONFIGURATION2, l); | 1496 | dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l); |
1467 | 1497 | ||
1468 | DSSDBG("PLL config done\n"); | 1498 | DSSDBG("PLL config done\n"); |
1469 | err: | 1499 | err: |
1470 | return r; | 1500 | return r; |
1471 | } | 1501 | } |
1472 | 1502 | ||
1473 | int dsi_pll_init(bool enable_hsclk, bool enable_hsdiv) | 1503 | int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, |
1504 | bool enable_hsdiv) | ||
1474 | { | 1505 | { |
1475 | int r = 0; | 1506 | int r = 0; |
1476 | enum dsi_pll_power_state pwstate; | 1507 | enum dsi_pll_power_state pwstate; |
@@ -1491,11 +1522,11 @@ int dsi_pll_init(bool enable_hsclk, bool enable_hsdiv)
1491 | } | 1522 | } |
1492 | 1523 | ||
1493 | enable_clocks(1); | 1524 | enable_clocks(1); |
1494 | dsi_enable_pll_clock(1); | 1525 | dsi_enable_pll_clock(dsidev, 1); |
1495 | /* | 1526 | /* |
1496 | * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4. | 1527 | * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4. |
1497 | */ | 1528 | */ |
1498 | dsi_enable_scp_clk(); | 1529 | dsi_enable_scp_clk(dsidev); |
1499 | 1530 | ||
1500 | if (!dsi.vdds_dsi_enabled) { | 1531 | if (!dsi.vdds_dsi_enabled) { |
1501 | r = regulator_enable(dsi.vdds_dsi_reg); | 1532 | r = regulator_enable(dsi.vdds_dsi_reg); |
@@ -1507,7 +1538,7 @@ int dsi_pll_init(bool enable_hsclk, bool enable_hsdiv)
1507 | /* XXX PLL does not come out of reset without this... */ | 1538 | /* XXX PLL does not come out of reset without this... */ |
1508 | dispc_pck_free_enable(1); | 1539 | dispc_pck_free_enable(1); |
1509 | 1540 | ||
1510 | if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) { | 1541 | if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) { |
1511 | DSSERR("PLL not coming out of reset.\n"); | 1542 | DSSERR("PLL not coming out of reset.\n"); |
1512 | r = -ENODEV; | 1543 | r = -ENODEV; |
1513 | dispc_pck_free_enable(0); | 1544 | dispc_pck_free_enable(0); |
@@ -1527,7 +1558,7 @@ int dsi_pll_init(bool enable_hsclk, bool enable_hsdiv)
1527 | else | 1558 | else |
1528 | pwstate = DSI_PLL_POWER_OFF; | 1559 | pwstate = DSI_PLL_POWER_OFF; |
1529 | 1560 | ||
1530 | r = dsi_pll_power(pwstate); | 1561 | r = dsi_pll_power(dsidev, pwstate); |
1531 | 1562 | ||
1532 | if (r) | 1563 | if (r) |
1533 | goto err1; | 1564 | goto err1; |
@@ -1541,31 +1572,32 @@ err1:
1541 | dsi.vdds_dsi_enabled = false; | 1572 | dsi.vdds_dsi_enabled = false; |
1542 | } | 1573 | } |
1543 | err0: | 1574 | err0: |
1544 | dsi_disable_scp_clk(); | 1575 | dsi_disable_scp_clk(dsidev); |
1545 | enable_clocks(0); | 1576 | enable_clocks(0); |
1546 | dsi_enable_pll_clock(0); | 1577 | dsi_enable_pll_clock(dsidev, 0); |
1547 | return r; | 1578 | return r; |
1548 | } | 1579 | } |
1549 | 1580 | ||
1550 | void dsi_pll_uninit(bool disconnect_lanes) | 1581 | void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes) |
1551 | { | 1582 | { |
1552 | dsi.pll_locked = 0; | 1583 | dsi.pll_locked = 0; |
1553 | dsi_pll_power(DSI_PLL_POWER_OFF); | 1584 | dsi_pll_power(dsidev, DSI_PLL_POWER_OFF); |
1554 | if (disconnect_lanes) { | 1585 | if (disconnect_lanes) { |
1555 | WARN_ON(!dsi.vdds_dsi_enabled); | 1586 | WARN_ON(!dsi.vdds_dsi_enabled); |
1556 | regulator_disable(dsi.vdds_dsi_reg); | 1587 | regulator_disable(dsi.vdds_dsi_reg); |
1557 | dsi.vdds_dsi_enabled = false; | 1588 | dsi.vdds_dsi_enabled = false; |
1558 | } | 1589 | } |
1559 | 1590 | ||
1560 | dsi_disable_scp_clk(); | 1591 | dsi_disable_scp_clk(dsidev); |
1561 | enable_clocks(0); | 1592 | enable_clocks(0); |
1562 | dsi_enable_pll_clock(0); | 1593 | dsi_enable_pll_clock(dsidev, 0); |
1563 | 1594 | ||
1564 | DSSDBG("PLL uninit done\n"); | 1595 | DSSDBG("PLL uninit done\n"); |
1565 | } | 1596 | } |
1566 | 1597 | ||
1567 | void dsi_dump_clocks(struct seq_file *s) | 1598 | void dsi_dump_clocks(struct seq_file *s) |
1568 | { | 1599 | { |
1600 | struct platform_device *dsidev = dsi_get_dsidev_from_id(0); | ||
1569 | struct dsi_clock_info *cinfo = &dsi.current_cinfo; | 1601 | struct dsi_clock_info *cinfo = &dsi.current_cinfo; |
1570 | enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; | 1602 | enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; |
1571 | 1603 | ||
@@ -1606,12 +1638,12 @@ void dsi_dump_clocks(struct seq_file *s)
1606 | dss_get_generic_clk_source_name(dsi_clk_src), | 1638 | dss_get_generic_clk_source_name(dsi_clk_src), |
1607 | dss_feat_get_clk_source_name(dsi_clk_src)); | 1639 | dss_feat_get_clk_source_name(dsi_clk_src)); |
1608 | 1640 | ||
1609 | seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate()); | 1641 | seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev)); |
1610 | 1642 | ||
1611 | seq_printf(s, "DDR_CLK\t\t%lu\n", | 1643 | seq_printf(s, "DDR_CLK\t\t%lu\n", |
1612 | cinfo->clkin4ddr / 4); | 1644 | cinfo->clkin4ddr / 4); |
1613 | 1645 | ||
1614 | seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs()); | 1646 | seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev)); |
1615 | 1647 | ||
1616 | seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk); | 1648 | seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk); |
1617 | 1649 | ||
@@ -1714,10 +1746,12 @@
1714 | 1746 | ||
1715 | void dsi_dump_regs(struct seq_file *s) | 1747 | void dsi_dump_regs(struct seq_file *s) |
1716 | { | 1748 | { |
1717 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r)) | 1749 | struct platform_device *dsidev = dsi_get_dsidev_from_id(0); |
1750 | |||
1751 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r)) | ||
1718 | 1752 | ||
1719 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 1753 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); |
1720 | dsi_enable_scp_clk(); | 1754 | dsi_enable_scp_clk(dsidev); |
1721 | 1755 | ||
1722 | DUMPREG(DSI_REVISION); | 1756 | DUMPREG(DSI_REVISION); |
1723 | DUMPREG(DSI_SYSCONFIG); | 1757 | DUMPREG(DSI_SYSCONFIG); |
@@ -1789,7 +1823,7 @@ void dsi_dump_regs(struct seq_file *s)
1789 | DUMPREG(DSI_PLL_CONFIGURATION1); | 1823 | DUMPREG(DSI_PLL_CONFIGURATION1); |
1790 | DUMPREG(DSI_PLL_CONFIGURATION2); | 1824 | DUMPREG(DSI_PLL_CONFIGURATION2); |
1791 | 1825 | ||
1792 | dsi_disable_scp_clk(); | 1826 | dsi_disable_scp_clk(dsidev); |
1793 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 1827 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); |
1794 | #undef DUMPREG | 1828 | #undef DUMPREG |
1795 | } | 1829 | } |
@@ -1800,15 +1834,17 @@ enum dsi_cio_power_state {
1800 | DSI_COMPLEXIO_POWER_ULPS = 0x2, | 1834 | DSI_COMPLEXIO_POWER_ULPS = 0x2, |
1801 | }; | 1835 | }; |
1802 | 1836 | ||
1803 | static int dsi_cio_power(enum dsi_cio_power_state state) | 1837 | static int dsi_cio_power(struct platform_device *dsidev, |
1838 | enum dsi_cio_power_state state) | ||
1804 | { | 1839 | { |
1805 | int t = 0; | 1840 | int t = 0; |
1806 | 1841 | ||
1807 | /* PWR_CMD */ | 1842 | /* PWR_CMD */ |
1808 | REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27); | 1843 | REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27); |
1809 | 1844 | ||
1810 | /* PWR_STATUS */ | 1845 | /* PWR_STATUS */ |
1811 | while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) { | 1846 | while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1), |
1847 | 26, 25) != state) { | ||
1812 | if (++t > 1000) { | 1848 | if (++t > 1000) { |
1813 | DSSERR("failed to set complexio power state to " | 1849 | DSSERR("failed to set complexio power state to " |
1814 | "%d\n", state); | 1850 | "%d\n", state); |
@@ -1822,6 +1858,7 @@ static int dsi_cio_power(enum dsi_cio_power_state state)
1822 | 1858 | ||
1823 | static void dsi_set_lane_config(struct omap_dss_device *dssdev) | 1859 | static void dsi_set_lane_config(struct omap_dss_device *dssdev) |
1824 | { | 1860 | { |
1861 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
1825 | u32 r; | 1862 | u32 r; |
1826 | 1863 | ||
1827 | int clk_lane = dssdev->phy.dsi.clk_lane; | 1864 | int clk_lane = dssdev->phy.dsi.clk_lane; |
@@ -1831,14 +1868,14 @@ static void dsi_set_lane_config(struct omap_dss_device *dssdev)
1831 | int data1_pol = dssdev->phy.dsi.data1_pol; | 1868 | int data1_pol = dssdev->phy.dsi.data1_pol; |
1832 | int data2_pol = dssdev->phy.dsi.data2_pol; | 1869 | int data2_pol = dssdev->phy.dsi.data2_pol; |
1833 | 1870 | ||
1834 | r = dsi_read_reg(DSI_COMPLEXIO_CFG1); | 1871 | r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1); |
1835 | r = FLD_MOD(r, clk_lane, 2, 0); | 1872 | r = FLD_MOD(r, clk_lane, 2, 0); |
1836 | r = FLD_MOD(r, clk_pol, 3, 3); | 1873 | r = FLD_MOD(r, clk_pol, 3, 3); |
1837 | r = FLD_MOD(r, data1_lane, 6, 4); | 1874 | r = FLD_MOD(r, data1_lane, 6, 4); |
1838 | r = FLD_MOD(r, data1_pol, 7, 7); | 1875 | r = FLD_MOD(r, data1_pol, 7, 7); |
1839 | r = FLD_MOD(r, data2_lane, 10, 8); | 1876 | r = FLD_MOD(r, data2_lane, 10, 8); |
1840 | r = FLD_MOD(r, data2_pol, 11, 11); | 1877 | r = FLD_MOD(r, data2_pol, 11, 11); |
1841 | dsi_write_reg(DSI_COMPLEXIO_CFG1, r); | 1878 | dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r); |
1842 | 1879 | ||
1843 | /* The configuration of the DSI complex I/O (number of data lanes, | 1880 | /* The configuration of the DSI complex I/O (number of data lanes, |
1844 | position, differential order) should not be changed while | 1881 | position, differential order) should not be changed while |
@@ -1852,27 +1889,27 @@ static void dsi_set_lane_config(struct omap_dss_device *dssdev)
1852 | DSI complex I/O configuration is unknown. */ | 1889 | DSI complex I/O configuration is unknown. */ |
1853 | 1890 | ||
1854 | /* | 1891 | /* |
1855 | REG_FLD_MOD(DSI_CTRL, 1, 0, 0); | 1892 | REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0); |
1856 | REG_FLD_MOD(DSI_CTRL, 0, 0, 0); | 1893 | REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0); |
1857 | REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); | 1894 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); |
1858 | REG_FLD_MOD(DSI_CTRL, 1, 0, 0); | 1895 | REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0); |
1859 | */ | 1896 | */ |
1860 | } | 1897 | } |
1861 | 1898 | ||
1862 | static inline unsigned ns2ddr(unsigned ns) | 1899 | static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns) |
1863 | { | 1900 | { |
1864 | /* convert time in ns to ddr ticks, rounding up */ | 1901 | /* convert time in ns to ddr ticks, rounding up */ |
1865 | unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; | 1902 | unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; |
1866 | return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000; | 1903 | return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000; |
1867 | } | 1904 | } |
1868 | 1905 | ||
1869 | static inline unsigned ddr2ns(unsigned ddr) | 1906 | static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr) |
1870 | { | 1907 | { |
1871 | unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; | 1908 | unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; |
1872 | return ddr * 1000 * 1000 / (ddr_clk / 1000); | 1909 | return ddr * 1000 * 1000 / (ddr_clk / 1000); |
1873 | } | 1910 | } |
1874 | 1911 | ||
1875 | static void dsi_cio_timings(void) | 1912 | static void dsi_cio_timings(struct platform_device *dsidev) |
1876 | { | 1913 | { |
1877 | u32 r; | 1914 | u32 r; |
1878 | u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit; | 1915 | u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit; |
@@ -1884,67 +1921,68 @@ static void dsi_cio_timings(void)
1884 | /* 1 * DDR_CLK = 2 * UI */ | 1921 | /* 1 * DDR_CLK = 2 * UI */ |
1885 | 1922 | ||
1886 | /* min 40ns + 4*UI max 85ns + 6*UI */ | 1923 | /* min 40ns + 4*UI max 85ns + 6*UI */ |
1887 | ths_prepare = ns2ddr(70) + 2; | 1924 | ths_prepare = ns2ddr(dsidev, 70) + 2; |
1888 | 1925 | ||
1889 | /* min 145ns + 10*UI */ | 1926 | /* min 145ns + 10*UI */ |
1890 | ths_prepare_ths_zero = ns2ddr(175) + 2; | 1927 | ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2; |
1891 | 1928 | ||
1892 | /* min max(8*UI, 60ns+4*UI) */ | 1929 | /* min max(8*UI, 60ns+4*UI) */ |
1893 | ths_trail = ns2ddr(60) + 5; | 1930 | ths_trail = ns2ddr(dsidev, 60) + 5; |
1894 | 1931 | ||
1895 | /* min 100ns */ | 1932 | /* min 100ns */ |
1896 | ths_exit = ns2ddr(145); | 1933 | ths_exit = ns2ddr(dsidev, 145); |
1897 | 1934 | ||
1898 | /* tlpx min 50n */ | 1935 | /* tlpx min 50n */ |
1899 | tlpx_half = ns2ddr(25); | 1936 | tlpx_half = ns2ddr(dsidev, 25); |
1900 | 1937 | ||
1901 | /* min 60ns */ | 1938 | /* min 60ns */ |
1902 | tclk_trail = ns2ddr(60) + 2; | 1939 | tclk_trail = ns2ddr(dsidev, 60) + 2; |
1903 | 1940 | ||
1904 | /* min 38ns, max 95ns */ | 1941 | /* min 38ns, max 95ns */ |
1905 | tclk_prepare = ns2ddr(65); | 1942 | tclk_prepare = ns2ddr(dsidev, 65); |
1906 | 1943 | ||
1907 | /* min tclk-prepare + tclk-zero = 300ns */ | 1944 | /* min tclk-prepare + tclk-zero = 300ns */ |
1908 | tclk_zero = ns2ddr(260); | 1945 | tclk_zero = ns2ddr(dsidev, 260); |
1909 | 1946 | ||
1910 | DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n", | 1947 | DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n", |
1911 | ths_prepare, ddr2ns(ths_prepare), | 1948 | ths_prepare, ddr2ns(dsidev, ths_prepare), |
1912 | ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero)); | 1949 | ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero)); |
1913 | DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n", | 1950 | DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n", |
1914 | ths_trail, ddr2ns(ths_trail), | 1951 | ths_trail, ddr2ns(dsidev, ths_trail), |
1915 | ths_exit, ddr2ns(ths_exit)); | 1952 | ths_exit, ddr2ns(dsidev, ths_exit)); |
1916 | 1953 | ||
1917 | DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), " | 1954 | DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), " |
1918 | "tclk_zero %u (%uns)\n", | 1955 | "tclk_zero %u (%uns)\n", |
1919 | tlpx_half, ddr2ns(tlpx_half), | 1956 | tlpx_half, ddr2ns(dsidev, tlpx_half), |
1920 | tclk_trail, ddr2ns(tclk_trail), | 1957 | tclk_trail, ddr2ns(dsidev, tclk_trail), |
1921 | tclk_zero, ddr2ns(tclk_zero)); | 1958 | tclk_zero, ddr2ns(dsidev, tclk_zero)); |
1922 | DSSDBG("tclk_prepare %u (%uns)\n", | 1959 | DSSDBG("tclk_prepare %u (%uns)\n", |
1923 | tclk_prepare, ddr2ns(tclk_prepare)); | 1960 | tclk_prepare, ddr2ns(dsidev, tclk_prepare)); |
1924 | 1961 | ||
1925 | /* program timings */ | 1962 | /* program timings */ |
1926 | 1963 | ||
1927 | r = dsi_read_reg(DSI_DSIPHY_CFG0); | 1964 | r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0); |
1928 | r = FLD_MOD(r, ths_prepare, 31, 24); | 1965 | r = FLD_MOD(r, ths_prepare, 31, 24); |
1929 | r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16); | 1966 | r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16); |
1930 | r = FLD_MOD(r, ths_trail, 15, 8); | 1967 | r = FLD_MOD(r, ths_trail, 15, 8); |
1931 | r = FLD_MOD(r, ths_exit, 7, 0); | 1968 | r = FLD_MOD(r, ths_exit, 7, 0); |
1932 | dsi_write_reg(DSI_DSIPHY_CFG0, r); | 1969 | dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r); |
1933 | 1970 | ||
1934 | r = dsi_read_reg(DSI_DSIPHY_CFG1); | 1971 | r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1); |
1935 | r = FLD_MOD(r, tlpx_half, 22, 16); | 1972 | r = FLD_MOD(r, tlpx_half, 22, 16); |
1936 | r = FLD_MOD(r, tclk_trail, 15, 8); | 1973 | r = FLD_MOD(r, tclk_trail, 15, 8); |
1937 | r = FLD_MOD(r, tclk_zero, 7, 0); | 1974 | r = FLD_MOD(r, tclk_zero, 7, 0); |
1938 | dsi_write_reg(DSI_DSIPHY_CFG1, r); | 1975 | dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r); |
1939 | 1976 | ||
1940 | r = dsi_read_reg(DSI_DSIPHY_CFG2); | 1977 | r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2); |
1941 | r = FLD_MOD(r, tclk_prepare, 7, 0); | 1978 | r = FLD_MOD(r, tclk_prepare, 7, 0); |
1942 | dsi_write_reg(DSI_DSIPHY_CFG2, r); | 1979 | dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r); |
1943 | } | 1980 | } |
1944 | 1981 | ||
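dsi_cio_timings() above, and most of the hunks that follow, program registers through the DSS bitfield helpers, whose definitions are not part of this diff. As a rough sketch of the shape the call sites assume (high bit first, low bit second; after this patch the REG_* wrappers also carry the dsidev handle) — an approximation, not text copied from dss.h/dsi.c:

/* Pack/extract a field between bit positions start (high) and end (low). */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

/* Read-modify-write wrappers as the callers in this patch use them. */
#define REG_GET(dsidev, idx, start, end) \
	FLD_GET(dsi_read_reg(dsidev, idx), start, end)
#define REG_FLD_MOD(dsidev, idx, val, start, end) \
	dsi_write_reg(dsidev, idx, \
		FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))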
1945 | static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev, | 1982 | static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev, |
1946 | enum dsi_lane lanes) | 1983 | enum dsi_lane lanes) |
1947 | { | 1984 | { |
1985 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
1948 | int clk_lane = dssdev->phy.dsi.clk_lane; | 1986 | int clk_lane = dssdev->phy.dsi.clk_lane; |
1949 | int data1_lane = dssdev->phy.dsi.data1_lane; | 1987 | int data1_lane = dssdev->phy.dsi.data1_lane; |
1950 | int data2_lane = dssdev->phy.dsi.data2_lane; | 1988 | int data2_lane = dssdev->phy.dsi.data2_lane; |
@@ -1977,22 +2015,28 @@ static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev, | |||
1977 | */ | 2015 | */ |
1978 | 2016 | ||
1979 | /* Set the lane override configuration */ | 2017 | /* Set the lane override configuration */ |
1980 | REG_FLD_MOD(DSI_DSIPHY_CFG10, l, 22, 17); /* REGLPTXSCPDAT4TO0DXDY */ | 2018 | |
2019 | /* REGLPTXSCPDAT4TO0DXDY */ | ||
2020 | REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, 22, 17); | ||
1981 | 2021 | ||
1982 | /* Enable lane override */ | 2022 | /* Enable lane override */ |
1983 | REG_FLD_MOD(DSI_DSIPHY_CFG10, 1, 27, 27); /* ENLPTXSCPDAT */ | 2023 | |
2024 | /* ENLPTXSCPDAT */ | ||
2025 | REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27); | ||
1984 | } | 2026 | } |
1985 | 2027 | ||
1986 | static void dsi_cio_disable_lane_override(void) | 2028 | static void dsi_cio_disable_lane_override(struct platform_device *dsidev) |
1987 | { | 2029 | { |
1988 | /* Disable lane override */ | 2030 | /* Disable lane override */ |
1989 | REG_FLD_MOD(DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */ | 2031 | REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */ |
1990 | /* Reset the lane override configuration */ | 2032 | /* Reset the lane override configuration */ |
1991 | REG_FLD_MOD(DSI_DSIPHY_CFG10, 0, 22, 17); /* REGLPTXSCPDAT4TO0DXDY */ | 2033 | /* REGLPTXSCPDAT4TO0DXDY */ |
2034 | REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17); | ||
1992 | } | 2035 | } |
1993 | 2036 | ||
1994 | static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev) | 2037 | static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev) |
1995 | { | 2038 | { |
2039 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
1996 | int t; | 2040 | int t; |
1997 | int bits[3]; | 2041 | int bits[3]; |
1998 | bool in_use[3]; | 2042 | bool in_use[3]; |
@@ -2024,7 +2068,7 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev) | |||
2024 | int i; | 2068 | int i; |
2025 | int ok; | 2069 | int ok; |
2026 | 2070 | ||
2027 | l = dsi_read_reg(DSI_DSIPHY_CFG5); | 2071 | l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); |
2028 | 2072 | ||
2029 | ok = 0; | 2073 | ok = 0; |
2030 | for (i = 0; i < 3; ++i) { | 2074 | for (i = 0; i < 3; ++i) { |
@@ -2052,6 +2096,7 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev) | |||
2052 | 2096 | ||
2053 | static int dsi_cio_init(struct omap_dss_device *dssdev) | 2097 | static int dsi_cio_init(struct omap_dss_device *dssdev) |
2054 | { | 2098 | { |
2099 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
2055 | int r; | 2100 | int r; |
2056 | u32 l; | 2101 | u32 l; |
2057 | 2102 | ||
@@ -2060,14 +2105,14 @@ static int dsi_cio_init(struct omap_dss_device *dssdev) | |||
2060 | if (dsi.dsi_mux_pads) | 2105 | if (dsi.dsi_mux_pads) |
2061 | dsi.dsi_mux_pads(true); | 2106 | dsi.dsi_mux_pads(true); |
2062 | 2107 | ||
2063 | dsi_enable_scp_clk(); | 2108 | dsi_enable_scp_clk(dsidev); |
2064 | 2109 | ||
2065 | /* A dummy read using the SCP interface to any DSIPHY register is | 2110 | /* A dummy read using the SCP interface to any DSIPHY register is |
2066 | * required after DSIPHY reset to complete the reset of the DSI complex | 2111 | * required after DSIPHY reset to complete the reset of the DSI complex |
2067 | * I/O. */ | 2112 | * I/O. */ |
2068 | dsi_read_reg(DSI_DSIPHY_CFG5); | 2113 | dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); |
2069 | 2114 | ||
2070 | if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) { | 2115 | if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) { |
2071 | DSSERR("CIO SCP Clock domain not coming out of reset.\n"); | 2116 | DSSERR("CIO SCP Clock domain not coming out of reset.\n"); |
2072 | r = -EIO; | 2117 | r = -EIO; |
2073 | goto err_scp_clk_dom; | 2118 | goto err_scp_clk_dom; |
@@ -2076,12 +2121,12 @@ static int dsi_cio_init(struct omap_dss_device *dssdev) | |||
2076 | dsi_set_lane_config(dssdev); | 2121 | dsi_set_lane_config(dssdev); |
2077 | 2122 | ||
2078 | /* set TX STOP MODE timer to maximum for this operation */ | 2123 | /* set TX STOP MODE timer to maximum for this operation */ |
2079 | l = dsi_read_reg(DSI_TIMING1); | 2124 | l = dsi_read_reg(dsidev, DSI_TIMING1); |
2080 | l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ | 2125 | l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ |
2081 | l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */ | 2126 | l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */ |
2082 | l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */ | 2127 | l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */ |
2083 | l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */ | 2128 | l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */ |
2084 | dsi_write_reg(DSI_TIMING1, l); | 2129 | dsi_write_reg(dsidev, DSI_TIMING1, l); |
2085 | 2130 | ||
2086 | if (dsi.ulps_enabled) { | 2131 | if (dsi.ulps_enabled) { |
2087 | DSSDBG("manual ulps exit\n"); | 2132 | DSSDBG("manual ulps exit\n"); |
@@ -2098,19 +2143,19 @@ static int dsi_cio_init(struct omap_dss_device *dssdev) | |||
2098 | DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P); | 2143 | DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P); |
2099 | } | 2144 | } |
2100 | 2145 | ||
2101 | r = dsi_cio_power(DSI_COMPLEXIO_POWER_ON); | 2146 | r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON); |
2102 | if (r) | 2147 | if (r) |
2103 | goto err_cio_pwr; | 2148 | goto err_cio_pwr; |
2104 | 2149 | ||
2105 | if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) { | 2150 | if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) { |
2106 | DSSERR("CIO PWR clock domain not coming out of reset.\n"); | 2151 | DSSERR("CIO PWR clock domain not coming out of reset.\n"); |
2107 | r = -ENODEV; | 2152 | r = -ENODEV; |
2108 | goto err_cio_pwr_dom; | 2153 | goto err_cio_pwr_dom; |
2109 | } | 2154 | } |
2110 | 2155 | ||
2111 | dsi_if_enable(true); | 2156 | dsi_if_enable(dsidev, true); |
2112 | dsi_if_enable(false); | 2157 | dsi_if_enable(dsidev, false); |
2113 | REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */ | 2158 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */ |
2114 | 2159 | ||
2115 | r = dsi_cio_wait_tx_clk_esc_reset(dssdev); | 2160 | r = dsi_cio_wait_tx_clk_esc_reset(dssdev); |
2116 | if (r) | 2161 | if (r) |
@@ -2124,13 +2169,13 @@ static int dsi_cio_init(struct omap_dss_device *dssdev) | |||
2124 | 2169 | ||
2125 | /* Disable the override. The lanes should be set to Mark-11 | 2170 | /* Disable the override. The lanes should be set to Mark-11 |
2126 | * state by the HW */ | 2171 | * state by the HW */ |
2127 | dsi_cio_disable_lane_override(); | 2172 | dsi_cio_disable_lane_override(dsidev); |
2128 | } | 2173 | } |
2129 | 2174 | ||
2130 | /* FORCE_TX_STOP_MODE_IO */ | 2175 | /* FORCE_TX_STOP_MODE_IO */ |
2131 | REG_FLD_MOD(DSI_TIMING1, 0, 15, 15); | 2176 | REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15); |
2132 | 2177 | ||
2133 | dsi_cio_timings(); | 2178 | dsi_cio_timings(dsidev); |
2134 | 2179 | ||
2135 | dsi.ulps_enabled = false; | 2180 | dsi.ulps_enabled = false; |
2136 | 2181 | ||
@@ -2139,32 +2184,32 @@ static int dsi_cio_init(struct omap_dss_device *dssdev) | |||
2139 | return 0; | 2184 | return 0; |
2140 | 2185 | ||
2141 | err_tx_clk_esc_rst: | 2186 | err_tx_clk_esc_rst: |
2142 | REG_FLD_MOD(DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */ | 2187 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */ |
2143 | err_cio_pwr_dom: | 2188 | err_cio_pwr_dom: |
2144 | dsi_cio_power(DSI_COMPLEXIO_POWER_OFF); | 2189 | dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); |
2145 | err_cio_pwr: | 2190 | err_cio_pwr: |
2146 | if (dsi.ulps_enabled) | 2191 | if (dsi.ulps_enabled) |
2147 | dsi_cio_disable_lane_override(); | 2192 | dsi_cio_disable_lane_override(dsidev); |
2148 | err_scp_clk_dom: | 2193 | err_scp_clk_dom: |
2149 | dsi_disable_scp_clk(); | 2194 | dsi_disable_scp_clk(dsidev); |
2150 | if (dsi.dsi_mux_pads) | 2195 | if (dsi.dsi_mux_pads) |
2151 | dsi.dsi_mux_pads(false); | 2196 | dsi.dsi_mux_pads(false); |
2152 | return r; | 2197 | return r; |
2153 | } | 2198 | } |
2154 | 2199 | ||
2155 | static void dsi_cio_uninit(void) | 2200 | static void dsi_cio_uninit(struct platform_device *dsidev) |
2156 | { | 2201 | { |
2157 | dsi_cio_power(DSI_COMPLEXIO_POWER_OFF); | 2202 | dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); |
2158 | dsi_disable_scp_clk(); | 2203 | dsi_disable_scp_clk(dsidev); |
2159 | if (dsi.dsi_mux_pads) | 2204 | if (dsi.dsi_mux_pads) |
2160 | dsi.dsi_mux_pads(false); | 2205 | dsi.dsi_mux_pads(false); |
2161 | } | 2206 | } |
2162 | 2207 | ||
2163 | static int _dsi_wait_reset(void) | 2208 | static int _dsi_wait_reset(struct platform_device *dsidev) |
2164 | { | 2209 | { |
2165 | int t = 0; | 2210 | int t = 0; |
2166 | 2211 | ||
2167 | while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) { | 2212 | while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) { |
2168 | if (++t > 5) { | 2213 | if (++t > 5) { |
2169 | DSSERR("soft reset failed\n"); | 2214 | DSSERR("soft reset failed\n"); |
2170 | return -ENODEV; | 2215 | return -ENODEV; |
@@ -2175,14 +2220,15 @@ static int _dsi_wait_reset(void) | |||
2175 | return 0; | 2220 | return 0; |
2176 | } | 2221 | } |
2177 | 2222 | ||
2178 | static int _dsi_reset(void) | 2223 | static int _dsi_reset(struct platform_device *dsidev) |
2179 | { | 2224 | { |
2180 | /* Soft reset */ | 2225 | /* Soft reset */ |
2181 | REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1); | 2226 | REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1); |
2182 | return _dsi_wait_reset(); | 2227 | return _dsi_wait_reset(dsidev); |
2183 | } | 2228 | } |
2184 | 2229 | ||
2185 | static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2, | 2230 | static void dsi_config_tx_fifo(struct platform_device *dsidev, |
2231 | enum fifo_size size1, enum fifo_size size2, | ||
2186 | enum fifo_size size3, enum fifo_size size4) | 2232 | enum fifo_size size3, enum fifo_size size4) |
2187 | { | 2233 | { |
2188 | u32 r = 0; | 2234 | u32 r = 0; |
@@ -2209,10 +2255,11 @@ static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2, | |||
2209 | add += size; | 2255 | add += size; |
2210 | } | 2256 | } |
2211 | 2257 | ||
2212 | dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r); | 2258 | dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r); |
2213 | } | 2259 | } |
2214 | 2260 | ||
2215 | static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2, | 2261 | static void dsi_config_rx_fifo(struct platform_device *dsidev, |
2262 | enum fifo_size size1, enum fifo_size size2, | ||
2216 | enum fifo_size size3, enum fifo_size size4) | 2263 | enum fifo_size size3, enum fifo_size size4) |
2217 | { | 2264 | { |
2218 | u32 r = 0; | 2265 | u32 r = 0; |
@@ -2239,18 +2286,18 @@ static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2, | |||
2239 | add += size; | 2286 | add += size; |
2240 | } | 2287 | } |
2241 | 2288 | ||
2242 | dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r); | 2289 | dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r); |
2243 | } | 2290 | } |
2244 | 2291 | ||
2245 | static int dsi_force_tx_stop_mode_io(void) | 2292 | static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev) |
2246 | { | 2293 | { |
2247 | u32 r; | 2294 | u32 r; |
2248 | 2295 | ||
2249 | r = dsi_read_reg(DSI_TIMING1); | 2296 | r = dsi_read_reg(dsidev, DSI_TIMING1); |
2250 | r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ | 2297 | r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ |
2251 | dsi_write_reg(DSI_TIMING1, r); | 2298 | dsi_write_reg(dsidev, DSI_TIMING1, r); |
2252 | 2299 | ||
2253 | if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) { | 2300 | if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) { |
2254 | DSSERR("TX_STOP bit not going down\n"); | 2301 | DSSERR("TX_STOP bit not going down\n"); |
2255 | return -EIO; | 2302 | return -EIO; |
2256 | } | 2303 | } |
@@ -2258,21 +2305,22 @@ static int dsi_force_tx_stop_mode_io(void) | |||
2258 | return 0; | 2305 | return 0; |
2259 | } | 2306 | } |
2260 | 2307 | ||
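Several call sites above and below poll a status bit with wait_for_bit_change(); its body is not visible in this diff, so the following is only a rough sketch of the contract the callers rely on: a bounded busy-wait that returns the bit's final value, which callers then compare against the value they expected. The iteration bound is illustrative:

static int wait_for_bit_change(struct platform_device *dsidev,
		const struct dsi_reg idx, int bitnum, int value)
{
	int t = 100000;	/* illustrative bound, not the driver's value */

	while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
		if (--t == 0)
			return !value;	/* timed out: report the wrong value */
	}

	return value;
}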
2261 | static bool dsi_vc_is_enabled(int channel) | 2308 | static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel) |
2262 | { | 2309 | { |
2263 | return REG_GET(DSI_VC_CTRL(channel), 0, 0); | 2310 | return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0); |
2264 | } | 2311 | } |
2265 | 2312 | ||
2266 | static void dsi_packet_sent_handler_vp(void *data, u32 mask) | 2313 | static void dsi_packet_sent_handler_vp(void *data, u32 mask) |
2267 | { | 2314 | { |
2315 | struct platform_device *dsidev = dsi_get_dsidev_from_id(0); | ||
2268 | const int channel = dsi.update_channel; | 2316 | const int channel = dsi.update_channel; |
2269 | u8 bit = dsi.te_enabled ? 30 : 31; | 2317 | u8 bit = dsi.te_enabled ? 30 : 31; |
2270 | 2318 | ||
2271 | if (REG_GET(DSI_VC_TE(channel), bit, bit) == 0) | 2319 | if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit) == 0) |
2272 | complete((struct completion *)data); | 2320 | complete((struct completion *)data); |
2273 | } | 2321 | } |
2274 | 2322 | ||
2275 | static int dsi_sync_vc_vp(int channel) | 2323 | static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel) |
2276 | { | 2324 | { |
2277 | int r = 0; | 2325 | int r = 0; |
2278 | u8 bit; | 2326 | u8 bit; |
@@ -2281,13 +2329,13 @@ static int dsi_sync_vc_vp(int channel) | |||
2281 | 2329 | ||
2282 | bit = dsi.te_enabled ? 30 : 31; | 2330 | bit = dsi.te_enabled ? 30 : 31; |
2283 | 2331 | ||
2284 | r = dsi_register_isr_vc(channel, dsi_packet_sent_handler_vp, | 2332 | r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, |
2285 | &completion, DSI_VC_IRQ_PACKET_SENT); | 2333 | &completion, DSI_VC_IRQ_PACKET_SENT); |
2286 | if (r) | 2334 | if (r) |
2287 | goto err0; | 2335 | goto err0; |
2288 | 2336 | ||
2289 | /* Wait for completion only if TE_EN/TE_START is still set */ | 2337 | /* Wait for completion only if TE_EN/TE_START is still set */ |
2290 | if (REG_GET(DSI_VC_TE(channel), bit, bit)) { | 2338 | if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) { |
2291 | if (wait_for_completion_timeout(&completion, | 2339 | if (wait_for_completion_timeout(&completion, |
2292 | msecs_to_jiffies(10)) == 0) { | 2340 | msecs_to_jiffies(10)) == 0) { |
2293 | DSSERR("Failed to complete previous frame transfer\n"); | 2341 | DSSERR("Failed to complete previous frame transfer\n"); |
@@ -2296,38 +2344,39 @@ static int dsi_sync_vc_vp(int channel) | |||
2296 | } | 2344 | } |
2297 | } | 2345 | } |
2298 | 2346 | ||
2299 | dsi_unregister_isr_vc(channel, dsi_packet_sent_handler_vp, | 2347 | dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, |
2300 | &completion, DSI_VC_IRQ_PACKET_SENT); | 2348 | &completion, DSI_VC_IRQ_PACKET_SENT); |
2301 | 2349 | ||
2302 | return 0; | 2350 | return 0; |
2303 | err1: | 2351 | err1: |
2304 | dsi_unregister_isr_vc(channel, dsi_packet_sent_handler_vp, &completion, | 2352 | dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, |
2305 | DSI_VC_IRQ_PACKET_SENT); | 2353 | &completion, DSI_VC_IRQ_PACKET_SENT); |
2306 | err0: | 2354 | err0: |
2307 | return r; | 2355 | return r; |
2308 | } | 2356 | } |
2309 | 2357 | ||
2310 | static void dsi_packet_sent_handler_l4(void *data, u32 mask) | 2358 | static void dsi_packet_sent_handler_l4(void *data, u32 mask) |
2311 | { | 2359 | { |
2360 | struct platform_device *dsidev = dsi_get_dsidev_from_id(0); | ||
2312 | const int channel = dsi.update_channel; | 2361 | const int channel = dsi.update_channel; |
2313 | 2362 | ||
2314 | if (REG_GET(DSI_VC_CTRL(channel), 5, 5) == 0) | 2363 | if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5) == 0) |
2315 | complete((struct completion *)data); | 2364 | complete((struct completion *)data); |
2316 | } | 2365 | } |
2317 | 2366 | ||
2318 | static int dsi_sync_vc_l4(int channel) | 2367 | static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel) |
2319 | { | 2368 | { |
2320 | int r = 0; | 2369 | int r = 0; |
2321 | 2370 | ||
2322 | DECLARE_COMPLETION_ONSTACK(completion); | 2371 | DECLARE_COMPLETION_ONSTACK(completion); |
2323 | 2372 | ||
2324 | r = dsi_register_isr_vc(channel, dsi_packet_sent_handler_l4, | 2373 | r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4, |
2325 | &completion, DSI_VC_IRQ_PACKET_SENT); | 2374 | &completion, DSI_VC_IRQ_PACKET_SENT); |
2326 | if (r) | 2375 | if (r) |
2327 | goto err0; | 2376 | goto err0; |
2328 | 2377 | ||
2329 | /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */ | 2378 | /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */ |
2330 | if (REG_GET(DSI_VC_CTRL(channel), 5, 5)) { | 2379 | if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) { |
2331 | if (wait_for_completion_timeout(&completion, | 2380 | if (wait_for_completion_timeout(&completion, |
2332 | msecs_to_jiffies(10)) == 0) { | 2381 | msecs_to_jiffies(10)) == 0) { |
2333 | DSSERR("Failed to complete previous l4 transfer\n"); | 2382 | DSSERR("Failed to complete previous l4 transfer\n"); |
@@ -2336,46 +2385,48 @@ static int dsi_sync_vc_l4(int channel) | |||
2336 | } | 2385 | } |
2337 | } | 2386 | } |
2338 | 2387 | ||
2339 | dsi_unregister_isr_vc(channel, dsi_packet_sent_handler_l4, | 2388 | dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4, |
2340 | &completion, DSI_VC_IRQ_PACKET_SENT); | 2389 | &completion, DSI_VC_IRQ_PACKET_SENT); |
2341 | 2390 | ||
2342 | return 0; | 2391 | return 0; |
2343 | err1: | 2392 | err1: |
2344 | dsi_unregister_isr_vc(channel, dsi_packet_sent_handler_l4, | 2393 | dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4, |
2345 | &completion, DSI_VC_IRQ_PACKET_SENT); | 2394 | &completion, DSI_VC_IRQ_PACKET_SENT); |
2346 | err0: | 2395 | err0: |
2347 | return r; | 2396 | return r; |
2348 | } | 2397 | } |
2349 | 2398 | ||
2350 | static int dsi_sync_vc(int channel) | 2399 | static int dsi_sync_vc(struct platform_device *dsidev, int channel) |
2351 | { | 2400 | { |
2352 | WARN_ON(!dsi_bus_is_locked()); | 2401 | WARN_ON(!dsi_bus_is_locked(dsidev)); |
2353 | 2402 | ||
2354 | WARN_ON(in_interrupt()); | 2403 | WARN_ON(in_interrupt()); |
2355 | 2404 | ||
2356 | if (!dsi_vc_is_enabled(channel)) | 2405 | if (!dsi_vc_is_enabled(dsidev, channel)) |
2357 | return 0; | 2406 | return 0; |
2358 | 2407 | ||
2359 | switch (dsi.vc[channel].mode) { | 2408 | switch (dsi.vc[channel].mode) { |
2360 | case DSI_VC_MODE_VP: | 2409 | case DSI_VC_MODE_VP: |
2361 | return dsi_sync_vc_vp(channel); | 2410 | return dsi_sync_vc_vp(dsidev, channel); |
2362 | case DSI_VC_MODE_L4: | 2411 | case DSI_VC_MODE_L4: |
2363 | return dsi_sync_vc_l4(channel); | 2412 | return dsi_sync_vc_l4(dsidev, channel); |
2364 | default: | 2413 | default: |
2365 | BUG(); | 2414 | BUG(); |
2366 | } | 2415 | } |
2367 | } | 2416 | } |
2368 | 2417 | ||
2369 | static int dsi_vc_enable(int channel, bool enable) | 2418 | static int dsi_vc_enable(struct platform_device *dsidev, int channel, |
2419 | bool enable) | ||
2370 | { | 2420 | { |
2371 | DSSDBG("dsi_vc_enable channel %d, enable %d\n", | 2421 | DSSDBG("dsi_vc_enable channel %d, enable %d\n", |
2372 | channel, enable); | 2422 | channel, enable); |
2373 | 2423 | ||
2374 | enable = enable ? 1 : 0; | 2424 | enable = enable ? 1 : 0; |
2375 | 2425 | ||
2376 | REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0); | 2426 | REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0); |
2377 | 2427 | ||
2378 | if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) { | 2428 | if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), |
2429 | 0, enable) != enable) { | ||
2379 | DSSERR("Failed to set dsi_vc_enable to %d\n", enable); | 2430 | DSSERR("Failed to set dsi_vc_enable to %d\n", enable); |
2380 | return -EIO; | 2431 | return -EIO; |
2381 | } | 2432 | } |
@@ -2383,13 +2434,13 @@ static int dsi_vc_enable(int channel, bool enable) | |||
2383 | return 0; | 2434 | return 0; |
2384 | } | 2435 | } |
2385 | 2436 | ||
2386 | static void dsi_vc_initial_config(int channel) | 2437 | static void dsi_vc_initial_config(struct platform_device *dsidev, int channel) |
2387 | { | 2438 | { |
2388 | u32 r; | 2439 | u32 r; |
2389 | 2440 | ||
2390 | DSSDBGF("%d", channel); | 2441 | DSSDBGF("%d", channel); |
2391 | 2442 | ||
2392 | r = dsi_read_reg(DSI_VC_CTRL(channel)); | 2443 | r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel)); |
2393 | 2444 | ||
2394 | if (FLD_GET(r, 15, 15)) /* VC_BUSY */ | 2445 | if (FLD_GET(r, 15, 15)) /* VC_BUSY */ |
2395 | DSSERR("VC(%d) busy when trying to configure it!\n", | 2446 | DSSERR("VC(%d) busy when trying to configure it!\n", |
@@ -2408,63 +2459,64 @@ static void dsi_vc_initial_config(int channel) | |||
2408 | r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */ | 2459 | r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */ |
2409 | r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */ | 2460 | r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */ |
2410 | 2461 | ||
2411 | dsi_write_reg(DSI_VC_CTRL(channel), r); | 2462 | dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r); |
2412 | } | 2463 | } |
2413 | 2464 | ||
2414 | static int dsi_vc_config_l4(int channel) | 2465 | static int dsi_vc_config_l4(struct platform_device *dsidev, int channel) |
2415 | { | 2466 | { |
2416 | if (dsi.vc[channel].mode == DSI_VC_MODE_L4) | 2467 | if (dsi.vc[channel].mode == DSI_VC_MODE_L4) |
2417 | return 0; | 2468 | return 0; |
2418 | 2469 | ||
2419 | DSSDBGF("%d", channel); | 2470 | DSSDBGF("%d", channel); |
2420 | 2471 | ||
2421 | dsi_sync_vc(channel); | 2472 | dsi_sync_vc(dsidev, channel); |
2422 | 2473 | ||
2423 | dsi_vc_enable(channel, 0); | 2474 | dsi_vc_enable(dsidev, channel, 0); |
2424 | 2475 | ||
2425 | /* VC_BUSY */ | 2476 | /* VC_BUSY */ |
2426 | if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) { | 2477 | if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) { |
2427 | DSSERR("vc(%d) busy when trying to config for L4\n", channel); | 2478 | DSSERR("vc(%d) busy when trying to config for L4\n", channel); |
2428 | return -EIO; | 2479 | return -EIO; |
2429 | } | 2480 | } |
2430 | 2481 | ||
2431 | REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */ | 2482 | REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */ |
2432 | 2483 | ||
2433 | /* DCS_CMD_ENABLE */ | 2484 | /* DCS_CMD_ENABLE */ |
2434 | if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) | 2485 | if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) |
2435 | REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 30, 30); | 2486 | REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 30, 30); |
2436 | 2487 | ||
2437 | dsi_vc_enable(channel, 1); | 2488 | dsi_vc_enable(dsidev, channel, 1); |
2438 | 2489 | ||
2439 | dsi.vc[channel].mode = DSI_VC_MODE_L4; | 2490 | dsi.vc[channel].mode = DSI_VC_MODE_L4; |
2440 | 2491 | ||
2441 | return 0; | 2492 | return 0; |
2442 | } | 2493 | } |
2443 | 2494 | ||
2444 | static int dsi_vc_config_vp(int channel) | 2495 | static int dsi_vc_config_vp(struct platform_device *dsidev, int channel) |
2445 | { | 2496 | { |
2446 | if (dsi.vc[channel].mode == DSI_VC_MODE_VP) | 2497 | if (dsi.vc[channel].mode == DSI_VC_MODE_VP) |
2447 | return 0; | 2498 | return 0; |
2448 | 2499 | ||
2449 | DSSDBGF("%d", channel); | 2500 | DSSDBGF("%d", channel); |
2450 | 2501 | ||
2451 | dsi_sync_vc(channel); | 2502 | dsi_sync_vc(dsidev, channel); |
2452 | 2503 | ||
2453 | dsi_vc_enable(channel, 0); | 2504 | dsi_vc_enable(dsidev, channel, 0); |
2454 | 2505 | ||
2455 | /* VC_BUSY */ | 2506 | /* VC_BUSY */ |
2456 | if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) { | 2507 | if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) { |
2457 | DSSERR("vc(%d) busy when trying to config for VP\n", channel); | 2508 | DSSERR("vc(%d) busy when trying to config for VP\n", channel); |
2458 | return -EIO; | 2509 | return -EIO; |
2459 | } | 2510 | } |
2460 | 2511 | ||
2461 | REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */ | 2512 | /* SOURCE, 1 = video port */ |
2513 | REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 1, 1); | ||
2462 | 2514 | ||
2463 | /* DCS_CMD_ENABLE */ | 2515 | /* DCS_CMD_ENABLE */ |
2464 | if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) | 2516 | if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) |
2465 | REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 30, 30); | 2517 | REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 30, 30); |
2466 | 2518 | ||
2467 | dsi_vc_enable(channel, 1); | 2519 | dsi_vc_enable(dsidev, channel, 1); |
2468 | 2520 | ||
2469 | dsi.vc[channel].mode = DSI_VC_MODE_VP; | 2521 | dsi.vc[channel].mode = DSI_VC_MODE_VP; |
2470 | 2522 | ||
@@ -2475,27 +2527,29 @@ static int dsi_vc_config_vp(int channel) | |||
2475 | void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel, | 2527 | void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel, |
2476 | bool enable) | 2528 | bool enable) |
2477 | { | 2529 | { |
2530 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
2531 | |||
2478 | DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable); | 2532 | DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable); |
2479 | 2533 | ||
2480 | WARN_ON(!dsi_bus_is_locked()); | 2534 | WARN_ON(!dsi_bus_is_locked(dsidev)); |
2481 | 2535 | ||
2482 | dsi_vc_enable(channel, 0); | 2536 | dsi_vc_enable(dsidev, channel, 0); |
2483 | dsi_if_enable(0); | 2537 | dsi_if_enable(dsidev, 0); |
2484 | 2538 | ||
2485 | REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9); | 2539 | REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9); |
2486 | 2540 | ||
2487 | dsi_vc_enable(channel, 1); | 2541 | dsi_vc_enable(dsidev, channel, 1); |
2488 | dsi_if_enable(1); | 2542 | dsi_if_enable(dsidev, 1); |
2489 | 2543 | ||
2490 | dsi_force_tx_stop_mode_io(); | 2544 | dsi_force_tx_stop_mode_io(dsidev); |
2491 | } | 2545 | } |
2492 | EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs); | 2546 | EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs); |
2493 | 2547 | ||
2494 | static void dsi_vc_flush_long_data(int channel) | 2548 | static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel) |
2495 | { | 2549 | { |
2496 | while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { | 2550 | while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { |
2497 | u32 val; | 2551 | u32 val; |
2498 | val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); | 2552 | val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); |
2499 | DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n", | 2553 | DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n", |
2500 | (val >> 0) & 0xff, | 2554 | (val >> 0) & 0xff, |
2501 | (val >> 8) & 0xff, | 2555 | (val >> 8) & 0xff, |
@@ -2541,13 +2595,14 @@ static void dsi_show_rx_ack_with_err(u16 err) | |||
2541 | DSSERR("\t\tDSI Protocol Violation\n"); | 2595 | DSSERR("\t\tDSI Protocol Violation\n"); |
2542 | } | 2596 | } |
2543 | 2597 | ||
2544 | static u16 dsi_vc_flush_receive_data(int channel) | 2598 | static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev, |
2599 | int channel) | ||
2545 | { | 2600 | { |
2546 | /* RX_FIFO_NOT_EMPTY */ | 2601 | /* RX_FIFO_NOT_EMPTY */ |
2547 | while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { | 2602 | while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { |
2548 | u32 val; | 2603 | u32 val; |
2549 | u8 dt; | 2604 | u8 dt; |
2550 | val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); | 2605 | val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); |
2551 | DSSERR("\trawval %#08x\n", val); | 2606 | DSSERR("\trawval %#08x\n", val); |
2552 | dt = FLD_GET(val, 5, 0); | 2607 | dt = FLD_GET(val, 5, 0); |
2553 | if (dt == DSI_DT_RX_ACK_WITH_ERR) { | 2608 | if (dt == DSI_DT_RX_ACK_WITH_ERR) { |
@@ -2562,7 +2617,7 @@ static u16 dsi_vc_flush_receive_data(int channel) | |||
2562 | } else if (dt == DSI_DT_RX_DCS_LONG_READ) { | 2617 | } else if (dt == DSI_DT_RX_DCS_LONG_READ) { |
2563 | DSSERR("\tDCS long response, len %d\n", | 2618 | DSSERR("\tDCS long response, len %d\n", |
2564 | FLD_GET(val, 23, 8)); | 2619 | FLD_GET(val, 23, 8)); |
2565 | dsi_vc_flush_long_data(channel); | 2620 | dsi_vc_flush_long_data(dsidev, channel); |
2566 | } else { | 2621 | } else { |
2567 | DSSERR("\tunknown datatype 0x%02x\n", dt); | 2622 | DSSERR("\tunknown datatype 0x%02x\n", dt); |
2568 | } | 2623 | } |
@@ -2570,40 +2625,42 @@ static u16 dsi_vc_flush_receive_data(int channel) | |||
2570 | return 0; | 2625 | return 0; |
2571 | } | 2626 | } |
2572 | 2627 | ||
2573 | static int dsi_vc_send_bta(int channel) | 2628 | static int dsi_vc_send_bta(struct platform_device *dsidev, int channel) |
2574 | { | 2629 | { |
2575 | if (dsi.debug_write || dsi.debug_read) | 2630 | if (dsi.debug_write || dsi.debug_read) |
2576 | DSSDBG("dsi_vc_send_bta %d\n", channel); | 2631 | DSSDBG("dsi_vc_send_bta %d\n", channel); |
2577 | 2632 | ||
2578 | WARN_ON(!dsi_bus_is_locked()); | 2633 | WARN_ON(!dsi_bus_is_locked(dsidev)); |
2579 | 2634 | ||
2580 | if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */ | 2635 | /* RX_FIFO_NOT_EMPTY */ |
2636 | if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { | ||
2581 | DSSERR("rx fifo not empty when sending BTA, dumping data:\n"); | 2637 | DSSERR("rx fifo not empty when sending BTA, dumping data:\n"); |
2582 | dsi_vc_flush_receive_data(channel); | 2638 | dsi_vc_flush_receive_data(dsidev, channel); |
2583 | } | 2639 | } |
2584 | 2640 | ||
2585 | REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */ | 2641 | REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */ |
2586 | 2642 | ||
2587 | return 0; | 2643 | return 0; |
2588 | } | 2644 | } |
2589 | 2645 | ||
2590 | int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel) | 2646 | int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel) |
2591 | { | 2647 | { |
2648 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
2592 | DECLARE_COMPLETION_ONSTACK(completion); | 2649 | DECLARE_COMPLETION_ONSTACK(completion); |
2593 | int r = 0; | 2650 | int r = 0; |
2594 | u32 err; | 2651 | u32 err; |
2595 | 2652 | ||
2596 | r = dsi_register_isr_vc(channel, dsi_completion_handler, | 2653 | r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler, |
2597 | &completion, DSI_VC_IRQ_BTA); | 2654 | &completion, DSI_VC_IRQ_BTA); |
2598 | if (r) | 2655 | if (r) |
2599 | goto err0; | 2656 | goto err0; |
2600 | 2657 | ||
2601 | r = dsi_register_isr(dsi_completion_handler, &completion, | 2658 | r = dsi_register_isr(dsidev, dsi_completion_handler, &completion, |
2602 | DSI_IRQ_ERROR_MASK); | 2659 | DSI_IRQ_ERROR_MASK); |
2603 | if (r) | 2660 | if (r) |
2604 | goto err1; | 2661 | goto err1; |
2605 | 2662 | ||
2606 | r = dsi_vc_send_bta(channel); | 2663 | r = dsi_vc_send_bta(dsidev, channel); |
2607 | if (r) | 2664 | if (r) |
2608 | goto err2; | 2665 | goto err2; |
2609 | 2666 | ||
@@ -2614,41 +2671,41 @@ int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel) | |||
2614 | goto err2; | 2671 | goto err2; |
2615 | } | 2672 | } |
2616 | 2673 | ||
2617 | err = dsi_get_errors(); | 2674 | err = dsi_get_errors(dsidev); |
2618 | if (err) { | 2675 | if (err) { |
2619 | DSSERR("Error while sending BTA: %x\n", err); | 2676 | DSSERR("Error while sending BTA: %x\n", err); |
2620 | r = -EIO; | 2677 | r = -EIO; |
2621 | goto err2; | 2678 | goto err2; |
2622 | } | 2679 | } |
2623 | err2: | 2680 | err2: |
2624 | dsi_unregister_isr(dsi_completion_handler, &completion, | 2681 | dsi_unregister_isr(dsidev, dsi_completion_handler, &completion, |
2625 | DSI_IRQ_ERROR_MASK); | 2682 | DSI_IRQ_ERROR_MASK); |
2626 | err1: | 2683 | err1: |
2627 | dsi_unregister_isr_vc(channel, dsi_completion_handler, | 2684 | dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler, |
2628 | &completion, DSI_VC_IRQ_BTA); | 2685 | &completion, DSI_VC_IRQ_BTA); |
2629 | err0: | 2686 | err0: |
2630 | return r; | 2687 | return r; |
2631 | } | 2688 | } |
2632 | EXPORT_SYMBOL(dsi_vc_send_bta_sync); | 2689 | EXPORT_SYMBOL(dsi_vc_send_bta_sync); |
2633 | 2690 | ||
2634 | static inline void dsi_vc_write_long_header(int channel, u8 data_type, | 2691 | static inline void dsi_vc_write_long_header(struct platform_device *dsidev, |
2635 | u16 len, u8 ecc) | 2692 | int channel, u8 data_type, u16 len, u8 ecc) |
2636 | { | 2693 | { |
2637 | u32 val; | 2694 | u32 val; |
2638 | u8 data_id; | 2695 | u8 data_id; |
2639 | 2696 | ||
2640 | WARN_ON(!dsi_bus_is_locked()); | 2697 | WARN_ON(!dsi_bus_is_locked(dsidev)); |
2641 | 2698 | ||
2642 | data_id = data_type | dsi.vc[channel].vc_id << 6; | 2699 | data_id = data_type | dsi.vc[channel].vc_id << 6; |
2643 | 2700 | ||
2644 | val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) | | 2701 | val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) | |
2645 | FLD_VAL(ecc, 31, 24); | 2702 | FLD_VAL(ecc, 31, 24); |
2646 | 2703 | ||
2647 | dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val); | 2704 | dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val); |
2648 | } | 2705 | } |
2649 | 2706 | ||
2650 | static inline void dsi_vc_write_long_payload(int channel, | 2707 | static inline void dsi_vc_write_long_payload(struct platform_device *dsidev, |
2651 | u8 b1, u8 b2, u8 b3, u8 b4) | 2708 | int channel, u8 b1, u8 b2, u8 b3, u8 b4) |
2652 | { | 2709 | { |
2653 | u32 val; | 2710 | u32 val; |
2654 | 2711 | ||
@@ -2657,11 +2714,11 @@ static inline void dsi_vc_write_long_payload(int channel, | |||
2657 | /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n", | 2714 | /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n", |
2658 | b1, b2, b3, b4, val); */ | 2715 | b1, b2, b3, b4, val); */ |
2659 | 2716 | ||
2660 | dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val); | 2717 | dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val); |
2661 | } | 2718 | } |
2662 | 2719 | ||
2663 | static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len, | 2720 | static int dsi_vc_send_long(struct platform_device *dsidev, int channel, |
2664 | u8 ecc) | 2721 | u8 data_type, u8 *data, u16 len, u8 ecc) |
2665 | { | 2722 | { |
2666 | /*u32 val; */ | 2723 | /*u32 val; */ |
2667 | int i; | 2724 | int i; |
@@ -2678,9 +2735,9 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len, | |||
2678 | return -EINVAL; | 2735 | return -EINVAL; |
2679 | } | 2736 | } |
2680 | 2737 | ||
2681 | dsi_vc_config_l4(channel); | 2738 | dsi_vc_config_l4(dsidev, channel); |
2682 | 2739 | ||
2683 | dsi_vc_write_long_header(channel, data_type, len, ecc); | 2740 | dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc); |
2684 | 2741 | ||
2685 | p = data; | 2742 | p = data; |
2686 | for (i = 0; i < len >> 2; i++) { | 2743 | for (i = 0; i < len >> 2; i++) { |
@@ -2692,7 +2749,7 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len, | |||
2692 | b3 = *p++; | 2749 | b3 = *p++; |
2693 | b4 = *p++; | 2750 | b4 = *p++; |
2694 | 2751 | ||
2695 | dsi_vc_write_long_payload(channel, b1, b2, b3, b4); | 2752 | dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4); |
2696 | } | 2753 | } |
2697 | 2754 | ||
2698 | i = len % 4; | 2755 | i = len % 4; |
@@ -2717,27 +2774,28 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len, | |||
2717 | break; | 2774 | break; |
2718 | } | 2775 | } |
2719 | 2776 | ||
2720 | dsi_vc_write_long_payload(channel, b1, b2, b3, 0); | 2777 | dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0); |
2721 | } | 2778 | } |
2722 | 2779 | ||
2723 | return r; | 2780 | return r; |
2724 | } | 2781 | } |
2725 | 2782 | ||
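For reference, the framing done by dsi_vc_write_long_header() and dsi_vc_write_long_payload() above can be reproduced in isolation: the header word is data_id | len << 8 | ecc << 24 with data_id = data_type | vc_id << 6, and the payload is written four bytes at a time with the first byte in bits 7:0 (matching how dsi_vc_flush_long_data() above unpacks them). A worked example assuming vc_id 0, no ECC and a hypothetical 5-byte payload for a DCS long write (data type 0x39, as the comment in dsi_vc_dcs_write_nosync() below notes); the little program is illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t data[5] = { 0x2c, 0x01, 0x02, 0x03, 0x04 };	/* hypothetical */
	uint8_t data_type = 0x39, vc_id = 0, ecc = 0;
	uint16_t len = sizeof(data);

	/* Same layout as FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
	 * FLD_VAL(ecc, 31, 24) in dsi_vc_write_long_header(). */
	uint8_t data_id = data_type | vc_id << 6;
	uint32_t header = data_id | (uint32_t)len << 8 | (uint32_t)ecc << 24;

	/* Payload words, first byte in bits 7:0. */
	uint32_t word0 = data[0] | data[1] << 8 | data[2] << 16 |
			 (uint32_t)data[3] << 24;
	uint32_t word1 = data[4];	/* trailing byte, zero-padded */

	printf("header %#010x, payload %#010x %#010x\n", header, word0, word1);
	/* -> header 0x00000539, payload 0x0302012c 0x00000004 */
	return 0;
}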
2726 | static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc) | 2783 | static int dsi_vc_send_short(struct platform_device *dsidev, int channel, |
2784 | u8 data_type, u16 data, u8 ecc) | ||
2727 | { | 2785 | { |
2728 | u32 r; | 2786 | u32 r; |
2729 | u8 data_id; | 2787 | u8 data_id; |
2730 | 2788 | ||
2731 | WARN_ON(!dsi_bus_is_locked()); | 2789 | WARN_ON(!dsi_bus_is_locked(dsidev)); |
2732 | 2790 | ||
2733 | if (dsi.debug_write) | 2791 | if (dsi.debug_write) |
2734 | DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n", | 2792 | DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n", |
2735 | channel, | 2793 | channel, |
2736 | data_type, data & 0xff, (data >> 8) & 0xff); | 2794 | data_type, data & 0xff, (data >> 8) & 0xff); |
2737 | 2795 | ||
2738 | dsi_vc_config_l4(channel); | 2796 | dsi_vc_config_l4(dsidev, channel); |
2739 | 2797 | ||
2740 | if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) { | 2798 | if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) { |
2741 | DSSERR("ERROR FIFO FULL, aborting transfer\n"); | 2799 | DSSERR("ERROR FIFO FULL, aborting transfer\n"); |
2742 | return -EINVAL; | 2800 | return -EINVAL; |
2743 | } | 2801 | } |
@@ -2746,34 +2804,38 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc) | |||
2746 | 2804 | ||
2747 | r = (data_id << 0) | (data << 8) | (ecc << 24); | 2805 | r = (data_id << 0) | (data << 8) | (ecc << 24); |
2748 | 2806 | ||
2749 | dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r); | 2807 | dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r); |
2750 | 2808 | ||
2751 | return 0; | 2809 | return 0; |
2752 | } | 2810 | } |
2753 | 2811 | ||
2754 | int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel) | 2812 | int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel) |
2755 | { | 2813 | { |
2814 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
2756 | u8 nullpkg[] = {0, 0, 0, 0}; | 2815 | u8 nullpkg[] = {0, 0, 0, 0}; |
2757 | return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0); | 2816 | |
2817 | return dsi_vc_send_long(dsidev, channel, DSI_DT_NULL_PACKET, nullpkg, | ||
2818 | 4, 0); | ||
2758 | } | 2819 | } |
2759 | EXPORT_SYMBOL(dsi_vc_send_null); | 2820 | EXPORT_SYMBOL(dsi_vc_send_null); |
2760 | 2821 | ||
2761 | int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel, | 2822 | int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel, |
2762 | u8 *data, int len) | 2823 | u8 *data, int len) |
2763 | { | 2824 | { |
2825 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
2764 | int r; | 2826 | int r; |
2765 | 2827 | ||
2766 | BUG_ON(len == 0); | 2828 | BUG_ON(len == 0); |
2767 | 2829 | ||
2768 | if (len == 1) { | 2830 | if (len == 1) { |
2769 | r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0, | 2831 | r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_0, |
2770 | data[0], 0); | 2832 | data[0], 0); |
2771 | } else if (len == 2) { | 2833 | } else if (len == 2) { |
2772 | r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1, | 2834 | r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_1, |
2773 | data[0] | (data[1] << 8), 0); | 2835 | data[0] | (data[1] << 8), 0); |
2774 | } else { | 2836 | } else { |
2775 | /* 0x39 = DCS Long Write */ | 2837 | /* 0x39 = DCS Long Write */ |
2776 | r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE, | 2838 | r = dsi_vc_send_long(dsidev, channel, DSI_DT_DCS_LONG_WRITE, |
2777 | data, len, 0); | 2839 | data, len, 0); |
2778 | } | 2840 | } |
2779 | 2841 | ||
@@ -2784,6 +2846,7 @@ EXPORT_SYMBOL(dsi_vc_dcs_write_nosync); | |||
2784 | int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data, | 2846 | int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data, |
2785 | int len) | 2847 | int len) |
2786 | { | 2848 | { |
2849 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
2787 | int r; | 2850 | int r; |
2788 | 2851 | ||
2789 | r = dsi_vc_dcs_write_nosync(dssdev, channel, data, len); | 2852 | r = dsi_vc_dcs_write_nosync(dssdev, channel, data, len); |
@@ -2794,9 +2857,10 @@ int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data, | |||
2794 | if (r) | 2857 | if (r) |
2795 | goto err; | 2858 | goto err; |
2796 | 2859 | ||
2797 | if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */ | 2860 | /* RX_FIFO_NOT_EMPTY */ |
2861 | if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { | ||
2798 | DSSERR("rx fifo not empty after write, dumping data:\n"); | 2862 | DSSERR("rx fifo not empty after write, dumping data:\n"); |
2799 | dsi_vc_flush_receive_data(channel); | 2863 | dsi_vc_flush_receive_data(dsidev, channel); |
2800 | r = -EIO; | 2864 | r = -EIO; |
2801 | goto err; | 2865 | goto err; |
2802 | } | 2866 | } |
@@ -2828,6 +2892,7 @@ EXPORT_SYMBOL(dsi_vc_dcs_write_1); | |||
2828 | int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, | 2892 | int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, |
2829 | u8 *buf, int buflen) | 2893 | u8 *buf, int buflen) |
2830 | { | 2894 | { |
2895 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
2831 | u32 val; | 2896 | u32 val; |
2832 | u8 dt; | 2897 | u8 dt; |
2833 | int r; | 2898 | int r; |
@@ -2835,7 +2900,7 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, | |||
2835 | if (dsi.debug_read) | 2900 | if (dsi.debug_read) |
2836 | DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd); | 2901 | DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd); |
2837 | 2902 | ||
2838 | r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0); | 2903 | r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_READ, dcs_cmd, 0); |
2839 | if (r) | 2904 | if (r) |
2840 | goto err; | 2905 | goto err; |
2841 | 2906 | ||
@@ -2844,13 +2909,13 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, | |||
2844 | goto err; | 2909 | goto err; |
2845 | 2910 | ||
2846 | /* RX_FIFO_NOT_EMPTY */ | 2911 | /* RX_FIFO_NOT_EMPTY */ |
2847 | if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) { | 2912 | if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) { |
2848 | DSSERR("RX fifo empty when trying to read.\n"); | 2913 | DSSERR("RX fifo empty when trying to read.\n"); |
2849 | r = -EIO; | 2914 | r = -EIO; |
2850 | goto err; | 2915 | goto err; |
2851 | } | 2916 | } |
2852 | 2917 | ||
2853 | val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); | 2918 | val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); |
2854 | if (dsi.debug_read) | 2919 | if (dsi.debug_read) |
2855 | DSSDBG("\theader: %08x\n", val); | 2920 | DSSDBG("\theader: %08x\n", val); |
2856 | dt = FLD_GET(val, 5, 0); | 2921 | dt = FLD_GET(val, 5, 0); |
@@ -2901,7 +2966,8 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, | |||
2901 | /* two byte checksum ends the packet, not included in len */ | 2966 | /* two byte checksum ends the packet, not included in len */ |
2902 | for (w = 0; w < len + 2;) { | 2967 | for (w = 0; w < len + 2;) { |
2903 | int b; | 2968 | int b; |
2904 | val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); | 2969 | val = dsi_read_reg(dsidev, |
2970 | DSI_VC_SHORT_PACKET_HEADER(channel)); | ||
2905 | if (dsi.debug_read) | 2971 | if (dsi.debug_read) |
2906 | DSSDBG("\t\t%02x %02x %02x %02x\n", | 2972 | DSSDBG("\t\t%02x %02x %02x %02x\n", |
2907 | (val >> 0) & 0xff, | 2973 | (val >> 0) & 0xff, |
@@ -2974,60 +3040,63 @@ EXPORT_SYMBOL(dsi_vc_dcs_read_2); | |||
2974 | int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel, | 3040 | int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel, |
2975 | u16 len) | 3041 | u16 len) |
2976 | { | 3042 | { |
2977 | return dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE, | 3043 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); |
3044 | |||
3045 | return dsi_vc_send_short(dsidev, channel, DSI_DT_SET_MAX_RET_PKG_SIZE, | ||
2978 | len, 0); | 3046 | len, 0); |
2979 | } | 3047 | } |
2980 | EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size); | 3048 | EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size); |
2981 | 3049 | ||
2982 | static int dsi_enter_ulps(void) | 3050 | static int dsi_enter_ulps(struct platform_device *dsidev) |
2983 | { | 3051 | { |
2984 | DECLARE_COMPLETION_ONSTACK(completion); | 3052 | DECLARE_COMPLETION_ONSTACK(completion); |
2985 | int r; | 3053 | int r; |
2986 | 3054 | ||
2987 | DSSDBGF(); | 3055 | DSSDBGF(); |
2988 | 3056 | ||
2989 | WARN_ON(!dsi_bus_is_locked()); | 3057 | WARN_ON(!dsi_bus_is_locked(dsidev)); |
2990 | 3058 | ||
2991 | WARN_ON(dsi.ulps_enabled); | 3059 | WARN_ON(dsi.ulps_enabled); |
2992 | 3060 | ||
2993 | if (dsi.ulps_enabled) | 3061 | if (dsi.ulps_enabled) |
2994 | return 0; | 3062 | return 0; |
2995 | 3063 | ||
2996 | if (REG_GET(DSI_CLK_CTRL, 13, 13)) { | 3064 | if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) { |
2997 | DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n"); | 3065 | DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n"); |
2998 | return -EIO; | 3066 | return -EIO; |
2999 | } | 3067 | } |
3000 | 3068 | ||
3001 | dsi_sync_vc(0); | 3069 | dsi_sync_vc(dsidev, 0); |
3002 | dsi_sync_vc(1); | 3070 | dsi_sync_vc(dsidev, 1); |
3003 | dsi_sync_vc(2); | 3071 | dsi_sync_vc(dsidev, 2); |
3004 | dsi_sync_vc(3); | 3072 | dsi_sync_vc(dsidev, 3); |
3005 | 3073 | ||
3006 | dsi_force_tx_stop_mode_io(); | 3074 | dsi_force_tx_stop_mode_io(dsidev); |
3007 | 3075 | ||
3008 | dsi_vc_enable(0, false); | 3076 | dsi_vc_enable(dsidev, 0, false); |
3009 | dsi_vc_enable(1, false); | 3077 | dsi_vc_enable(dsidev, 1, false); |
3010 | dsi_vc_enable(2, false); | 3078 | dsi_vc_enable(dsidev, 2, false); |
3011 | dsi_vc_enable(3, false); | 3079 | dsi_vc_enable(dsidev, 3, false); |
3012 | 3080 | ||
3013 | if (REG_GET(DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */ | 3081 | if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */ |
3014 | DSSERR("HS busy when enabling ULPS\n"); | 3082 | DSSERR("HS busy when enabling ULPS\n"); |
3015 | return -EIO; | 3083 | return -EIO; |
3016 | } | 3084 | } |
3017 | 3085 | ||
3018 | if (REG_GET(DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */ | 3086 | if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */ |
3019 | DSSERR("LP busy when enabling ULPS\n"); | 3087 | DSSERR("LP busy when enabling ULPS\n"); |
3020 | return -EIO; | 3088 | return -EIO; |
3021 | } | 3089 | } |
3022 | 3090 | ||
3023 | r = dsi_register_isr_cio(dsi_completion_handler, &completion, | 3091 | r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion, |
3024 | DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); | 3092 | DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); |
3025 | if (r) | 3093 | if (r) |
3026 | return r; | 3094 | return r; |
3027 | 3095 | ||
3028 | /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */ | 3096 | /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */ |
3029 | /* LANEx_ULPS_SIG2 */ | 3097 | /* LANEx_ULPS_SIG2 */ |
3030 | REG_FLD_MOD(DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2), 7, 5); | 3098 | REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2), |
3099 | 7, 5); | ||
3031 | 3100 | ||
3032 | if (wait_for_completion_timeout(&completion, | 3101 | if (wait_for_completion_timeout(&completion, |
3033 | msecs_to_jiffies(1000)) == 0) { | 3102 | msecs_to_jiffies(1000)) == 0) { |
@@ -3036,24 +3105,25 @@ static int dsi_enter_ulps(void) | |||
3036 | goto err; | 3105 | goto err; |
3037 | } | 3106 | } |
3038 | 3107 | ||
3039 | dsi_unregister_isr_cio(dsi_completion_handler, &completion, | 3108 | dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion, |
3040 | DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); | 3109 | DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); |
3041 | 3110 | ||
3042 | dsi_cio_power(DSI_COMPLEXIO_POWER_ULPS); | 3111 | dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS); |
3043 | 3112 | ||
3044 | dsi_if_enable(false); | 3113 | dsi_if_enable(dsidev, false); |
3045 | 3114 | ||
3046 | dsi.ulps_enabled = true; | 3115 | dsi.ulps_enabled = true; |
3047 | 3116 | ||
3048 | return 0; | 3117 | return 0; |
3049 | 3118 | ||
3050 | err: | 3119 | err: |
3051 | dsi_unregister_isr_cio(dsi_completion_handler, &completion, | 3120 | dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion, |
3052 | DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); | 3121 | DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); |
3053 | return r; | 3122 | return r; |
3054 | } | 3123 | } |
3055 | 3124 | ||
3056 | static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16) | 3125 | static void dsi_set_lp_rx_timeout(struct platform_device *dsidev, |
3126 | unsigned ticks, bool x4, bool x16) | ||
3057 | { | 3127 | { |
3058 | unsigned long fck; | 3128 | unsigned long fck; |
3059 | unsigned long total_ticks; | 3129 | unsigned long total_ticks; |
@@ -3062,14 +3132,14 @@ static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16) | |||
3062 | BUG_ON(ticks > 0x1fff); | 3132 | BUG_ON(ticks > 0x1fff); |
3063 | 3133 | ||
3064 | /* ticks in DSI_FCK */ | 3134 | /* ticks in DSI_FCK */ |
3065 | fck = dsi_fclk_rate(); | 3135 | fck = dsi_fclk_rate(dsidev); |
3066 | 3136 | ||
3067 | r = dsi_read_reg(DSI_TIMING2); | 3137 | r = dsi_read_reg(dsidev, DSI_TIMING2); |
3068 | r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */ | 3138 | r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */ |
3069 | r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */ | 3139 | r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */ |
3070 | r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */ | 3140 | r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */ |
3071 | r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */ | 3141 | r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */ |
3072 | dsi_write_reg(DSI_TIMING2, r); | 3142 | dsi_write_reg(dsidev, DSI_TIMING2, r); |
3073 | 3143 | ||
3074 | total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); | 3144 | total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); |
3075 | 3145 | ||
@@ -3079,7 +3149,8 @@ static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16) | |||
3079 | (total_ticks * 1000) / (fck / 1000 / 1000)); | 3149 | (total_ticks * 1000) / (fck / 1000 / 1000)); |
3080 | } | 3150 | } |
3081 | 3151 | ||
3082 | static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16) | 3152 | static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks, |
3153 | bool x8, bool x16) | ||
3083 | { | 3154 | { |
3084 | unsigned long fck; | 3155 | unsigned long fck; |
3085 | unsigned long total_ticks; | 3156 | unsigned long total_ticks; |
@@ -3088,14 +3159,14 @@ static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16) | |||
3088 | BUG_ON(ticks > 0x1fff); | 3159 | BUG_ON(ticks > 0x1fff); |
3089 | 3160 | ||
3090 | /* ticks in DSI_FCK */ | 3161 | /* ticks in DSI_FCK */ |
3091 | fck = dsi_fclk_rate(); | 3162 | fck = dsi_fclk_rate(dsidev); |
3092 | 3163 | ||
3093 | r = dsi_read_reg(DSI_TIMING1); | 3164 | r = dsi_read_reg(dsidev, DSI_TIMING1); |
3094 | r = FLD_MOD(r, 1, 31, 31); /* TA_TO */ | 3165 | r = FLD_MOD(r, 1, 31, 31); /* TA_TO */ |
3095 | r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */ | 3166 | r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */ |
3096 | r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */ | 3167 | r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */ |
3097 | r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */ | 3168 | r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */ |
3098 | dsi_write_reg(DSI_TIMING1, r); | 3169 | dsi_write_reg(dsidev, DSI_TIMING1, r); |
3099 | 3170 | ||
3100 | total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1); | 3171 | total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1); |
3101 | 3172 | ||
@@ -3105,7 +3176,8 @@ static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16) | |||
3105 | (total_ticks * 1000) / (fck / 1000 / 1000)); | 3176 | (total_ticks * 1000) / (fck / 1000 / 1000)); |
3106 | } | 3177 | } |
3107 | 3178 | ||
3108 | static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16) | 3179 | static void dsi_set_stop_state_counter(struct platform_device *dsidev, |
3180 | unsigned ticks, bool x4, bool x16) | ||
3109 | { | 3181 | { |
3110 | unsigned long fck; | 3182 | unsigned long fck; |
3111 | unsigned long total_ticks; | 3183 | unsigned long total_ticks; |
@@ -3114,14 +3186,14 @@ static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16) | |||
3114 | BUG_ON(ticks > 0x1fff); | 3186 | BUG_ON(ticks > 0x1fff); |
3115 | 3187 | ||
3116 | /* ticks in DSI_FCK */ | 3188 | /* ticks in DSI_FCK */ |
3117 | fck = dsi_fclk_rate(); | 3189 | fck = dsi_fclk_rate(dsidev); |
3118 | 3190 | ||
3119 | r = dsi_read_reg(DSI_TIMING1); | 3191 | r = dsi_read_reg(dsidev, DSI_TIMING1); |
3120 | r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ | 3192 | r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ |
3121 | r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */ | 3193 | r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */ |
3122 | r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */ | 3194 | r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */ |
3123 | r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */ | 3195 | r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */ |
3124 | dsi_write_reg(DSI_TIMING1, r); | 3196 | dsi_write_reg(dsidev, DSI_TIMING1, r); |
3125 | 3197 | ||
3126 | total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); | 3198 | total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); |
3127 | 3199 | ||
@@ -3131,7 +3203,8 @@ static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16) | |||
3131 | (total_ticks * 1000) / (fck / 1000 / 1000)); | 3203 | (total_ticks * 1000) / (fck / 1000 / 1000)); |
3132 | } | 3204 | } |
3133 | 3205 | ||
3134 | static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16) | 3206 | static void dsi_set_hs_tx_timeout(struct platform_device *dsidev, |
3207 | unsigned ticks, bool x4, bool x16) | ||
3135 | { | 3208 | { |
3136 | unsigned long fck; | 3209 | unsigned long fck; |
3137 | unsigned long total_ticks; | 3210 | unsigned long total_ticks; |
@@ -3140,14 +3213,14 @@ static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16) | |||
3140 | BUG_ON(ticks > 0x1fff); | 3213 | BUG_ON(ticks > 0x1fff); |
3141 | 3214 | ||
3142 | /* ticks in TxByteClkHS */ | 3215 | /* ticks in TxByteClkHS */ |
3143 | fck = dsi_get_txbyteclkhs(); | 3216 | fck = dsi_get_txbyteclkhs(dsidev); |
3144 | 3217 | ||
3145 | r = dsi_read_reg(DSI_TIMING2); | 3218 | r = dsi_read_reg(dsidev, DSI_TIMING2); |
3146 | r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */ | 3219 | r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */ |
3147 | r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */ | 3220 | r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */ |
3148 | r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */ | 3221 | r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */ |
3149 | r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */ | 3222 | r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */ |
3150 | dsi_write_reg(DSI_TIMING2, r); | 3223 | dsi_write_reg(dsidev, DSI_TIMING2, r); |
3151 | 3224 | ||
3152 | total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); | 3225 | total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); |
3153 | 3226 | ||
@@ -3158,24 +3231,25 @@ static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16) | |||
3158 | } | 3231 | } |
3159 | static int dsi_proto_config(struct omap_dss_device *dssdev) | 3232 | static int dsi_proto_config(struct omap_dss_device *dssdev) |
3160 | { | 3233 | { |
3234 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3161 | u32 r; | 3235 | u32 r; |
3162 | int buswidth = 0; | 3236 | int buswidth = 0; |
3163 | 3237 | ||
3164 | dsi_config_tx_fifo(DSI_FIFO_SIZE_32, | 3238 | dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32, |
3165 | DSI_FIFO_SIZE_32, | 3239 | DSI_FIFO_SIZE_32, |
3166 | DSI_FIFO_SIZE_32, | 3240 | DSI_FIFO_SIZE_32, |
3167 | DSI_FIFO_SIZE_32); | 3241 | DSI_FIFO_SIZE_32); |
3168 | 3242 | ||
3169 | dsi_config_rx_fifo(DSI_FIFO_SIZE_32, | 3243 | dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32, |
3170 | DSI_FIFO_SIZE_32, | 3244 | DSI_FIFO_SIZE_32, |
3171 | DSI_FIFO_SIZE_32, | 3245 | DSI_FIFO_SIZE_32, |
3172 | DSI_FIFO_SIZE_32); | 3246 | DSI_FIFO_SIZE_32); |
3173 | 3247 | ||
3174 | /* XXX what values for the timeouts? */ | 3248 | /* XXX what values for the timeouts? */ |
3175 | dsi_set_stop_state_counter(0x1000, false, false); | 3249 | dsi_set_stop_state_counter(dsidev, 0x1000, false, false); |
3176 | dsi_set_ta_timeout(0x1fff, true, true); | 3250 | dsi_set_ta_timeout(dsidev, 0x1fff, true, true); |
3177 | dsi_set_lp_rx_timeout(0x1fff, true, true); | 3251 | dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true); |
3178 | dsi_set_hs_tx_timeout(0x1fff, true, true); | 3252 | dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true); |
3179 | 3253 | ||
3180 | switch (dssdev->ctrl.pixel_size) { | 3254 | switch (dssdev->ctrl.pixel_size) { |
3181 | case 16: | 3255 | case 16: |
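dsi_proto_config() and the other dssdev-facing entry points now resolve their DSI device through dsi_get_dsidev_from_dssdev(), whose definition is not in these hunks. A plausible minimal form, assuming a single DSI module per display for now (the dss.c hunks further down hard-code module 0 in the same way), would be:

    /* Hypothetical sketch; the real helper is defined elsewhere in dsi.c.
     * Module 0 is assumed here, matching the dsi_get_dsidev_from_id(0)
     * callers added in dss.c later in this patch. */
    static inline struct platform_device *dsi_get_dsidev_from_dssdev(
            struct omap_dss_device *dssdev)
    {
            return dsi_get_dsidev_from_id(0);
    }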
@@ -3191,7 +3265,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev) | |||
3191 | BUG(); | 3265 | BUG(); |
3192 | } | 3266 | } |
3193 | 3267 | ||
3194 | r = dsi_read_reg(DSI_CTRL); | 3268 | r = dsi_read_reg(dsidev, DSI_CTRL); |
3195 | r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */ | 3269 | r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */ |
3196 | r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */ | 3270 | r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */ |
3197 | r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */ | 3271 | r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */ |
@@ -3207,18 +3281,19 @@ static int dsi_proto_config(struct omap_dss_device *dssdev) | |||
3207 | r = FLD_MOD(r, 0, 25, 25); | 3281 | r = FLD_MOD(r, 0, 25, 25); |
3208 | } | 3282 | } |
3209 | 3283 | ||
3210 | dsi_write_reg(DSI_CTRL, r); | 3284 | dsi_write_reg(dsidev, DSI_CTRL, r); |
3211 | 3285 | ||
3212 | dsi_vc_initial_config(0); | 3286 | dsi_vc_initial_config(dsidev, 0); |
3213 | dsi_vc_initial_config(1); | 3287 | dsi_vc_initial_config(dsidev, 1); |
3214 | dsi_vc_initial_config(2); | 3288 | dsi_vc_initial_config(dsidev, 2); |
3215 | dsi_vc_initial_config(3); | 3289 | dsi_vc_initial_config(dsidev, 3); |
3216 | 3290 | ||
3217 | return 0; | 3291 | return 0; |
3218 | } | 3292 | } |
3219 | 3293 | ||
3220 | static void dsi_proto_timings(struct omap_dss_device *dssdev) | 3294 | static void dsi_proto_timings(struct omap_dss_device *dssdev) |
3221 | { | 3295 | { |
3296 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3222 | unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail; | 3297 | unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail; |
3223 | unsigned tclk_pre, tclk_post; | 3298 | unsigned tclk_pre, tclk_post; |
3224 | unsigned ths_prepare, ths_prepare_ths_zero, ths_zero; | 3299 | unsigned ths_prepare, ths_prepare_ths_zero, ths_zero; |
@@ -3228,25 +3303,25 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev) | |||
3228 | unsigned ths_eot; | 3303 | unsigned ths_eot; |
3229 | u32 r; | 3304 | u32 r; |
3230 | 3305 | ||
3231 | r = dsi_read_reg(DSI_DSIPHY_CFG0); | 3306 | r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0); |
3232 | ths_prepare = FLD_GET(r, 31, 24); | 3307 | ths_prepare = FLD_GET(r, 31, 24); |
3233 | ths_prepare_ths_zero = FLD_GET(r, 23, 16); | 3308 | ths_prepare_ths_zero = FLD_GET(r, 23, 16); |
3234 | ths_zero = ths_prepare_ths_zero - ths_prepare; | 3309 | ths_zero = ths_prepare_ths_zero - ths_prepare; |
3235 | ths_trail = FLD_GET(r, 15, 8); | 3310 | ths_trail = FLD_GET(r, 15, 8); |
3236 | ths_exit = FLD_GET(r, 7, 0); | 3311 | ths_exit = FLD_GET(r, 7, 0); |
3237 | 3312 | ||
3238 | r = dsi_read_reg(DSI_DSIPHY_CFG1); | 3313 | r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1); |
3239 | tlpx = FLD_GET(r, 22, 16) * 2; | 3314 | tlpx = FLD_GET(r, 22, 16) * 2; |
3240 | tclk_trail = FLD_GET(r, 15, 8); | 3315 | tclk_trail = FLD_GET(r, 15, 8); |
3241 | tclk_zero = FLD_GET(r, 7, 0); | 3316 | tclk_zero = FLD_GET(r, 7, 0); |
3242 | 3317 | ||
3243 | r = dsi_read_reg(DSI_DSIPHY_CFG2); | 3318 | r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2); |
3244 | tclk_prepare = FLD_GET(r, 7, 0); | 3319 | tclk_prepare = FLD_GET(r, 7, 0); |
3245 | 3320 | ||
3246 | /* min 8*UI */ | 3321 | /* min 8*UI */ |
3247 | tclk_pre = 20; | 3322 | tclk_pre = 20; |
3248 | /* min 60ns + 52*UI */ | 3323 | /* min 60ns + 52*UI */ |
3249 | tclk_post = ns2ddr(60) + 26; | 3324 | tclk_post = ns2ddr(dsidev, 60) + 26; |
3250 | 3325 | ||
3251 | /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */ | 3326 | /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */ |
3252 | if (dssdev->phy.dsi.data1_lane != 0 && | 3327 | if (dssdev->phy.dsi.data1_lane != 0 && |
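The tclk_post line above encodes the "min 60ns + 52*UI" requirement: one DDR clock tick spans 2 UI, so the fixed 26 DDR ticks cover the 52 UI part, and ns2ddr(dsidev, 60) converts the 60 ns part into DDR ticks. ns2ddr() itself is outside these hunks; a sketch, assuming it derives the DDR clock from the currently programmed DSI PLL settings, could look like:

    /* Sketch; not part of the hunks shown. Converts a duration in ns to
     * DDR clock ticks (rounding down), assuming the DDR clock is the
     * currently programmed clkin4ddr divided by four. */
    static unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
    {
            unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;

            return (ns * (ddr_clk / 1000 / 1000)) / 1000;
    }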
@@ -3262,10 +3337,10 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev) | |||
3262 | BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255); | 3337 | BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255); |
3263 | BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255); | 3338 | BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255); |
3264 | 3339 | ||
3265 | r = dsi_read_reg(DSI_CLK_TIMING); | 3340 | r = dsi_read_reg(dsidev, DSI_CLK_TIMING); |
3266 | r = FLD_MOD(r, ddr_clk_pre, 15, 8); | 3341 | r = FLD_MOD(r, ddr_clk_pre, 15, 8); |
3267 | r = FLD_MOD(r, ddr_clk_post, 7, 0); | 3342 | r = FLD_MOD(r, ddr_clk_post, 7, 0); |
3268 | dsi_write_reg(DSI_CLK_TIMING, r); | 3343 | dsi_write_reg(dsidev, DSI_CLK_TIMING, r); |
3269 | 3344 | ||
3270 | DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n", | 3345 | DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n", |
3271 | ddr_clk_pre, | 3346 | ddr_clk_pre, |
@@ -3279,7 +3354,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev) | |||
3279 | 3354 | ||
3280 | r = FLD_VAL(enter_hs_mode_lat, 31, 16) | | 3355 | r = FLD_VAL(enter_hs_mode_lat, 31, 16) | |
3281 | FLD_VAL(exit_hs_mode_lat, 15, 0); | 3356 | FLD_VAL(exit_hs_mode_lat, 15, 0); |
3282 | dsi_write_reg(DSI_VM_TIMING7, r); | 3357 | dsi_write_reg(dsidev, DSI_VM_TIMING7, r); |
3283 | 3358 | ||
3284 | DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n", | 3359 | DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n", |
3285 | enter_hs_mode_lat, exit_hs_mode_lat); | 3360 | enter_hs_mode_lat, exit_hs_mode_lat); |
@@ -3289,25 +3364,26 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev) | |||
3289 | #define DSI_DECL_VARS \ | 3364 | #define DSI_DECL_VARS \ |
3290 | int __dsi_cb = 0; u32 __dsi_cv = 0; | 3365 | int __dsi_cb = 0; u32 __dsi_cv = 0; |
3291 | 3366 | ||
3292 | #define DSI_FLUSH(ch) \ | 3367 | #define DSI_FLUSH(dsidev, ch) \ |
3293 | if (__dsi_cb > 0) { \ | 3368 | if (__dsi_cb > 0) { \ |
3294 | /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \ | 3369 | /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \ |
3295 | dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \ | 3370 | dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \ |
3296 | __dsi_cb = __dsi_cv = 0; \ | 3371 | __dsi_cb = __dsi_cv = 0; \ |
3297 | } | 3372 | } |
3298 | 3373 | ||
3299 | #define DSI_PUSH(ch, data) \ | 3374 | #define DSI_PUSH(dsidev, ch, data) \ |
3300 | do { \ | 3375 | do { \ |
3301 | __dsi_cv |= (data) << (__dsi_cb * 8); \ | 3376 | __dsi_cv |= (data) << (__dsi_cb * 8); \ |
3302 | /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \ | 3377 | /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \ |
3303 | if (++__dsi_cb > 3) \ | 3378 | if (++__dsi_cb > 3) \ |
3304 | DSI_FLUSH(ch); \ | 3379 | DSI_FLUSH(dsidev, ch); \ |
3305 | } while (0) | 3380 | } while (0) |
3306 | 3381 | ||
3307 | static int dsi_update_screen_l4(struct omap_dss_device *dssdev, | 3382 | static int dsi_update_screen_l4(struct omap_dss_device *dssdev, |
3308 | int x, int y, int w, int h) | 3383 | int x, int y, int w, int h) |
3309 | { | 3384 | { |
3310 | /* Note: supports only 24bit colors in 32bit container */ | 3385 | /* Note: supports only 24bit colors in 32bit container */ |
3386 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3311 | int first = 1; | 3387 | int first = 1; |
3312 | int fifo_stalls = 0; | 3388 | int fifo_stalls = 0; |
3313 | int max_dsi_packet_size; | 3389 | int max_dsi_packet_size; |
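The reworked DSI_PUSH()/DSI_FLUSH() macros above now also carry dsidev: DSI_PUSH() packs one payload byte per call into the 32-bit __dsi_cv accumulator (byte n lands in bits n*8..n*8+7) and flushes automatically once four bytes are queued, while DSI_FLUSH() writes any partially filled word to the virtual channel's long-packet payload register of the given device. A usage sketch with hypothetical values, following the pattern dsi_update_screen_l4() uses in the hunks below:

    /* Usage sketch with made-up payload bytes: declare the accumulator,
     * write the long-packet header (length includes the DCS command byte),
     * push the payload, then flush the partially filled word. */
    {
            DSI_DECL_VARS;

            dsi_vc_write_long_header(dsidev, 0, DSI_DT_DCS_LONG_WRITE, 3, 0);
            DSI_PUSH(dsidev, 0, 0x2c);  /* DCS command byte (example) */
            DSI_PUSH(dsidev, 0, 0x12);  /* first payload byte (example) */
            DSI_PUSH(dsidev, 0, 0x34);  /* second payload byte (example) */
            DSI_FLUSH(dsidev, 0);       /* only 3 of 4 bytes filled, flush */
    }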
@@ -3375,35 +3451,36 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, | |||
3375 | #if 1 | 3451 | #if 1 |
3376 | /* using fifo not empty */ | 3452 | /* using fifo not empty */ |
3377 | /* TX_FIFO_NOT_EMPTY */ | 3453 | /* TX_FIFO_NOT_EMPTY */ |
3378 | while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) { | 3454 | while (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(0)), 5, 5)) { |
3379 | fifo_stalls++; | 3455 | fifo_stalls++; |
3380 | if (fifo_stalls > 0xfffff) { | 3456 | if (fifo_stalls > 0xfffff) { |
3381 | DSSERR("fifo stalls overflow, pixels left %d\n", | 3457 | DSSERR("fifo stalls overflow, pixels left %d\n", |
3382 | pixels_left); | 3458 | pixels_left); |
3383 | dsi_if_enable(0); | 3459 | dsi_if_enable(dsidev, 0); |
3384 | return -EIO; | 3460 | return -EIO; |
3385 | } | 3461 | } |
3386 | udelay(1); | 3462 | udelay(1); |
3387 | } | 3463 | } |
3388 | #elif 1 | 3464 | #elif 1 |
3389 | /* using fifo emptiness */ | 3465 | /* using fifo emptiness */ |
3390 | while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 < | 3466 | while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 < |
3391 | max_dsi_packet_size) { | 3467 | max_dsi_packet_size) { |
3392 | fifo_stalls++; | 3468 | fifo_stalls++; |
3393 | if (fifo_stalls > 0xfffff) { | 3469 | if (fifo_stalls > 0xfffff) { |
3394 | DSSERR("fifo stalls overflow, pixels left %d\n", | 3470 | DSSERR("fifo stalls overflow, pixels left %d\n", |
3395 | pixels_left); | 3471 | pixels_left); |
3396 | dsi_if_enable(0); | 3472 | dsi_if_enable(dsidev, 0); |
3397 | return -EIO; | 3473 | return -EIO; |
3398 | } | 3474 | } |
3399 | } | 3475 | } |
3400 | #else | 3476 | #else |
3401 | while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) { | 3477 | while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS, |
3478 | 7, 0) + 1) * 4 == 0) { | ||
3402 | fifo_stalls++; | 3479 | fifo_stalls++; |
3403 | if (fifo_stalls > 0xfffff) { | 3480 | if (fifo_stalls > 0xfffff) { |
3404 | DSSERR("fifo stalls overflow, pixels left %d\n", | 3481 | DSSERR("fifo stalls overflow, pixels left %d\n", |
3405 | pixels_left); | 3482 | pixels_left); |
3406 | dsi_if_enable(0); | 3483 | dsi_if_enable(dsidev, 0); |
3407 | return -EIO; | 3484 | return -EIO; |
3408 | } | 3485 | } |
3409 | } | 3486 | } |
@@ -3412,17 +3489,17 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, | |||
3412 | 3489 | ||
3413 | pixels_left -= pixels; | 3490 | pixels_left -= pixels; |
3414 | 3491 | ||
3415 | dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE, | 3492 | dsi_vc_write_long_header(dsidev, 0, DSI_DT_DCS_LONG_WRITE, |
3416 | 1 + pixels * bytespp, 0); | 3493 | 1 + pixels * bytespp, 0); |
3417 | 3494 | ||
3418 | DSI_PUSH(0, dcs_cmd); | 3495 | DSI_PUSH(dsidev, 0, dcs_cmd); |
3419 | 3496 | ||
3420 | while (pixels-- > 0) { | 3497 | while (pixels-- > 0) { |
3421 | u32 pix = __raw_readl(data++); | 3498 | u32 pix = __raw_readl(data++); |
3422 | 3499 | ||
3423 | DSI_PUSH(0, (pix >> 16) & 0xff); | 3500 | DSI_PUSH(dsidev, 0, (pix >> 16) & 0xff); |
3424 | DSI_PUSH(0, (pix >> 8) & 0xff); | 3501 | DSI_PUSH(dsidev, 0, (pix >> 8) & 0xff); |
3425 | DSI_PUSH(0, (pix >> 0) & 0xff); | 3502 | DSI_PUSH(dsidev, 0, (pix >> 0) & 0xff); |
3426 | 3503 | ||
3427 | current_x++; | 3504 | current_x++; |
3428 | if (current_x == x+w) { | 3505 | if (current_x == x+w) { |
@@ -3431,7 +3508,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, | |||
3431 | } | 3508 | } |
3432 | } | 3509 | } |
3433 | 3510 | ||
3434 | DSI_FLUSH(0); | 3511 | DSI_FLUSH(dsidev, 0); |
3435 | } | 3512 | } |
3436 | 3513 | ||
3437 | return 0; | 3514 | return 0; |
@@ -3440,6 +3517,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, | |||
3440 | static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, | 3517 | static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, |
3441 | u16 x, u16 y, u16 w, u16 h) | 3518 | u16 x, u16 y, u16 w, u16 h) |
3442 | { | 3519 | { |
3520 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3443 | unsigned bytespp; | 3521 | unsigned bytespp; |
3444 | unsigned bytespl; | 3522 | unsigned bytespl; |
3445 | unsigned bytespf; | 3523 | unsigned bytespf; |
@@ -3457,7 +3535,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, | |||
3457 | DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n", | 3535 | DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n", |
3458 | x, y, w, h); | 3536 | x, y, w, h); |
3459 | 3537 | ||
3460 | dsi_vc_config_vp(channel); | 3538 | dsi_vc_config_vp(dsidev, channel); |
3461 | 3539 | ||
3462 | bytespp = dssdev->ctrl.pixel_size / 8; | 3540 | bytespp = dssdev->ctrl.pixel_size / 8; |
3463 | bytespl = w * bytespp; | 3541 | bytespl = w * bytespp; |
@@ -3478,15 +3556,16 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, | |||
3478 | total_len += (bytespf % packet_payload) + 1; | 3556 | total_len += (bytespf % packet_payload) + 1; |
3479 | 3557 | ||
3480 | l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */ | 3558 | l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */ |
3481 | dsi_write_reg(DSI_VC_TE(channel), l); | 3559 | dsi_write_reg(dsidev, DSI_VC_TE(channel), l); |
3482 | 3560 | ||
3483 | dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0); | 3561 | dsi_vc_write_long_header(dsidev, channel, DSI_DT_DCS_LONG_WRITE, |
3562 | packet_len, 0); | ||
3484 | 3563 | ||
3485 | if (dsi.te_enabled) | 3564 | if (dsi.te_enabled) |
3486 | l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ | 3565 | l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ |
3487 | else | 3566 | else |
3488 | l = FLD_MOD(l, 1, 31, 31); /* TE_START */ | 3567 | l = FLD_MOD(l, 1, 31, 31); /* TE_START */ |
3489 | dsi_write_reg(DSI_VC_TE(channel), l); | 3568 | dsi_write_reg(dsidev, DSI_VC_TE(channel), l); |
3490 | 3569 | ||
3491 | /* We put SIDLEMODE to no-idle for the duration of the transfer, | 3570 | /* We put SIDLEMODE to no-idle for the duration of the transfer, |
3492 | * because DSS interrupts are not capable of waking up the CPU and the | 3571 | * because DSS interrupts are not capable of waking up the CPU and the |
@@ -3496,7 +3575,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, | |||
3496 | */ | 3575 | */ |
3497 | dispc_disable_sidle(); | 3576 | dispc_disable_sidle(); |
3498 | 3577 | ||
3499 | dsi_perf_mark_start(); | 3578 | dsi_perf_mark_start(dsidev); |
3500 | 3579 | ||
3501 | r = queue_delayed_work(dsi.workqueue, &dsi.framedone_timeout_work, | 3580 | r = queue_delayed_work(dsi.workqueue, &dsi.framedone_timeout_work, |
3502 | msecs_to_jiffies(250)); | 3581 | msecs_to_jiffies(250)); |
@@ -3507,9 +3586,9 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, | |||
3507 | if (dsi.te_enabled) { | 3586 | if (dsi.te_enabled) { |
3508 | /* disable LP_RX_TO, so that we can receive TE. Time to wait | 3587 | /* disable LP_RX_TO, so that we can receive TE. Time to wait |
3509 | * for TE is longer than the timer allows */ | 3588 | * for TE is longer than the timer allows */ |
3510 | REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ | 3589 | REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ |
3511 | 3590 | ||
3512 | dsi_vc_send_bta(channel); | 3591 | dsi_vc_send_bta(dsidev, channel); |
3513 | 3592 | ||
3514 | #ifdef DSI_CATCH_MISSING_TE | 3593 | #ifdef DSI_CATCH_MISSING_TE |
3515 | mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250)); | 3594 | mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250)); |
@@ -3524,20 +3603,20 @@ static void dsi_te_timeout(unsigned long arg) | |||
3524 | } | 3603 | } |
3525 | #endif | 3604 | #endif |
3526 | 3605 | ||
3527 | static void dsi_handle_framedone(int error) | 3606 | static void dsi_handle_framedone(struct platform_device *dsidev, int error) |
3528 | { | 3607 | { |
3529 | /* SIDLEMODE back to smart-idle */ | 3608 | /* SIDLEMODE back to smart-idle */ |
3530 | dispc_enable_sidle(); | 3609 | dispc_enable_sidle(); |
3531 | 3610 | ||
3532 | if (dsi.te_enabled) { | 3611 | if (dsi.te_enabled) { |
3533 | /* enable LP_RX_TO again after the TE */ | 3612 | /* enable LP_RX_TO again after the TE */ |
3534 | REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ | 3613 | REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ |
3535 | } | 3614 | } |
3536 | 3615 | ||
3537 | dsi.framedone_callback(error, dsi.framedone_data); | 3616 | dsi.framedone_callback(error, dsi.framedone_data); |
3538 | 3617 | ||
3539 | if (!error) | 3618 | if (!error) |
3540 | dsi_perf_show("DISPC"); | 3619 | dsi_perf_show(dsidev, "DISPC"); |
3541 | } | 3620 | } |
3542 | 3621 | ||
3543 | static void dsi_framedone_timeout_work_callback(struct work_struct *work) | 3622 | static void dsi_framedone_timeout_work_callback(struct work_struct *work) |
@@ -3551,11 +3630,13 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work) | |||
3551 | 3630 | ||
3552 | DSSERR("Framedone not received for 250ms!\n"); | 3631 | DSSERR("Framedone not received for 250ms!\n"); |
3553 | 3632 | ||
3554 | dsi_handle_framedone(-ETIMEDOUT); | 3633 | dsi_handle_framedone(dsi.pdev, -ETIMEDOUT); |
3555 | } | 3634 | } |
3556 | 3635 | ||
3557 | static void dsi_framedone_irq_callback(void *data, u32 mask) | 3636 | static void dsi_framedone_irq_callback(void *data, u32 mask) |
3558 | { | 3637 | { |
3638 | struct omap_dss_device *dssdev = (struct omap_dss_device *) data; | ||
3639 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3559 | /* Note: We get FRAMEDONE when DISPC has finished sending pixels and | 3640 | /* Note: We get FRAMEDONE when DISPC has finished sending pixels and |
3560 | * turns itself off. However, DSI still has the pixels in its buffers, | 3641 | * turns itself off. However, DSI still has the pixels in its buffers, |
3561 | * and is sending the data. | 3642 | * and is sending the data. |
@@ -3563,7 +3644,7 @@ static void dsi_framedone_irq_callback(void *data, u32 mask) | |||
3563 | 3644 | ||
3564 | __cancel_delayed_work(&dsi.framedone_timeout_work); | 3645 | __cancel_delayed_work(&dsi.framedone_timeout_work); |
3565 | 3646 | ||
3566 | dsi_handle_framedone(0); | 3647 | dsi_handle_framedone(dsidev, 0); |
3567 | 3648 | ||
3568 | #ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC | 3649 | #ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC |
3569 | dispc_fake_vsync_irq(); | 3650 | dispc_fake_vsync_irq(); |
@@ -3574,6 +3655,7 @@ int omap_dsi_prepare_update(struct omap_dss_device *dssdev, | |||
3574 | u16 *x, u16 *y, u16 *w, u16 *h, | 3655 | u16 *x, u16 *y, u16 *w, u16 *h, |
3575 | bool enlarge_update_area) | 3656 | bool enlarge_update_area) |
3576 | { | 3657 | { |
3658 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3577 | u16 dw, dh; | 3659 | u16 dw, dh; |
3578 | 3660 | ||
3579 | dssdev->driver->get_resolution(dssdev, &dw, &dh); | 3661 | dssdev->driver->get_resolution(dssdev, &dw, &dh); |
@@ -3593,7 +3675,7 @@ int omap_dsi_prepare_update(struct omap_dss_device *dssdev, | |||
3593 | if (*w == 0 || *h == 0) | 3675 | if (*w == 0 || *h == 0) |
3594 | return -EINVAL; | 3676 | return -EINVAL; |
3595 | 3677 | ||
3596 | dsi_perf_mark_setup(); | 3678 | dsi_perf_mark_setup(dsidev); |
3597 | 3679 | ||
3598 | if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { | 3680 | if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { |
3599 | dss_setup_partial_planes(dssdev, x, y, w, h, | 3681 | dss_setup_partial_planes(dssdev, x, y, w, h, |
@@ -3610,6 +3692,8 @@ int omap_dsi_update(struct omap_dss_device *dssdev, | |||
3610 | u16 x, u16 y, u16 w, u16 h, | 3692 | u16 x, u16 y, u16 w, u16 h, |
3611 | void (*callback)(int, void *), void *data) | 3693 | void (*callback)(int, void *), void *data) |
3612 | { | 3694 | { |
3695 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3696 | |||
3613 | dsi.update_channel = channel; | 3697 | dsi.update_channel = channel; |
3614 | 3698 | ||
3615 | /* OMAP DSS cannot send updates of odd widths. | 3699 | /* OMAP DSS cannot send updates of odd widths. |
@@ -3636,7 +3720,7 @@ int omap_dsi_update(struct omap_dss_device *dssdev, | |||
3636 | if (r) | 3720 | if (r) |
3637 | return r; | 3721 | return r; |
3638 | 3722 | ||
3639 | dsi_perf_show("L4"); | 3723 | dsi_perf_show(dsidev, "L4"); |
3640 | callback(0, data); | 3724 | callback(0, data); |
3641 | } | 3725 | } |
3642 | 3726 | ||
@@ -3650,7 +3734,7 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev) | |||
3650 | { | 3734 | { |
3651 | int r; | 3735 | int r; |
3652 | 3736 | ||
3653 | r = omap_dispc_register_isr(dsi_framedone_irq_callback, NULL, | 3737 | r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev, |
3654 | DISPC_IRQ_FRAMEDONE); | 3738 | DISPC_IRQ_FRAMEDONE); |
3655 | if (r) { | 3739 | if (r) { |
3656 | DSSERR("can't get FRAMEDONE irq\n"); | 3740 | DSSERR("can't get FRAMEDONE irq\n"); |
@@ -3684,12 +3768,13 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev) | |||
3684 | 3768 | ||
3685 | static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) | 3769 | static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) |
3686 | { | 3770 | { |
3687 | omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL, | 3771 | omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev, |
3688 | DISPC_IRQ_FRAMEDONE); | 3772 | DISPC_IRQ_FRAMEDONE); |
3689 | } | 3773 | } |
3690 | 3774 | ||
3691 | static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) | 3775 | static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) |
3692 | { | 3776 | { |
3777 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3693 | struct dsi_clock_info cinfo; | 3778 | struct dsi_clock_info cinfo; |
3694 | int r; | 3779 | int r; |
3695 | 3780 | ||
@@ -3705,7 +3790,7 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) | |||
3705 | return r; | 3790 | return r; |
3706 | } | 3791 | } |
3707 | 3792 | ||
3708 | r = dsi_pll_set_clock_div(&cinfo); | 3793 | r = dsi_pll_set_clock_div(dsidev, &cinfo); |
3709 | if (r) { | 3794 | if (r) { |
3710 | DSSERR("Failed to set dsi clocks\n"); | 3795 | DSSERR("Failed to set dsi clocks\n"); |
3711 | return r; | 3796 | return r; |
@@ -3716,11 +3801,12 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) | |||
3716 | 3801 | ||
3717 | static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) | 3802 | static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) |
3718 | { | 3803 | { |
3804 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3719 | struct dispc_clock_info dispc_cinfo; | 3805 | struct dispc_clock_info dispc_cinfo; |
3720 | int r; | 3806 | int r; |
3721 | unsigned long long fck; | 3807 | unsigned long long fck; |
3722 | 3808 | ||
3723 | fck = dsi_get_pll_hsdiv_dispc_rate(); | 3809 | fck = dsi_get_pll_hsdiv_dispc_rate(dsidev); |
3724 | 3810 | ||
3725 | dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div; | 3811 | dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div; |
3726 | dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div; | 3812 | dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div; |
@@ -3742,9 +3828,10 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) | |||
3742 | 3828 | ||
3743 | static int dsi_display_init_dsi(struct omap_dss_device *dssdev) | 3829 | static int dsi_display_init_dsi(struct omap_dss_device *dssdev) |
3744 | { | 3830 | { |
3831 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3745 | int r; | 3832 | int r; |
3746 | 3833 | ||
3747 | r = dsi_pll_init(true, true); | 3834 | r = dsi_pll_init(dsidev, true, true); |
3748 | if (r) | 3835 | if (r) |
3749 | goto err0; | 3836 | goto err0; |
3750 | 3837 | ||
@@ -3767,34 +3854,34 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev) | |||
3767 | if (r) | 3854 | if (r) |
3768 | goto err2; | 3855 | goto err2; |
3769 | 3856 | ||
3770 | _dsi_print_reset_status(); | 3857 | _dsi_print_reset_status(dsidev); |
3771 | 3858 | ||
3772 | dsi_proto_timings(dssdev); | 3859 | dsi_proto_timings(dssdev); |
3773 | dsi_set_lp_clk_divisor(dssdev); | 3860 | dsi_set_lp_clk_divisor(dssdev); |
3774 | 3861 | ||
3775 | if (1) | 3862 | if (1) |
3776 | _dsi_print_reset_status(); | 3863 | _dsi_print_reset_status(dsidev); |
3777 | 3864 | ||
3778 | r = dsi_proto_config(dssdev); | 3865 | r = dsi_proto_config(dssdev); |
3779 | if (r) | 3866 | if (r) |
3780 | goto err3; | 3867 | goto err3; |
3781 | 3868 | ||
3782 | /* enable interface */ | 3869 | /* enable interface */ |
3783 | dsi_vc_enable(0, 1); | 3870 | dsi_vc_enable(dsidev, 0, 1); |
3784 | dsi_vc_enable(1, 1); | 3871 | dsi_vc_enable(dsidev, 1, 1); |
3785 | dsi_vc_enable(2, 1); | 3872 | dsi_vc_enable(dsidev, 2, 1); |
3786 | dsi_vc_enable(3, 1); | 3873 | dsi_vc_enable(dsidev, 3, 1); |
3787 | dsi_if_enable(1); | 3874 | dsi_if_enable(dsidev, 1); |
3788 | dsi_force_tx_stop_mode_io(); | 3875 | dsi_force_tx_stop_mode_io(dsidev); |
3789 | 3876 | ||
3790 | return 0; | 3877 | return 0; |
3791 | err3: | 3878 | err3: |
3792 | dsi_cio_uninit(); | 3879 | dsi_cio_uninit(dsidev); |
3793 | err2: | 3880 | err2: |
3794 | dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); | 3881 | dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); |
3795 | dss_select_dsi_clk_source(OMAP_DSS_CLK_SRC_FCK); | 3882 | dss_select_dsi_clk_source(OMAP_DSS_CLK_SRC_FCK); |
3796 | err1: | 3883 | err1: |
3797 | dsi_pll_uninit(true); | 3884 | dsi_pll_uninit(dsidev, true); |
3798 | err0: | 3885 | err0: |
3799 | return r; | 3886 | return r; |
3800 | } | 3887 | } |
@@ -3802,45 +3889,48 @@ err0: | |||
3802 | static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev, | 3889 | static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev, |
3803 | bool disconnect_lanes, bool enter_ulps) | 3890 | bool disconnect_lanes, bool enter_ulps) |
3804 | { | 3891 | { |
3892 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3893 | |||
3805 | if (enter_ulps && !dsi.ulps_enabled) | 3894 | if (enter_ulps && !dsi.ulps_enabled) |
3806 | dsi_enter_ulps(); | 3895 | dsi_enter_ulps(dsidev); |
3807 | 3896 | ||
3808 | /* disable interface */ | 3897 | /* disable interface */ |
3809 | dsi_if_enable(0); | 3898 | dsi_if_enable(dsidev, 0); |
3810 | dsi_vc_enable(0, 0); | 3899 | dsi_vc_enable(dsidev, 0, 0); |
3811 | dsi_vc_enable(1, 0); | 3900 | dsi_vc_enable(dsidev, 1, 0); |
3812 | dsi_vc_enable(2, 0); | 3901 | dsi_vc_enable(dsidev, 2, 0); |
3813 | dsi_vc_enable(3, 0); | 3902 | dsi_vc_enable(dsidev, 3, 0); |
3814 | 3903 | ||
3815 | dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); | 3904 | dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); |
3816 | dss_select_dsi_clk_source(OMAP_DSS_CLK_SRC_FCK); | 3905 | dss_select_dsi_clk_source(OMAP_DSS_CLK_SRC_FCK); |
3817 | dsi_cio_uninit(); | 3906 | dsi_cio_uninit(dsidev); |
3818 | dsi_pll_uninit(disconnect_lanes); | 3907 | dsi_pll_uninit(dsidev, disconnect_lanes); |
3819 | } | 3908 | } |
3820 | 3909 | ||
3821 | static int dsi_core_init(void) | 3910 | static int dsi_core_init(struct platform_device *dsidev) |
3822 | { | 3911 | { |
3823 | /* Autoidle */ | 3912 | /* Autoidle */ |
3824 | REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0); | 3913 | REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0); |
3825 | 3914 | ||
3826 | /* ENWAKEUP */ | 3915 | /* ENWAKEUP */ |
3827 | REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2); | 3916 | REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2); |
3828 | 3917 | ||
3829 | /* SIDLEMODE smart-idle */ | 3918 | /* SIDLEMODE smart-idle */ |
3830 | REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3); | 3919 | REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3); |
3831 | 3920 | ||
3832 | _dsi_initialize_irq(); | 3921 | _dsi_initialize_irq(dsidev); |
3833 | 3922 | ||
3834 | return 0; | 3923 | return 0; |
3835 | } | 3924 | } |
3836 | 3925 | ||
3837 | int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) | 3926 | int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) |
3838 | { | 3927 | { |
3928 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3839 | int r = 0; | 3929 | int r = 0; |
3840 | 3930 | ||
3841 | DSSDBG("dsi_display_enable\n"); | 3931 | DSSDBG("dsi_display_enable\n"); |
3842 | 3932 | ||
3843 | WARN_ON(!dsi_bus_is_locked()); | 3933 | WARN_ON(!dsi_bus_is_locked(dsidev)); |
3844 | 3934 | ||
3845 | mutex_lock(&dsi.lock); | 3935 | mutex_lock(&dsi.lock); |
3846 | 3936 | ||
@@ -3851,13 +3941,13 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) | |||
3851 | } | 3941 | } |
3852 | 3942 | ||
3853 | enable_clocks(1); | 3943 | enable_clocks(1); |
3854 | dsi_enable_pll_clock(1); | 3944 | dsi_enable_pll_clock(dsidev, 1); |
3855 | 3945 | ||
3856 | r = _dsi_reset(); | 3946 | r = _dsi_reset(dsidev); |
3857 | if (r) | 3947 | if (r) |
3858 | goto err1; | 3948 | goto err1; |
3859 | 3949 | ||
3860 | dsi_core_init(); | 3950 | dsi_core_init(dsidev); |
3861 | 3951 | ||
3862 | r = dsi_display_init_dispc(dssdev); | 3952 | r = dsi_display_init_dispc(dssdev); |
3863 | if (r) | 3953 | if (r) |
@@ -3875,7 +3965,7 @@ err2: | |||
3875 | dsi_display_uninit_dispc(dssdev); | 3965 | dsi_display_uninit_dispc(dssdev); |
3876 | err1: | 3966 | err1: |
3877 | enable_clocks(0); | 3967 | enable_clocks(0); |
3878 | dsi_enable_pll_clock(0); | 3968 | dsi_enable_pll_clock(dsidev, 0); |
3879 | omap_dss_stop_device(dssdev); | 3969 | omap_dss_stop_device(dssdev); |
3880 | err0: | 3970 | err0: |
3881 | mutex_unlock(&dsi.lock); | 3971 | mutex_unlock(&dsi.lock); |
@@ -3887,9 +3977,11 @@ EXPORT_SYMBOL(omapdss_dsi_display_enable); | |||
3887 | void omapdss_dsi_display_disable(struct omap_dss_device *dssdev, | 3977 | void omapdss_dsi_display_disable(struct omap_dss_device *dssdev, |
3888 | bool disconnect_lanes, bool enter_ulps) | 3978 | bool disconnect_lanes, bool enter_ulps) |
3889 | { | 3979 | { |
3980 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | ||
3981 | |||
3890 | DSSDBG("dsi_display_disable\n"); | 3982 | DSSDBG("dsi_display_disable\n"); |
3891 | 3983 | ||
3892 | WARN_ON(!dsi_bus_is_locked()); | 3984 | WARN_ON(!dsi_bus_is_locked(dsidev)); |
3893 | 3985 | ||
3894 | mutex_lock(&dsi.lock); | 3986 | mutex_lock(&dsi.lock); |
3895 | 3987 | ||
@@ -3898,7 +3990,7 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev, | |||
3898 | dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps); | 3990 | dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps); |
3899 | 3991 | ||
3900 | enable_clocks(0); | 3992 | enable_clocks(0); |
3901 | dsi_enable_pll_clock(0); | 3993 | dsi_enable_pll_clock(dsidev, 0); |
3902 | 3994 | ||
3903 | omap_dss_stop_device(dssdev); | 3995 | omap_dss_stop_device(dssdev); |
3904 | 3996 | ||
@@ -4001,23 +4093,23 @@ void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel) | |||
4001 | } | 4093 | } |
4002 | EXPORT_SYMBOL(omap_dsi_release_vc); | 4094 | EXPORT_SYMBOL(omap_dsi_release_vc); |
4003 | 4095 | ||
4004 | void dsi_wait_pll_hsdiv_dispc_active(void) | 4096 | void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev) |
4005 | { | 4097 | { |
4006 | if (wait_for_bit_change(DSI_PLL_STATUS, 7, 1) != 1) | 4098 | if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1) |
4007 | DSSERR("%s (%s) not active\n", | 4099 | DSSERR("%s (%s) not active\n", |
4008 | dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), | 4100 | dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), |
4009 | dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC)); | 4101 | dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC)); |
4010 | } | 4102 | } |
4011 | 4103 | ||
4012 | void dsi_wait_pll_hsdiv_dsi_active(void) | 4104 | void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev) |
4013 | { | 4105 | { |
4014 | if (wait_for_bit_change(DSI_PLL_STATUS, 8, 1) != 1) | 4106 | if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1) |
4015 | DSSERR("%s (%s) not active\n", | 4107 | DSSERR("%s (%s) not active\n", |
4016 | dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), | 4108 | dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), |
4017 | dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI)); | 4109 | dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI)); |
4018 | } | 4110 | } |
4019 | 4111 | ||
4020 | static void dsi_calc_clock_param_ranges(void) | 4112 | static void dsi_calc_clock_param_ranges(struct platform_device *dsidev) |
4021 | { | 4113 | { |
4022 | dsi.regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN); | 4114 | dsi.regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN); |
4023 | dsi.regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM); | 4115 | dsi.regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM); |
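Both wait helpers in the hunk above now take dsidev and poll a DSI_PLL_STATUS bit through wait_for_bit_change(), which returns the bit's final value so the callers can compare it against 1. Its body is not in these hunks; a sketch, assuming a bounded busy-wait:

    /* Sketch; the helper itself is outside the hunks shown. Polls the given
     * bit until it reaches the wanted value or the loop budget runs out,
     * and returns the value the caller should compare against. */
    static inline int wait_for_bit_change(struct platform_device *dsidev,
            const struct dsi_reg idx, int bitnum, int value)
    {
            int t = 100000;

            while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
                    if (--t == 0)
                            return !value;  /* timed out: report the wrong value */
            }

            return value;
    }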
@@ -4028,7 +4120,7 @@ static void dsi_calc_clock_param_ranges(void) | |||
4028 | dsi.lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); | 4120 | dsi.lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); |
4029 | } | 4121 | } |
4030 | 4122 | ||
4031 | static int dsi_init(struct platform_device *pdev) | 4123 | static int dsi_init(struct platform_device *dsidev) |
4032 | { | 4124 | { |
4033 | struct omap_display_platform_data *dss_plat_data; | 4125 | struct omap_display_platform_data *dss_plat_data; |
4034 | struct omap_dss_board_info *board_info; | 4126 | struct omap_dss_board_info *board_info; |
@@ -4036,7 +4128,9 @@ static int dsi_init(struct platform_device *pdev) | |||
4036 | int r, i; | 4128 | int r, i; |
4037 | struct resource *dsi_mem; | 4129 | struct resource *dsi_mem; |
4038 | 4130 | ||
4039 | dss_plat_data = pdev->dev.platform_data; | 4131 | dsi_pdev_map[dsidev->id] = dsidev; |
4132 | |||
4133 | dss_plat_data = dsidev->dev.platform_data; | ||
4040 | board_info = dss_plat_data->board_data; | 4134 | board_info = dss_plat_data->board_data; |
4041 | dsi.dsi_mux_pads = board_info->dsi_mux_pads; | 4135 | dsi.dsi_mux_pads = board_info->dsi_mux_pads; |
4042 | 4136 | ||
@@ -4097,12 +4191,12 @@ static int dsi_init(struct platform_device *pdev) | |||
4097 | dsi.vc[i].vc_id = 0; | 4191 | dsi.vc[i].vc_id = 0; |
4098 | } | 4192 | } |
4099 | 4193 | ||
4100 | dsi_calc_clock_param_ranges(); | 4194 | dsi_calc_clock_param_ranges(dsidev); |
4101 | 4195 | ||
4102 | enable_clocks(1); | 4196 | enable_clocks(1); |
4103 | 4197 | ||
4104 | rev = dsi_read_reg(DSI_REVISION); | 4198 | rev = dsi_read_reg(dsidev, DSI_REVISION); |
4105 | dev_dbg(&pdev->dev, "OMAP DSI rev %d.%d\n", | 4199 | dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n", |
4106 | FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); | 4200 | FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); |
4107 | 4201 | ||
4108 | enable_clocks(0); | 4202 | enable_clocks(0); |
@@ -4115,7 +4209,7 @@ err1: | |||
4115 | return r; | 4209 | return r; |
4116 | } | 4210 | } |
4117 | 4211 | ||
4118 | static void dsi_exit(void) | 4212 | static void dsi_exit(struct platform_device *dsidev) |
4119 | { | 4213 | { |
4120 | if (dsi.vdds_dsi_reg != NULL) { | 4214 | if (dsi.vdds_dsi_reg != NULL) { |
4121 | if (dsi.vdds_dsi_enabled) { | 4215 | if (dsi.vdds_dsi_enabled) { |
@@ -4136,11 +4230,11 @@ static void dsi_exit(void) | |||
4136 | } | 4230 | } |
4137 | 4231 | ||
4138 | /* DSI1 HW IP initialisation */ | 4232 | /* DSI1 HW IP initialisation */ |
4139 | static int omap_dsi1hw_probe(struct platform_device *pdev) | 4233 | static int omap_dsi1hw_probe(struct platform_device *dsidev) |
4140 | { | 4234 | { |
4141 | int r; | 4235 | int r; |
4142 | dsi.pdev = pdev; | 4236 | dsi.pdev = dsidev; |
4143 | r = dsi_init(pdev); | 4237 | r = dsi_init(dsidev); |
4144 | if (r) { | 4238 | if (r) { |
4145 | DSSERR("Failed to initialize DSI\n"); | 4239 | DSSERR("Failed to initialize DSI\n"); |
4146 | goto err_dsi; | 4240 | goto err_dsi; |
@@ -4149,9 +4243,9 @@ err_dsi: | |||
4149 | return r; | 4243 | return r; |
4150 | } | 4244 | } |
4151 | 4245 | ||
4152 | static int omap_dsi1hw_remove(struct platform_device *pdev) | 4246 | static int omap_dsi1hw_remove(struct platform_device *dsidev) |
4153 | { | 4247 | { |
4154 | dsi_exit(); | 4248 | dsi_exit(dsidev); |
4155 | WARN_ON(dsi.scp_clk_refcount > 0); | 4249 | WARN_ON(dsi.scp_clk_refcount > 0); |
4156 | return 0; | 4250 | return 0; |
4157 | } | 4251 | } |
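The probe/remove pair above still stores the device in the file-scope dsi.pdev, suggesting only one DSI module is actually driven at this point even though the map can hold two. They are hooked up through the usual platform_driver structure, which this patch leaves untouched and which is not shown here; roughly:

    /* Sketch; the driver structure is unchanged by this patch and not part
     * of the diff. The .name string is an assumption. */
    static struct platform_driver omap_dsi1hw_driver = {
            .probe          = omap_dsi1hw_probe,
            .remove         = omap_dsi1hw_remove,
            .driver         = {
                    .name   = "omapdss_dsi1",
                    .owner  = THIS_MODULE,
            },
    };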
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 3302bd367449..3bf6e626f862 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c | |||
@@ -300,6 +300,7 @@ void dss_dump_regs(struct seq_file *s) | |||
300 | 300 | ||
301 | void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) | 301 | void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) |
302 | { | 302 | { |
303 | struct platform_device *dsidev; | ||
303 | int b; | 304 | int b; |
304 | u8 start, end; | 305 | u8 start, end; |
305 | 306 | ||
@@ -309,7 +310,8 @@ void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) | |||
309 | break; | 310 | break; |
310 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: | 311 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: |
311 | b = 1; | 312 | b = 1; |
312 | dsi_wait_pll_hsdiv_dispc_active(); | 313 | dsidev = dsi_get_dsidev_from_id(0); |
314 | dsi_wait_pll_hsdiv_dispc_active(dsidev); | ||
313 | break; | 315 | break; |
314 | default: | 316 | default: |
315 | BUG(); | 317 | BUG(); |
@@ -324,6 +326,7 @@ void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) | |||
324 | 326 | ||
325 | void dss_select_dsi_clk_source(enum omap_dss_clk_source clk_src) | 327 | void dss_select_dsi_clk_source(enum omap_dss_clk_source clk_src) |
326 | { | 328 | { |
329 | struct platform_device *dsidev; | ||
327 | int b; | 330 | int b; |
328 | 331 | ||
329 | switch (clk_src) { | 332 | switch (clk_src) { |
@@ -332,7 +335,8 @@ void dss_select_dsi_clk_source(enum omap_dss_clk_source clk_src) | |||
332 | break; | 335 | break; |
333 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: | 336 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: |
334 | b = 1; | 337 | b = 1; |
335 | dsi_wait_pll_hsdiv_dsi_active(); | 338 | dsidev = dsi_get_dsidev_from_id(0); |
339 | dsi_wait_pll_hsdiv_dsi_active(dsidev); | ||
336 | break; | 340 | break; |
337 | default: | 341 | default: |
338 | BUG(); | 342 | BUG(); |
@@ -346,6 +350,7 @@ void dss_select_dsi_clk_source(enum omap_dss_clk_source clk_src) | |||
346 | void dss_select_lcd_clk_source(enum omap_channel channel, | 350 | void dss_select_lcd_clk_source(enum omap_channel channel, |
347 | enum omap_dss_clk_source clk_src) | 351 | enum omap_dss_clk_source clk_src) |
348 | { | 352 | { |
353 | struct platform_device *dsidev; | ||
349 | int b, ix, pos; | 354 | int b, ix, pos; |
350 | 355 | ||
351 | if (!dss_has_feature(FEAT_LCD_CLK_SRC)) | 356 | if (!dss_has_feature(FEAT_LCD_CLK_SRC)) |
@@ -358,7 +363,8 @@ void dss_select_lcd_clk_source(enum omap_channel channel, | |||
358 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: | 363 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: |
359 | BUG_ON(channel != OMAP_DSS_CHANNEL_LCD); | 364 | BUG_ON(channel != OMAP_DSS_CHANNEL_LCD); |
360 | b = 1; | 365 | b = 1; |
361 | dsi_wait_pll_hsdiv_dispc_active(); | 366 | dsidev = dsi_get_dsidev_from_id(0); |
367 | dsi_wait_pll_hsdiv_dispc_active(dsidev); | ||
362 | break; | 368 | break; |
363 | default: | 369 | default: |
364 | BUG(); | 370 | BUG(); |
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index 40764e047a5f..90b4d51dcd6c 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h | |||
@@ -287,18 +287,21 @@ void dsi_restore_context(void); | |||
287 | 287 | ||
288 | int dsi_init_display(struct omap_dss_device *display); | 288 | int dsi_init_display(struct omap_dss_device *display); |
289 | void dsi_irq_handler(void); | 289 | void dsi_irq_handler(void); |
290 | unsigned long dsi_get_pll_hsdiv_dispc_rate(void); | 290 | unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev); |
291 | int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo); | 291 | int dsi_pll_set_clock_div(struct platform_device *dsidev, |
292 | int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck, | 292 | struct dsi_clock_info *cinfo); |
293 | struct dsi_clock_info *cinfo, | 293 | int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, |
294 | unsigned long req_pck, struct dsi_clock_info *cinfo, | ||
294 | struct dispc_clock_info *dispc_cinfo); | 295 | struct dispc_clock_info *dispc_cinfo); |
295 | int dsi_pll_init(bool enable_hsclk, bool enable_hsdiv); | 296 | int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, |
296 | void dsi_pll_uninit(bool disconnect_lanes); | 297 | bool enable_hsdiv); |
298 | void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes); | ||
297 | void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, | 299 | void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, |
298 | u32 fifo_size, enum omap_burst_size *burst_size, | 300 | u32 fifo_size, enum omap_burst_size *burst_size, |
299 | u32 *fifo_low, u32 *fifo_high); | 301 | u32 *fifo_low, u32 *fifo_high); |
300 | void dsi_wait_pll_hsdiv_dispc_active(void); | 302 | void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev); |
301 | void dsi_wait_pll_hsdiv_dsi_active(void); | 303 | void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev); |
304 | struct platform_device *dsi_get_dsidev_from_id(int module); | ||
302 | #else | 305 | #else |
303 | static inline int dsi_init_platform_driver(void) | 306 | static inline int dsi_init_platform_driver(void) |
304 | { | 307 | { |
@@ -307,17 +310,23 @@ static inline int dsi_init_platform_driver(void) | |||
307 | static inline void dsi_uninit_platform_driver(void) | 310 | static inline void dsi_uninit_platform_driver(void) |
308 | { | 311 | { |
309 | } | 312 | } |
310 | static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(void) | 313 | static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) |
311 | { | 314 | { |
312 | WARN("%s: DSI not compiled in, returning rate as 0\n", __func__); | 315 | WARN("%s: DSI not compiled in, returning rate as 0\n", __func__); |
313 | return 0; | 316 | return 0; |
314 | } | 317 | } |
315 | static inline void dsi_wait_pll_hsdiv_dispc_active(void) | 318 | static inline void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev) |
316 | { | 319 | { |
317 | } | 320 | } |
318 | static inline void dsi_wait_pll_hsdiv_dsi_active(void) | 321 | static inline void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev) |
319 | { | 322 | { |
320 | } | 323 | } |
324 | static inline struct platform_device *dsi_get_dsidev_from_id(int module) | ||
325 | { | ||
326 | WARN("%s: DSI not compiled in, returning platform device as NULL\n", | ||
327 | __func__); | ||
328 | return NULL; | ||
329 | } | ||
321 | #endif | 330 | #endif |
322 | 331 | ||
323 | /* DPI */ | 332 | /* DPI */ |
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h index 5668fecbaede..857162b56f6b 100644 --- a/drivers/video/omap2/dss/dss_features.h +++ b/drivers/video/omap2/dss/dss_features.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #define MAX_DSS_MANAGERS 3 | 23 | #define MAX_DSS_MANAGERS 3 |
24 | #define MAX_DSS_OVERLAYS 3 | 24 | #define MAX_DSS_OVERLAYS 3 |
25 | #define MAX_DSS_LCD_MANAGERS 2 | 25 | #define MAX_DSS_LCD_MANAGERS 2 |
26 | #define MAX_NUM_DSI 2 | ||
26 | 27 | ||
27 | /* DSS has feature id */ | 28 | /* DSS has feature id */ |
28 | enum dss_feat_id { | 29 | enum dss_feat_id { |