Diffstat (limited to 'drivers/video/omap2/dss/dsi.c')
 -rw-r--r--  drivers/video/omap2/dss/dsi.c  | 464
 1 file changed, 215 insertions, 249 deletions
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 3af207b2bde3..aa4f7a5fae29 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -165,6 +165,14 @@ struct dsi_reg { u16 idx; };
 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
+#define DSI_CIO_IRQ_ERROR_MASK \
+        (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
+         DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
+         DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRCONTROL1 | \
+         DSI_CIO_IRQ_ERRCONTROL2 | DSI_CIO_IRQ_ERRCONTROL3 | \
+         DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
+         DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
+         DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3)
 
 #define DSI_DT_DCS_SHORT_WRITE_0  0x05
 #define DSI_DT_DCS_SHORT_WRITE_1  0x15
@@ -232,13 +240,15 @@ static struct
         unsigned pll_locked;
 
         struct completion bta_completion;
+        void (*bta_callback)(void);
 
         int update_channel;
         struct dsi_update_region update_region;
 
         bool te_enabled;
 
-        struct work_struct framedone_work;
+        struct workqueue_struct *workqueue;
+
         void (*framedone_callback)(int, void *);
         void *framedone_data;
 
@@ -509,9 +519,13 @@ void dsi_irq_handler(void)
                 dss_collect_irq_stats(vcstatus, dsi.irq_stats.vc_irqs[i]);
 #endif
 
-                if (vcstatus & DSI_VC_IRQ_BTA)
+                if (vcstatus & DSI_VC_IRQ_BTA) {
                         complete(&dsi.bta_completion);
 
+                        if (dsi.bta_callback)
+                                dsi.bta_callback();
+                }
+
                 if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
                         DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
                                         i, vcstatus);
@@ -536,8 +550,12 @@ void dsi_irq_handler(void)
                 /* flush posted write */
                 dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
 
-                DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
-                print_irq_status_cio(ciostatus);
+                if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
+                        DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
+                        print_irq_status_cio(ciostatus);
+                } else if (debug_irq) {
+                        print_irq_status_cio(ciostatus);
+                }
         }
 
         dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
@@ -584,11 +602,8 @@ static void _dsi_initialize_irq(void)
         for (i = 0; i < 4; ++i)
                 dsi_write_reg(DSI_VC_IRQENABLE(i), l);
 
-        /* XXX zonda responds incorrectly, causing control error:
-           Exit from LP-ESC mode to LP11 uses wrong transition states on the
-           data lines LP0 and LN0. */
-        dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE,
-                        -1 & (~DSI_CIO_IRQ_ERRCONTROL2));
+        l = DSI_CIO_IRQ_ERROR_MASK;
+        dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, l);
 }
 
 static u32 dsi_get_errors(void)
@@ -1098,6 +1113,7 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
         if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) {
                 DSSERR("PLL not coming out of reset.\n");
                 r = -ENODEV;
+                dispc_pck_free_enable(0);
                 goto err1;
         }
 
@@ -1740,42 +1756,52 @@ static void dsi_vc_initial_config(int channel)
         dsi.vc[channel].mode = DSI_VC_MODE_L4;
 }
 
-static void dsi_vc_config_l4(int channel)
+static int dsi_vc_config_l4(int channel)
 {
         if (dsi.vc[channel].mode == DSI_VC_MODE_L4)
-                return;
+                return 0;
 
         DSSDBGF("%d", channel);
 
         dsi_vc_enable(channel, 0);
 
-        if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
+        /* VC_BUSY */
+        if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) {
                 DSSERR("vc(%d) busy when trying to config for L4\n", channel);
+                return -EIO;
+        }
 
         REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
 
         dsi_vc_enable(channel, 1);
 
         dsi.vc[channel].mode = DSI_VC_MODE_L4;
+
+        return 0;
 }
 
-static void dsi_vc_config_vp(int channel)
+static int dsi_vc_config_vp(int channel)
 {
         if (dsi.vc[channel].mode == DSI_VC_MODE_VP)
-                return;
+                return 0;
 
         DSSDBGF("%d", channel);
 
         dsi_vc_enable(channel, 0);
 
-        if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
+        /* VC_BUSY */
+        if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) {
                 DSSERR("vc(%d) busy when trying to config for VP\n", channel);
+                return -EIO;
+        }
 
         REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */
 
         dsi_vc_enable(channel, 1);
 
         dsi.vc[channel].mode = DSI_VC_MODE_VP;
+
+        return 0;
 }
 
 
@@ -1854,19 +1880,19 @@ static u16 dsi_vc_flush_receive_data(int channel)
                 u32 val;
                 u8 dt;
                 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
-                DSSDBG("\trawval %#08x\n", val);
+                DSSERR("\trawval %#08x\n", val);
                 dt = FLD_GET(val, 5, 0);
                 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
                         u16 err = FLD_GET(val, 23, 8);
                         dsi_show_rx_ack_with_err(err);
                 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
-                        DSSDBG("\tDCS short response, 1 byte: %#x\n",
+                        DSSERR("\tDCS short response, 1 byte: %#x\n",
                                         FLD_GET(val, 23, 8));
                 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
-                        DSSDBG("\tDCS short response, 2 byte: %#x\n",
+                        DSSERR("\tDCS short response, 2 byte: %#x\n",
                                         FLD_GET(val, 23, 8));
                 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
-                        DSSDBG("\tDCS long response, len %d\n",
+                        DSSERR("\tDCS long response, len %d\n",
                                         FLD_GET(val, 23, 8));
                         dsi_vc_flush_long_data(channel);
                 } else {
@@ -2087,6 +2113,13 @@ int dsi_vc_dcs_write(int channel, u8 *data, int len)
         if (r)
                 goto err;
 
+        if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {    /* RX_FIFO_NOT_EMPTY */
+                DSSERR("rx fifo not empty after write, dumping data:\n");
+                dsi_vc_flush_receive_data(channel);
+                r = -EIO;
+                goto err;
+        }
+
         return 0;
 err:
         DSSERR("dsi_vc_dcs_write(ch %d, cmd 0x%02x, len %d) failed\n",
@@ -2233,11 +2266,12 @@ int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data)
 }
 EXPORT_SYMBOL(dsi_vc_dcs_read_1);
 
-int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u16 *data)
+int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u8 *data1, u8 *data2)
 {
+        u8 buf[2];
         int r;
 
-        r = dsi_vc_dcs_read(channel, dcs_cmd, (u8 *)data, 2);
+        r = dsi_vc_dcs_read(channel, dcs_cmd, buf, 2);
 
         if (r < 0)
                 return r;
@@ -2245,231 +2279,122 @@ int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u16 *data)
         if (r != 2)
                 return -EIO;
 
+        *data1 = buf[0];
+        *data2 = buf[1];
+
         return 0;
 }
 EXPORT_SYMBOL(dsi_vc_dcs_read_2);
 
 int dsi_vc_set_max_rx_packet_size(int channel, u16 len)
 {
-        int r;
-        r = dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
+        return dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
                         len, 0);
-
-        if (r)
-                return r;
-
-        r = dsi_vc_send_bta_sync(channel);
-
-        return r;
 }
 EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
 
-static void dsi_set_lp_rx_timeout(unsigned long ns)
+static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16)
 {
-        u32 r;
-        unsigned x4, x16;
         unsigned long fck;
-        unsigned long ticks;
+        unsigned long total_ticks;
+        u32 r;
 
-        /* ticks in DSI_FCK */
+        BUG_ON(ticks > 0x1fff);
 
+        /* ticks in DSI_FCK */
         fck = dsi_fclk_rate();
-        ticks = (fck / 1000 / 1000) * ns / 1000;
-        x4 = 0;
-        x16 = 0;
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
-                x4 = 1;
-                x16 = 0;
-        }
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
-                x4 = 0;
-                x16 = 1;
-        }
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
-                x4 = 1;
-                x16 = 1;
-        }
-
-        if (ticks > 0x1fff) {
-                DSSWARN("LP_TX_TO over limit, setting it to max\n");
-                ticks = 0x1fff;
-                x4 = 1;
-                x16 = 1;
-        }
 
         r = dsi_read_reg(DSI_TIMING2);
         r = FLD_MOD(r, 1, 15, 15);      /* LP_RX_TO */
-        r = FLD_MOD(r, x16, 14, 14);    /* LP_RX_TO_X16 */
-        r = FLD_MOD(r, x4, 13, 13);     /* LP_RX_TO_X4 */
+        r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);    /* LP_RX_TO_X16 */
+        r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);     /* LP_RX_TO_X4 */
         r = FLD_MOD(r, ticks, 12, 0);   /* LP_RX_COUNTER */
         dsi_write_reg(DSI_TIMING2, r);
 
-        DSSDBG("LP_RX_TO %lu ns (%#lx ticks%s%s)\n",
-                        (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
-                        (fck / 1000 / 1000),
-                        ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+        total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
+
+        DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
+                        total_ticks,
+                        ticks, x4 ? " x4" : "", x16 ? " x16" : "",
+                        (total_ticks * 1000) / (fck / 1000 / 1000));
 }
 
-static void dsi_set_ta_timeout(unsigned long ns)
+static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16)
 {
-        u32 r;
-        unsigned x8, x16;
         unsigned long fck;
-        unsigned long ticks;
+        unsigned long total_ticks;
+        u32 r;
+
+        BUG_ON(ticks > 0x1fff);
 
         /* ticks in DSI_FCK */
         fck = dsi_fclk_rate();
-        ticks = (fck / 1000 / 1000) * ns / 1000;
-        x8 = 0;
-        x16 = 0;
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / 8;
-                x8 = 1;
-                x16 = 0;
-        }
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
-                x8 = 0;
-                x16 = 1;
-        }
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / (8 * 16);
-                x8 = 1;
-                x16 = 1;
-        }
-
-        if (ticks > 0x1fff) {
-                DSSWARN("TA_TO over limit, setting it to max\n");
-                ticks = 0x1fff;
-                x8 = 1;
-                x16 = 1;
-        }
 
         r = dsi_read_reg(DSI_TIMING1);
         r = FLD_MOD(r, 1, 31, 31);      /* TA_TO */
-        r = FLD_MOD(r, x16, 30, 30);    /* TA_TO_X16 */
-        r = FLD_MOD(r, x8, 29, 29);     /* TA_TO_X8 */
+        r = FLD_MOD(r, x16 ? 1 : 0, 30, 30);    /* TA_TO_X16 */
+        r = FLD_MOD(r, x8 ? 1 : 0, 29, 29);     /* TA_TO_X8 */
         r = FLD_MOD(r, ticks, 28, 16);  /* TA_TO_COUNTER */
         dsi_write_reg(DSI_TIMING1, r);
 
-        DSSDBG("TA_TO %lu ns (%#lx ticks%s%s)\n",
-                        (ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) /
-                        (fck / 1000 / 1000),
-                        ticks, x8 ? " x8" : "", x16 ? " x16" : "");
+        total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
+
+        DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
+                        total_ticks,
+                        ticks, x8 ? " x8" : "", x16 ? " x16" : "",
+                        (total_ticks * 1000) / (fck / 1000 / 1000));
 }
 
-static void dsi_set_stop_state_counter(unsigned long ns)
+static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16)
 {
-        u32 r;
-        unsigned x4, x16;
         unsigned long fck;
-        unsigned long ticks;
+        unsigned long total_ticks;
+        u32 r;
 
-        /* ticks in DSI_FCK */
+        BUG_ON(ticks > 0x1fff);
 
+        /* ticks in DSI_FCK */
         fck = dsi_fclk_rate();
-        ticks = (fck / 1000 / 1000) * ns / 1000;
-        x4 = 0;
-        x16 = 0;
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
-                x4 = 1;
-                x16 = 0;
-        }
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
-                x4 = 0;
-                x16 = 1;
-        }
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
-                x4 = 1;
-                x16 = 1;
-        }
-
-        if (ticks > 0x1fff) {
-                DSSWARN("STOP_STATE_COUNTER_IO over limit, "
-                                "setting it to max\n");
-                ticks = 0x1fff;
-                x4 = 1;
-                x16 = 1;
-        }
 
         r = dsi_read_reg(DSI_TIMING1);
         r = FLD_MOD(r, 1, 15, 15);      /* FORCE_TX_STOP_MODE_IO */
-        r = FLD_MOD(r, x16, 14, 14);    /* STOP_STATE_X16_IO */
-        r = FLD_MOD(r, x4, 13, 13);     /* STOP_STATE_X4_IO */
+        r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);    /* STOP_STATE_X16_IO */
+        r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);     /* STOP_STATE_X4_IO */
         r = FLD_MOD(r, ticks, 12, 0);   /* STOP_STATE_COUNTER_IO */
         dsi_write_reg(DSI_TIMING1, r);
 
-        DSSDBG("STOP_STATE_COUNTER %lu ns (%#lx ticks%s%s)\n",
-                        (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
-                        (fck / 1000 / 1000),
-                        ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+        total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
+
+        DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
+                        total_ticks,
+                        ticks, x4 ? " x4" : "", x16 ? " x16" : "",
+                        (total_ticks * 1000) / (fck / 1000 / 1000));
 }
 
-static void dsi_set_hs_tx_timeout(unsigned long ns)
+static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16)
 {
-        u32 r;
-        unsigned x4, x16;
         unsigned long fck;
-        unsigned long ticks;
+        unsigned long total_ticks;
+        u32 r;
 
-        /* ticks in TxByteClkHS */
+        BUG_ON(ticks > 0x1fff);
 
+        /* ticks in TxByteClkHS */
         fck = dsi_get_txbyteclkhs();
-        ticks = (fck / 1000 / 1000) * ns / 1000;
-        x4 = 0;
-        x16 = 0;
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
-                x4 = 1;
-                x16 = 0;
-        }
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
-                x4 = 0;
-                x16 = 1;
-        }
-
-        if (ticks > 0x1fff) {
-                ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
-                x4 = 1;
-                x16 = 1;
-        }
-
-        if (ticks > 0x1fff) {
-                DSSWARN("HS_TX_TO over limit, setting it to max\n");
-                ticks = 0x1fff;
-                x4 = 1;
-                x16 = 1;
-        }
 
         r = dsi_read_reg(DSI_TIMING2);
         r = FLD_MOD(r, 1, 31, 31);      /* HS_TX_TO */
-        r = FLD_MOD(r, x16, 30, 30);    /* HS_TX_TO_X16 */
-        r = FLD_MOD(r, x4, 29, 29);     /* HS_TX_TO_X8 (4 really) */
+        r = FLD_MOD(r, x16 ? 1 : 0, 30, 30);    /* HS_TX_TO_X16 */
+        r = FLD_MOD(r, x4 ? 1 : 0, 29, 29);     /* HS_TX_TO_X8 (4 really) */
         r = FLD_MOD(r, ticks, 28, 16);  /* HS_TX_TO_COUNTER */
         dsi_write_reg(DSI_TIMING2, r);
 
-        DSSDBG("HS_TX_TO %lu ns (%#lx ticks%s%s)\n",
-                        (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
-                        (fck / 1000 / 1000),
-                        ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+        total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
+
+        DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
+                        total_ticks,
+                        ticks, x4 ? " x4" : "", x16 ? " x16" : "",
+                        (total_ticks * 1000) / (fck / 1000 / 1000));
 }
 static int dsi_proto_config(struct omap_dss_device *dssdev)
 {
@@ -2487,10 +2412,10 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
                         DSI_FIFO_SIZE_32);
 
         /* XXX what values for the timeouts? */
-        dsi_set_stop_state_counter(1000);
-        dsi_set_ta_timeout(6400000);
-        dsi_set_lp_rx_timeout(48000);
-        dsi_set_hs_tx_timeout(1000000);
+        dsi_set_stop_state_counter(0x1000, false, false);
+        dsi_set_ta_timeout(0x1fff, true, true);
+        dsi_set_lp_rx_timeout(0x1fff, true, true);
+        dsi_set_hs_tx_timeout(0x1fff, true, true);
 
         switch (dssdev->ctrl.pixel_size) {
         case 16:
@@ -2759,6 +2684,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
         unsigned packet_payload;
         unsigned packet_len;
         u32 l;
+        int r;
         const unsigned channel = dsi.update_channel;
         /* line buffer is 1024 x 24bits */
         /* XXX: for some reason using full buffer size causes considerable TX
@@ -2809,8 +2735,9 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
 
         dsi_perf_mark_start();
 
-        schedule_delayed_work(&dsi.framedone_timeout_work,
+        r = queue_delayed_work(dsi.workqueue, &dsi.framedone_timeout_work,
                         msecs_to_jiffies(250));
+        BUG_ON(r == 0);
 
         dss_start_update(dssdev);
 
@@ -2834,62 +2761,70 @@ static void dsi_te_timeout(unsigned long arg)
 }
 #endif
 
-static void dsi_framedone_timeout_work_callback(struct work_struct *work)
+static void dsi_handle_framedone(int error)
 {
-        int r;
         const int channel = dsi.update_channel;
 
-        DSSERR("Framedone not received for 250ms!\n");
+        cancel_delayed_work(&dsi.framedone_timeout_work);
+
+        dsi_vc_disable_bta_irq(channel);
 
         /* SIDLEMODE back to smart-idle */
         dispc_enable_sidle();
 
+        dsi.bta_callback = NULL;
+
         if (dsi.te_enabled) {
                 /* enable LP_RX_TO again after the TE */
                 REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
         }
 
-        /* Send BTA after the frame. We need this for the TE to work, as TE
-         * trigger is only sent for BTAs without preceding packet. Thus we need
-         * to BTA after the pixel packets so that next BTA will cause TE
-         * trigger.
-         *
-         * This is not needed when TE is not in use, but we do it anyway to
-         * make sure that the transfer has been completed. It would be more
-         * optimal, but more complex, to wait only just before starting next
-         * transfer. */
-        r = dsi_vc_send_bta_sync(channel);
-        if (r)
-                DSSERR("BTA after framedone failed\n");
-
         /* RX_FIFO_NOT_EMPTY */
         if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
                 DSSERR("Received error during frame transfer:\n");
                 dsi_vc_flush_receive_data(channel);
+                if (!error)
+                        error = -EIO;
         }
 
-        dsi.framedone_callback(-ETIMEDOUT, dsi.framedone_data);
+        dsi.framedone_callback(error, dsi.framedone_data);
+
+        if (!error)
+                dsi_perf_show("DISPC");
 }
 
-static void dsi_framedone_irq_callback(void *data, u32 mask)
+static void dsi_framedone_timeout_work_callback(struct work_struct *work)
 {
-        /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
-         * turns itself off. However, DSI still has the pixels in its buffers,
-         * and is sending the data.
-         */
+        /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
+         * 250ms which would conflict with this timeout work. What should be
+         * done is first cancel the transfer on the HW, and then cancel the
+         * possibly scheduled framedone work. However, cancelling the transfer
+         * on the HW is buggy, and would probably require resetting the whole
+         * DSI */
 
-        /* SIDLEMODE back to smart-idle */
-        dispc_enable_sidle();
+        DSSERR("Framedone not received for 250ms!\n");
 
-        schedule_work(&dsi.framedone_work);
+        dsi_handle_framedone(-ETIMEDOUT);
 }
 
-static void dsi_handle_framedone(void)
+static void dsi_framedone_bta_callback(void)
+{
+        dsi_handle_framedone(0);
+
+#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
+        dispc_fake_vsync_irq();
+#endif
+}
+
+static void dsi_framedone_irq_callback(void *data, u32 mask)
 {
-        int r;
         const int channel = dsi.update_channel;
+        int r;
 
-        DSSDBG("FRAMEDONE\n");
+        /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
+         * turns itself off. However, DSI still has the pixels in its buffers,
+         * and is sending the data.
+         */
 
         if (dsi.te_enabled) {
                 /* enable LP_RX_TO again after the TE */
@@ -2904,37 +2839,30 @@ static void dsi_handle_framedone(void)
          * This is not needed when TE is not in use, but we do it anyway to
          * make sure that the transfer has been completed. It would be more
          * optimal, but more complex, to wait only just before starting next
-         * transfer. */
-        r = dsi_vc_send_bta_sync(channel);
-        if (r)
-                DSSERR("BTA after framedone failed\n");
-
-        /* RX_FIFO_NOT_EMPTY */
-        if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
-                DSSERR("Received error during frame transfer:\n");
-                dsi_vc_flush_receive_data(channel);
-        }
-
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
-        dispc_fake_vsync_irq();
-#endif
-}
-
-static void dsi_framedone_work_callback(struct work_struct *work)
-{
-        DSSDBGF();
+         * transfer.
+         *
+         * Also, as there's no interrupt telling when the transfer has been
+         * done and the channel could be reconfigured, the only way is to
+         * busyloop until TE_SIZE is zero. With BTA we can do this
+         * asynchronously.
+         * */
 
-        cancel_delayed_work_sync(&dsi.framedone_timeout_work);
+        dsi.bta_callback = dsi_framedone_bta_callback;
 
-        dsi_handle_framedone();
+        barrier();
 
-        dsi_perf_show("DISPC");
+        dsi_vc_enable_bta_irq(channel);
 
-        dsi.framedone_callback(0, dsi.framedone_data);
+        r = dsi_vc_send_bta(channel);
+        if (r) {
+                DSSERR("BTA after framedone failed\n");
+                dsi_handle_framedone(-EIO);
+        }
 }
 
 int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
-                u16 *x, u16 *y, u16 *w, u16 *h)
+                u16 *x, u16 *y, u16 *w, u16 *h,
+                bool enlarge_update_area)
 {
         u16 dw, dh;
 
@@ -2958,7 +2886,8 @@ int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
         dsi_perf_mark_setup();
 
         if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
-                dss_setup_partial_planes(dssdev, x, y, w, h);
+                dss_setup_partial_planes(dssdev, x, y, w, h,
+                                enlarge_update_area);
                 dispc_set_lcd_size(*w, *h);
         }
 
@@ -2973,6 +2902,12 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
 {
         dsi.update_channel = channel;
 
+        /* OMAP DSS cannot send updates of odd widths.
+         * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
+         * here to make sure we catch erroneous updates. Otherwise we'll only
+         * see rather obscure HW error happening, as DSS halts. */
+        BUG_ON(x % 2 == 1);
+
         if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
                 dsi.framedone_callback = callback;
                 dsi.framedone_data = data;
@@ -2985,7 +2920,12 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
 
                 dsi_update_screen_dispc(dssdev, x, y, w, h);
         } else {
-                dsi_update_screen_l4(dssdev, x, y, w, h);
+                int r;
+
+                r = dsi_update_screen_l4(dssdev, x, y, w, h);
+                if (r)
+                        return r;
+
                 dsi_perf_show("L4");
                 callback(0, data);
         }
@@ -3048,8 +2988,10 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
         cinfo.regm3 = dssdev->phy.dsi.div.regm3;
         cinfo.regm4 = dssdev->phy.dsi.div.regm4;
         r = dsi_calc_clock_rates(&cinfo);
-        if (r)
+        if (r) {
+                DSSERR("Failed to calc dsi clocks\n");
                 return r;
+        }
 
         r = dsi_pll_set_clock_div(&cinfo);
         if (r) {
@@ -3147,6 +3089,13 @@ err0:
 
 static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
 {
+        /* disable interface */
+        dsi_if_enable(0);
+        dsi_vc_enable(0, 0);
+        dsi_vc_enable(1, 0);
+        dsi_vc_enable(2, 0);
+        dsi_vc_enable(3, 0);
+
         dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
         dss_select_dsi_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
         dsi_complexio_uninit();
@@ -3257,7 +3206,7 @@ void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
         burst_size_bytes = 16 * 32 / 8;
 
         *fifo_high = fifo_size - burst_size_bytes;
-        *fifo_low = fifo_size - burst_size_bytes * 8;
+        *fifo_low = fifo_size - burst_size_bytes * 2;
 }
 
 int dsi_init_display(struct omap_dss_device *dssdev)
@@ -3274,6 +3223,18 @@ int dsi_init_display(struct omap_dss_device *dssdev)
         return 0;
 }
 
+void dsi_wait_dsi1_pll_active(void)
+{
+        if (wait_for_bit_change(DSI_PLL_STATUS, 7, 1) != 1)
+                DSSERR("DSI1 PLL clock not active\n");
+}
+
+void dsi_wait_dsi2_pll_active(void)
+{
+        if (wait_for_bit_change(DSI_PLL_STATUS, 8, 1) != 1)
+                DSSERR("DSI2 PLL clock not active\n");
+}
+
 int dsi_init(struct platform_device *pdev)
 {
         u32 rev;
@@ -3292,7 +3253,10 @@ int dsi_init(struct platform_device *pdev)
         mutex_init(&dsi.lock);
         sema_init(&dsi.bus_lock, 1);
 
-        INIT_WORK(&dsi.framedone_work, dsi_framedone_work_callback);
+        dsi.workqueue = create_singlethread_workqueue("dsi");
+        if (dsi.workqueue == NULL)
+                return -ENOMEM;
+
         INIT_DELAYED_WORK_DEFERRABLE(&dsi.framedone_timeout_work,
                         dsi_framedone_timeout_work_callback);
 
@@ -3310,7 +3274,6 @@ int dsi_init(struct platform_device *pdev)
 
         dsi.vdds_dsi_reg = dss_get_vdds_dsi();
         if (IS_ERR(dsi.vdds_dsi_reg)) {
-                iounmap(dsi.base);
                 DSSERR("can't get VDDS_DSI regulator\n");
                 r = PTR_ERR(dsi.vdds_dsi_reg);
                 goto err2;
@@ -3328,6 +3291,7 @@ int dsi_init(struct platform_device *pdev)
 err2:
         iounmap(dsi.base);
 err1:
+        destroy_workqueue(dsi.workqueue);
         return r;
 }
 
@@ -3335,6 +3299,8 @@ void dsi_exit(void)
 {
         iounmap(dsi.base);
 
+        destroy_workqueue(dsi.workqueue);
+
         DSSDBG("omap_dsi_exit\n");
 }
 