about summary refs log tree commit diff stats
path: root/drivers/video
diff options
context:
space:
mode:
authorArchit Taneja <archit@ti.com>2011-05-12 07:56:27 -0400
committerTomi Valkeinen <tomi.valkeinen@ti.com>2011-05-12 12:30:26 -0400
commitf1da39d9ce8490c5652768180454a816eb043b48 (patch)
tree52d301d4804674c45cdddbd5bb66d4aa70fd36b0 /drivers/video
parenta72b64b99918ee801a3a6abf5391e356752bcad0 (diff)
OMAP: DSS2: DSI: Use platform_device pointer to get dsi data
The dsi related data structure currently creates one global instance of itself which is accessed by dsi functions. Remove this global structure instance and declare the struct as dsi_data. Modify dsi_init() to allocate a "dsi_data" structure for each platform device instance. Link this data with the device's platform_device pointer. Create the function dsi_get_dsidrv_data() which takes the pdev and return a pointer to the device's dsi_data. Make dsi_get_dsidev_id() return only 0 for now, this will be removed once the name of the DSI platform device is changed to the device instance form, like "omapdss_dsi.0" and "omapdss_dsi.1" etc. Signed-off-by: Archit Taneja <archit@ti.com> Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Diffstat (limited to 'drivers/video')
-rw-r--r--drivers/video/omap2/dss/dsi.c672
1 file changed, 409 insertions, 263 deletions
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 8d03eb6adcfd..61ee3dbd5486 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -34,6 +34,7 @@
34#include <linux/wait.h> 34#include <linux/wait.h>
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36#include <linux/sched.h> 36#include <linux/sched.h>
37#include <linux/slab.h>
37 38
38#include <video/omapdss.h> 39#include <video/omapdss.h>
39#include <plat/clock.h> 40#include <plat/clock.h>
@@ -257,7 +258,7 @@ struct dsi_isr_tables {
257 struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS]; 258 struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
258}; 259};
259 260
260static struct dsi_data { 261struct dsi_data {
261 struct platform_device *pdev; 262 struct platform_device *pdev;
262 void __iomem *base; 263 void __iomem *base;
263 int irq; 264 int irq;
@@ -327,7 +328,7 @@ static struct dsi_data {
327 unsigned long lpdiv_max; 328 unsigned long lpdiv_max;
328 329
329 unsigned scp_clk_refcount; 330 unsigned scp_clk_refcount;
330} dsi; 331};
331 332
332static struct platform_device *dsi_pdev_map[MAX_NUM_DSI]; 333static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
333 334
@@ -336,6 +337,11 @@ static unsigned int dsi_perf;
336module_param_named(dsi_perf, dsi_perf, bool, 0644); 337module_param_named(dsi_perf, dsi_perf, bool, 0644);
337#endif 338#endif
338 339
340static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
341{
342 return dev_get_drvdata(&dsidev->dev);
343}
344
339static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev) 345static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
340{ 346{
341 return dsi_pdev_map[dssdev->phy.dsi.module]; 347 return dsi_pdev_map[dssdev->phy.dsi.module];
@@ -346,16 +352,30 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
346 return dsi_pdev_map[module]; 352 return dsi_pdev_map[module];
347} 353}
348 354
355static int dsi_get_dsidev_id(struct platform_device *dsidev)
356{
357 /* TEMP: Pass 0 as the dsi module index till the time the dsi platform
358 * device names aren't changed to the form "omapdss_dsi.0",
359 * "omapdss_dsi.1" and so on */
360 BUG_ON(dsidev->id != -1);
361
362 return 0;
363}
364
349static inline void dsi_write_reg(struct platform_device *dsidev, 365static inline void dsi_write_reg(struct platform_device *dsidev,
350 const struct dsi_reg idx, u32 val) 366 const struct dsi_reg idx, u32 val)
351{ 367{
352 __raw_writel(val, dsi.base + idx.idx); 368 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
369
370 __raw_writel(val, dsi->base + idx.idx);
353} 371}
354 372
355static inline u32 dsi_read_reg(struct platform_device *dsidev, 373static inline u32 dsi_read_reg(struct platform_device *dsidev,
356 const struct dsi_reg idx) 374 const struct dsi_reg idx)
357{ 375{
358 return __raw_readl(dsi.base + idx.idx); 376 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
377
378 return __raw_readl(dsi->base + idx.idx);
359} 379}
360 380
361 381
@@ -369,19 +389,27 @@ void dsi_restore_context(void)
369 389
370void dsi_bus_lock(struct omap_dss_device *dssdev) 390void dsi_bus_lock(struct omap_dss_device *dssdev)
371{ 391{
372 down(&dsi.bus_lock); 392 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
393 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
394
395 down(&dsi->bus_lock);
373} 396}
374EXPORT_SYMBOL(dsi_bus_lock); 397EXPORT_SYMBOL(dsi_bus_lock);
375 398
376void dsi_bus_unlock(struct omap_dss_device *dssdev) 399void dsi_bus_unlock(struct omap_dss_device *dssdev)
377{ 400{
378 up(&dsi.bus_lock); 401 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
402 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
403
404 up(&dsi->bus_lock);
379} 405}
380EXPORT_SYMBOL(dsi_bus_unlock); 406EXPORT_SYMBOL(dsi_bus_unlock);
381 407
382static bool dsi_bus_is_locked(struct platform_device *dsidev) 408static bool dsi_bus_is_locked(struct platform_device *dsidev)
383{ 409{
384 return dsi.bus_lock.count == 0; 410 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
411
412 return dsi->bus_lock.count == 0;
385} 413}
386 414
387static void dsi_completion_handler(void *data, u32 mask) 415static void dsi_completion_handler(void *data, u32 mask)
@@ -405,16 +433,19 @@ static inline int wait_for_bit_change(struct platform_device *dsidev,
405#ifdef DEBUG 433#ifdef DEBUG
406static void dsi_perf_mark_setup(struct platform_device *dsidev) 434static void dsi_perf_mark_setup(struct platform_device *dsidev)
407{ 435{
408 dsi.perf_setup_time = ktime_get(); 436 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
437 dsi->perf_setup_time = ktime_get();
409} 438}
410 439
411static void dsi_perf_mark_start(struct platform_device *dsidev) 440static void dsi_perf_mark_start(struct platform_device *dsidev)
412{ 441{
413 dsi.perf_start_time = ktime_get(); 442 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
443 dsi->perf_start_time = ktime_get();
414} 444}
415 445
416static void dsi_perf_show(struct platform_device *dsidev, const char *name) 446static void dsi_perf_show(struct platform_device *dsidev, const char *name)
417{ 447{
448 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
418 ktime_t t, setup_time, trans_time; 449 ktime_t t, setup_time, trans_time;
419 u32 total_bytes; 450 u32 total_bytes;
420 u32 setup_us, trans_us, total_us; 451 u32 setup_us, trans_us, total_us;
@@ -424,21 +455,21 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
424 455
425 t = ktime_get(); 456 t = ktime_get();
426 457
427 setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time); 458 setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
428 setup_us = (u32)ktime_to_us(setup_time); 459 setup_us = (u32)ktime_to_us(setup_time);
429 if (setup_us == 0) 460 if (setup_us == 0)
430 setup_us = 1; 461 setup_us = 1;
431 462
432 trans_time = ktime_sub(t, dsi.perf_start_time); 463 trans_time = ktime_sub(t, dsi->perf_start_time);
433 trans_us = (u32)ktime_to_us(trans_time); 464 trans_us = (u32)ktime_to_us(trans_time);
434 if (trans_us == 0) 465 if (trans_us == 0)
435 trans_us = 1; 466 trans_us = 1;
436 467
437 total_us = setup_us + trans_us; 468 total_us = setup_us + trans_us;
438 469
439 total_bytes = dsi.update_region.w * 470 total_bytes = dsi->update_region.w *
440 dsi.update_region.h * 471 dsi->update_region.h *
441 dsi.update_region.device->ctrl.pixel_size / 8; 472 dsi->update_region.device->ctrl.pixel_size / 8;
442 473
443 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), " 474 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
444 "%u bytes, %u kbytes/sec\n", 475 "%u bytes, %u kbytes/sec\n",
@@ -562,19 +593,20 @@ static void print_irq_status_cio(u32 status)
562static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus, 593static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
563 u32 *vcstatus, u32 ciostatus) 594 u32 *vcstatus, u32 ciostatus)
564{ 595{
596 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
565 int i; 597 int i;
566 598
567 spin_lock(&dsi.irq_stats_lock); 599 spin_lock(&dsi->irq_stats_lock);
568 600
569 dsi.irq_stats.irq_count++; 601 dsi->irq_stats.irq_count++;
570 dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs); 602 dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);
571 603
572 for (i = 0; i < 4; ++i) 604 for (i = 0; i < 4; ++i)
573 dss_collect_irq_stats(vcstatus[i], dsi.irq_stats.vc_irqs[i]); 605 dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);
574 606
575 dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs); 607 dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);
576 608
577 spin_unlock(&dsi.irq_stats_lock); 609 spin_unlock(&dsi->irq_stats_lock);
578} 610}
579#else 611#else
580#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus) 612#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
@@ -585,14 +617,15 @@ static int debug_irq;
585static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus, 617static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
586 u32 *vcstatus, u32 ciostatus) 618 u32 *vcstatus, u32 ciostatus)
587{ 619{
620 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
588 int i; 621 int i;
589 622
590 if (irqstatus & DSI_IRQ_ERROR_MASK) { 623 if (irqstatus & DSI_IRQ_ERROR_MASK) {
591 DSSERR("DSI error, irqstatus %x\n", irqstatus); 624 DSSERR("DSI error, irqstatus %x\n", irqstatus);
592 print_irq_status(irqstatus); 625 print_irq_status(irqstatus);
593 spin_lock(&dsi.errors_lock); 626 spin_lock(&dsi->errors_lock);
594 dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK; 627 dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
595 spin_unlock(&dsi.errors_lock); 628 spin_unlock(&dsi->errors_lock);
596 } else if (debug_irq) { 629 } else if (debug_irq) {
597 print_irq_status(irqstatus); 630 print_irq_status(irqstatus);
598 } 631 }
@@ -654,18 +687,20 @@ static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
654static irqreturn_t omap_dsi_irq_handler(int irq, void *arg) 687static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
655{ 688{
656 struct platform_device *dsidev; 689 struct platform_device *dsidev;
690 struct dsi_data *dsi;
657 u32 irqstatus, vcstatus[4], ciostatus; 691 u32 irqstatus, vcstatus[4], ciostatus;
658 int i; 692 int i;
659 693
660 dsidev = (struct platform_device *) arg; 694 dsidev = (struct platform_device *) arg;
695 dsi = dsi_get_dsidrv_data(dsidev);
661 696
662 spin_lock(&dsi.irq_lock); 697 spin_lock(&dsi->irq_lock);
663 698
664 irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS); 699 irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
665 700
666 /* IRQ is not for us */ 701 /* IRQ is not for us */
667 if (!irqstatus) { 702 if (!irqstatus) {
668 spin_unlock(&dsi.irq_lock); 703 spin_unlock(&dsi->irq_lock);
669 return IRQ_NONE; 704 return IRQ_NONE;
670 } 705 }
671 706
@@ -698,16 +733,17 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
698 733
699#ifdef DSI_CATCH_MISSING_TE 734#ifdef DSI_CATCH_MISSING_TE
700 if (irqstatus & DSI_IRQ_TE_TRIGGER) 735 if (irqstatus & DSI_IRQ_TE_TRIGGER)
701 del_timer(&dsi.te_timer); 736 del_timer(&dsi->te_timer);
702#endif 737#endif
703 738
704 /* make a copy and unlock, so that isrs can unregister 739 /* make a copy and unlock, so that isrs can unregister
705 * themselves */ 740 * themselves */
706 memcpy(&dsi.isr_tables_copy, &dsi.isr_tables, sizeof(dsi.isr_tables)); 741 memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
742 sizeof(dsi->isr_tables));
707 743
708 spin_unlock(&dsi.irq_lock); 744 spin_unlock(&dsi->irq_lock);
709 745
710 dsi_handle_isrs(&dsi.isr_tables_copy, irqstatus, vcstatus, ciostatus); 746 dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
711 747
712 dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus); 748 dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
713 749
@@ -716,7 +752,7 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
716 return IRQ_HANDLED; 752 return IRQ_HANDLED;
717} 753}
718 754
719/* dsi.irq_lock has to be locked by the caller */ 755/* dsi->irq_lock has to be locked by the caller */
720static void _omap_dsi_configure_irqs(struct platform_device *dsidev, 756static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
721 struct dsi_isr_data *isr_array, 757 struct dsi_isr_data *isr_array,
722 unsigned isr_array_size, u32 default_mask, 758 unsigned isr_array_size, u32 default_mask,
@@ -749,51 +785,57 @@ static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
749 dsi_read_reg(dsidev, status_reg); 785 dsi_read_reg(dsidev, status_reg);
750} 786}
751 787
752/* dsi.irq_lock has to be locked by the caller */ 788/* dsi->irq_lock has to be locked by the caller */
753static void _omap_dsi_set_irqs(struct platform_device *dsidev) 789static void _omap_dsi_set_irqs(struct platform_device *dsidev)
754{ 790{
791 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
755 u32 mask = DSI_IRQ_ERROR_MASK; 792 u32 mask = DSI_IRQ_ERROR_MASK;
756#ifdef DSI_CATCH_MISSING_TE 793#ifdef DSI_CATCH_MISSING_TE
757 mask |= DSI_IRQ_TE_TRIGGER; 794 mask |= DSI_IRQ_TE_TRIGGER;
758#endif 795#endif
759 _omap_dsi_configure_irqs(dsidev, dsi.isr_tables.isr_table, 796 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
760 ARRAY_SIZE(dsi.isr_tables.isr_table), mask, 797 ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
761 DSI_IRQENABLE, DSI_IRQSTATUS); 798 DSI_IRQENABLE, DSI_IRQSTATUS);
762} 799}
763 800
764/* dsi.irq_lock has to be locked by the caller */ 801/* dsi->irq_lock has to be locked by the caller */
765static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc) 802static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
766{ 803{
767 _omap_dsi_configure_irqs(dsidev, dsi.isr_tables.isr_table_vc[vc], 804 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
768 ARRAY_SIZE(dsi.isr_tables.isr_table_vc[vc]), 805
806 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
807 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
769 DSI_VC_IRQ_ERROR_MASK, 808 DSI_VC_IRQ_ERROR_MASK,
770 DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc)); 809 DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
771} 810}
772 811
773/* dsi.irq_lock has to be locked by the caller */ 812/* dsi->irq_lock has to be locked by the caller */
774static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev) 813static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
775{ 814{
776 _omap_dsi_configure_irqs(dsidev, dsi.isr_tables.isr_table_cio, 815 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
777 ARRAY_SIZE(dsi.isr_tables.isr_table_cio), 816
817 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
818 ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
778 DSI_CIO_IRQ_ERROR_MASK, 819 DSI_CIO_IRQ_ERROR_MASK,
779 DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS); 820 DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
780} 821}
781 822
782static void _dsi_initialize_irq(struct platform_device *dsidev) 823static void _dsi_initialize_irq(struct platform_device *dsidev)
783{ 824{
825 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
784 unsigned long flags; 826 unsigned long flags;
785 int vc; 827 int vc;
786 828
787 spin_lock_irqsave(&dsi.irq_lock, flags); 829 spin_lock_irqsave(&dsi->irq_lock, flags);
788 830
789 memset(&dsi.isr_tables, 0, sizeof(dsi.isr_tables)); 831 memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
790 832
791 _omap_dsi_set_irqs(dsidev); 833 _omap_dsi_set_irqs(dsidev);
792 for (vc = 0; vc < 4; ++vc) 834 for (vc = 0; vc < 4; ++vc)
793 _omap_dsi_set_irqs_vc(dsidev, vc); 835 _omap_dsi_set_irqs_vc(dsidev, vc);
794 _omap_dsi_set_irqs_cio(dsidev); 836 _omap_dsi_set_irqs_cio(dsidev);
795 837
796 spin_unlock_irqrestore(&dsi.irq_lock, flags); 838 spin_unlock_irqrestore(&dsi->irq_lock, flags);
797} 839}
798 840
799static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask, 841static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
@@ -855,18 +897,19 @@ static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
855static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr, 897static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
856 void *arg, u32 mask) 898 void *arg, u32 mask)
857{ 899{
900 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
858 unsigned long flags; 901 unsigned long flags;
859 int r; 902 int r;
860 903
861 spin_lock_irqsave(&dsi.irq_lock, flags); 904 spin_lock_irqsave(&dsi->irq_lock, flags);
862 905
863 r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table, 906 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
864 ARRAY_SIZE(dsi.isr_tables.isr_table)); 907 ARRAY_SIZE(dsi->isr_tables.isr_table));
865 908
866 if (r == 0) 909 if (r == 0)
867 _omap_dsi_set_irqs(dsidev); 910 _omap_dsi_set_irqs(dsidev);
868 911
869 spin_unlock_irqrestore(&dsi.irq_lock, flags); 912 spin_unlock_irqrestore(&dsi->irq_lock, flags);
870 913
871 return r; 914 return r;
872} 915}
@@ -874,18 +917,19 @@ static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
874static int dsi_unregister_isr(struct platform_device *dsidev, 917static int dsi_unregister_isr(struct platform_device *dsidev,
875 omap_dsi_isr_t isr, void *arg, u32 mask) 918 omap_dsi_isr_t isr, void *arg, u32 mask)
876{ 919{
920 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
877 unsigned long flags; 921 unsigned long flags;
878 int r; 922 int r;
879 923
880 spin_lock_irqsave(&dsi.irq_lock, flags); 924 spin_lock_irqsave(&dsi->irq_lock, flags);
881 925
882 r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table, 926 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
883 ARRAY_SIZE(dsi.isr_tables.isr_table)); 927 ARRAY_SIZE(dsi->isr_tables.isr_table));
884 928
885 if (r == 0) 929 if (r == 0)
886 _omap_dsi_set_irqs(dsidev); 930 _omap_dsi_set_irqs(dsidev);
887 931
888 spin_unlock_irqrestore(&dsi.irq_lock, flags); 932 spin_unlock_irqrestore(&dsi->irq_lock, flags);
889 933
890 return r; 934 return r;
891} 935}
@@ -893,19 +937,20 @@ static int dsi_unregister_isr(struct platform_device *dsidev,
893static int dsi_register_isr_vc(struct platform_device *dsidev, int channel, 937static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
894 omap_dsi_isr_t isr, void *arg, u32 mask) 938 omap_dsi_isr_t isr, void *arg, u32 mask)
895{ 939{
940 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
896 unsigned long flags; 941 unsigned long flags;
897 int r; 942 int r;
898 943
899 spin_lock_irqsave(&dsi.irq_lock, flags); 944 spin_lock_irqsave(&dsi->irq_lock, flags);
900 945
901 r = _dsi_register_isr(isr, arg, mask, 946 r = _dsi_register_isr(isr, arg, mask,
902 dsi.isr_tables.isr_table_vc[channel], 947 dsi->isr_tables.isr_table_vc[channel],
903 ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); 948 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
904 949
905 if (r == 0) 950 if (r == 0)
906 _omap_dsi_set_irqs_vc(dsidev, channel); 951 _omap_dsi_set_irqs_vc(dsidev, channel);
907 952
908 spin_unlock_irqrestore(&dsi.irq_lock, flags); 953 spin_unlock_irqrestore(&dsi->irq_lock, flags);
909 954
910 return r; 955 return r;
911} 956}
@@ -913,19 +958,20 @@ static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
913static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel, 958static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
914 omap_dsi_isr_t isr, void *arg, u32 mask) 959 omap_dsi_isr_t isr, void *arg, u32 mask)
915{ 960{
961 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
916 unsigned long flags; 962 unsigned long flags;
917 int r; 963 int r;
918 964
919 spin_lock_irqsave(&dsi.irq_lock, flags); 965 spin_lock_irqsave(&dsi->irq_lock, flags);
920 966
921 r = _dsi_unregister_isr(isr, arg, mask, 967 r = _dsi_unregister_isr(isr, arg, mask,
922 dsi.isr_tables.isr_table_vc[channel], 968 dsi->isr_tables.isr_table_vc[channel],
923 ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); 969 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
924 970
925 if (r == 0) 971 if (r == 0)
926 _omap_dsi_set_irqs_vc(dsidev, channel); 972 _omap_dsi_set_irqs_vc(dsidev, channel);
927 973
928 spin_unlock_irqrestore(&dsi.irq_lock, flags); 974 spin_unlock_irqrestore(&dsi->irq_lock, flags);
929 975
930 return r; 976 return r;
931} 977}
@@ -933,18 +979,19 @@ static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
933static int dsi_register_isr_cio(struct platform_device *dsidev, 979static int dsi_register_isr_cio(struct platform_device *dsidev,
934 omap_dsi_isr_t isr, void *arg, u32 mask) 980 omap_dsi_isr_t isr, void *arg, u32 mask)
935{ 981{
982 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
936 unsigned long flags; 983 unsigned long flags;
937 int r; 984 int r;
938 985
939 spin_lock_irqsave(&dsi.irq_lock, flags); 986 spin_lock_irqsave(&dsi->irq_lock, flags);
940 987
941 r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio, 988 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
942 ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); 989 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
943 990
944 if (r == 0) 991 if (r == 0)
945 _omap_dsi_set_irqs_cio(dsidev); 992 _omap_dsi_set_irqs_cio(dsidev);
946 993
947 spin_unlock_irqrestore(&dsi.irq_lock, flags); 994 spin_unlock_irqrestore(&dsi->irq_lock, flags);
948 995
949 return r; 996 return r;
950} 997}
@@ -952,30 +999,32 @@ static int dsi_register_isr_cio(struct platform_device *dsidev,
952static int dsi_unregister_isr_cio(struct platform_device *dsidev, 999static int dsi_unregister_isr_cio(struct platform_device *dsidev,
953 omap_dsi_isr_t isr, void *arg, u32 mask) 1000 omap_dsi_isr_t isr, void *arg, u32 mask)
954{ 1001{
1002 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
955 unsigned long flags; 1003 unsigned long flags;
956 int r; 1004 int r;
957 1005
958 spin_lock_irqsave(&dsi.irq_lock, flags); 1006 spin_lock_irqsave(&dsi->irq_lock, flags);
959 1007
960 r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio, 1008 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
961 ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); 1009 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
962 1010
963 if (r == 0) 1011 if (r == 0)
964 _omap_dsi_set_irqs_cio(dsidev); 1012 _omap_dsi_set_irqs_cio(dsidev);
965 1013
966 spin_unlock_irqrestore(&dsi.irq_lock, flags); 1014 spin_unlock_irqrestore(&dsi->irq_lock, flags);
967 1015
968 return r; 1016 return r;
969} 1017}
970 1018
971static u32 dsi_get_errors(struct platform_device *dsidev) 1019static u32 dsi_get_errors(struct platform_device *dsidev)
972{ 1020{
1021 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
973 unsigned long flags; 1022 unsigned long flags;
974 u32 e; 1023 u32 e;
975 spin_lock_irqsave(&dsi.errors_lock, flags); 1024 spin_lock_irqsave(&dsi->errors_lock, flags);
976 e = dsi.errors; 1025 e = dsi->errors;
977 dsi.errors = 0; 1026 dsi->errors = 0;
978 spin_unlock_irqrestore(&dsi.errors_lock, flags); 1027 spin_unlock_irqrestore(&dsi->errors_lock, flags);
979 return e; 1028 return e;
980} 1029}
981 1030
@@ -992,12 +1041,14 @@ static inline void enable_clocks(bool enable)
992static inline void dsi_enable_pll_clock(struct platform_device *dsidev, 1041static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
993 bool enable) 1042 bool enable)
994{ 1043{
1044 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1045
995 if (enable) 1046 if (enable)
996 dss_clk_enable(DSS_CLK_SYSCK); 1047 dss_clk_enable(DSS_CLK_SYSCK);
997 else 1048 else
998 dss_clk_disable(DSS_CLK_SYSCK); 1049 dss_clk_disable(DSS_CLK_SYSCK);
999 1050
1000 if (enable && dsi.pll_locked) { 1051 if (enable && dsi->pll_locked) {
1001 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) 1052 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
1002 DSSERR("cannot lock PLL when enabling clocks\n"); 1053 DSSERR("cannot lock PLL when enabling clocks\n");
1003 } 1054 }
@@ -1065,17 +1116,23 @@ static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
1065 1116
1066unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) 1117unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
1067{ 1118{
1068 return dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk; 1119 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1120
1121 return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
1069} 1122}
1070 1123
1071static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev) 1124static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
1072{ 1125{
1073 return dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk; 1126 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1127
1128 return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
1074} 1129}
1075 1130
1076static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev) 1131static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1077{ 1132{
1078 return dsi.current_cinfo.clkin4ddr / 16; 1133 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1134
1135 return dsi->current_cinfo.clkin4ddr / 16;
1079} 1136}
1080 1137
1081static unsigned long dsi_fclk_rate(struct platform_device *dsidev) 1138static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
@@ -1096,13 +1153,14 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1096static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev) 1153static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1097{ 1154{
1098 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 1155 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1156 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1099 unsigned long dsi_fclk; 1157 unsigned long dsi_fclk;
1100 unsigned lp_clk_div; 1158 unsigned lp_clk_div;
1101 unsigned long lp_clk; 1159 unsigned long lp_clk;
1102 1160
1103 lp_clk_div = dssdev->clocks.dsi.lp_clk_div; 1161 lp_clk_div = dssdev->clocks.dsi.lp_clk_div;
1104 1162
1105 if (lp_clk_div == 0 || lp_clk_div > dsi.lpdiv_max) 1163 if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
1106 return -EINVAL; 1164 return -EINVAL;
1107 1165
1108 dsi_fclk = dsi_fclk_rate(dsidev); 1166 dsi_fclk = dsi_fclk_rate(dsidev);
@@ -1110,8 +1168,8 @@ static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1110 lp_clk = dsi_fclk / 2 / lp_clk_div; 1168 lp_clk = dsi_fclk / 2 / lp_clk_div;
1111 1169
1112 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk); 1170 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
1113 dsi.current_cinfo.lp_clk = lp_clk; 1171 dsi->current_cinfo.lp_clk = lp_clk;
1114 dsi.current_cinfo.lp_clk_div = lp_clk_div; 1172 dsi->current_cinfo.lp_clk_div = lp_clk_div;
1115 1173
1116 /* LP_CLK_DIVISOR */ 1174 /* LP_CLK_DIVISOR */
1117 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0); 1175 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
@@ -1124,14 +1182,18 @@ static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1124 1182
1125static void dsi_enable_scp_clk(struct platform_device *dsidev) 1183static void dsi_enable_scp_clk(struct platform_device *dsidev)
1126{ 1184{
1127 if (dsi.scp_clk_refcount++ == 0) 1185 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1186
1187 if (dsi->scp_clk_refcount++ == 0)
1128 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */ 1188 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
1129} 1189}
1130 1190
1131static void dsi_disable_scp_clk(struct platform_device *dsidev) 1191static void dsi_disable_scp_clk(struct platform_device *dsidev)
1132{ 1192{
1133 WARN_ON(dsi.scp_clk_refcount == 0); 1193 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1134 if (--dsi.scp_clk_refcount == 0) 1194
1195 WARN_ON(dsi->scp_clk_refcount == 0);
1196 if (--dsi->scp_clk_refcount == 0)
1135 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */ 1197 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
1136} 1198}
1137 1199
@@ -1172,16 +1234,19 @@ static int dsi_pll_power(struct platform_device *dsidev,
1172static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, 1234static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1173 struct dsi_clock_info *cinfo) 1235 struct dsi_clock_info *cinfo)
1174{ 1236{
1175 if (cinfo->regn == 0 || cinfo->regn > dsi.regn_max) 1237 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1238 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1239
1240 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
1176 return -EINVAL; 1241 return -EINVAL;
1177 1242
1178 if (cinfo->regm == 0 || cinfo->regm > dsi.regm_max) 1243 if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
1179 return -EINVAL; 1244 return -EINVAL;
1180 1245
1181 if (cinfo->regm_dispc > dsi.regm_dispc_max) 1246 if (cinfo->regm_dispc > dsi->regm_dispc_max)
1182 return -EINVAL; 1247 return -EINVAL;
1183 1248
1184 if (cinfo->regm_dsi > dsi.regm_dsi_max) 1249 if (cinfo->regm_dsi > dsi->regm_dsi_max)
1185 return -EINVAL; 1250 return -EINVAL;
1186 1251
1187 if (cinfo->use_sys_clk) { 1252 if (cinfo->use_sys_clk) {
@@ -1200,7 +1265,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1200 1265
1201 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1)); 1266 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
1202 1267
1203 if (cinfo->fint > dsi.fint_max || cinfo->fint < dsi.fint_min) 1268 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1204 return -EINVAL; 1269 return -EINVAL;
1205 1270
1206 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint; 1271 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
@@ -1227,6 +1292,7 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
1227 unsigned long req_pck, struct dsi_clock_info *dsi_cinfo, 1292 unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
1228 struct dispc_clock_info *dispc_cinfo) 1293 struct dispc_clock_info *dispc_cinfo)
1229{ 1294{
1295 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1230 struct dsi_clock_info cur, best; 1296 struct dsi_clock_info cur, best;
1231 struct dispc_clock_info best_dispc; 1297 struct dispc_clock_info best_dispc;
1232 int min_fck_per_pck; 1298 int min_fck_per_pck;
@@ -1237,10 +1303,10 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
1237 1303
1238 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); 1304 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1239 1305
1240 if (req_pck == dsi.cache_req_pck && 1306 if (req_pck == dsi->cache_req_pck &&
1241 dsi.cache_cinfo.clkin == dss_sys_clk) { 1307 dsi->cache_cinfo.clkin == dss_sys_clk) {
1242 DSSDBG("DSI clock info found from cache\n"); 1308 DSSDBG("DSI clock info found from cache\n");
1243 *dsi_cinfo = dsi.cache_cinfo; 1309 *dsi_cinfo = dsi->cache_cinfo;
1244 dispc_find_clk_divs(is_tft, req_pck, 1310 dispc_find_clk_divs(is_tft, req_pck,
1245 dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo); 1311 dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
1246 return 0; 1312 return 0;
@@ -1270,17 +1336,17 @@ retry:
1270 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */ 1336 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
1271 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */ 1337 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
1272 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */ 1338 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
1273 for (cur.regn = 1; cur.regn < dsi.regn_max; ++cur.regn) { 1339 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1274 if (cur.highfreq == 0) 1340 if (cur.highfreq == 0)
1275 cur.fint = cur.clkin / cur.regn; 1341 cur.fint = cur.clkin / cur.regn;
1276 else 1342 else
1277 cur.fint = cur.clkin / (2 * cur.regn); 1343 cur.fint = cur.clkin / (2 * cur.regn);
1278 1344
1279 if (cur.fint > dsi.fint_max || cur.fint < dsi.fint_min) 1345 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1280 continue; 1346 continue;
1281 1347
1282 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */ 1348 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
1283 for (cur.regm = 1; cur.regm < dsi.regm_max; ++cur.regm) { 1349 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1284 unsigned long a, b; 1350 unsigned long a, b;
1285 1351
1286 a = 2 * cur.regm * (cur.clkin/1000); 1352 a = 2 * cur.regm * (cur.clkin/1000);
@@ -1292,8 +1358,8 @@ retry:
1292 1358
1293 /* dsi_pll_hsdiv_dispc_clk(MHz) = 1359 /* dsi_pll_hsdiv_dispc_clk(MHz) =
1294 * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */ 1360 * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */
1295 for (cur.regm_dispc = 1; cur.regm_dispc < dsi.regm_dispc_max; 1361 for (cur.regm_dispc = 1; cur.regm_dispc <
1296 ++cur.regm_dispc) { 1362 dsi->regm_dispc_max; ++cur.regm_dispc) {
1297 struct dispc_clock_info cur_dispc; 1363 struct dispc_clock_info cur_dispc;
1298 cur.dsi_pll_hsdiv_dispc_clk = 1364 cur.dsi_pll_hsdiv_dispc_clk =
1299 cur.clkin4ddr / cur.regm_dispc; 1365 cur.clkin4ddr / cur.regm_dispc;
@@ -1353,9 +1419,9 @@ found:
1353 if (dispc_cinfo) 1419 if (dispc_cinfo)
1354 *dispc_cinfo = best_dispc; 1420 *dispc_cinfo = best_dispc;
1355 1421
1356 dsi.cache_req_pck = req_pck; 1422 dsi->cache_req_pck = req_pck;
1357 dsi.cache_clk_freq = 0; 1423 dsi->cache_clk_freq = 0;
1358 dsi.cache_cinfo = best; 1424 dsi->cache_cinfo = best;
1359 1425
1360 return 0; 1426 return 0;
1361} 1427}
@@ -1363,6 +1429,7 @@ found:
1363int dsi_pll_set_clock_div(struct platform_device *dsidev, 1429int dsi_pll_set_clock_div(struct platform_device *dsidev,
1364 struct dsi_clock_info *cinfo) 1430 struct dsi_clock_info *cinfo)
1365{ 1431{
1432 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1366 int r = 0; 1433 int r = 0;
1367 u32 l; 1434 u32 l;
1368 int f = 0; 1435 int f = 0;
@@ -1371,20 +1438,20 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1371 1438
1372 DSSDBGF(); 1439 DSSDBGF();
1373 1440
1374 dsi.current_cinfo.use_sys_clk = cinfo->use_sys_clk; 1441 dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
1375 dsi.current_cinfo.highfreq = cinfo->highfreq; 1442 dsi->current_cinfo.highfreq = cinfo->highfreq;
1376 1443
1377 dsi.current_cinfo.fint = cinfo->fint; 1444 dsi->current_cinfo.fint = cinfo->fint;
1378 dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr; 1445 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1379 dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk = 1446 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
1380 cinfo->dsi_pll_hsdiv_dispc_clk; 1447 cinfo->dsi_pll_hsdiv_dispc_clk;
1381 dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk = 1448 dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
1382 cinfo->dsi_pll_hsdiv_dsi_clk; 1449 cinfo->dsi_pll_hsdiv_dsi_clk;
1383 1450
1384 dsi.current_cinfo.regn = cinfo->regn; 1451 dsi->current_cinfo.regn = cinfo->regn;
1385 dsi.current_cinfo.regm = cinfo->regm; 1452 dsi->current_cinfo.regm = cinfo->regm;
1386 dsi.current_cinfo.regm_dispc = cinfo->regm_dispc; 1453 dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
1387 dsi.current_cinfo.regm_dsi = cinfo->regm_dsi; 1454 dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;
1388 1455
1389 DSSDBG("DSI Fint %ld\n", cinfo->fint); 1456 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1390 1457
@@ -1439,7 +1506,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1439 regm_dsi_start, regm_dsi_end); 1506 regm_dsi_start, regm_dsi_end);
1440 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l); 1507 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);
1441 1508
1442 BUG_ON(cinfo->fint < dsi.fint_min || cinfo->fint > dsi.fint_max); 1509 BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
1443 1510
1444 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) { 1511 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
1445 f = cinfo->fint < 1000000 ? 0x3 : 1512 f = cinfo->fint < 1000000 ? 0x3 :
@@ -1476,7 +1543,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1476 goto err; 1543 goto err;
1477 } 1544 }
1478 1545
1479 dsi.pll_locked = 1; 1546 dsi->pll_locked = 1;
1480 1547
1481 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2); 1548 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1482 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */ 1549 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
@@ -1503,22 +1570,23 @@ err:
1503int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, 1570int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1504 bool enable_hsdiv) 1571 bool enable_hsdiv)
1505{ 1572{
1573 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1506 int r = 0; 1574 int r = 0;
1507 enum dsi_pll_power_state pwstate; 1575 enum dsi_pll_power_state pwstate;
1508 1576
1509 DSSDBG("PLL init\n"); 1577 DSSDBG("PLL init\n");
1510 1578
1511 if (dsi.vdds_dsi_reg == NULL) { 1579 if (dsi->vdds_dsi_reg == NULL) {
1512 struct regulator *vdds_dsi; 1580 struct regulator *vdds_dsi;
1513 1581
1514 vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi"); 1582 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
1515 1583
1516 if (IS_ERR(vdds_dsi)) { 1584 if (IS_ERR(vdds_dsi)) {
1517 DSSERR("can't get VDDS_DSI regulator\n"); 1585 DSSERR("can't get VDDS_DSI regulator\n");
1518 return PTR_ERR(vdds_dsi); 1586 return PTR_ERR(vdds_dsi);
1519 } 1587 }
1520 1588
1521 dsi.vdds_dsi_reg = vdds_dsi; 1589 dsi->vdds_dsi_reg = vdds_dsi;
1522 } 1590 }
1523 1591
1524 enable_clocks(1); 1592 enable_clocks(1);
@@ -1528,11 +1596,11 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1528 */ 1596 */
1529 dsi_enable_scp_clk(dsidev); 1597 dsi_enable_scp_clk(dsidev);
1530 1598
1531 if (!dsi.vdds_dsi_enabled) { 1599 if (!dsi->vdds_dsi_enabled) {
1532 r = regulator_enable(dsi.vdds_dsi_reg); 1600 r = regulator_enable(dsi->vdds_dsi_reg);
1533 if (r) 1601 if (r)
1534 goto err0; 1602 goto err0;
1535 dsi.vdds_dsi_enabled = true; 1603 dsi->vdds_dsi_enabled = true;
1536 } 1604 }
1537 1605
1538 /* XXX PLL does not come out of reset without this... */ 1606 /* XXX PLL does not come out of reset without this... */
@@ -1567,9 +1635,9 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1567 1635
1568 return 0; 1636 return 0;
1569err1: 1637err1:
1570 if (dsi.vdds_dsi_enabled) { 1638 if (dsi->vdds_dsi_enabled) {
1571 regulator_disable(dsi.vdds_dsi_reg); 1639 regulator_disable(dsi->vdds_dsi_reg);
1572 dsi.vdds_dsi_enabled = false; 1640 dsi->vdds_dsi_enabled = false;
1573 } 1641 }
1574err0: 1642err0:
1575 dsi_disable_scp_clk(dsidev); 1643 dsi_disable_scp_clk(dsidev);
@@ -1580,12 +1648,14 @@ err0:
1580 1648
1581void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes) 1649void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1582{ 1650{
1583 dsi.pll_locked = 0; 1651 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1652
1653 dsi->pll_locked = 0;
1584 dsi_pll_power(dsidev, DSI_PLL_POWER_OFF); 1654 dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
1585 if (disconnect_lanes) { 1655 if (disconnect_lanes) {
1586 WARN_ON(!dsi.vdds_dsi_enabled); 1656 WARN_ON(!dsi->vdds_dsi_enabled);
1587 regulator_disable(dsi.vdds_dsi_reg); 1657 regulator_disable(dsi->vdds_dsi_reg);
1588 dsi.vdds_dsi_enabled = false; 1658 dsi->vdds_dsi_enabled = false;
1589 } 1659 }
1590 1660
1591 dsi_disable_scp_clk(dsidev); 1661 dsi_disable_scp_clk(dsidev);
@@ -1598,7 +1668,8 @@ void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1598void dsi_dump_clocks(struct seq_file *s) 1668void dsi_dump_clocks(struct seq_file *s)
1599{ 1669{
1600 struct platform_device *dsidev = dsi_get_dsidev_from_id(0); 1670 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1601 struct dsi_clock_info *cinfo = &dsi.current_cinfo; 1671 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1672 struct dsi_clock_info *cinfo = &dsi->current_cinfo;
1602 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; 1673 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1603 1674
1604 dispc_clk_src = dss_get_dispc_clk_source(); 1675 dispc_clk_src = dss_get_dispc_clk_source();
@@ -1658,16 +1729,18 @@ void dsi_dump_clocks(struct seq_file *s)
1658#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 1729#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1659void dsi_dump_irqs(struct seq_file *s) 1730void dsi_dump_irqs(struct seq_file *s)
1660{ 1731{
1732 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1733 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1661 unsigned long flags; 1734 unsigned long flags;
1662 struct dsi_irq_stats stats; 1735 struct dsi_irq_stats stats;
1663 1736
1664 spin_lock_irqsave(&dsi.irq_stats_lock, flags); 1737 spin_lock_irqsave(&dsi->irq_stats_lock, flags);
1665 1738
1666 stats = dsi.irq_stats; 1739 stats = dsi->irq_stats;
1667 memset(&dsi.irq_stats, 0, sizeof(dsi.irq_stats)); 1740 memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
1668 dsi.irq_stats.last_reset = jiffies; 1741 dsi->irq_stats.last_reset = jiffies;
1669 1742
1670 spin_unlock_irqrestore(&dsi.irq_stats_lock, flags); 1743 spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
1671 1744
1672 seq_printf(s, "period %u ms\n", 1745 seq_printf(s, "period %u ms\n",
1673 jiffies_to_msecs(jiffies - stats.last_reset)); 1746 jiffies_to_msecs(jiffies - stats.last_reset));
@@ -1898,14 +1971,18 @@ static void dsi_set_lane_config(struct omap_dss_device *dssdev)
1898 1971
1899static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns) 1972static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
1900{ 1973{
1974 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1975
1901 /* convert time in ns to ddr ticks, rounding up */ 1976 /* convert time in ns to ddr ticks, rounding up */
1902 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; 1977 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
1903 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000; 1978 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
1904} 1979}
1905 1980
1906static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr) 1981static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
1907{ 1982{
1908 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; 1983 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1984
1985 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
1909 return ddr * 1000 * 1000 / (ddr_clk / 1000); 1986 return ddr * 1000 * 1000 / (ddr_clk / 1000);
1910} 1987}
1911 1988
@@ -2097,13 +2174,14 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
2097static int dsi_cio_init(struct omap_dss_device *dssdev) 2174static int dsi_cio_init(struct omap_dss_device *dssdev)
2098{ 2175{
2099 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 2176 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2177 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2100 int r; 2178 int r;
2101 u32 l; 2179 u32 l;
2102 2180
2103 DSSDBGF(); 2181 DSSDBGF();
2104 2182
2105 if (dsi.dsi_mux_pads) 2183 if (dsi->dsi_mux_pads)
2106 dsi.dsi_mux_pads(true); 2184 dsi->dsi_mux_pads(true);
2107 2185
2108 dsi_enable_scp_clk(dsidev); 2186 dsi_enable_scp_clk(dsidev);
2109 2187
@@ -2128,7 +2206,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
2128 l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */ 2206 l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
2129 dsi_write_reg(dsidev, DSI_TIMING1, l); 2207 dsi_write_reg(dsidev, DSI_TIMING1, l);
2130 2208
2131 if (dsi.ulps_enabled) { 2209 if (dsi->ulps_enabled) {
2132 DSSDBG("manual ulps exit\n"); 2210 DSSDBG("manual ulps exit\n");
2133 2211
2134 /* ULPS is exited by Mark-1 state for 1ms, followed by 2212 /* ULPS is exited by Mark-1 state for 1ms, followed by
@@ -2161,7 +2239,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
2161 if (r) 2239 if (r)
2162 goto err_tx_clk_esc_rst; 2240 goto err_tx_clk_esc_rst;
2163 2241
2164 if (dsi.ulps_enabled) { 2242 if (dsi->ulps_enabled) {
2165 /* Keep Mark-1 state for 1ms (as per DSI spec) */ 2243 /* Keep Mark-1 state for 1ms (as per DSI spec) */
2166 ktime_t wait = ns_to_ktime(1000 * 1000); 2244 ktime_t wait = ns_to_ktime(1000 * 1000);
2167 set_current_state(TASK_UNINTERRUPTIBLE); 2245 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -2177,7 +2255,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
2177 2255
2178 dsi_cio_timings(dsidev); 2256 dsi_cio_timings(dsidev);
2179 2257
2180 dsi.ulps_enabled = false; 2258 dsi->ulps_enabled = false;
2181 2259
2182 DSSDBG("CIO init done\n"); 2260 DSSDBG("CIO init done\n");
2183 2261
@@ -2188,21 +2266,23 @@ err_tx_clk_esc_rst:
2188err_cio_pwr_dom: 2266err_cio_pwr_dom:
2189 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); 2267 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2190err_cio_pwr: 2268err_cio_pwr:
2191 if (dsi.ulps_enabled) 2269 if (dsi->ulps_enabled)
2192 dsi_cio_disable_lane_override(dsidev); 2270 dsi_cio_disable_lane_override(dsidev);
2193err_scp_clk_dom: 2271err_scp_clk_dom:
2194 dsi_disable_scp_clk(dsidev); 2272 dsi_disable_scp_clk(dsidev);
2195 if (dsi.dsi_mux_pads) 2273 if (dsi->dsi_mux_pads)
2196 dsi.dsi_mux_pads(false); 2274 dsi->dsi_mux_pads(false);
2197 return r; 2275 return r;
2198} 2276}
2199 2277
2200static void dsi_cio_uninit(struct platform_device *dsidev) 2278static void dsi_cio_uninit(struct platform_device *dsidev)
2201{ 2279{
2280 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2281
2202 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); 2282 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2203 dsi_disable_scp_clk(dsidev); 2283 dsi_disable_scp_clk(dsidev);
2204 if (dsi.dsi_mux_pads) 2284 if (dsi->dsi_mux_pads)
2205 dsi.dsi_mux_pads(false); 2285 dsi->dsi_mux_pads(false);
2206} 2286}
2207 2287
2208static int _dsi_wait_reset(struct platform_device *dsidev) 2288static int _dsi_wait_reset(struct platform_device *dsidev)
@@ -2231,18 +2311,19 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
2231 enum fifo_size size1, enum fifo_size size2, 2311 enum fifo_size size1, enum fifo_size size2,
2232 enum fifo_size size3, enum fifo_size size4) 2312 enum fifo_size size3, enum fifo_size size4)
2233{ 2313{
2314 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2234 u32 r = 0; 2315 u32 r = 0;
2235 int add = 0; 2316 int add = 0;
2236 int i; 2317 int i;
2237 2318
2238 dsi.vc[0].fifo_size = size1; 2319 dsi->vc[0].fifo_size = size1;
2239 dsi.vc[1].fifo_size = size2; 2320 dsi->vc[1].fifo_size = size2;
2240 dsi.vc[2].fifo_size = size3; 2321 dsi->vc[2].fifo_size = size3;
2241 dsi.vc[3].fifo_size = size4; 2322 dsi->vc[3].fifo_size = size4;
2242 2323
2243 for (i = 0; i < 4; i++) { 2324 for (i = 0; i < 4; i++) {
2244 u8 v; 2325 u8 v;
2245 int size = dsi.vc[i].fifo_size; 2326 int size = dsi->vc[i].fifo_size;
2246 2327
2247 if (add + size > 4) { 2328 if (add + size > 4) {
2248 DSSERR("Illegal FIFO configuration\n"); 2329 DSSERR("Illegal FIFO configuration\n");
@@ -2262,18 +2343,19 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
2262 enum fifo_size size1, enum fifo_size size2, 2343 enum fifo_size size1, enum fifo_size size2,
2263 enum fifo_size size3, enum fifo_size size4) 2344 enum fifo_size size3, enum fifo_size size4)
2264{ 2345{
2346 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2265 u32 r = 0; 2347 u32 r = 0;
2266 int add = 0; 2348 int add = 0;
2267 int i; 2349 int i;
2268 2350
2269 dsi.vc[0].fifo_size = size1; 2351 dsi->vc[0].fifo_size = size1;
2270 dsi.vc[1].fifo_size = size2; 2352 dsi->vc[1].fifo_size = size2;
2271 dsi.vc[2].fifo_size = size3; 2353 dsi->vc[2].fifo_size = size3;
2272 dsi.vc[3].fifo_size = size4; 2354 dsi->vc[3].fifo_size = size4;
2273 2355
2274 for (i = 0; i < 4; i++) { 2356 for (i = 0; i < 4; i++) {
2275 u8 v; 2357 u8 v;
2276 int size = dsi.vc[i].fifo_size; 2358 int size = dsi->vc[i].fifo_size;
2277 2359
2278 if (add + size > 4) { 2360 if (add + size > 4) {
2279 DSSERR("Illegal FIFO configuration\n"); 2361 DSSERR("Illegal FIFO configuration\n");
@@ -2313,8 +2395,9 @@ static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
2313static void dsi_packet_sent_handler_vp(void *data, u32 mask) 2395static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2314{ 2396{
2315 struct platform_device *dsidev = dsi_get_dsidev_from_id(0); 2397 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
2316 const int channel = dsi.update_channel; 2398 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2317 u8 bit = dsi.te_enabled ? 30 : 31; 2399 const int channel = dsi->update_channel;
2400 u8 bit = dsi->te_enabled ? 30 : 31;
2318 2401
2319 if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit) == 0) 2402 if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit) == 0)
2320 complete((struct completion *)data); 2403 complete((struct completion *)data);
@@ -2322,12 +2405,13 @@ static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2322 2405
2323static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel) 2406static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2324{ 2407{
2408 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2325 int r = 0; 2409 int r = 0;
2326 u8 bit; 2410 u8 bit;
2327 2411
2328 DECLARE_COMPLETION_ONSTACK(completion); 2412 DECLARE_COMPLETION_ONSTACK(completion);
2329 2413
2330 bit = dsi.te_enabled ? 30 : 31; 2414 bit = dsi->te_enabled ? 30 : 31;
2331 2415
2332 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, 2416 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2333 &completion, DSI_VC_IRQ_PACKET_SENT); 2417 &completion, DSI_VC_IRQ_PACKET_SENT);
@@ -2358,7 +2442,8 @@ err0:
2358static void dsi_packet_sent_handler_l4(void *data, u32 mask) 2442static void dsi_packet_sent_handler_l4(void *data, u32 mask)
2359{ 2443{
2360 struct platform_device *dsidev = dsi_get_dsidev_from_id(0); 2444 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
2361 const int channel = dsi.update_channel; 2445 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2446 const int channel = dsi->update_channel;
2362 2447
2363 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5) == 0) 2448 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
2364 complete((struct completion *)data); 2449 complete((struct completion *)data);
@@ -2398,6 +2483,8 @@ err0:
2398 2483
2399static int dsi_sync_vc(struct platform_device *dsidev, int channel) 2484static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2400{ 2485{
2486 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2487
2401 WARN_ON(!dsi_bus_is_locked(dsidev)); 2488 WARN_ON(!dsi_bus_is_locked(dsidev));
2402 2489
2403 WARN_ON(in_interrupt()); 2490 WARN_ON(in_interrupt());
@@ -2405,7 +2492,7 @@ static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2405 if (!dsi_vc_is_enabled(dsidev, channel)) 2492 if (!dsi_vc_is_enabled(dsidev, channel))
2406 return 0; 2493 return 0;
2407 2494
2408 switch (dsi.vc[channel].mode) { 2495 switch (dsi->vc[channel].mode) {
2409 case DSI_VC_MODE_VP: 2496 case DSI_VC_MODE_VP:
2410 return dsi_sync_vc_vp(dsidev, channel); 2497 return dsi_sync_vc_vp(dsidev, channel);
2411 case DSI_VC_MODE_L4: 2498 case DSI_VC_MODE_L4:
@@ -2464,7 +2551,9 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
2464 2551
2465static int dsi_vc_config_l4(struct platform_device *dsidev, int channel) 2552static int dsi_vc_config_l4(struct platform_device *dsidev, int channel)
2466{ 2553{
2467 if (dsi.vc[channel].mode == DSI_VC_MODE_L4) 2554 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2555
2556 if (dsi->vc[channel].mode == DSI_VC_MODE_L4)
2468 return 0; 2557 return 0;
2469 2558
2470 DSSDBGF("%d", channel); 2559 DSSDBGF("%d", channel);
@@ -2487,14 +2576,16 @@ static int dsi_vc_config_l4(struct platform_device *dsidev, int channel)
2487 2576
2488 dsi_vc_enable(dsidev, channel, 1); 2577 dsi_vc_enable(dsidev, channel, 1);
2489 2578
2490 dsi.vc[channel].mode = DSI_VC_MODE_L4; 2579 dsi->vc[channel].mode = DSI_VC_MODE_L4;
2491 2580
2492 return 0; 2581 return 0;
2493} 2582}
2494 2583
2495static int dsi_vc_config_vp(struct platform_device *dsidev, int channel) 2584static int dsi_vc_config_vp(struct platform_device *dsidev, int channel)
2496{ 2585{
2497 if (dsi.vc[channel].mode == DSI_VC_MODE_VP) 2586 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2587
2588 if (dsi->vc[channel].mode == DSI_VC_MODE_VP)
2498 return 0; 2589 return 0;
2499 2590
2500 DSSDBGF("%d", channel); 2591 DSSDBGF("%d", channel);
@@ -2518,7 +2609,7 @@ static int dsi_vc_config_vp(struct platform_device *dsidev, int channel)
2518 2609
2519 dsi_vc_enable(dsidev, channel, 1); 2610 dsi_vc_enable(dsidev, channel, 1);
2520 2611
2521 dsi.vc[channel].mode = DSI_VC_MODE_VP; 2612 dsi->vc[channel].mode = DSI_VC_MODE_VP;
2522 2613
2523 return 0; 2614 return 0;
2524} 2615}
@@ -2627,7 +2718,9 @@ static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
2627 2718
2628static int dsi_vc_send_bta(struct platform_device *dsidev, int channel) 2719static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
2629{ 2720{
2630 if (dsi.debug_write || dsi.debug_read) 2721 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2722
2723 if (dsi->debug_write || dsi->debug_read)
2631 DSSDBG("dsi_vc_send_bta %d\n", channel); 2724 DSSDBG("dsi_vc_send_bta %d\n", channel);
2632 2725
2633 WARN_ON(!dsi_bus_is_locked(dsidev)); 2726 WARN_ON(!dsi_bus_is_locked(dsidev));
@@ -2691,12 +2784,13 @@ EXPORT_SYMBOL(dsi_vc_send_bta_sync);
2691static inline void dsi_vc_write_long_header(struct platform_device *dsidev, 2784static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
2692 int channel, u8 data_type, u16 len, u8 ecc) 2785 int channel, u8 data_type, u16 len, u8 ecc)
2693{ 2786{
2787 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2694 u32 val; 2788 u32 val;
2695 u8 data_id; 2789 u8 data_id;
2696 2790
2697 WARN_ON(!dsi_bus_is_locked(dsidev)); 2791 WARN_ON(!dsi_bus_is_locked(dsidev));
2698 2792
2699 data_id = data_type | dsi.vc[channel].vc_id << 6; 2793 data_id = data_type | dsi->vc[channel].vc_id << 6;
2700 2794
2701 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) | 2795 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
2702 FLD_VAL(ecc, 31, 24); 2796 FLD_VAL(ecc, 31, 24);
@@ -2721,16 +2815,17 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2721 u8 data_type, u8 *data, u16 len, u8 ecc) 2815 u8 data_type, u8 *data, u16 len, u8 ecc)
2722{ 2816{
2723 /*u32 val; */ 2817 /*u32 val; */
2818 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2724 int i; 2819 int i;
2725 u8 *p; 2820 u8 *p;
2726 int r = 0; 2821 int r = 0;
2727 u8 b1, b2, b3, b4; 2822 u8 b1, b2, b3, b4;
2728 2823
2729 if (dsi.debug_write) 2824 if (dsi->debug_write)
2730 DSSDBG("dsi_vc_send_long, %d bytes\n", len); 2825 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
2731 2826
2732 /* len + header */ 2827 /* len + header */
2733 if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) { 2828 if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
2734 DSSERR("unable to send long packet: packet too long.\n"); 2829 DSSERR("unable to send long packet: packet too long.\n");
2735 return -EINVAL; 2830 return -EINVAL;
2736 } 2831 }
@@ -2741,7 +2836,7 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2741 2836
2742 p = data; 2837 p = data;
2743 for (i = 0; i < len >> 2; i++) { 2838 for (i = 0; i < len >> 2; i++) {
2744 if (dsi.debug_write) 2839 if (dsi->debug_write)
2745 DSSDBG("\tsending full packet %d\n", i); 2840 DSSDBG("\tsending full packet %d\n", i);
2746 2841
2747 b1 = *p++; 2842 b1 = *p++;
@@ -2756,7 +2851,7 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2756 if (i) { 2851 if (i) {
2757 b1 = 0; b2 = 0; b3 = 0; 2852 b1 = 0; b2 = 0; b3 = 0;
2758 2853
2759 if (dsi.debug_write) 2854 if (dsi->debug_write)
2760 DSSDBG("\tsending remainder bytes %d\n", i); 2855 DSSDBG("\tsending remainder bytes %d\n", i);
2761 2856
2762 switch (i) { 2857 switch (i) {
@@ -2783,12 +2878,13 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2783static int dsi_vc_send_short(struct platform_device *dsidev, int channel, 2878static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
2784 u8 data_type, u16 data, u8 ecc) 2879 u8 data_type, u16 data, u8 ecc)
2785{ 2880{
2881 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2786 u32 r; 2882 u32 r;
2787 u8 data_id; 2883 u8 data_id;
2788 2884
2789 WARN_ON(!dsi_bus_is_locked(dsidev)); 2885 WARN_ON(!dsi_bus_is_locked(dsidev));
2790 2886
2791 if (dsi.debug_write) 2887 if (dsi->debug_write)
2792 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n", 2888 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
2793 channel, 2889 channel,
2794 data_type, data & 0xff, (data >> 8) & 0xff); 2890 data_type, data & 0xff, (data >> 8) & 0xff);
@@ -2800,7 +2896,7 @@ static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
2800 return -EINVAL; 2896 return -EINVAL;
2801 } 2897 }
2802 2898
2803 data_id = data_type | dsi.vc[channel].vc_id << 6; 2899 data_id = data_type | dsi->vc[channel].vc_id << 6;
2804 2900
2805 r = (data_id << 0) | (data << 8) | (ecc << 24); 2901 r = (data_id << 0) | (data << 8) | (ecc << 24);
2806 2902
@@ -2893,11 +2989,12 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
2893 u8 *buf, int buflen) 2989 u8 *buf, int buflen)
2894{ 2990{
2895 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 2991 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2992 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2896 u32 val; 2993 u32 val;
2897 u8 dt; 2994 u8 dt;
2898 int r; 2995 int r;
2899 2996
2900 if (dsi.debug_read) 2997 if (dsi->debug_read)
2901 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd); 2998 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);
2902 2999
2903 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_READ, dcs_cmd, 0); 3000 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_READ, dcs_cmd, 0);
@@ -2916,7 +3013,7 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
2916 } 3013 }
2917 3014
2918 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); 3015 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2919 if (dsi.debug_read) 3016 if (dsi->debug_read)
2920 DSSDBG("\theader: %08x\n", val); 3017 DSSDBG("\theader: %08x\n", val);
2921 dt = FLD_GET(val, 5, 0); 3018 dt = FLD_GET(val, 5, 0);
2922 if (dt == DSI_DT_RX_ACK_WITH_ERR) { 3019 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
@@ -2927,7 +3024,7 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
2927 3024
2928 } else if (dt == DSI_DT_RX_SHORT_READ_1) { 3025 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
2929 u8 data = FLD_GET(val, 15, 8); 3026 u8 data = FLD_GET(val, 15, 8);
2930 if (dsi.debug_read) 3027 if (dsi->debug_read)
2931 DSSDBG("\tDCS short response, 1 byte: %02x\n", data); 3028 DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
2932 3029
2933 if (buflen < 1) { 3030 if (buflen < 1) {
@@ -2940,7 +3037,7 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
2940 return 1; 3037 return 1;
2941 } else if (dt == DSI_DT_RX_SHORT_READ_2) { 3038 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
2942 u16 data = FLD_GET(val, 23, 8); 3039 u16 data = FLD_GET(val, 23, 8);
2943 if (dsi.debug_read) 3040 if (dsi->debug_read)
2944 DSSDBG("\tDCS short response, 2 byte: %04x\n", data); 3041 DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
2945 3042
2946 if (buflen < 2) { 3043 if (buflen < 2) {
@@ -2955,7 +3052,7 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
2955 } else if (dt == DSI_DT_RX_DCS_LONG_READ) { 3052 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2956 int w; 3053 int w;
2957 int len = FLD_GET(val, 23, 8); 3054 int len = FLD_GET(val, 23, 8);
2958 if (dsi.debug_read) 3055 if (dsi->debug_read)
2959 DSSDBG("\tDCS long response, len %d\n", len); 3056 DSSDBG("\tDCS long response, len %d\n", len);
2960 3057
2961 if (len > buflen) { 3058 if (len > buflen) {
@@ -2968,7 +3065,7 @@ int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
2968 int b; 3065 int b;
2969 val = dsi_read_reg(dsidev, 3066 val = dsi_read_reg(dsidev,
2970 DSI_VC_SHORT_PACKET_HEADER(channel)); 3067 DSI_VC_SHORT_PACKET_HEADER(channel));
2971 if (dsi.debug_read) 3068 if (dsi->debug_read)
2972 DSSDBG("\t\t%02x %02x %02x %02x\n", 3069 DSSDBG("\t\t%02x %02x %02x %02x\n",
2973 (val >> 0) & 0xff, 3070 (val >> 0) & 0xff,
2974 (val >> 8) & 0xff, 3071 (val >> 8) & 0xff,
@@ -3049,6 +3146,7 @@ EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
3049 3146
3050static int dsi_enter_ulps(struct platform_device *dsidev) 3147static int dsi_enter_ulps(struct platform_device *dsidev)
3051{ 3148{
3149 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3052 DECLARE_COMPLETION_ONSTACK(completion); 3150 DECLARE_COMPLETION_ONSTACK(completion);
3053 int r; 3151 int r;
3054 3152
@@ -3056,9 +3154,9 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
3056 3154
3057 WARN_ON(!dsi_bus_is_locked(dsidev)); 3155 WARN_ON(!dsi_bus_is_locked(dsidev));
3058 3156
3059 WARN_ON(dsi.ulps_enabled); 3157 WARN_ON(dsi->ulps_enabled);
3060 3158
3061 if (dsi.ulps_enabled) 3159 if (dsi->ulps_enabled)
3062 return 0; 3160 return 0;
3063 3161
3064 if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) { 3162 if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
@@ -3112,7 +3210,7 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
3112 3210
3113 dsi_if_enable(dsidev, false); 3211 dsi_if_enable(dsidev, false);
3114 3212
3115 dsi.ulps_enabled = true; 3213 dsi->ulps_enabled = true;
3116 3214
3117 return 0; 3215 return 0;
3118 3216
@@ -3384,6 +3482,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
3384{ 3482{
3385 /* Note: supports only 24bit colors in 32bit container */ 3483 /* Note: supports only 24bit colors in 32bit container */
3386 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3484 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3485 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3387 int first = 1; 3486 int first = 1;
3388 int fifo_stalls = 0; 3487 int fifo_stalls = 0;
3389 int max_dsi_packet_size; 3488 int max_dsi_packet_size;
@@ -3422,7 +3521,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
3422 * in fifo */ 3521 * in fifo */
3423 3522
3424 /* When using CPU, max long packet size is TX buffer size */ 3523 /* When using CPU, max long packet size is TX buffer size */
3425 max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4; 3524 max_dsi_packet_size = dsi->vc[0].fifo_size * 32 * 4;
3426 3525
3427 /* we seem to get better perf if we divide the tx fifo to half, 3526 /* we seem to get better perf if we divide the tx fifo to half,
3428 and while the other half is being sent, we fill the other half 3527 and while the other half is being sent, we fill the other half
@@ -3518,6 +3617,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3518 u16 x, u16 y, u16 w, u16 h) 3617 u16 x, u16 y, u16 w, u16 h)
3519{ 3618{
3520 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3619 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3620 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3521 unsigned bytespp; 3621 unsigned bytespp;
3522 unsigned bytespl; 3622 unsigned bytespl;
3523 unsigned bytespf; 3623 unsigned bytespf;
@@ -3526,7 +3626,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3526 unsigned packet_len; 3626 unsigned packet_len;
3527 u32 l; 3627 u32 l;
3528 int r; 3628 int r;
3529 const unsigned channel = dsi.update_channel; 3629 const unsigned channel = dsi->update_channel;
3530 /* line buffer is 1024 x 24bits */ 3630 /* line buffer is 1024 x 24bits */
3531 /* XXX: for some reason using full buffer size causes considerable TX 3631 /* XXX: for some reason using full buffer size causes considerable TX
3532 * slowdown with update sizes that fill the whole buffer */ 3632 * slowdown with update sizes that fill the whole buffer */
@@ -3561,7 +3661,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3561 dsi_vc_write_long_header(dsidev, channel, DSI_DT_DCS_LONG_WRITE, 3661 dsi_vc_write_long_header(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
3562 packet_len, 0); 3662 packet_len, 0);
3563 3663
3564 if (dsi.te_enabled) 3664 if (dsi->te_enabled)
3565 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ 3665 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
3566 else 3666 else
3567 l = FLD_MOD(l, 1, 31, 31); /* TE_START */ 3667 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
@@ -3577,13 +3677,13 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3577 3677
3578 dsi_perf_mark_start(dsidev); 3678 dsi_perf_mark_start(dsidev);
3579 3679
3580 r = queue_delayed_work(dsi.workqueue, &dsi.framedone_timeout_work, 3680 r = queue_delayed_work(dsi->workqueue, &dsi->framedone_timeout_work,
3581 msecs_to_jiffies(250)); 3681 msecs_to_jiffies(250));
3582 BUG_ON(r == 0); 3682 BUG_ON(r == 0);
3583 3683
3584 dss_start_update(dssdev); 3684 dss_start_update(dssdev);
3585 3685
3586 if (dsi.te_enabled) { 3686 if (dsi->te_enabled) {
3587 /* disable LP_RX_TO, so that we can receive TE. Time to wait 3687 /* disable LP_RX_TO, so that we can receive TE. Time to wait
3588 * for TE is longer than the timer allows */ 3688 * for TE is longer than the timer allows */
3589 REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ 3689 REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
@@ -3591,7 +3691,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3591 dsi_vc_send_bta(dsidev, channel); 3691 dsi_vc_send_bta(dsidev, channel);
3592 3692
3593#ifdef DSI_CATCH_MISSING_TE 3693#ifdef DSI_CATCH_MISSING_TE
3594 mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250)); 3694 mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
3595#endif 3695#endif
3596 } 3696 }
3597} 3697}
@@ -3605,15 +3705,17 @@ static void dsi_te_timeout(unsigned long arg)
3605 3705
3606static void dsi_handle_framedone(struct platform_device *dsidev, int error) 3706static void dsi_handle_framedone(struct platform_device *dsidev, int error)
3607{ 3707{
3708 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3709
3608 /* SIDLEMODE back to smart-idle */ 3710 /* SIDLEMODE back to smart-idle */
3609 dispc_enable_sidle(); 3711 dispc_enable_sidle();
3610 3712
3611 if (dsi.te_enabled) { 3713 if (dsi->te_enabled) {
3612 /* enable LP_RX_TO again after the TE */ 3714 /* enable LP_RX_TO again after the TE */
3613 REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ 3715 REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
3614 } 3716 }
3615 3717
3616 dsi.framedone_callback(error, dsi.framedone_data); 3718 dsi->framedone_callback(error, dsi->framedone_data);
3617 3719
3618 if (!error) 3720 if (!error)
3619 dsi_perf_show(dsidev, "DISPC"); 3721 dsi_perf_show(dsidev, "DISPC");
@@ -3621,6 +3723,8 @@ static void dsi_handle_framedone(struct platform_device *dsidev, int error)
3621 3723
3622static void dsi_framedone_timeout_work_callback(struct work_struct *work) 3724static void dsi_framedone_timeout_work_callback(struct work_struct *work)
3623{ 3725{
3726 struct dsi_data *dsi = container_of(work, struct dsi_data,
3727 framedone_timeout_work.work);
3624 /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after 3728 /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
3625 * 250ms which would conflict with this timeout work. What should be 3729 * 250ms which would conflict with this timeout work. What should be
3626 * done is first cancel the transfer on the HW, and then cancel the 3730 * done is first cancel the transfer on the HW, and then cancel the
@@ -3630,19 +3734,21 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
3630 3734
3631 DSSERR("Framedone not received for 250ms!\n"); 3735 DSSERR("Framedone not received for 250ms!\n");
3632 3736
3633 dsi_handle_framedone(dsi.pdev, -ETIMEDOUT); 3737 dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
3634} 3738}
3635 3739
3636static void dsi_framedone_irq_callback(void *data, u32 mask) 3740static void dsi_framedone_irq_callback(void *data, u32 mask)
3637{ 3741{
3638 struct omap_dss_device *dssdev = (struct omap_dss_device *) data; 3742 struct omap_dss_device *dssdev = (struct omap_dss_device *) data;
3639 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3743 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3744 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3745
3640 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and 3746 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
3641 * turns itself off. However, DSI still has the pixels in its buffers, 3747 * turns itself off. However, DSI still has the pixels in its buffers,
3642 * and is sending the data. 3748 * and is sending the data.
3643 */ 3749 */
3644 3750
3645 __cancel_delayed_work(&dsi.framedone_timeout_work); 3751 __cancel_delayed_work(&dsi->framedone_timeout_work);
3646 3752
3647 dsi_handle_framedone(dsidev, 0); 3753 dsi_handle_framedone(dsidev, 0);
3648 3754
@@ -3693,8 +3799,9 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
3693 void (*callback)(int, void *), void *data) 3799 void (*callback)(int, void *), void *data)
3694{ 3800{
3695 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3801 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3802 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3696 3803
3697 dsi.update_channel = channel; 3804 dsi->update_channel = channel;
3698 3805
3699 /* OMAP DSS cannot send updates of odd widths. 3806 /* OMAP DSS cannot send updates of odd widths.
3700 * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON 3807 * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
@@ -3703,14 +3810,14 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
3703 BUG_ON(x % 2 == 1); 3810 BUG_ON(x % 2 == 1);
3704 3811
3705 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { 3812 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3706 dsi.framedone_callback = callback; 3813 dsi->framedone_callback = callback;
3707 dsi.framedone_data = data; 3814 dsi->framedone_data = data;
3708 3815
3709 dsi.update_region.x = x; 3816 dsi->update_region.x = x;
3710 dsi.update_region.y = y; 3817 dsi->update_region.y = y;
3711 dsi.update_region.w = w; 3818 dsi->update_region.w = w;
3712 dsi.update_region.h = h; 3819 dsi->update_region.h = h;
3713 dsi.update_region.device = dssdev; 3820 dsi->update_region.device = dssdev;
3714 3821
3715 dsi_update_screen_dispc(dssdev, x, y, w, h); 3822 dsi_update_screen_dispc(dssdev, x, y, w, h);
3716 } else { 3823 } else {
@@ -3890,8 +3997,9 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
3890 bool disconnect_lanes, bool enter_ulps) 3997 bool disconnect_lanes, bool enter_ulps)
3891{ 3998{
3892 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3999 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4000 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3893 4001
3894 if (enter_ulps && !dsi.ulps_enabled) 4002 if (enter_ulps && !dsi->ulps_enabled)
3895 dsi_enter_ulps(dsidev); 4003 dsi_enter_ulps(dsidev);
3896 4004
3897 /* disable interface */ 4005 /* disable interface */
@@ -3926,13 +4034,14 @@ static int dsi_core_init(struct platform_device *dsidev)
3926int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) 4034int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
3927{ 4035{
3928 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4036 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4037 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3929 int r = 0; 4038 int r = 0;
3930 4039
3931 DSSDBG("dsi_display_enable\n"); 4040 DSSDBG("dsi_display_enable\n");
3932 4041
3933 WARN_ON(!dsi_bus_is_locked(dsidev)); 4042 WARN_ON(!dsi_bus_is_locked(dsidev));
3934 4043
3935 mutex_lock(&dsi.lock); 4044 mutex_lock(&dsi->lock);
3936 4045
3937 r = omap_dss_start_device(dssdev); 4046 r = omap_dss_start_device(dssdev);
3938 if (r) { 4047 if (r) {
@@ -3957,7 +4066,7 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
3957 if (r) 4066 if (r)
3958 goto err2; 4067 goto err2;
3959 4068
3960 mutex_unlock(&dsi.lock); 4069 mutex_unlock(&dsi->lock);
3961 4070
3962 return 0; 4071 return 0;
3963 4072
@@ -3968,7 +4077,7 @@ err1:
3968 dsi_enable_pll_clock(dsidev, 0); 4077 dsi_enable_pll_clock(dsidev, 0);
3969 omap_dss_stop_device(dssdev); 4078 omap_dss_stop_device(dssdev);
3970err0: 4079err0:
3971 mutex_unlock(&dsi.lock); 4080 mutex_unlock(&dsi->lock);
3972 DSSDBG("dsi_display_enable FAILED\n"); 4081 DSSDBG("dsi_display_enable FAILED\n");
3973 return r; 4082 return r;
3974} 4083}
@@ -3978,12 +4087,13 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
3978 bool disconnect_lanes, bool enter_ulps) 4087 bool disconnect_lanes, bool enter_ulps)
3979{ 4088{
3980 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4089 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4090 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3981 4091
3982 DSSDBG("dsi_display_disable\n"); 4092 DSSDBG("dsi_display_disable\n");
3983 4093
3984 WARN_ON(!dsi_bus_is_locked(dsidev)); 4094 WARN_ON(!dsi_bus_is_locked(dsidev));
3985 4095
3986 mutex_lock(&dsi.lock); 4096 mutex_lock(&dsi->lock);
3987 4097
3988 dsi_display_uninit_dispc(dssdev); 4098 dsi_display_uninit_dispc(dssdev);
3989 4099
@@ -3994,13 +4104,16 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
3994 4104
3995 omap_dss_stop_device(dssdev); 4105 omap_dss_stop_device(dssdev);
3996 4106
3997 mutex_unlock(&dsi.lock); 4107 mutex_unlock(&dsi->lock);
3998} 4108}
3999EXPORT_SYMBOL(omapdss_dsi_display_disable); 4109EXPORT_SYMBOL(omapdss_dsi_display_disable);
4000 4110
4001int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable) 4111int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4002{ 4112{
4003 dsi.te_enabled = enable; 4113 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4114 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4115
4116 dsi->te_enabled = enable;
4004 return 0; 4117 return 0;
4005} 4118}
4006EXPORT_SYMBOL(omapdss_dsi_enable_te); 4119EXPORT_SYMBOL(omapdss_dsi_enable_te);
@@ -4020,23 +4133,26 @@ void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
4020 4133
4021int dsi_init_display(struct omap_dss_device *dssdev) 4134int dsi_init_display(struct omap_dss_device *dssdev)
4022{ 4135{
4136 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4137 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4138
4023 DSSDBG("DSI init\n"); 4139 DSSDBG("DSI init\n");
4024 4140
4025 /* XXX these should be figured out dynamically */ 4141 /* XXX these should be figured out dynamically */
4026 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | 4142 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
4027 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; 4143 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
4028 4144
4029 if (dsi.vdds_dsi_reg == NULL) { 4145 if (dsi->vdds_dsi_reg == NULL) {
4030 struct regulator *vdds_dsi; 4146 struct regulator *vdds_dsi;
4031 4147
4032 vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi"); 4148 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
4033 4149
4034 if (IS_ERR(vdds_dsi)) { 4150 if (IS_ERR(vdds_dsi)) {
4035 DSSERR("can't get VDDS_DSI regulator\n"); 4151 DSSERR("can't get VDDS_DSI regulator\n");
4036 return PTR_ERR(vdds_dsi); 4152 return PTR_ERR(vdds_dsi);
4037 } 4153 }
4038 4154
4039 dsi.vdds_dsi_reg = vdds_dsi; 4155 dsi->vdds_dsi_reg = vdds_dsi;
4040 } 4156 }
4041 4157
4042 return 0; 4158 return 0;
@@ -4044,11 +4160,13 @@ int dsi_init_display(struct omap_dss_device *dssdev)
4044 4160
4045int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel) 4161int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
4046{ 4162{
4163 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4164 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4047 int i; 4165 int i;
4048 4166
4049 for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) { 4167 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
4050 if (!dsi.vc[i].dssdev) { 4168 if (!dsi->vc[i].dssdev) {
4051 dsi.vc[i].dssdev = dssdev; 4169 dsi->vc[i].dssdev = dssdev;
4052 *channel = i; 4170 *channel = i;
4053 return 0; 4171 return 0;
4054 } 4172 }
@@ -4061,6 +4179,9 @@ EXPORT_SYMBOL(omap_dsi_request_vc);
4061 4179
4062int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id) 4180int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
4063{ 4181{
4182 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4183 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4184
4064 if (vc_id < 0 || vc_id > 3) { 4185 if (vc_id < 0 || vc_id > 3) {
4065 DSSERR("VC ID out of range\n"); 4186 DSSERR("VC ID out of range\n");
4066 return -EINVAL; 4187 return -EINVAL;
@@ -4071,13 +4192,13 @@ int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
4071 return -EINVAL; 4192 return -EINVAL;
4072 } 4193 }
4073 4194
4074 if (dsi.vc[channel].dssdev != dssdev) { 4195 if (dsi->vc[channel].dssdev != dssdev) {
4075 DSSERR("Virtual Channel not allocated to display %s\n", 4196 DSSERR("Virtual Channel not allocated to display %s\n",
4076 dssdev->name); 4197 dssdev->name);
4077 return -EINVAL; 4198 return -EINVAL;
4078 } 4199 }
4079 4200
4080 dsi.vc[channel].vc_id = vc_id; 4201 dsi->vc[channel].vc_id = vc_id;
4081 4202
4082 return 0; 4203 return 0;
4083} 4204}
@@ -4085,10 +4206,13 @@ EXPORT_SYMBOL(omap_dsi_set_vc_id);
4085 4206
4086void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel) 4207void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
4087{ 4208{
4209 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4210 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4211
4088 if ((channel >= 0 && channel <= 3) && 4212 if ((channel >= 0 && channel <= 3) &&
4089 dsi.vc[channel].dssdev == dssdev) { 4213 dsi->vc[channel].dssdev == dssdev) {
4090 dsi.vc[channel].dssdev = NULL; 4214 dsi->vc[channel].dssdev = NULL;
4091 dsi.vc[channel].vc_id = 0; 4215 dsi->vc[channel].vc_id = 0;
4092 } 4216 }
4093} 4217}
4094EXPORT_SYMBOL(omap_dsi_release_vc); 4218EXPORT_SYMBOL(omap_dsi_release_vc);
@@ -4111,13 +4235,16 @@ void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
4111 4235
4112static void dsi_calc_clock_param_ranges(struct platform_device *dsidev) 4236static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
4113{ 4237{
4114 dsi.regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN); 4238 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4115 dsi.regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM); 4239
4116 dsi.regm_dispc_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC); 4240 dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
4117 dsi.regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI); 4241 dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
4118 dsi.fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT); 4242 dsi->regm_dispc_max =
4119 dsi.fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT); 4243 dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
4120 dsi.lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); 4244 dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
4245 dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
4246 dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
4247 dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
4121} 4248}
4122 4249
4123static int dsi_init(struct platform_device *dsidev) 4250static int dsi_init(struct platform_device *dsidev)
@@ -4125,70 +4252,81 @@ static int dsi_init(struct platform_device *dsidev)
4125 struct omap_display_platform_data *dss_plat_data; 4252 struct omap_display_platform_data *dss_plat_data;
4126 struct omap_dss_board_info *board_info; 4253 struct omap_dss_board_info *board_info;
4127 u32 rev; 4254 u32 rev;
4128 int r, i; 4255 int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
4129 struct resource *dsi_mem; 4256 struct resource *dsi_mem;
4257 struct dsi_data *dsi;
4258
4259 dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
4260 if (!dsi) {
4261 r = -ENOMEM;
4262 goto err0;
4263 }
4130 4264
4131 dsi_pdev_map[dsidev->id] = dsidev; 4265 dsi->pdev = dsidev;
4266 dsi_pdev_map[dsi_module] = dsidev;
4267 dev_set_drvdata(&dsidev->dev, dsi);
4132 4268
4133 dss_plat_data = dsidev->dev.platform_data; 4269 dss_plat_data = dsidev->dev.platform_data;
4134 board_info = dss_plat_data->board_data; 4270 board_info = dss_plat_data->board_data;
4135 dsi.dsi_mux_pads = board_info->dsi_mux_pads; 4271 dsi->dsi_mux_pads = board_info->dsi_mux_pads;
4136 4272
4137 spin_lock_init(&dsi.irq_lock); 4273 spin_lock_init(&dsi->irq_lock);
4138 spin_lock_init(&dsi.errors_lock); 4274 spin_lock_init(&dsi->errors_lock);
4139 dsi.errors = 0; 4275 dsi->errors = 0;
4140 4276
4141#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 4277#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
4142 spin_lock_init(&dsi.irq_stats_lock); 4278 spin_lock_init(&dsi->irq_stats_lock);
4143 dsi.irq_stats.last_reset = jiffies; 4279 dsi->irq_stats.last_reset = jiffies;
4144#endif 4280#endif
4145 4281
4146 mutex_init(&dsi.lock); 4282 mutex_init(&dsi->lock);
4147 sema_init(&dsi.bus_lock, 1); 4283 sema_init(&dsi->bus_lock, 1);
4148 4284
4149 dsi.workqueue = create_singlethread_workqueue("dsi"); 4285 dsi->workqueue = create_singlethread_workqueue(dev_name(&dsidev->dev));
4150 if (dsi.workqueue == NULL) 4286 if (dsi->workqueue == NULL) {
4151 return -ENOMEM; 4287 r = -ENOMEM;
4288 goto err1;
4289 }
4152 4290
4153 INIT_DELAYED_WORK_DEFERRABLE(&dsi.framedone_timeout_work, 4291 INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
4154 dsi_framedone_timeout_work_callback); 4292 dsi_framedone_timeout_work_callback);
4155 4293
4156#ifdef DSI_CATCH_MISSING_TE 4294#ifdef DSI_CATCH_MISSING_TE
4157 init_timer(&dsi.te_timer); 4295 init_timer(&dsi->te_timer);
4158 dsi.te_timer.function = dsi_te_timeout; 4296 dsi->te_timer.function = dsi_te_timeout;
4159 dsi.te_timer.data = 0; 4297 dsi->te_timer.data = 0;
4160#endif 4298#endif
4161 dsi_mem = platform_get_resource(dsi.pdev, IORESOURCE_MEM, 0); 4299 dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
4162 if (!dsi_mem) { 4300 if (!dsi_mem) {
4163 DSSERR("can't get IORESOURCE_MEM DSI\n"); 4301 DSSERR("can't get IORESOURCE_MEM DSI\n");
4164 r = -EINVAL; 4302 r = -EINVAL;
4165 goto err1; 4303 goto err2;
4166 } 4304 }
4167 dsi.base = ioremap(dsi_mem->start, resource_size(dsi_mem)); 4305 dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
4168 if (!dsi.base) { 4306 if (!dsi->base) {
4169 DSSERR("can't ioremap DSI\n"); 4307 DSSERR("can't ioremap DSI\n");
4170 r = -ENOMEM; 4308 r = -ENOMEM;
4171 goto err1; 4309 goto err2;
4172 } 4310 }
4173 dsi.irq = platform_get_irq(dsi.pdev, 0); 4311 dsi->irq = platform_get_irq(dsi->pdev, 0);
4174 if (dsi.irq < 0) { 4312 if (dsi->irq < 0) {
4175 DSSERR("platform_get_irq failed\n"); 4313 DSSERR("platform_get_irq failed\n");
4176 r = -ENODEV; 4314 r = -ENODEV;
4177 goto err2; 4315 goto err3;
4178 } 4316 }
4179 4317
4180 r = request_irq(dsi.irq, omap_dsi_irq_handler, IRQF_SHARED, 4318 r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
4181 "OMAP DSI1", dsi.pdev); 4319 dev_name(&dsidev->dev), dsi->pdev);
4182 if (r < 0) { 4320 if (r < 0) {
4183 DSSERR("request_irq failed\n"); 4321 DSSERR("request_irq failed\n");
4184 goto err2; 4322 goto err3;
4185 } 4323 }
4186 4324
4187 /* DSI VCs initialization */ 4325 /* DSI VCs initialization */
4188 for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) { 4326 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
4189 dsi.vc[i].mode = DSI_VC_MODE_L4; 4327 dsi->vc[i].mode = DSI_VC_MODE_L4;
4190 dsi.vc[i].dssdev = NULL; 4328 dsi->vc[i].dssdev = NULL;
4191 dsi.vc[i].vc_id = 0; 4329 dsi->vc[i].vc_id = 0;
4192 } 4330 }
4193 4331
4194 dsi_calc_clock_param_ranges(dsidev); 4332 dsi_calc_clock_param_ranges(dsidev);
@@ -4202,29 +4340,35 @@ static int dsi_init(struct platform_device *dsidev)
4202 enable_clocks(0); 4340 enable_clocks(0);
4203 4341
4204 return 0; 4342 return 0;
4343err3:
4344 iounmap(dsi->base);
4205err2: 4345err2:
4206 iounmap(dsi.base); 4346 destroy_workqueue(dsi->workqueue);
4207err1: 4347err1:
4208 destroy_workqueue(dsi.workqueue); 4348 kfree(dsi);
4349err0:
4209 return r; 4350 return r;
4210} 4351}
4211 4352
4212static void dsi_exit(struct platform_device *dsidev) 4353static void dsi_exit(struct platform_device *dsidev)
4213{ 4354{
4214 if (dsi.vdds_dsi_reg != NULL) { 4355 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4215 if (dsi.vdds_dsi_enabled) { 4356
4216 regulator_disable(dsi.vdds_dsi_reg); 4357 if (dsi->vdds_dsi_reg != NULL) {
4217 dsi.vdds_dsi_enabled = false; 4358 if (dsi->vdds_dsi_enabled) {
4359 regulator_disable(dsi->vdds_dsi_reg);
4360 dsi->vdds_dsi_enabled = false;
4218 } 4361 }
4219 4362
4220 regulator_put(dsi.vdds_dsi_reg); 4363 regulator_put(dsi->vdds_dsi_reg);
4221 dsi.vdds_dsi_reg = NULL; 4364 dsi->vdds_dsi_reg = NULL;
4222 } 4365 }
4223 4366
4224 free_irq(dsi.irq, dsi.pdev); 4367 free_irq(dsi->irq, dsi->pdev);
4225 iounmap(dsi.base); 4368 iounmap(dsi->base);
4226 4369
4227 destroy_workqueue(dsi.workqueue); 4370 destroy_workqueue(dsi->workqueue);
4371 kfree(dsi);
4228 4372
4229 DSSDBG("omap_dsi_exit\n"); 4373 DSSDBG("omap_dsi_exit\n");
4230} 4374}
@@ -4233,7 +4377,7 @@ static void dsi_exit(struct platform_device *dsidev)
4233static int omap_dsi1hw_probe(struct platform_device *dsidev) 4377static int omap_dsi1hw_probe(struct platform_device *dsidev)
4234{ 4378{
4235 int r; 4379 int r;
4236 dsi.pdev = dsidev; 4380
4237 r = dsi_init(dsidev); 4381 r = dsi_init(dsidev);
4238 if (r) { 4382 if (r) {
4239 DSSERR("Failed to initialize DSI\n"); 4383 DSSERR("Failed to initialize DSI\n");
@@ -4245,8 +4389,10 @@ err_dsi:
4245 4389
4246static int omap_dsi1hw_remove(struct platform_device *dsidev) 4390static int omap_dsi1hw_remove(struct platform_device *dsidev)
4247{ 4391{
4392 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4393
4248 dsi_exit(dsidev); 4394 dsi_exit(dsidev);
4249 WARN_ON(dsi.scp_clk_refcount > 0); 4395 WARN_ON(dsi->scp_clk_refcount > 0);
4250 return 0; 4396 return 0;
4251} 4397}
4252 4398