Diffstat (limited to 'arch/arm/mach-omap2/clockdomain.c')
-rw-r--r--  arch/arm/mach-omap2/clockdomain.c  210
1 file changed, 169 insertions, 41 deletions
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 6cb6c03293df..ab7db083f97f 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -1,8 +1,8 @@
 /*
  * OMAP2/3/4 clockdomain framework functions
  *
- * Copyright (C) 2008-2010 Texas Instruments, Inc.
- * Copyright (C) 2008-2010 Nokia Corporation
+ * Copyright (C) 2008-2011 Texas Instruments, Inc.
+ * Copyright (C) 2008-2011 Nokia Corporation
  *
  * Written by Paul Walmsley and Jouni Högander
  * Added OMAP4 specific support by Abhijit Pagare <abhijitpagare@ti.com>
@@ -92,6 +92,8 @@ static int _clkdm_register(struct clockdomain *clkdm)
 
 	pwrdm_add_clkdm(pwrdm, clkdm);
 
+	spin_lock_init(&clkdm->lock);
+
 	pr_debug("clockdomain: registered %s\n", clkdm->name);
 
 	return 0;
@@ -690,6 +692,9 @@ int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)
  */
 int clkdm_sleep(struct clockdomain *clkdm)
 {
+	int ret;
+	unsigned long flags;
+
 	if (!clkdm)
 		return -EINVAL;
 
@@ -704,7 +709,11 @@ int clkdm_sleep(struct clockdomain *clkdm)
 
 	pr_debug("clockdomain: forcing sleep on %s\n", clkdm->name);
 
-	return arch_clkdm->clkdm_sleep(clkdm);
+	spin_lock_irqsave(&clkdm->lock, flags);
+	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
+	ret = arch_clkdm->clkdm_sleep(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+	return ret;
 }
 
 /**
@@ -718,6 +727,9 @@ int clkdm_sleep(struct clockdomain *clkdm)
  */
 int clkdm_wakeup(struct clockdomain *clkdm)
 {
+	int ret;
+	unsigned long flags;
+
 	if (!clkdm)
 		return -EINVAL;
 
@@ -732,7 +744,11 @@ int clkdm_wakeup(struct clockdomain *clkdm)
 
 	pr_debug("clockdomain: forcing wakeup on %s\n", clkdm->name);
 
-	return arch_clkdm->clkdm_wakeup(clkdm);
+	spin_lock_irqsave(&clkdm->lock, flags);
+	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
+	ret = arch_clkdm->clkdm_wakeup(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+	return ret;
 }
 
 /**
@@ -747,6 +763,8 @@ int clkdm_wakeup(struct clockdomain *clkdm)
  */
 void clkdm_allow_idle(struct clockdomain *clkdm)
 {
+	unsigned long flags;
+
 	if (!clkdm)
 		return;
 
@@ -762,8 +780,11 @@ void clkdm_allow_idle(struct clockdomain *clkdm)
 	pr_debug("clockdomain: enabling automatic idle transitions for %s\n",
 		 clkdm->name);
 
+	spin_lock_irqsave(&clkdm->lock, flags);
+	clkdm->_flags |= _CLKDM_FLAG_HWSUP_ENABLED;
 	arch_clkdm->clkdm_allow_idle(clkdm);
 	pwrdm_clkdm_state_switch(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
 }
 
 /**
@@ -777,6 +798,8 @@ void clkdm_allow_idle(struct clockdomain *clkdm)
  */
 void clkdm_deny_idle(struct clockdomain *clkdm)
 {
+	unsigned long flags;
+
 	if (!clkdm)
 		return;
 
@@ -792,11 +815,90 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
 	pr_debug("clockdomain: disabling automatic idle transitions for %s\n",
 		 clkdm->name);
 
+	spin_lock_irqsave(&clkdm->lock, flags);
+	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
 	arch_clkdm->clkdm_deny_idle(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+}
+
+/**
+ * clkdm_in_hwsup - does clockdomain @clkdm have hardware-supervised idle enabled?
+ * @clkdm: struct clockdomain *
+ *
+ * Returns true if clockdomain @clkdm currently has
+ * hardware-supervised idle enabled, or false if it does not or if
+ * @clkdm is NULL. It is only valid to call this function after
+ * clkdm_init() has been called. This function does not actually read
+ * bits from the hardware; it instead tests an in-memory flag that is
+ * changed whenever the clockdomain code changes the auto-idle mode.
+ */
+bool clkdm_in_hwsup(struct clockdomain *clkdm)
+{
+	bool ret;
+	unsigned long flags;
+
+	if (!clkdm)
+		return false;
+
+	spin_lock_irqsave(&clkdm->lock, flags);
+	ret = (clkdm->_flags & _CLKDM_FLAG_HWSUP_ENABLED) ? true : false;
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+
+	return ret;
+}
+
+/* Clockdomain-to-clock/hwmod framework interface code */
+
+static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
+{
+	unsigned long flags;
+
+	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
+		return -EINVAL;
+
+	/*
+	 * For archs with no autodeps, clkdm_clk_enable
+	 * should be called for every clock instance or hwmod that is
+	 * enabled, so the clkdm can be force woken up.
+	 */
+	if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps)
+		return 0;
+
+	spin_lock_irqsave(&clkdm->lock, flags);
+	arch_clkdm->clkdm_clk_enable(clkdm);
+	pwrdm_wait_transition(clkdm->pwrdm.ptr);
+	pwrdm_clkdm_state_switch(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+
+	pr_debug("clockdomain: clkdm %s: enabled\n", clkdm->name);
+
+	return 0;
 }
 
+static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
+{
+	unsigned long flags;
+
+	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
+		return -EINVAL;
+
+	if (atomic_read(&clkdm->usecount) == 0) {
+		WARN_ON(1); /* underflow */
+		return -ERANGE;
+	}
+
+	if (atomic_dec_return(&clkdm->usecount) > 0)
+		return 0;
+
+	spin_lock_irqsave(&clkdm->lock, flags);
+	arch_clkdm->clkdm_clk_disable(clkdm);
+	pwrdm_clkdm_state_switch(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
 
-/* Clockdomain-to-clock framework interface code */
+	pr_debug("clockdomain: clkdm %s: disabled\n", clkdm->name);
+
+	return 0;
+}
 
 /**
  * clkdm_clk_enable - add an enabled downstream clock to this clkdm
@@ -819,25 +921,10 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
 	 * downstream clocks for debugging purposes?
 	 */
 
-	if (!clkdm || !clk)
+	if (!clk)
 		return -EINVAL;
 
-	if (!arch_clkdm || !arch_clkdm->clkdm_clk_enable)
-		return -EINVAL;
-
-	if (atomic_inc_return(&clkdm->usecount) > 1)
-		return 0;
-
-	/* Clockdomain now has one enabled downstream clock */
-
-	pr_debug("clockdomain: clkdm %s: clk %s now enabled\n", clkdm->name,
-		 clk->name);
-
-	arch_clkdm->clkdm_clk_enable(clkdm);
-	pwrdm_wait_transition(clkdm->pwrdm.ptr);
-	pwrdm_clkdm_state_switch(clkdm);
-
-	return 0;
+	return _clkdm_clk_hwmod_enable(clkdm);
 }
 
 /**
@@ -850,9 +937,8 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
  * clockdomain usecount goes to 0, put the clockdomain to sleep
  * (software-supervised mode) or remove the clkdm autodependencies
  * (hardware-supervised mode). Returns -EINVAL if passed null
- * pointers; -ERANGE if the @clkdm usecount underflows and debugging
- * is enabled; or returns 0 upon success or if the clockdomain is in
- * hwsup idle mode.
+ * pointers; -ERANGE if the @clkdm usecount underflows; or returns 0
+ * upon success or if the clockdomain is in hwsup idle mode.
  */
 int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 {
@@ -861,30 +947,72 @@ int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 	 * downstream clocks for debugging purposes?
 	 */
 
-	if (!clkdm || !clk)
+	if (!clk)
 		return -EINVAL;
 
-	if (!arch_clkdm || !arch_clkdm->clkdm_clk_disable)
+	return _clkdm_clk_hwmod_disable(clkdm);
+}
+
+/**
+ * clkdm_hwmod_enable - add an enabled downstream hwmod to this clkdm
+ * @clkdm: struct clockdomain *
+ * @oh: struct omap_hwmod * of the enabled downstream hwmod
+ *
+ * Increment the usecount of the clockdomain @clkdm and ensure that it
+ * is awake before @oh is enabled. Intended to be called by
+ * module_enable() code.
+ * If the clockdomain is in software-supervised idle mode, force the
+ * clockdomain to wake. If the clockdomain is in hardware-supervised idle
+ * mode, add clkdm-pwrdm autodependencies, to ensure that devices in the
+ * clockdomain can be read from/written to by on-chip processors.
+ * Returns -EINVAL if passed null pointers;
+ * returns 0 upon success or if the clockdomain is in hwsup idle mode.
+ */
+int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh)
+{
+	/* The clkdm attribute does not exist yet prior to OMAP4 */
+	if (cpu_is_omap24xx() || cpu_is_omap34xx())
+		return 0;
+
+	/*
+	 * XXX Rewrite this code to maintain a list of enabled
+	 * downstream hwmods for debugging purposes?
+	 */
+
+	if (!oh)
 		return -EINVAL;
 
-#ifdef DEBUG
-	if (atomic_read(&clkdm->usecount) == 0) {
-		WARN_ON(1); /* underflow */
-		return -ERANGE;
-	}
-#endif
+	return _clkdm_clk_hwmod_enable(clkdm);
+}
 
-	if (atomic_dec_return(&clkdm->usecount) > 0)
+/**
+ * clkdm_hwmod_disable - remove an enabled downstream hwmod from this clkdm
+ * @clkdm: struct clockdomain *
+ * @oh: struct omap_hwmod * of the disabled downstream hwmod
+ *
+ * Decrement the usecount of this clockdomain @clkdm when @oh is
+ * disabled. Intended to be called by module_disable() code.
+ * If the clockdomain usecount goes to 0, put the clockdomain to sleep
+ * (software-supervised mode) or remove the clkdm autodependencies
+ * (hardware-supervised mode).
+ * Returns -EINVAL if passed null pointers; -ERANGE if the @clkdm usecount
+ * underflows; or returns 0 upon success or if the clockdomain is in hwsup
+ * idle mode.
+ */
+int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
+{
+	/* The clkdm attribute does not exist yet prior to OMAP4 */
+	if (cpu_is_omap24xx() || cpu_is_omap34xx())
 		return 0;
 
-	/* All downstream clocks of this clockdomain are now disabled */
-
-	pr_debug("clockdomain: clkdm %s: clk %s now disabled\n", clkdm->name,
-		 clk->name);
+	/*
+	 * XXX Rewrite this code to maintain a list of enabled
+	 * downstream hwmods for debugging purposes?
+	 */
 
-	arch_clkdm->clkdm_clk_disable(clkdm);
-	pwrdm_clkdm_state_switch(clkdm);
+	if (!oh)
+		return -EINVAL;
 
-	return 0;
+	return _clkdm_clk_hwmod_disable(clkdm);
 }
 
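
For illustration only, not part of the patch above: a minimal sketch of how a caller might drive the interfaces this patch adds, clkdm_hwmod_enable(), clkdm_hwmod_disable() and clkdm_in_hwsup(). The example_touch_module() helper and its placement are hypothetical; it assumes the same headers that clockdomain.c already includes.

/*
 * Hypothetical caller (illustration only): bump the clockdomain
 * usecount around a module register access so the clkdm is awake,
 * and query the auto-idle state that is now tracked under the new
 * per-clockdomain lock via clkdm_in_hwsup().
 */
static int example_touch_module(struct clockdomain *clkdm,
				struct omap_hwmod *oh)
{
	int ret;

	ret = clkdm_hwmod_enable(clkdm, oh);	/* wakes clkdm, usecount++ */
	if (ret)
		return ret;

	if (!clkdm_in_hwsup(clkdm))
		pr_debug("%s: %s is not in hardware-supervised idle\n",
			 __func__, clkdm->name);

	/* ... module register accesses would go here ... */

	return clkdm_hwmod_disable(clkdm, oh);	/* usecount--, may idle clkdm */
}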