Diffstat (limited to 'arch/arm/mach-omap2/clockdomain.c')
-rw-r--r--	arch/arm/mach-omap2/clockdomain.c | 212
1 file changed, 171 insertions(+), 41 deletions(-)
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 6cb6c03293d..8f0890685d7 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -1,8 +1,8 @@
 /*
  * OMAP2/3/4 clockdomain framework functions
  *
- * Copyright (C) 2008-2010 Texas Instruments, Inc.
- * Copyright (C) 2008-2010 Nokia Corporation
+ * Copyright (C) 2008-2011 Texas Instruments, Inc.
+ * Copyright (C) 2008-2011 Nokia Corporation
  *
  * Written by Paul Walmsley and Jouni Högander
  * Added OMAP4 specific support by Abhijit Pagare <abhijitpagare@ti.com>
@@ -92,6 +92,8 @@ static int _clkdm_register(struct clockdomain *clkdm)
 
 	pwrdm_add_clkdm(pwrdm, clkdm);
 
+	spin_lock_init(&clkdm->lock);
+
 	pr_debug("clockdomain: registered %s\n", clkdm->name);
 
 	return 0;
@@ -690,6 +692,9 @@ int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)
  */
 int clkdm_sleep(struct clockdomain *clkdm)
 {
+	int ret;
+	unsigned long flags;
+
 	if (!clkdm)
 		return -EINVAL;
 
@@ -704,7 +709,11 @@ int clkdm_sleep(struct clockdomain *clkdm)
 
 	pr_debug("clockdomain: forcing sleep on %s\n", clkdm->name);
 
-	return arch_clkdm->clkdm_sleep(clkdm);
+	spin_lock_irqsave(&clkdm->lock, flags);
+	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
+	ret = arch_clkdm->clkdm_sleep(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+	return ret;
 }
 
 /**
@@ -718,6 +727,9 @@ int clkdm_sleep(struct clockdomain *clkdm)
  */
 int clkdm_wakeup(struct clockdomain *clkdm)
 {
+	int ret;
+	unsigned long flags;
+
 	if (!clkdm)
 		return -EINVAL;
 
@@ -732,7 +744,12 @@ int clkdm_wakeup(struct clockdomain *clkdm)
 
 	pr_debug("clockdomain: forcing wakeup on %s\n", clkdm->name);
 
-	return arch_clkdm->clkdm_wakeup(clkdm);
+	spin_lock_irqsave(&clkdm->lock, flags);
+	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
+	ret = arch_clkdm->clkdm_wakeup(clkdm);
+	ret |= pwrdm_state_switch(clkdm->pwrdm.ptr);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+	return ret;
 }
 
 /**
@@ -747,6 +764,8 @@ int clkdm_wakeup(struct clockdomain *clkdm)
  */
 void clkdm_allow_idle(struct clockdomain *clkdm)
 {
+	unsigned long flags;
+
 	if (!clkdm)
 		return;
 
@@ -762,8 +781,11 @@ void clkdm_allow_idle(struct clockdomain *clkdm)
 	pr_debug("clockdomain: enabling automatic idle transitions for %s\n",
 		 clkdm->name);
 
+	spin_lock_irqsave(&clkdm->lock, flags);
+	clkdm->_flags |= _CLKDM_FLAG_HWSUP_ENABLED;
 	arch_clkdm->clkdm_allow_idle(clkdm);
 	pwrdm_clkdm_state_switch(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
 }
 
 /**
@@ -777,6 +799,8 @@ void clkdm_allow_idle(struct clockdomain *clkdm)
  */
 void clkdm_deny_idle(struct clockdomain *clkdm)
 {
+	unsigned long flags;
+
 	if (!clkdm)
 		return;
 
@@ -792,11 +816,91 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
 	pr_debug("clockdomain: disabling automatic idle transitions for %s\n",
 		 clkdm->name);
 
+	spin_lock_irqsave(&clkdm->lock, flags);
+	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
 	arch_clkdm->clkdm_deny_idle(clkdm);
+	pwrdm_state_switch(clkdm->pwrdm.ptr);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+}
+
+/**
+ * clkdm_in_hwsup - does clockdomain @clkdm have hardware-supervised idle enabled?
+ * @clkdm: struct clockdomain *
+ *
+ * Returns true if clockdomain @clkdm currently has
+ * hardware-supervised idle enabled, or false if it does not or if
+ * @clkdm is NULL. It is only valid to call this function after
+ * clkdm_init() has been called. This function does not actually read
+ * bits from the hardware; it instead tests an in-memory flag that is
+ * changed whenever the clockdomain code changes the auto-idle mode.
+ */
+bool clkdm_in_hwsup(struct clockdomain *clkdm)
+{
+	bool ret;
+	unsigned long flags;
+
+	if (!clkdm)
+		return false;
+
+	spin_lock_irqsave(&clkdm->lock, flags);
+	ret = (clkdm->_flags & _CLKDM_FLAG_HWSUP_ENABLED) ? true : false;
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+
+	return ret;
+}
+
+/* Clockdomain-to-clock/hwmod framework interface code */
+
+static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
+{
+	unsigned long flags;
+
+	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
+		return -EINVAL;
+
+	/*
+	 * For arches with no autodeps, clkdm_clk_enable
+	 * should be called for every clock instance or hwmod that is
+	 * enabled, so the clkdm can be forced awake.
+	 */
+	if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps)
+		return 0;
+
+	spin_lock_irqsave(&clkdm->lock, flags);
+	arch_clkdm->clkdm_clk_enable(clkdm);
+	pwrdm_wait_transition(clkdm->pwrdm.ptr);
+	pwrdm_clkdm_state_switch(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+
+	pr_debug("clockdomain: clkdm %s: enabled\n", clkdm->name);
+
+	return 0;
 }
 
+static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
+{
+	unsigned long flags;
+
+	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
+		return -EINVAL;
+
+	if (atomic_read(&clkdm->usecount) == 0) {
+		WARN_ON(1); /* underflow */
+		return -ERANGE;
+	}
+
+	if (atomic_dec_return(&clkdm->usecount) > 0)
+		return 0;
+
+	spin_lock_irqsave(&clkdm->lock, flags);
+	arch_clkdm->clkdm_clk_disable(clkdm);
+	pwrdm_clkdm_state_switch(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
 
-/* Clockdomain-to-clock framework interface code */
+	pr_debug("clockdomain: clkdm %s: disabled\n", clkdm->name);
+
+	return 0;
+}
 
 /**
  * clkdm_clk_enable - add an enabled downstream clock to this clkdm
@@ -819,25 +923,10 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
 	 * downstream clocks for debugging purposes?
 	 */
 
-	if (!clkdm || !clk)
+	if (!clk)
 		return -EINVAL;
 
-	if (!arch_clkdm || !arch_clkdm->clkdm_clk_enable)
-		return -EINVAL;
-
-	if (atomic_inc_return(&clkdm->usecount) > 1)
-		return 0;
-
-	/* Clockdomain now has one enabled downstream clock */
-
-	pr_debug("clockdomain: clkdm %s: clk %s now enabled\n", clkdm->name,
-		 clk->name);
-
-	arch_clkdm->clkdm_clk_enable(clkdm);
-	pwrdm_wait_transition(clkdm->pwrdm.ptr);
-	pwrdm_clkdm_state_switch(clkdm);
-
-	return 0;
+	return _clkdm_clk_hwmod_enable(clkdm);
 }
 
 /**
@@ -850,9 +939,8 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
  * clockdomain usecount goes to 0, put the clockdomain to sleep
  * (software-supervised mode) or remove the clkdm autodependencies
  * (hardware-supervised mode). Returns -EINVAL if passed null
- * pointers; -ERANGE if the @clkdm usecount underflows and debugging
- * is enabled; or returns 0 upon success or if the clockdomain is in
- * hwsup idle mode.
+ * pointers; -ERANGE if the @clkdm usecount underflows; or returns 0
+ * upon success or if the clockdomain is in hwsup idle mode.
  */
 int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 {
@@ -861,30 +949,72 @@ int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 	 * downstream clocks for debugging purposes?
 	 */
 
-	if (!clkdm || !clk)
+	if (!clk)
 		return -EINVAL;
 
-	if (!arch_clkdm || !arch_clkdm->clkdm_clk_disable)
+	return _clkdm_clk_hwmod_disable(clkdm);
+}
+
+/**
+ * clkdm_hwmod_enable - add an enabled downstream hwmod to this clkdm
+ * @clkdm: struct clockdomain *
+ * @oh: struct omap_hwmod * of the enabled downstream hwmod
+ *
+ * Increment the usecount of the clockdomain @clkdm and ensure that it
+ * is awake before @oh is enabled. Intended to be called by
+ * module_enable() code.
+ * If the clockdomain is in software-supervised idle mode, force the
+ * clockdomain to wake. If the clockdomain is in hardware-supervised idle
+ * mode, add clkdm-pwrdm autodependencies, to ensure that devices in the
+ * clockdomain can be read from/written to by on-chip processors.
+ * Returns -EINVAL if passed null pointers;
+ * returns 0 upon success or if the clockdomain is in hwsup idle mode.
+ */
+int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh)
+{
+	/* The clkdm attribute does not exist yet prior to OMAP4 */
+	if (cpu_is_omap24xx() || cpu_is_omap34xx())
+		return 0;
+
+	/*
+	 * XXX Rewrite this code to maintain a list of enabled
+	 * downstream hwmods for debugging purposes?
+	 */
+
+	if (!oh)
 		return -EINVAL;
 
-#ifdef DEBUG
-	if (atomic_read(&clkdm->usecount) == 0) {
-		WARN_ON(1); /* underflow */
-		return -ERANGE;
-	}
-#endif
+	return _clkdm_clk_hwmod_enable(clkdm);
+}
 
-	if (atomic_dec_return(&clkdm->usecount) > 0)
+/**
+ * clkdm_hwmod_disable - remove an enabled downstream hwmod from this clkdm
+ * @clkdm: struct clockdomain *
+ * @oh: struct omap_hwmod * of the disabled downstream hwmod
+ *
+ * Decrement the usecount of this clockdomain @clkdm when @oh is
+ * disabled. Intended to be called by module_disable() code.
+ * If the clockdomain usecount goes to 0, put the clockdomain to sleep
+ * (software-supervised mode) or remove the clkdm autodependencies
+ * (hardware-supervised mode).
+ * Returns -EINVAL if passed null pointers; -ERANGE if the @clkdm usecount
+ * underflows; or returns 0 upon success or if the clockdomain is in hwsup
+ * idle mode.
+ */
+int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
+{
+	/* The clkdm attribute does not exist yet prior to OMAP4 */
+	if (cpu_is_omap24xx() || cpu_is_omap34xx())
 		return 0;
 
-	/* All downstream clocks of this clockdomain are now disabled */
-
-	pr_debug("clockdomain: clkdm %s: clk %s now disabled\n", clkdm->name,
-		 clk->name);
+	/*
+	 * XXX Rewrite this code to maintain a list of enabled
+	 * downstream hwmods for debugging purposes?
+	 */
 
-	arch_clkdm->clkdm_clk_disable(clkdm);
-	pwrdm_clkdm_state_switch(clkdm);
+	if (!oh)
+		return -EINVAL;
 
-	return 0;
+	return _clkdm_clk_hwmod_disable(clkdm);
 }
 
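
Usage note (illustrative only, not part of the diff above): a minimal sketch of how module enable/disable code might call the clkdm_hwmod_*() and clkdm_in_hwsup() interfaces introduced by this patch. The oh->clkdm pointer is assumed to be a resolved struct clockdomain * on the hwmod, and _enable_module()/_disable_module() are hypothetical stand-ins for the real module programming, not functions from clockdomain.c.

/* Illustrative sketch only -- not from the patch above. */
#include <linux/kernel.h>
#include <linux/errno.h>

#include <plat/omap_hwmod.h>	/* struct omap_hwmod (plat-omap header of this era) */

#include "clockdomain.h"	/* clkdm_hwmod_enable(), clkdm_in_hwsup(), ... */

/* Hypothetical stand-ins for the real SYSCONFIG/CLKCTRL programming */
static void _enable_module(struct omap_hwmod *oh) { }
static void _disable_module(struct omap_hwmod *oh) { }

static int example_module_enable(struct omap_hwmod *oh)
{
	int ret;

	/*
	 * Bump the clockdomain usecount first: per clkdm_hwmod_enable(),
	 * this either forces the clkdm awake (software-supervised) or
	 * adds the clkdm-pwrdm autodependencies (hardware-supervised).
	 */
	ret = clkdm_hwmod_enable(oh->clkdm, oh);	/* oh->clkdm assumed resolved */
	if (ret)
		return ret;

	if (!clkdm_in_hwsup(oh->clkdm))
		pr_debug("%s: clkdm is under software-supervised control\n",
			 __func__);

	_enable_module(oh);

	return 0;
}

static void example_module_disable(struct omap_hwmod *oh)
{
	_disable_module(oh);

	/* Drop the usecount; at zero the clkdm may sleep or lose its autodeps */
	clkdm_hwmod_disable(oh->clkdm, oh);
}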