Diffstat (limited to 'arch/arm/mach-omap2/clockdomain.c')
 -rw-r--r--  arch/arm/mach-omap2/clockdomain.c | 93
 1 file changed, 55 insertions(+), 38 deletions(-)
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 512e79a842cb..7faf82d4e85c 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -22,12 +22,14 @@
 #include <linux/clk.h>
 #include <linux/limits.h>
 #include <linux/err.h>
+#include <linux/clk-provider.h>
 
 #include <linux/io.h>
 
 #include <linux/bitops.h>
 
-#include <plat/clock.h>
+#include "soc.h"
+#include "clock.h"
 #include "clockdomain.h"
 
 /* clkdm_list contains all registered struct clockdomains */
@@ -946,35 +948,6 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
         return 0;
 }
 
-static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
-{
-        unsigned long flags;
-
-        if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
-                return -EINVAL;
-
-        spin_lock_irqsave(&clkdm->lock, flags);
-
-        if (atomic_read(&clkdm->usecount) == 0) {
-                spin_unlock_irqrestore(&clkdm->lock, flags);
-                WARN_ON(1); /* underflow */
-                return -ERANGE;
-        }
-
-        if (atomic_dec_return(&clkdm->usecount) > 0) {
-                spin_unlock_irqrestore(&clkdm->lock, flags);
-                return 0;
-        }
-
-        arch_clkdm->clkdm_clk_disable(clkdm);
-        pwrdm_state_switch(clkdm->pwrdm.ptr);
-        spin_unlock_irqrestore(&clkdm->lock, flags);
-
-        pr_debug("clockdomain: %s: disabled\n", clkdm->name);
-
-        return 0;
-}
-
 /**
  * clkdm_clk_enable - add an enabled downstream clock to this clkdm
  * @clkdm: struct clockdomain *
@@ -1017,15 +990,38 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
  */
 int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 {
-        /*
-         * XXX Rewrite this code to maintain a list of enabled
-         * downstream clocks for debugging purposes?
-         */
+        unsigned long flags;
 
-        if (!clk)
+        if (!clkdm || !clk || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
                 return -EINVAL;
 
-        return _clkdm_clk_hwmod_disable(clkdm);
+        spin_lock_irqsave(&clkdm->lock, flags);
+
+        /* corner case: disabling unused clocks */
+        if ((__clk_get_enable_count(clk) == 0) &&
+            (atomic_read(&clkdm->usecount) == 0))
+                goto ccd_exit;
+
+        if (atomic_read(&clkdm->usecount) == 0) {
+                spin_unlock_irqrestore(&clkdm->lock, flags);
+                WARN_ON(1); /* underflow */
+                return -ERANGE;
+        }
+
+        if (atomic_dec_return(&clkdm->usecount) > 0) {
+                spin_unlock_irqrestore(&clkdm->lock, flags);
+                return 0;
+        }
+
+        arch_clkdm->clkdm_clk_disable(clkdm);
+        pwrdm_state_switch(clkdm->pwrdm.ptr);
+
+        pr_debug("clockdomain: %s: disabled\n", clkdm->name);
+
+ccd_exit:
+        spin_unlock_irqrestore(&clkdm->lock, flags);
+
+        return 0;
 }
 
 /**
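The substantive change in clkdm_clk_disable() above is the new early exit: when the clock's own enable count (__clk_get_enable_count()) and the clockdomain usecount are both already zero, the call now falls through to ccd_exit and becomes a no-op instead of tripping the underflow WARN, covering clocks that get disabled without ever having been enabled through this path. The user-space sketch below is an illustration of that control flow only: the type and function names are invented for the sketch, and a pthread mutex plus a plain int stand in for the kernel's spinlock_t and atomic_t usecount.

/*
 * Illustration only: a user-space model of the reworked clkdm_clk_disable()
 * control flow. None of the names below are kernel APIs.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct clkdm_model {
        const char *name;
        int usecount;                   /* models atomic_t usecount */
        pthread_mutex_t lock;           /* models spinlock_t lock   */
};

/* models __clk_get_enable_count(clk): how many times the clock is enabled */
static int clk_enable_count;

static int clkdm_model_disable(struct clkdm_model *clkdm)
{
        int ret = 0;

        pthread_mutex_lock(&clkdm->lock);

        /* corner case: the clock was never enabled, nothing to undo */
        if (clk_enable_count == 0 && clkdm->usecount == 0)
                goto out;

        if (clkdm->usecount == 0) {
                ret = -1;               /* kernel code: WARN_ON(1), -ERANGE */
                goto out;
        }

        if (--clkdm->usecount == 0)     /* last user: shut the domain down */
                printf("clockdomain: %s: disabled\n", clkdm->name);
out:
        pthread_mutex_unlock(&clkdm->lock);
        return ret;
}

int main(void)
{
        static struct clkdm_model d = {
                "example_clkdm", 1, PTHREAD_MUTEX_INITIALIZER
        };

        clk_enable_count = 1;
        clkdm_model_disable(&d);        /* last user: prints "disabled"   */
        clk_enable_count = 0;
        clkdm_model_disable(&d);        /* unused clock: silently ignored */
        return 0;
}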
@@ -1076,6 +1072,8 @@ int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh)
  */
 int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
 {
+        unsigned long flags;
+
         /* The clkdm attribute does not exist yet prior OMAP4 */
         if (cpu_is_omap24xx() || cpu_is_omap34xx())
                 return 0;
@@ -1085,9 +1083,28 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
          * downstream hwmods for debugging purposes?
          */
 
-        if (!oh)
+        if (!clkdm || !oh || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
                 return -EINVAL;
 
-        return _clkdm_clk_hwmod_disable(clkdm);
+        spin_lock_irqsave(&clkdm->lock, flags);
+
+        if (atomic_read(&clkdm->usecount) == 0) {
+                spin_unlock_irqrestore(&clkdm->lock, flags);
+                WARN_ON(1); /* underflow */
+                return -ERANGE;
+        }
+
+        if (atomic_dec_return(&clkdm->usecount) > 0) {
+                spin_unlock_irqrestore(&clkdm->lock, flags);
+                return 0;
+        }
+
+        arch_clkdm->clkdm_clk_disable(clkdm);
+        pwrdm_state_switch(clkdm->pwrdm.ptr);
+        spin_unlock_irqrestore(&clkdm->lock, flags);
+
+        pr_debug("clockdomain: %s: disabled\n", clkdm->name);
+
+        return 0;
 }
 
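Both rewritten disable paths end with the same decrement-and-test: atomic_dec_return(&clkdm->usecount) > 0 means another user still holds the clockdomain, and only the caller whose decrement takes the count to zero goes on to call arch_clkdm->clkdm_clk_disable() and pwrdm_state_switch(). As a rough illustration of that property, the user-space sketch below uses C11 atomics to show that exactly one of several concurrent users observes the count reaching zero; it deliberately omits the spinlock the kernel code also holds, and every name in it is made up for the example.

/*
 * Illustration only: why a decrement-and-test is enough to pick the
 * "last user". With several concurrent callers, exactly one of them
 * observes the count reaching zero. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int usecount = 4;         /* four pretend users of the domain */
static atomic_int shutdowns;            /* callers that performed the shutdown */

static void *put_user(void *arg)
{
        (void)arg;
        /* models atomic_dec_return(&clkdm->usecount) reaching zero */
        if (atomic_fetch_sub(&usecount, 1) - 1 == 0)
                atomic_fetch_add(&shutdowns, 1);
        return NULL;
}

int main(void)
{
        pthread_t t[4];

        for (int i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, put_user, NULL);
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);

        /* exactly one thread saw the count hit zero */
        printf("shutdowns = %d, usecount = %d\n",
               atomic_load(&shutdowns), atomic_load(&usecount));
        return 0;
}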