diff options
author | Mike Turquette <mturquette@ti.com> | 2012-11-09 13:28:42 -0500 |
---|---|---|
committer | Paul Walmsley <paul@pwsan.com> | 2012-11-12 21:10:19 -0500 |
commit | d043d87cd33ef0a6bec707077ef88f4c020db4c8 (patch) | |
tree | 826aa2c9038d5e8bfb83ccf2ed9b2fc547f076ab | |
parent | b797be1d4c079e78a3cb4e95f4a74274a4aef9f5 (diff) |
ARM: OMAP2+: clockdomain: bypass clockdomain handling when disabling unused clks
The OMAP port to the common clk framework[1] resulted in spurious WARNs
while disabling unused clocks. This is due to _clkdm_clk_hwmod_disable
catching clkdm->usecount values of zero. Even less desirably, it
would not allow the clkdm_clk_disable function pointer to get called due
to an early return of -ERANGE.
This patch adds a check for such a corner case by skipping the WARN and
early return in the event that clkdm->usecount and clk->enable_usecount
are both zero. Presumably this could only happen during the check for
unused clocks at boot-time.
[1] http://article.gmane.org/gmane.linux.ports.arm.omap/88824
Signed-off-by: Mike Turquette <mturquette@ti.com>
[paul@pwsan.com: split the hwmod and clock disable cases; modified the
code to skip the clockdomain handling during the disable-unused-clocks phase;
added COMMON_CLK ifdef; removed include of clk-private.h at Mike's request]
Signed-off-by: Paul Walmsley <paul@pwsan.com>
-rw-r--r-- | arch/arm/mach-omap2/clockdomain.c | 93 |
1 file changed, 56 insertions, 37 deletions
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c index 64e50465a4b5..be6fe356ddba 100644 --- a/arch/arm/mach-omap2/clockdomain.c +++ b/arch/arm/mach-omap2/clockdomain.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/limits.h> | 23 | #include <linux/limits.h> |
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/clk-provider.h> | ||
25 | 26 | ||
26 | #include <linux/io.h> | 27 | #include <linux/io.h> |
27 | 28 | ||
@@ -947,35 +948,6 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm) | |||
947 | return 0; | 948 | return 0; |
948 | } | 949 | } |
949 | 950 | ||
950 | static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm) | ||
951 | { | ||
952 | unsigned long flags; | ||
953 | |||
954 | if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable) | ||
955 | return -EINVAL; | ||
956 | |||
957 | spin_lock_irqsave(&clkdm->lock, flags); | ||
958 | |||
959 | if (atomic_read(&clkdm->usecount) == 0) { | ||
960 | spin_unlock_irqrestore(&clkdm->lock, flags); | ||
961 | WARN_ON(1); /* underflow */ | ||
962 | return -ERANGE; | ||
963 | } | ||
964 | |||
965 | if (atomic_dec_return(&clkdm->usecount) > 0) { | ||
966 | spin_unlock_irqrestore(&clkdm->lock, flags); | ||
967 | return 0; | ||
968 | } | ||
969 | |||
970 | arch_clkdm->clkdm_clk_disable(clkdm); | ||
971 | pwrdm_state_switch(clkdm->pwrdm.ptr); | ||
972 | spin_unlock_irqrestore(&clkdm->lock, flags); | ||
973 | |||
974 | pr_debug("clockdomain: %s: disabled\n", clkdm->name); | ||
975 | |||
976 | return 0; | ||
977 | } | ||
978 | |||
979 | /** | 951 | /** |
980 | * clkdm_clk_enable - add an enabled downstream clock to this clkdm | 952 | * clkdm_clk_enable - add an enabled downstream clock to this clkdm |
981 | * @clkdm: struct clockdomain * | 953 | * @clkdm: struct clockdomain * |
@@ -1018,15 +990,41 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk) | |||
1018 | */ | 990 | */ |
1019 | int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk) | 991 | int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk) |
1020 | { | 992 | { |
1021 | /* | 993 | unsigned long flags; |
1022 | * XXX Rewrite this code to maintain a list of enabled | ||
1023 | * downstream clocks for debugging purposes? | ||
1024 | */ | ||
1025 | 994 | ||
1026 | if (!clk) | 995 | if (!clkdm || !clk || !arch_clkdm || !arch_clkdm->clkdm_clk_disable) |
1027 | return -EINVAL; | 996 | return -EINVAL; |
1028 | 997 | ||
1029 | return _clkdm_clk_hwmod_disable(clkdm); | 998 | spin_lock_irqsave(&clkdm->lock, flags); |
999 | |||
1000 | #ifdef CONFIG_COMMON_CLK | ||
1001 | /* corner case: disabling unused clocks */ | ||
1002 | if (__clk_get_enable_count(clk) == 0) | ||
1003 | goto ccd_exit; | ||
1004 | #endif | ||
1005 | |||
1006 | if (atomic_read(&clkdm->usecount) == 0) { | ||
1007 | spin_unlock_irqrestore(&clkdm->lock, flags); | ||
1008 | WARN_ON(1); /* underflow */ | ||
1009 | return -ERANGE; | ||
1010 | } | ||
1011 | |||
1012 | if (atomic_dec_return(&clkdm->usecount) > 0) { | ||
1013 | spin_unlock_irqrestore(&clkdm->lock, flags); | ||
1014 | return 0; | ||
1015 | } | ||
1016 | |||
1017 | arch_clkdm->clkdm_clk_disable(clkdm); | ||
1018 | pwrdm_state_switch(clkdm->pwrdm.ptr); | ||
1019 | |||
1020 | pr_debug("clockdomain: %s: disabled\n", clkdm->name); | ||
1021 | |||
1022 | #ifdef CONFIG_COMMON_CLK | ||
1023 | ccd_exit: | ||
1024 | #endif | ||
1025 | spin_unlock_irqrestore(&clkdm->lock, flags); | ||
1026 | |||
1027 | return 0; | ||
1030 | } | 1028 | } |
1031 | 1029 | ||
1032 | /** | 1030 | /** |
@@ -1077,6 +1075,8 @@ int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh) | |||
1077 | */ | 1075 | */ |
1078 | int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh) | 1076 | int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh) |
1079 | { | 1077 | { |
1078 | unsigned long flags; | ||
1079 | |||
1080 | /* The clkdm attribute does not exist yet prior OMAP4 */ | 1080 | /* The clkdm attribute does not exist yet prior OMAP4 */ |
1081 | if (cpu_is_omap24xx() || cpu_is_omap34xx()) | 1081 | if (cpu_is_omap24xx() || cpu_is_omap34xx()) |
1082 | return 0; | 1082 | return 0; |
@@ -1086,9 +1086,28 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh) | |||
1086 | * downstream hwmods for debugging purposes? | 1086 | * downstream hwmods for debugging purposes? |
1087 | */ | 1087 | */ |
1088 | 1088 | ||
1089 | if (!oh) | 1089 | if (!clkdm || !oh || !arch_clkdm || !arch_clkdm->clkdm_clk_disable) |
1090 | return -EINVAL; | 1090 | return -EINVAL; |
1091 | 1091 | ||
1092 | return _clkdm_clk_hwmod_disable(clkdm); | 1092 | spin_lock_irqsave(&clkdm->lock, flags); |
1093 | |||
1094 | if (atomic_read(&clkdm->usecount) == 0) { | ||
1095 | spin_unlock_irqrestore(&clkdm->lock, flags); | ||
1096 | WARN_ON(1); /* underflow */ | ||
1097 | return -ERANGE; | ||
1098 | } | ||
1099 | |||
1100 | if (atomic_dec_return(&clkdm->usecount) > 0) { | ||
1101 | spin_unlock_irqrestore(&clkdm->lock, flags); | ||
1102 | return 0; | ||
1103 | } | ||
1104 | |||
1105 | arch_clkdm->clkdm_clk_disable(clkdm); | ||
1106 | pwrdm_state_switch(clkdm->pwrdm.ptr); | ||
1107 | spin_unlock_irqrestore(&clkdm->lock, flags); | ||
1108 | |||
1109 | pr_debug("clockdomain: %s: disabled\n", clkdm->name); | ||
1110 | |||
1111 | return 0; | ||
1093 | } | 1112 | } |
1094 | 1113 | ||