diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-09 18:49:04 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-09 18:49:04 -0400 |
commit | bef4a0ab984662d4ccd68d431a7c4ef3daebcb43 (patch) | |
tree | 3f1a2797dbf2fde9235c47e023be929e32fa9265 /drivers/clk/clk.c | |
parent | 7eb69529cbaf4229baf5559a400a7a46352c6e52 (diff) | |
parent | 12d298865ec5d0f14dd570c3506c270880769ed7 (diff) |
Merge tag 'clk-for-linus-3.12' of git://git.linaro.org/people/mturquette/linux
Pull clock framework changes from Michael Turquette:
"The common clk framework changes for 3.12 are dominated by clock
driver patches, both new drivers and fixes to existing. A high
percentage of these are for Samsung platforms like Exynos. Core
framework fixes and some new features like automagical clock
re-parenting round out the patches"
* tag 'clk-for-linus-3.12' of git://git.linaro.org/people/mturquette/linux: (102 commits)
clk: only call get_parent if there is one
clk: samsung: exynos5250: Simplify registration of PLL rate tables
clk: samsung: exynos4: Register PLL rate tables for Exynos4x12
clk: samsung: exynos4: Register PLL rate tables for Exynos4210
clk: samsung: exynos4: Reorder registration of mout_vpllsrc
clk: samsung: pll: Add support for rate configuration of PLL46xx
clk: samsung: pll: Use new registration method for PLL46xx
clk: samsung: pll: Add support for rate configuration of PLL45xx
clk: samsung: pll: Use new registration method for PLL45xx
clk: samsung: exynos4: Rename exynos4_plls to exynos4x12_plls
clk: samsung: exynos4: Remove checks for DT node
clk: samsung: exynos4: Remove unused static clkdev aliases
clk: samsung: Modify _get_rate() helper to use __clk_lookup()
clk: samsung: exynos4: Use separate aliases for cpufreq related clocks
clocksource: samsung_pwm_timer: Get clock from device tree
ARM: dts: exynos4: Specify PWM clocks in PWM node
pwm: samsung: Update DT bindings documentation to cover clocks
clk: Move symbol export to proper location
clk: fix new_parent dereference before null check
clk: wm831x: Initialise wm831x pointer on init
...
Diffstat (limited to 'drivers/clk/clk.c')
-rw-r--r-- | drivers/clk/clk.c | 450 |
1 file changed, 282 insertions(+), 168 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 54a191c5bbf0..a004769528e6 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -458,7 +458,6 @@ static void clk_unprepare_unused_subtree(struct clk *clk) | |||
458 | clk->ops->unprepare(clk->hw); | 458 | clk->ops->unprepare(clk->hw); |
459 | } | 459 | } |
460 | } | 460 | } |
461 | EXPORT_SYMBOL_GPL(__clk_get_flags); | ||
462 | 461 | ||
463 | /* caller must hold prepare_lock */ | 462 | /* caller must hold prepare_lock */ |
464 | static void clk_disable_unused_subtree(struct clk *clk) | 463 | static void clk_disable_unused_subtree(struct clk *clk) |
@@ -559,6 +558,19 @@ struct clk *__clk_get_parent(struct clk *clk) | |||
559 | return !clk ? NULL : clk->parent; | 558 | return !clk ? NULL : clk->parent; |
560 | } | 559 | } |
561 | 560 | ||
561 | struct clk *clk_get_parent_by_index(struct clk *clk, u8 index) | ||
562 | { | ||
563 | if (!clk || index >= clk->num_parents) | ||
564 | return NULL; | ||
565 | else if (!clk->parents) | ||
566 | return __clk_lookup(clk->parent_names[index]); | ||
567 | else if (!clk->parents[index]) | ||
568 | return clk->parents[index] = | ||
569 | __clk_lookup(clk->parent_names[index]); | ||
570 | else | ||
571 | return clk->parents[index]; | ||
572 | } | ||
573 | |||
562 | unsigned int __clk_get_enable_count(struct clk *clk) | 574 | unsigned int __clk_get_enable_count(struct clk *clk) |
563 | { | 575 | { |
564 | return !clk ? 0 : clk->enable_count; | 576 | return !clk ? 0 : clk->enable_count; |
@@ -594,6 +606,7 @@ unsigned long __clk_get_flags(struct clk *clk) | |||
594 | { | 606 | { |
595 | return !clk ? 0 : clk->flags; | 607 | return !clk ? 0 : clk->flags; |
596 | } | 608 | } |
609 | EXPORT_SYMBOL_GPL(__clk_get_flags); | ||
597 | 610 | ||
598 | bool __clk_is_prepared(struct clk *clk) | 611 | bool __clk_is_prepared(struct clk *clk) |
599 | { | 612 | { |
@@ -679,6 +692,55 @@ struct clk *__clk_lookup(const char *name) | |||
679 | return NULL; | 692 | return NULL; |
680 | } | 693 | } |
681 | 694 | ||
695 | /* | ||
696 | * Helper for finding best parent to provide a given frequency. This can be used | ||
697 | * directly as a determine_rate callback (e.g. for a mux), or from a more | ||
698 | * complex clock that may combine a mux with other operations. | ||
699 | */ | ||
700 | long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, | ||
701 | unsigned long *best_parent_rate, | ||
702 | struct clk **best_parent_p) | ||
703 | { | ||
704 | struct clk *clk = hw->clk, *parent, *best_parent = NULL; | ||
705 | int i, num_parents; | ||
706 | unsigned long parent_rate, best = 0; | ||
707 | |||
708 | /* if NO_REPARENT flag set, pass through to current parent */ | ||
709 | if (clk->flags & CLK_SET_RATE_NO_REPARENT) { | ||
710 | parent = clk->parent; | ||
711 | if (clk->flags & CLK_SET_RATE_PARENT) | ||
712 | best = __clk_round_rate(parent, rate); | ||
713 | else if (parent) | ||
714 | best = __clk_get_rate(parent); | ||
715 | else | ||
716 | best = __clk_get_rate(clk); | ||
717 | goto out; | ||
718 | } | ||
719 | |||
720 | /* find the parent that can provide the fastest rate <= rate */ | ||
721 | num_parents = clk->num_parents; | ||
722 | for (i = 0; i < num_parents; i++) { | ||
723 | parent = clk_get_parent_by_index(clk, i); | ||
724 | if (!parent) | ||
725 | continue; | ||
726 | if (clk->flags & CLK_SET_RATE_PARENT) | ||
727 | parent_rate = __clk_round_rate(parent, rate); | ||
728 | else | ||
729 | parent_rate = __clk_get_rate(parent); | ||
730 | if (parent_rate <= rate && parent_rate > best) { | ||
731 | best_parent = parent; | ||
732 | best = parent_rate; | ||
733 | } | ||
734 | } | ||
735 | |||
736 | out: | ||
737 | if (best_parent) | ||
738 | *best_parent_p = best_parent; | ||
739 | *best_parent_rate = best; | ||
740 | |||
741 | return best; | ||
742 | } | ||
743 | |||
682 | /*** clk api ***/ | 744 | /*** clk api ***/ |
683 | 745 | ||
684 | void __clk_unprepare(struct clk *clk) | 746 | void __clk_unprepare(struct clk *clk) |
@@ -702,7 +764,7 @@ void __clk_unprepare(struct clk *clk) | |||
702 | 764 | ||
703 | /** | 765 | /** |
704 | * clk_unprepare - undo preparation of a clock source | 766 | * clk_unprepare - undo preparation of a clock source |
705 | * @clk: the clk being unprepare | 767 | * @clk: the clk being unprepared |
706 | * | 768 | * |
707 | * clk_unprepare may sleep, which differentiates it from clk_disable. In a | 769 | * clk_unprepare may sleep, which differentiates it from clk_disable. In a |
708 | * simple case, clk_unprepare can be used instead of clk_disable to gate a clk | 770 | * simple case, clk_unprepare can be used instead of clk_disable to gate a clk |
@@ -869,27 +931,31 @@ EXPORT_SYMBOL_GPL(clk_enable); | |||
869 | /** | 931 | /** |
870 | * __clk_round_rate - round the given rate for a clk | 932 | * __clk_round_rate - round the given rate for a clk |
871 | * @clk: round the rate of this clock | 933 | * @clk: round the rate of this clock |
934 | * @rate: the rate which is to be rounded | ||
872 | * | 935 | * |
873 | * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate | 936 | * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate |
874 | */ | 937 | */ |
875 | unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) | 938 | unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) |
876 | { | 939 | { |
877 | unsigned long parent_rate = 0; | 940 | unsigned long parent_rate = 0; |
941 | struct clk *parent; | ||
878 | 942 | ||
879 | if (!clk) | 943 | if (!clk) |
880 | return 0; | 944 | return 0; |
881 | 945 | ||
882 | if (!clk->ops->round_rate) { | 946 | parent = clk->parent; |
883 | if (clk->flags & CLK_SET_RATE_PARENT) | 947 | if (parent) |
884 | return __clk_round_rate(clk->parent, rate); | 948 | parent_rate = parent->rate; |
885 | else | 949 | |
886 | return clk->rate; | 950 | if (clk->ops->determine_rate) |
887 | } | 951 | return clk->ops->determine_rate(clk->hw, rate, &parent_rate, |
888 | 952 | &parent); | |
889 | if (clk->parent) | 953 | else if (clk->ops->round_rate) |
890 | parent_rate = clk->parent->rate; | 954 | return clk->ops->round_rate(clk->hw, rate, &parent_rate); |
891 | 955 | else if (clk->flags & CLK_SET_RATE_PARENT) | |
892 | return clk->ops->round_rate(clk->hw, rate, &parent_rate); | 956 | return __clk_round_rate(clk->parent, rate); |
957 | else | ||
958 | return clk->rate; | ||
893 | } | 959 | } |
894 | 960 | ||
895 | /** | 961 | /** |
@@ -956,7 +1022,7 @@ static int __clk_notify(struct clk *clk, unsigned long msg, | |||
956 | * | 1022 | * |
957 | * Walks the subtree of clks starting with clk and recalculates rates as it | 1023 | * Walks the subtree of clks starting with clk and recalculates rates as it |
958 | * goes. Note that if a clk does not implement the .recalc_rate callback then | 1024 | * goes. Note that if a clk does not implement the .recalc_rate callback then |
959 | * it is assumed that the clock will take on the rate of it's parent. | 1025 | * it is assumed that the clock will take on the rate of its parent. |
960 | * | 1026 | * |
961 | * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, | 1027 | * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, |
962 | * if necessary. | 1028 | * if necessary. |
@@ -1014,6 +1080,115 @@ unsigned long clk_get_rate(struct clk *clk) | |||
1014 | } | 1080 | } |
1015 | EXPORT_SYMBOL_GPL(clk_get_rate); | 1081 | EXPORT_SYMBOL_GPL(clk_get_rate); |
1016 | 1082 | ||
1083 | static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent) | ||
1084 | { | ||
1085 | u8 i; | ||
1086 | |||
1087 | if (!clk->parents) | ||
1088 | clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents), | ||
1089 | GFP_KERNEL); | ||
1090 | |||
1091 | /* | ||
1092 | * find index of new parent clock using cached parent ptrs, | ||
1093 | * or if not yet cached, use string name comparison and cache | ||
1094 | * them now to avoid future calls to __clk_lookup. | ||
1095 | */ | ||
1096 | for (i = 0; i < clk->num_parents; i++) { | ||
1097 | if (clk->parents && clk->parents[i] == parent) | ||
1098 | break; | ||
1099 | else if (!strcmp(clk->parent_names[i], parent->name)) { | ||
1100 | if (clk->parents) | ||
1101 | clk->parents[i] = __clk_lookup(parent->name); | ||
1102 | break; | ||
1103 | } | ||
1104 | } | ||
1105 | |||
1106 | return i; | ||
1107 | } | ||
1108 | |||
1109 | static void clk_reparent(struct clk *clk, struct clk *new_parent) | ||
1110 | { | ||
1111 | hlist_del(&clk->child_node); | ||
1112 | |||
1113 | if (new_parent) { | ||
1114 | /* avoid duplicate POST_RATE_CHANGE notifications */ | ||
1115 | if (new_parent->new_child == clk) | ||
1116 | new_parent->new_child = NULL; | ||
1117 | |||
1118 | hlist_add_head(&clk->child_node, &new_parent->children); | ||
1119 | } else { | ||
1120 | hlist_add_head(&clk->child_node, &clk_orphan_list); | ||
1121 | } | ||
1122 | |||
1123 | clk->parent = new_parent; | ||
1124 | } | ||
1125 | |||
1126 | static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) | ||
1127 | { | ||
1128 | unsigned long flags; | ||
1129 | int ret = 0; | ||
1130 | struct clk *old_parent = clk->parent; | ||
1131 | |||
1132 | /* | ||
1133 | * Migrate prepare state between parents and prevent race with | ||
1134 | * clk_enable(). | ||
1135 | * | ||
1136 | * If the clock is not prepared, then a race with | ||
1137 | * clk_enable/disable() is impossible since we already have the | ||
1138 | * prepare lock (future calls to clk_enable() need to be preceded by | ||
1139 | * a clk_prepare()). | ||
1140 | * | ||
1141 | * If the clock is prepared, migrate the prepared state to the new | ||
1142 | * parent and also protect against a race with clk_enable() by | ||
1143 | * forcing the clock and the new parent on. This ensures that all | ||
1144 | * future calls to clk_enable() are practically NOPs with respect to | ||
1145 | * hardware and software states. | ||
1146 | * | ||
1147 | * See also: Comment for clk_set_parent() below. | ||
1148 | */ | ||
1149 | if (clk->prepare_count) { | ||
1150 | __clk_prepare(parent); | ||
1151 | clk_enable(parent); | ||
1152 | clk_enable(clk); | ||
1153 | } | ||
1154 | |||
1155 | /* update the clk tree topology */ | ||
1156 | flags = clk_enable_lock(); | ||
1157 | clk_reparent(clk, parent); | ||
1158 | clk_enable_unlock(flags); | ||
1159 | |||
1160 | /* change clock input source */ | ||
1161 | if (parent && clk->ops->set_parent) | ||
1162 | ret = clk->ops->set_parent(clk->hw, p_index); | ||
1163 | |||
1164 | if (ret) { | ||
1165 | flags = clk_enable_lock(); | ||
1166 | clk_reparent(clk, old_parent); | ||
1167 | clk_enable_unlock(flags); | ||
1168 | |||
1169 | if (clk->prepare_count) { | ||
1170 | clk_disable(clk); | ||
1171 | clk_disable(parent); | ||
1172 | __clk_unprepare(parent); | ||
1173 | } | ||
1174 | return ret; | ||
1175 | } | ||
1176 | |||
1177 | /* | ||
1178 | * Finish the migration of prepare state and undo the changes done | ||
1179 | * for preventing a race with clk_enable(). | ||
1180 | */ | ||
1181 | if (clk->prepare_count) { | ||
1182 | clk_disable(clk); | ||
1183 | clk_disable(old_parent); | ||
1184 | __clk_unprepare(old_parent); | ||
1185 | } | ||
1186 | |||
1187 | /* update debugfs with new clk tree topology */ | ||
1188 | clk_debug_reparent(clk, parent); | ||
1189 | return 0; | ||
1190 | } | ||
1191 | |||
1017 | /** | 1192 | /** |
1018 | * __clk_speculate_rates | 1193 | * __clk_speculate_rates |
1019 | * @clk: first clk in the subtree | 1194 | * @clk: first clk in the subtree |
@@ -1026,7 +1201,7 @@ EXPORT_SYMBOL_GPL(clk_get_rate); | |||
1026 | * pre-rate change notifications and returns early if no clks in the | 1201 | * pre-rate change notifications and returns early if no clks in the |
1027 | * subtree have subscribed to the notifications. Note that if a clk does not | 1202 | * subtree have subscribed to the notifications. Note that if a clk does not |
1028 | * implement the .recalc_rate callback then it is assumed that the clock will | 1203 | * implement the .recalc_rate callback then it is assumed that the clock will |
1029 | * take on the rate of it's parent. | 1204 | * take on the rate of its parent. |
1030 | * | 1205 | * |
1031 | * Caller must hold prepare_lock. | 1206 | * Caller must hold prepare_lock. |
1032 | */ | 1207 | */ |
@@ -1058,18 +1233,25 @@ out: | |||
1058 | return ret; | 1233 | return ret; |
1059 | } | 1234 | } |
1060 | 1235 | ||
1061 | static void clk_calc_subtree(struct clk *clk, unsigned long new_rate) | 1236 | static void clk_calc_subtree(struct clk *clk, unsigned long new_rate, |
1237 | struct clk *new_parent, u8 p_index) | ||
1062 | { | 1238 | { |
1063 | struct clk *child; | 1239 | struct clk *child; |
1064 | 1240 | ||
1065 | clk->new_rate = new_rate; | 1241 | clk->new_rate = new_rate; |
1242 | clk->new_parent = new_parent; | ||
1243 | clk->new_parent_index = p_index; | ||
1244 | /* include clk in new parent's PRE_RATE_CHANGE notifications */ | ||
1245 | clk->new_child = NULL; | ||
1246 | if (new_parent && new_parent != clk->parent) | ||
1247 | new_parent->new_child = clk; | ||
1066 | 1248 | ||
1067 | hlist_for_each_entry(child, &clk->children, child_node) { | 1249 | hlist_for_each_entry(child, &clk->children, child_node) { |
1068 | if (child->ops->recalc_rate) | 1250 | if (child->ops->recalc_rate) |
1069 | child->new_rate = child->ops->recalc_rate(child->hw, new_rate); | 1251 | child->new_rate = child->ops->recalc_rate(child->hw, new_rate); |
1070 | else | 1252 | else |
1071 | child->new_rate = new_rate; | 1253 | child->new_rate = new_rate; |
1072 | clk_calc_subtree(child, child->new_rate); | 1254 | clk_calc_subtree(child, child->new_rate, NULL, 0); |
1073 | } | 1255 | } |
1074 | } | 1256 | } |
1075 | 1257 | ||
@@ -1080,50 +1262,63 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate) | |||
1080 | static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate) | 1262 | static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate) |
1081 | { | 1263 | { |
1082 | struct clk *top = clk; | 1264 | struct clk *top = clk; |
1265 | struct clk *old_parent, *parent; | ||
1083 | unsigned long best_parent_rate = 0; | 1266 | unsigned long best_parent_rate = 0; |
1084 | unsigned long new_rate; | 1267 | unsigned long new_rate; |
1268 | u8 p_index = 0; | ||
1085 | 1269 | ||
1086 | /* sanity */ | 1270 | /* sanity */ |
1087 | if (IS_ERR_OR_NULL(clk)) | 1271 | if (IS_ERR_OR_NULL(clk)) |
1088 | return NULL; | 1272 | return NULL; |
1089 | 1273 | ||
1090 | /* save parent rate, if it exists */ | 1274 | /* save parent rate, if it exists */ |
1091 | if (clk->parent) | 1275 | parent = old_parent = clk->parent; |
1092 | best_parent_rate = clk->parent->rate; | 1276 | if (parent) |
1093 | 1277 | best_parent_rate = parent->rate; | |
1094 | /* never propagate up to the parent */ | 1278 | |
1095 | if (!(clk->flags & CLK_SET_RATE_PARENT)) { | 1279 | /* find the closest rate and parent clk/rate */ |
1096 | if (!clk->ops->round_rate) { | 1280 | if (clk->ops->determine_rate) { |
1097 | clk->new_rate = clk->rate; | 1281 | new_rate = clk->ops->determine_rate(clk->hw, rate, |
1098 | return NULL; | 1282 | &best_parent_rate, |
1099 | } | 1283 | &parent); |
1100 | new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate); | 1284 | } else if (clk->ops->round_rate) { |
1285 | new_rate = clk->ops->round_rate(clk->hw, rate, | ||
1286 | &best_parent_rate); | ||
1287 | } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) { | ||
1288 | /* pass-through clock without adjustable parent */ | ||
1289 | clk->new_rate = clk->rate; | ||
1290 | return NULL; | ||
1291 | } else { | ||
1292 | /* pass-through clock with adjustable parent */ | ||
1293 | top = clk_calc_new_rates(parent, rate); | ||
1294 | new_rate = parent->new_rate; | ||
1101 | goto out; | 1295 | goto out; |
1102 | } | 1296 | } |
1103 | 1297 | ||
1104 | /* need clk->parent from here on out */ | 1298 | /* some clocks must be gated to change parent */ |
1105 | if (!clk->parent) { | 1299 | if (parent != old_parent && |
1106 | pr_debug("%s: %s has NULL parent\n", __func__, clk->name); | 1300 | (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) { |
1301 | pr_debug("%s: %s not gated but wants to reparent\n", | ||
1302 | __func__, clk->name); | ||
1107 | return NULL; | 1303 | return NULL; |
1108 | } | 1304 | } |
1109 | 1305 | ||
1110 | if (!clk->ops->round_rate) { | 1306 | /* try finding the new parent index */ |
1111 | top = clk_calc_new_rates(clk->parent, rate); | 1307 | if (parent) { |
1112 | new_rate = clk->parent->new_rate; | 1308 | p_index = clk_fetch_parent_index(clk, parent); |
1113 | 1309 | if (p_index == clk->num_parents) { | |
1114 | goto out; | 1310 | pr_debug("%s: clk %s can not be parent of clk %s\n", |
1311 | __func__, parent->name, clk->name); | ||
1312 | return NULL; | ||
1313 | } | ||
1115 | } | 1314 | } |
1116 | 1315 | ||
1117 | new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate); | 1316 | if ((clk->flags & CLK_SET_RATE_PARENT) && parent && |
1118 | 1317 | best_parent_rate != parent->rate) | |
1119 | if (best_parent_rate != clk->parent->rate) { | 1318 | top = clk_calc_new_rates(parent, best_parent_rate); |
1120 | top = clk_calc_new_rates(clk->parent, best_parent_rate); | ||
1121 | |||
1122 | goto out; | ||
1123 | } | ||
1124 | 1319 | ||
1125 | out: | 1320 | out: |
1126 | clk_calc_subtree(clk, new_rate); | 1321 | clk_calc_subtree(clk, new_rate, parent, p_index); |
1127 | 1322 | ||
1128 | return top; | 1323 | return top; |
1129 | } | 1324 | } |
@@ -1135,7 +1330,7 @@ out: | |||
1135 | */ | 1330 | */ |
1136 | static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event) | 1331 | static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event) |
1137 | { | 1332 | { |
1138 | struct clk *child, *fail_clk = NULL; | 1333 | struct clk *child, *tmp_clk, *fail_clk = NULL; |
1139 | int ret = NOTIFY_DONE; | 1334 | int ret = NOTIFY_DONE; |
1140 | 1335 | ||
1141 | if (clk->rate == clk->new_rate) | 1336 | if (clk->rate == clk->new_rate) |
@@ -1148,9 +1343,19 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even | |||
1148 | } | 1343 | } |
1149 | 1344 | ||
1150 | hlist_for_each_entry(child, &clk->children, child_node) { | 1345 | hlist_for_each_entry(child, &clk->children, child_node) { |
1151 | clk = clk_propagate_rate_change(child, event); | 1346 | /* Skip children who will be reparented to another clock */ |
1152 | if (clk) | 1347 | if (child->new_parent && child->new_parent != clk) |
1153 | fail_clk = clk; | 1348 | continue; |
1349 | tmp_clk = clk_propagate_rate_change(child, event); | ||
1350 | if (tmp_clk) | ||
1351 | fail_clk = tmp_clk; | ||
1352 | } | ||
1353 | |||
1354 | /* handle the new child who might not be in clk->children yet */ | ||
1355 | if (clk->new_child) { | ||
1356 | tmp_clk = clk_propagate_rate_change(clk->new_child, event); | ||
1357 | if (tmp_clk) | ||
1358 | fail_clk = tmp_clk; | ||
1154 | } | 1359 | } |
1155 | 1360 | ||
1156 | return fail_clk; | 1361 | return fail_clk; |
@@ -1168,6 +1373,10 @@ static void clk_change_rate(struct clk *clk) | |||
1168 | 1373 | ||
1169 | old_rate = clk->rate; | 1374 | old_rate = clk->rate; |
1170 | 1375 | ||
1376 | /* set parent */ | ||
1377 | if (clk->new_parent && clk->new_parent != clk->parent) | ||
1378 | __clk_set_parent(clk, clk->new_parent, clk->new_parent_index); | ||
1379 | |||
1171 | if (clk->parent) | 1380 | if (clk->parent) |
1172 | best_parent_rate = clk->parent->rate; | 1381 | best_parent_rate = clk->parent->rate; |
1173 | 1382 | ||
@@ -1182,8 +1391,16 @@ static void clk_change_rate(struct clk *clk) | |||
1182 | if (clk->notifier_count && old_rate != clk->rate) | 1391 | if (clk->notifier_count && old_rate != clk->rate) |
1183 | __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); | 1392 | __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); |
1184 | 1393 | ||
1185 | hlist_for_each_entry(child, &clk->children, child_node) | 1394 | hlist_for_each_entry(child, &clk->children, child_node) { |
1395 | /* Skip children who will be reparented to another clock */ | ||
1396 | if (child->new_parent && child->new_parent != clk) | ||
1397 | continue; | ||
1186 | clk_change_rate(child); | 1398 | clk_change_rate(child); |
1399 | } | ||
1400 | |||
1401 | /* handle the new child who might not be in clk->children yet */ | ||
1402 | if (clk->new_child) | ||
1403 | clk_change_rate(clk->new_child); | ||
1187 | } | 1404 | } |
1188 | 1405 | ||
1189 | /** | 1406 | /** |
@@ -1198,7 +1415,7 @@ static void clk_change_rate(struct clk *clk) | |||
1198 | * outcome of clk's .round_rate implementation. If *parent_rate is unchanged | 1415 | * outcome of clk's .round_rate implementation. If *parent_rate is unchanged |
1199 | * after calling .round_rate then upstream parent propagation is ignored. If | 1416 | * after calling .round_rate then upstream parent propagation is ignored. If |
1200 | * *parent_rate comes back with a new rate for clk's parent then we propagate | 1417 | * *parent_rate comes back with a new rate for clk's parent then we propagate |
1201 | * up to clk's parent and set it's rate. Upward propagation will continue | 1418 | * up to clk's parent and set its rate. Upward propagation will continue |
1202 | * until either a clk does not support the CLK_SET_RATE_PARENT flag or | 1419 | * until either a clk does not support the CLK_SET_RATE_PARENT flag or |
1203 | * .round_rate stops requesting changes to clk's parent_rate. | 1420 | * .round_rate stops requesting changes to clk's parent_rate. |
1204 | * | 1421 | * |
@@ -1212,6 +1429,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate) | |||
1212 | struct clk *top, *fail_clk; | 1429 | struct clk *top, *fail_clk; |
1213 | int ret = 0; | 1430 | int ret = 0; |
1214 | 1431 | ||
1432 | if (!clk) | ||
1433 | return 0; | ||
1434 | |||
1215 | /* prevent racing with updates to the clock topology */ | 1435 | /* prevent racing with updates to the clock topology */ |
1216 | clk_prepare_lock(); | 1436 | clk_prepare_lock(); |
1217 | 1437 | ||
@@ -1315,30 +1535,12 @@ static struct clk *__clk_init_parent(struct clk *clk) | |||
1315 | kzalloc((sizeof(struct clk*) * clk->num_parents), | 1535 | kzalloc((sizeof(struct clk*) * clk->num_parents), |
1316 | GFP_KERNEL); | 1536 | GFP_KERNEL); |
1317 | 1537 | ||
1318 | if (!clk->parents) | 1538 | ret = clk_get_parent_by_index(clk, index); |
1319 | ret = __clk_lookup(clk->parent_names[index]); | ||
1320 | else if (!clk->parents[index]) | ||
1321 | ret = clk->parents[index] = | ||
1322 | __clk_lookup(clk->parent_names[index]); | ||
1323 | else | ||
1324 | ret = clk->parents[index]; | ||
1325 | 1539 | ||
1326 | out: | 1540 | out: |
1327 | return ret; | 1541 | return ret; |
1328 | } | 1542 | } |
1329 | 1543 | ||
1330 | static void clk_reparent(struct clk *clk, struct clk *new_parent) | ||
1331 | { | ||
1332 | hlist_del(&clk->child_node); | ||
1333 | |||
1334 | if (new_parent) | ||
1335 | hlist_add_head(&clk->child_node, &new_parent->children); | ||
1336 | else | ||
1337 | hlist_add_head(&clk->child_node, &clk_orphan_list); | ||
1338 | |||
1339 | clk->parent = new_parent; | ||
1340 | } | ||
1341 | |||
1342 | void __clk_reparent(struct clk *clk, struct clk *new_parent) | 1544 | void __clk_reparent(struct clk *clk, struct clk *new_parent) |
1343 | { | 1545 | { |
1344 | clk_reparent(clk, new_parent); | 1546 | clk_reparent(clk, new_parent); |
@@ -1346,98 +1548,6 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent) | |||
1346 | __clk_recalc_rates(clk, POST_RATE_CHANGE); | 1548 | __clk_recalc_rates(clk, POST_RATE_CHANGE); |
1347 | } | 1549 | } |
1348 | 1550 | ||
1349 | static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent) | ||
1350 | { | ||
1351 | u8 i; | ||
1352 | |||
1353 | if (!clk->parents) | ||
1354 | clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents), | ||
1355 | GFP_KERNEL); | ||
1356 | |||
1357 | /* | ||
1358 | * find index of new parent clock using cached parent ptrs, | ||
1359 | * or if not yet cached, use string name comparison and cache | ||
1360 | * them now to avoid future calls to __clk_lookup. | ||
1361 | */ | ||
1362 | for (i = 0; i < clk->num_parents; i++) { | ||
1363 | if (clk->parents && clk->parents[i] == parent) | ||
1364 | break; | ||
1365 | else if (!strcmp(clk->parent_names[i], parent->name)) { | ||
1366 | if (clk->parents) | ||
1367 | clk->parents[i] = __clk_lookup(parent->name); | ||
1368 | break; | ||
1369 | } | ||
1370 | } | ||
1371 | |||
1372 | return i; | ||
1373 | } | ||
1374 | |||
1375 | static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) | ||
1376 | { | ||
1377 | unsigned long flags; | ||
1378 | int ret = 0; | ||
1379 | struct clk *old_parent = clk->parent; | ||
1380 | |||
1381 | /* | ||
1382 | * Migrate prepare state between parents and prevent race with | ||
1383 | * clk_enable(). | ||
1384 | * | ||
1385 | * If the clock is not prepared, then a race with | ||
1386 | * clk_enable/disable() is impossible since we already have the | ||
1387 | * prepare lock (future calls to clk_enable() need to be preceded by | ||
1388 | * a clk_prepare()). | ||
1389 | * | ||
1390 | * If the clock is prepared, migrate the prepared state to the new | ||
1391 | * parent and also protect against a race with clk_enable() by | ||
1392 | * forcing the clock and the new parent on. This ensures that all | ||
1393 | * future calls to clk_enable() are practically NOPs with respect to | ||
1394 | * hardware and software states. | ||
1395 | * | ||
1396 | * See also: Comment for clk_set_parent() below. | ||
1397 | */ | ||
1398 | if (clk->prepare_count) { | ||
1399 | __clk_prepare(parent); | ||
1400 | clk_enable(parent); | ||
1401 | clk_enable(clk); | ||
1402 | } | ||
1403 | |||
1404 | /* update the clk tree topology */ | ||
1405 | flags = clk_enable_lock(); | ||
1406 | clk_reparent(clk, parent); | ||
1407 | clk_enable_unlock(flags); | ||
1408 | |||
1409 | /* change clock input source */ | ||
1410 | if (parent && clk->ops->set_parent) | ||
1411 | ret = clk->ops->set_parent(clk->hw, p_index); | ||
1412 | |||
1413 | if (ret) { | ||
1414 | flags = clk_enable_lock(); | ||
1415 | clk_reparent(clk, old_parent); | ||
1416 | clk_enable_unlock(flags); | ||
1417 | |||
1418 | if (clk->prepare_count) { | ||
1419 | clk_disable(clk); | ||
1420 | clk_disable(parent); | ||
1421 | __clk_unprepare(parent); | ||
1422 | } | ||
1423 | return ret; | ||
1424 | } | ||
1425 | |||
1426 | /* | ||
1427 | * Finish the migration of prepare state and undo the changes done | ||
1428 | * for preventing a race with clk_enable(). | ||
1429 | */ | ||
1430 | if (clk->prepare_count) { | ||
1431 | clk_disable(clk); | ||
1432 | clk_disable(old_parent); | ||
1433 | __clk_unprepare(old_parent); | ||
1434 | } | ||
1435 | |||
1436 | /* update debugfs with new clk tree topology */ | ||
1437 | clk_debug_reparent(clk, parent); | ||
1438 | return 0; | ||
1439 | } | ||
1440 | |||
1441 | /** | 1551 | /** |
1442 | * clk_set_parent - switch the parent of a mux clk | 1552 | * clk_set_parent - switch the parent of a mux clk |
1443 | * @clk: the mux clk whose input we are switching | 1553 | * @clk: the mux clk whose input we are switching |
@@ -1461,7 +1571,10 @@ int clk_set_parent(struct clk *clk, struct clk *parent) | |||
1461 | u8 p_index = 0; | 1571 | u8 p_index = 0; |
1462 | unsigned long p_rate = 0; | 1572 | unsigned long p_rate = 0; |
1463 | 1573 | ||
1464 | if (!clk || !clk->ops) | 1574 | if (!clk) |
1575 | return 0; | ||
1576 | |||
1577 | if (!clk->ops) | ||
1465 | return -EINVAL; | 1578 | return -EINVAL; |
1466 | 1579 | ||
1467 | /* verify ops for for multi-parent clks */ | 1580 | /* verify ops for for multi-parent clks */ |
@@ -1544,8 +1657,9 @@ int __clk_init(struct device *dev, struct clk *clk) | |||
1544 | 1657 | ||
1545 | /* check that clk_ops are sane. See Documentation/clk.txt */ | 1658 | /* check that clk_ops are sane. See Documentation/clk.txt */ |
1546 | if (clk->ops->set_rate && | 1659 | if (clk->ops->set_rate && |
1547 | !(clk->ops->round_rate && clk->ops->recalc_rate)) { | 1660 | !((clk->ops->round_rate || clk->ops->determine_rate) && |
1548 | pr_warning("%s: %s must implement .round_rate & .recalc_rate\n", | 1661 | clk->ops->recalc_rate)) { |
1662 | pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", | ||
1549 | __func__, clk->name); | 1663 | __func__, clk->name); |
1550 | ret = -EINVAL; | 1664 | ret = -EINVAL; |
1551 | goto out; | 1665 | goto out; |
@@ -1628,7 +1742,7 @@ int __clk_init(struct device *dev, struct clk *clk) | |||
1628 | * this clock | 1742 | * this clock |
1629 | */ | 1743 | */ |
1630 | hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { | 1744 | hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { |
1631 | if (orphan->ops->get_parent) { | 1745 | if (orphan->num_parents && orphan->ops->get_parent) { |
1632 | i = orphan->ops->get_parent(orphan->hw); | 1746 | i = orphan->ops->get_parent(orphan->hw); |
1633 | if (!strcmp(clk->name, orphan->parent_names[i])) | 1747 | if (!strcmp(clk->name, orphan->parent_names[i])) |
1634 | __clk_reparent(orphan, clk); | 1748 | __clk_reparent(orphan, clk); |
@@ -1648,7 +1762,7 @@ int __clk_init(struct device *dev, struct clk *clk) | |||
1648 | * The .init callback is not used by any of the basic clock types, but | 1762 | * The .init callback is not used by any of the basic clock types, but |
1649 | * exists for weird hardware that must perform initialization magic. | 1763 | * exists for weird hardware that must perform initialization magic. |
1650 | * Please consider other ways of solving initialization problems before | 1764 | * Please consider other ways of solving initialization problems before |
1651 | * using this callback, as it's use is discouraged. | 1765 | * using this callback, as its use is discouraged. |
1652 | */ | 1766 | */ |
1653 | if (clk->ops->init) | 1767 | if (clk->ops->init) |
1654 | clk->ops->init(clk->hw); | 1768 | clk->ops->init(clk->hw); |
@@ -1675,7 +1789,7 @@ out: | |||
1675 | * very large numbers of clocks that need to be statically initialized. It is | 1789 | * very large numbers of clocks that need to be statically initialized. It is |
1676 | * a layering violation to include clk-private.h from any code which implements | 1790 | * a layering violation to include clk-private.h from any code which implements |
1677 | * a clock's .ops; as such any statically initialized clock data MUST be in a | 1791 | * a clock's .ops; as such any statically initialized clock data MUST be in a |
1678 | * separate C file from the logic that implements it's operations. Returns 0 | 1792 | * separate C file from the logic that implements its operations. Returns 0 |
1679 | * on success, otherwise an error code. | 1793 | * on success, otherwise an error code. |
1680 | */ | 1794 | */ |
1681 | struct clk *__clk_register(struct device *dev, struct clk_hw *hw) | 1795 | struct clk *__clk_register(struct device *dev, struct clk_hw *hw) |
@@ -2115,13 +2229,13 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_name); | |||
2115 | */ | 2229 | */ |
2116 | void __init of_clk_init(const struct of_device_id *matches) | 2230 | void __init of_clk_init(const struct of_device_id *matches) |
2117 | { | 2231 | { |
2232 | const struct of_device_id *match; | ||
2118 | struct device_node *np; | 2233 | struct device_node *np; |
2119 | 2234 | ||
2120 | if (!matches) | 2235 | if (!matches) |
2121 | matches = __clk_of_table; | 2236 | matches = __clk_of_table; |
2122 | 2237 | ||
2123 | for_each_matching_node(np, matches) { | 2238 | for_each_matching_node_and_match(np, matches, &match) { |
2124 | const struct of_device_id *match = of_match_node(matches, np); | ||
2125 | of_clk_init_cb_t clk_init_cb = match->data; | 2239 | of_clk_init_cb_t clk_init_cb = match->data; |
2126 | clk_init_cb(np); | 2240 | clk_init_cb(np); |
2127 | } | 2241 | } |