Diffstat (limited to 'drivers/clk/clk.c')
-rw-r--r--	drivers/clk/clk.c	208
1 file changed, 104 insertions(+), 104 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 42c15a8ba34d..3d57cf64c9e7 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1028,6 +1028,110 @@ unsigned long clk_get_rate(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_get_rate);
 
+static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+{
+	u8 i;
+
+	if (!clk->parents)
+		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+								GFP_KERNEL);
+
+	/*
+	 * find index of new parent clock using cached parent ptrs,
+	 * or if not yet cached, use string name comparison and cache
+	 * them now to avoid future calls to __clk_lookup.
+	 */
+	for (i = 0; i < clk->num_parents; i++) {
+		if (clk->parents && clk->parents[i] == parent)
+			break;
+		else if (!strcmp(clk->parent_names[i], parent->name)) {
+			if (clk->parents)
+				clk->parents[i] = __clk_lookup(parent->name);
+			break;
+		}
+	}
+
+	return i;
+}
+
+static void clk_reparent(struct clk *clk, struct clk *new_parent)
+{
+	hlist_del(&clk->child_node);
+
+	if (new_parent)
+		hlist_add_head(&clk->child_node, &new_parent->children);
+	else
+		hlist_add_head(&clk->child_node, &clk_orphan_list);
+
+	clk->parent = new_parent;
+}
+
+static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct clk *old_parent = clk->parent;
+
+	/*
+	 * Migrate prepare state between parents and prevent race with
+	 * clk_enable().
+	 *
+	 * If the clock is not prepared, then a race with
+	 * clk_enable/disable() is impossible since we already have the
+	 * prepare lock (future calls to clk_enable() need to be preceded by
+	 * a clk_prepare()).
+	 *
+	 * If the clock is prepared, migrate the prepared state to the new
+	 * parent and also protect against a race with clk_enable() by
+	 * forcing the clock and the new parent on. This ensures that all
+	 * future calls to clk_enable() are practically NOPs with respect to
+	 * hardware and software states.
+	 *
+	 * See also: Comment for clk_set_parent() below.
+	 */
+	if (clk->prepare_count) {
+		__clk_prepare(parent);
+		clk_enable(parent);
+		clk_enable(clk);
+	}
+
+	/* update the clk tree topology */
+	flags = clk_enable_lock();
+	clk_reparent(clk, parent);
+	clk_enable_unlock(flags);
+
+	/* change clock input source */
+	if (parent && clk->ops->set_parent)
+		ret = clk->ops->set_parent(clk->hw, p_index);
+
+	if (ret) {
+		flags = clk_enable_lock();
+		clk_reparent(clk, old_parent);
+		clk_enable_unlock(flags);
+
+		if (clk->prepare_count) {
+			clk_disable(clk);
+			clk_disable(parent);
+			__clk_unprepare(parent);
+		}
+		return ret;
+	}
+
+	/*
+	 * Finish the migration of prepare state and undo the changes done
+	 * for preventing a race with clk_enable().
+	 */
+	if (clk->prepare_count) {
+		clk_disable(clk);
+		clk_disable(old_parent);
+		__clk_unprepare(old_parent);
+	}
+
+	/* update debugfs with new clk tree topology */
+	clk_debug_reparent(clk, parent);
+	return 0;
+}
+
 /**
  * __clk_speculate_rates
  * @clk: first clk in the subtree
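
The block comment in __clk_set_parent() above carries the key invariant: if the mux is already prepared, the new parent is prepared and both the clock and the new parent are forced on around the switch, so a concurrent clk_enable() never observes a half-migrated state. From the consumer side this means a parent can be switched while the clock is in use. A minimal driver-side sketch, assuming hypothetical mux and parent handles obtained elsewhere (this example is not part of the patch):

#include <linux/clk.h>

/*
 * Illustrative only: with prepare_count > 0, __clk_set_parent() takes
 * the state-migration path described in the comment above, so the
 * switch is safe while the clock is running.
 */
static int example_switch_parent(struct clk *mux, struct clk *new_parent)
{
	int ret;

	ret = clk_prepare_enable(mux);
	if (ret)
		return ret;

	ret = clk_set_parent(mux, new_parent);
	if (ret)
		clk_disable_unprepare(mux);

	return ret;
}
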
@@ -1335,18 +1439,6 @@ out:
 	return ret;
 }
 
-static void clk_reparent(struct clk *clk, struct clk *new_parent)
-{
-	hlist_del(&clk->child_node);
-
-	if (new_parent)
-		hlist_add_head(&clk->child_node, &new_parent->children);
-	else
-		hlist_add_head(&clk->child_node, &clk_orphan_list);
-
-	clk->parent = new_parent;
-}
-
 void __clk_reparent(struct clk *clk, struct clk *new_parent)
 {
 	clk_reparent(clk, new_parent);
@@ -1354,98 +1446,6 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
 	__clk_recalc_rates(clk, POST_RATE_CHANGE);
 }
 
-static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
-{
-	u8 i;
-
-	if (!clk->parents)
-		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
-								GFP_KERNEL);
-
-	/*
-	 * find index of new parent clock using cached parent ptrs,
-	 * or if not yet cached, use string name comparison and cache
-	 * them now to avoid future calls to __clk_lookup.
-	 */
-	for (i = 0; i < clk->num_parents; i++) {
-		if (clk->parents && clk->parents[i] == parent)
-			break;
-		else if (!strcmp(clk->parent_names[i], parent->name)) {
-			if (clk->parents)
-				clk->parents[i] = __clk_lookup(parent->name);
-			break;
-		}
-	}
-
-	return i;
-}
-
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
-{
-	unsigned long flags;
-	int ret = 0;
-	struct clk *old_parent = clk->parent;
-
-	/*
-	 * Migrate prepare state between parents and prevent race with
-	 * clk_enable().
-	 *
-	 * If the clock is not prepared, then a race with
-	 * clk_enable/disable() is impossible since we already have the
-	 * prepare lock (future calls to clk_enable() need to be preceded by
-	 * a clk_prepare()).
-	 *
-	 * If the clock is prepared, migrate the prepared state to the new
-	 * parent and also protect against a race with clk_enable() by
-	 * forcing the clock and the new parent on. This ensures that all
-	 * future calls to clk_enable() are practically NOPs with respect to
-	 * hardware and software states.
-	 *
-	 * See also: Comment for clk_set_parent() below.
-	 */
-	if (clk->prepare_count) {
-		__clk_prepare(parent);
-		clk_enable(parent);
-		clk_enable(clk);
-	}
-
-	/* update the clk tree topology */
-	flags = clk_enable_lock();
-	clk_reparent(clk, parent);
-	clk_enable_unlock(flags);
-
-	/* change clock input source */
-	if (parent && clk->ops->set_parent)
-		ret = clk->ops->set_parent(clk->hw, p_index);
-
-	if (ret) {
-		flags = clk_enable_lock();
-		clk_reparent(clk, old_parent);
-		clk_enable_unlock(flags);
-
-		if (clk->prepare_count) {
-			clk_disable(clk);
-			clk_disable(parent);
-			__clk_unprepare(parent);
-		}
-		return ret;
-	}
-
-	/*
-	 * Finish the migration of prepare state and undo the changes done
-	 * for preventing a race with clk_enable().
-	 */
-	if (clk->prepare_count) {
-		clk_disable(clk);
-		clk_disable(old_parent);
-		__clk_unprepare(old_parent);
-	}
-
-	/* update debugfs with new clk tree topology */
-	clk_debug_reparent(clk, parent);
-	return 0;
-}
-
 /**
  * clk_set_parent - switch the parent of a mux clk
  * @clk: the mux clk whose input we are switching
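
For providers, the u8 p_index that __clk_set_parent() hands to the .set_parent op is simply the index returned by clk_fetch_parent_index(), i.e. the new parent's position in the clock's parent_names array (the loop in clk_fetch_parent_index() falls through with i == clk->num_parents when nothing matches, which lets the caller reject an invalid parent). A sketch of what a typical mux callback does with that index, using a made-up register layout and struct names (compare the real clk_mux_set_parent() in drivers/clk/clk-mux.c):

#include <linux/clk-provider.h>
#include <linux/io.h>

/* Hypothetical mux hardware: parent select field at bits [shift, shift+width) */
struct example_mux {
	struct clk_hw hw;
	void __iomem *reg;
	u8 shift;
	u32 mask;
};

static int example_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct example_mux *mux = container_of(hw, struct example_mux, hw);
	u32 val;

	val = readl(mux->reg);
	val &= ~(mux->mask << mux->shift);
	val |= (index & mux->mask) << mux->shift;
	writel(val, mux->reg);

	return 0;
}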