diff options
Diffstat (limited to 'drivers/clk/clk.c')
-rw-r--r-- | drivers/clk/clk.c | 100 |
1 file changed, 49 insertions, 51 deletions
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 1144e8c7579d..54a191c5bbf0 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -107,7 +107,7 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level) | |||
107 | seq_printf(s, "%*s%-*s %-11d %-12d %-10lu", | 107 | seq_printf(s, "%*s%-*s %-11d %-12d %-10lu", |
108 | level * 3 + 1, "", | 108 | level * 3 + 1, "", |
109 | 30 - level * 3, c->name, | 109 | 30 - level * 3, c->name, |
110 | c->enable_count, c->prepare_count, c->rate); | 110 | c->enable_count, c->prepare_count, clk_get_rate(c)); |
111 | seq_printf(s, "\n"); | 111 | seq_printf(s, "\n"); |
112 | } | 112 | } |
113 | 113 | ||
@@ -166,7 +166,7 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level) | |||
166 | seq_printf(s, "\"%s\": { ", c->name); | 166 | seq_printf(s, "\"%s\": { ", c->name); |
167 | seq_printf(s, "\"enable_count\": %d,", c->enable_count); | 167 | seq_printf(s, "\"enable_count\": %d,", c->enable_count); |
168 | seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); | 168 | seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); |
169 | seq_printf(s, "\"rate\": %lu", c->rate); | 169 | seq_printf(s, "\"rate\": %lu", clk_get_rate(c)); |
170 | } | 170 | } |
171 | 171 | ||
172 | static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) | 172 | static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) |
@@ -534,7 +534,7 @@ static int clk_disable_unused(void) | |||
534 | 534 | ||
535 | return 0; | 535 | return 0; |
536 | } | 536 | } |
537 | late_initcall(clk_disable_unused); | 537 | late_initcall_sync(clk_disable_unused); |
538 | 538 | ||
539 | /*** helper functions ***/ | 539 | /*** helper functions ***/ |
540 | 540 | ||
@@ -1216,7 +1216,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate) | |||
1216 | clk_prepare_lock(); | 1216 | clk_prepare_lock(); |
1217 | 1217 | ||
1218 | /* bail early if nothing to do */ | 1218 | /* bail early if nothing to do */ |
1219 | if (rate == clk->rate) | 1219 | if (rate == clk_get_rate(clk)) |
1220 | goto out; | 1220 | goto out; |
1221 | 1221 | ||
1222 | if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) { | 1222 | if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) { |
@@ -1377,23 +1377,33 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) | |||
1377 | unsigned long flags; | 1377 | unsigned long flags; |
1378 | int ret = 0; | 1378 | int ret = 0; |
1379 | struct clk *old_parent = clk->parent; | 1379 | struct clk *old_parent = clk->parent; |
1380 | bool migrated_enable = false; | ||
1381 | 1380 | ||
1382 | /* migrate prepare */ | 1381 | /* |
1383 | if (clk->prepare_count) | 1382 | * Migrate prepare state between parents and prevent race with |
1383 | * clk_enable(). | ||
1384 | * | ||
1385 | * If the clock is not prepared, then a race with | ||
1386 | * clk_enable/disable() is impossible since we already have the | ||
1387 | * prepare lock (future calls to clk_enable() need to be preceded by | ||
1388 | * a clk_prepare()). | ||
1389 | * | ||
1390 | * If the clock is prepared, migrate the prepared state to the new | ||
1391 | * parent and also protect against a race with clk_enable() by | ||
1392 | * forcing the clock and the new parent on. This ensures that all | ||
1393 | * future calls to clk_enable() are practically NOPs with respect to | ||
1394 | * hardware and software states. | ||
1395 | * | ||
1396 | * See also: Comment for clk_set_parent() below. | ||
1397 | */ | ||
1398 | if (clk->prepare_count) { | ||
1384 | __clk_prepare(parent); | 1399 | __clk_prepare(parent); |
1385 | 1400 | clk_enable(parent); | |
1386 | flags = clk_enable_lock(); | 1401 | clk_enable(clk); |
1387 | |||
1388 | /* migrate enable */ | ||
1389 | if (clk->enable_count) { | ||
1390 | __clk_enable(parent); | ||
1391 | migrated_enable = true; | ||
1392 | } | 1402 | } |
1393 | 1403 | ||
1394 | /* update the clk tree topology */ | 1404 | /* update the clk tree topology */ |
1405 | flags = clk_enable_lock(); | ||
1395 | clk_reparent(clk, parent); | 1406 | clk_reparent(clk, parent); |
1396 | |||
1397 | clk_enable_unlock(flags); | 1407 | clk_enable_unlock(flags); |
1398 | 1408 | ||
1399 | /* change clock input source */ | 1409 | /* change clock input source */ |
@@ -1401,43 +1411,27 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) | |||
1401 | ret = clk->ops->set_parent(clk->hw, p_index); | 1411 | ret = clk->ops->set_parent(clk->hw, p_index); |
1402 | 1412 | ||
1403 | if (ret) { | 1413 | if (ret) { |
1404 | /* | ||
1405 | * The error handling is tricky due to that we need to release | ||
1406 | * the spinlock while issuing the .set_parent callback. This | ||
1407 | * means the new parent might have been enabled/disabled in | ||
1408 | * between, which must be considered when doing rollback. | ||
1409 | */ | ||
1410 | flags = clk_enable_lock(); | 1414 | flags = clk_enable_lock(); |
1411 | |||
1412 | clk_reparent(clk, old_parent); | 1415 | clk_reparent(clk, old_parent); |
1413 | |||
1414 | if (migrated_enable && clk->enable_count) { | ||
1415 | __clk_disable(parent); | ||
1416 | } else if (migrated_enable && (clk->enable_count == 0)) { | ||
1417 | __clk_disable(old_parent); | ||
1418 | } else if (!migrated_enable && clk->enable_count) { | ||
1419 | __clk_disable(parent); | ||
1420 | __clk_enable(old_parent); | ||
1421 | } | ||
1422 | |||
1423 | clk_enable_unlock(flags); | 1416 | clk_enable_unlock(flags); |
1424 | 1417 | ||
1425 | if (clk->prepare_count) | 1418 | if (clk->prepare_count) { |
1419 | clk_disable(clk); | ||
1420 | clk_disable(parent); | ||
1426 | __clk_unprepare(parent); | 1421 | __clk_unprepare(parent); |
1427 | 1422 | } | |
1428 | return ret; | 1423 | return ret; |
1429 | } | 1424 | } |
1430 | 1425 | ||
1431 | /* clean up enable for old parent if migration was done */ | 1426 | /* |
1432 | if (migrated_enable) { | 1427 | * Finish the migration of prepare state and undo the changes done |
1433 | flags = clk_enable_lock(); | 1428 | * for preventing a race with clk_enable(). |
1434 | __clk_disable(old_parent); | 1429 | */ |
1435 | clk_enable_unlock(flags); | 1430 | if (clk->prepare_count) { |
1436 | } | 1431 | clk_disable(clk); |
1437 | 1432 | clk_disable(old_parent); | |
1438 | /* clean up prepare for old parent if migration was done */ | ||
1439 | if (clk->prepare_count) | ||
1440 | __clk_unprepare(old_parent); | 1433 | __clk_unprepare(old_parent); |
1434 | } | ||
1441 | 1435 | ||
1442 | /* update debugfs with new clk tree topology */ | 1436 | /* update debugfs with new clk tree topology */ |
1443 | clk_debug_reparent(clk, parent); | 1437 | clk_debug_reparent(clk, parent); |
@@ -1449,12 +1443,17 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) | |||
1449 | * @clk: the mux clk whose input we are switching | 1443 | * @clk: the mux clk whose input we are switching |
1450 | * @parent: the new input to clk | 1444 | * @parent: the new input to clk |
1451 | * | 1445 | * |
1452 | * Re-parent clk to use parent as it's new input source. If clk has the | 1446 | * Re-parent clk to use parent as its new input source. If clk is in |
1453 | * CLK_SET_PARENT_GATE flag set then clk must be gated for this | 1447 | * prepared state, the clk will get enabled for the duration of this call. If |
1454 | * operation to succeed. After successfully changing clk's parent | 1448 | * that's not acceptable for a specific clk (Eg: the consumer can't handle |
1455 | * clk_set_parent will update the clk topology, sysfs topology and | 1449 | * that, the reparenting is glitchy in hardware, etc), use the |
1456 | * propagate rate recalculation via __clk_recalc_rates. Returns 0 on | 1450 | * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. |
1457 | * success, -EERROR otherwise. | 1451 | * |
1452 | * After successfully changing clk's parent clk_set_parent will update the | ||
1453 | * clk topology, sysfs topology and propagate rate recalculation via | ||
1454 | * __clk_recalc_rates. | ||
1455 | * | ||
1456 | * Returns 0 on success, -EERROR otherwise. | ||
1458 | */ | 1457 | */ |
1459 | int clk_set_parent(struct clk *clk, struct clk *parent) | 1458 | int clk_set_parent(struct clk *clk, struct clk *parent) |
1460 | { | 1459 | { |
@@ -1494,8 +1493,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent) | |||
1494 | } | 1493 | } |
1495 | 1494 | ||
1496 | /* propagate PRE_RATE_CHANGE notifications */ | 1495 | /* propagate PRE_RATE_CHANGE notifications */ |
1497 | if (clk->notifier_count) | 1496 | ret = __clk_speculate_rates(clk, p_rate); |
1498 | ret = __clk_speculate_rates(clk, p_rate); | ||
1499 | 1497 | ||
1500 | /* abort if a driver objects */ | 1498 | /* abort if a driver objects */ |
1501 | if (ret & NOTIFY_STOP_MASK) | 1499 | if (ret & NOTIFY_STOP_MASK) |