Diffstat (limited to 'drivers/clk/clk.c')
 drivers/clk/clk.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------
 1 file changed, 44 insertions(+), 45 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 934cfd18f72d..399b0d8ac562 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1377,23 +1377,33 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
 	unsigned long flags;
 	int ret = 0;
 	struct clk *old_parent = clk->parent;
-	bool migrated_enable = false;
 
-	/* migrate prepare */
-	if (clk->prepare_count)
+	/*
+	 * Migrate prepare state between parents and prevent race with
+	 * clk_enable().
+	 *
+	 * If the clock is not prepared, then a race with
+	 * clk_enable/disable() is impossible since we already have the
+	 * prepare lock (future calls to clk_enable() need to be preceded by
+	 * a clk_prepare()).
+	 *
+	 * If the clock is prepared, migrate the prepared state to the new
+	 * parent and also protect against a race with clk_enable() by
+	 * forcing the clock and the new parent on. This ensures that all
+	 * future calls to clk_enable() are practically NOPs with respect to
+	 * hardware and software states.
+	 *
+	 * See also: Comment for clk_set_parent() below.
+	 */
+	if (clk->prepare_count) {
 		__clk_prepare(parent);
-
-	flags = clk_enable_lock();
-
-	/* migrate enable */
-	if (clk->enable_count) {
-		__clk_enable(parent);
-		migrated_enable = true;
+		clk_enable(parent);
+		clk_enable(clk);
 	}
 
 	/* update the clk tree topology */
+	flags = clk_enable_lock();
 	clk_reparent(clk, parent);
-
 	clk_enable_unlock(flags);
 
 	/* change clock input source */
@@ -1401,43 +1411,27 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
 		ret = clk->ops->set_parent(clk->hw, p_index);
 
 	if (ret) {
-		/*
-		 * The error handling is tricky due to that we need to release
-		 * the spinlock while issuing the .set_parent callback. This
-		 * means the new parent might have been enabled/disabled in
-		 * between, which must be considered when doing rollback.
-		 */
 		flags = clk_enable_lock();
-
 		clk_reparent(clk, old_parent);
-
-		if (migrated_enable && clk->enable_count) {
-			__clk_disable(parent);
-		} else if (migrated_enable && (clk->enable_count == 0)) {
-			__clk_disable(old_parent);
-		} else if (!migrated_enable && clk->enable_count) {
-			__clk_disable(parent);
-			__clk_enable(old_parent);
-		}
-
 		clk_enable_unlock(flags);
 
-		if (clk->prepare_count)
+		if (clk->prepare_count) {
+			clk_disable(clk);
+			clk_disable(parent);
 			__clk_unprepare(parent);
-
+		}
 		return ret;
 	}
 
-	/* clean up enable for old parent if migration was done */
-	if (migrated_enable) {
-		flags = clk_enable_lock();
-		__clk_disable(old_parent);
-		clk_enable_unlock(flags);
-	}
-
-	/* clean up prepare for old parent if migration was done */
-	if (clk->prepare_count)
+	/*
+	 * Finish the migration of prepare state and undo the changes done
+	 * for preventing a race with clk_enable().
+	 */
+	if (clk->prepare_count) {
+		clk_disable(clk);
+		clk_disable(old_parent);
 		__clk_unprepare(old_parent);
+	}
 
 	/* update debugfs with new clk tree topology */
 	clk_debug_reparent(clk, parent);
@@ -1449,12 +1443,17 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
  * @clk: the mux clk whose input we are switching
  * @parent: the new input to clk
  *
- * Re-parent clk to use parent as it's new input source. If clk has the
- * CLK_SET_PARENT_GATE flag set then clk must be gated for this
- * operation to succeed. After successfully changing clk's parent
- * clk_set_parent will update the clk topology, sysfs topology and
- * propagate rate recalculation via __clk_recalc_rates. Returns 0 on
- * success, -EERROR otherwise.
+ * Re-parent clk to use parent as its new input source. If clk is in
+ * prepared state, the clk will get enabled for the duration of this call. If
+ * that's not acceptable for a specific clk (Eg: the consumer can't handle
+ * that, the reparenting is glitchy in hardware, etc), use the
+ * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
+ *
+ * After successfully changing clk's parent clk_set_parent will update the
+ * clk topology, sysfs topology and propagate rate recalculation via
+ * __clk_recalc_rates.
+ *
+ * Returns 0 on success, -EERROR otherwise.
  */
 int clk_set_parent(struct clk *clk, struct clk *parent)
 {
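
To make the new flow easier to follow outside the diff, here is a minimal user-space sketch of the sequence this patch introduces. It is an illustration, not the kernel implementation: struct clk, the clk_* helpers, hw_set_parent() and set_parent_sketch() below are simplified stand-ins with hypothetical names, and a pthread mutex stands in for the enable-path spinlock. Locking on the prepare path is elided, since __clk_set_parent() runs with the prepare mutex already held.

/* hypothetical user-space analogue; build with: cc -pthread sketch.c */
#include <pthread.h>

struct clk {
	const char *name;
	struct clk *parent;
	int prepare_count;	/* protected by the (elided) prepare mutex */
	int enable_count;	/* protected by enable_lock */
};

static pthread_mutex_t enable_lock = PTHREAD_MUTEX_INITIALIZER;

/* simplified stand-ins; the real helpers also walk the parent chain */
static void clk_prepare(struct clk *clk)   { clk->prepare_count++; }
static void clk_unprepare(struct clk *clk) { clk->prepare_count--; }

static void clk_enable(struct clk *clk)
{
	pthread_mutex_lock(&enable_lock);
	clk->enable_count++;	/* hardware would ungate on 0 -> 1 */
	pthread_mutex_unlock(&enable_lock);
}

static void clk_disable(struct clk *clk)
{
	pthread_mutex_lock(&enable_lock);
	clk->enable_count--;	/* hardware would gate on 1 -> 0 */
	pthread_mutex_unlock(&enable_lock);
}

/* hypothetical hardware callback; returns nonzero on failure */
static int hw_set_parent(struct clk *clk, struct clk *parent)
{
	(void)clk; (void)parent;
	return 0;
}

/* caller holds the prepare mutex, as callers of __clk_set_parent() do */
static int set_parent_sketch(struct clk *clk, struct clk *parent)
{
	struct clk *old_parent = clk->parent;
	int ret;

	/*
	 * If clk is prepared, force clk and the new parent on, so a
	 * concurrent clk_enable() only bumps reference counts and is a
	 * NOP with respect to the hardware state.
	 */
	if (clk->prepare_count) {
		clk_prepare(parent);
		clk_enable(parent);
		clk_enable(clk);
	}

	/* update the tree topology under the enable lock only */
	pthread_mutex_lock(&enable_lock);
	clk->parent = parent;
	pthread_mutex_unlock(&enable_lock);

	ret = hw_set_parent(clk, parent);
	if (ret) {
		/* rollback: re-link old_parent, drop the forced references */
		pthread_mutex_lock(&enable_lock);
		clk->parent = old_parent;
		pthread_mutex_unlock(&enable_lock);
		if (clk->prepare_count) {
			clk_disable(clk);
			clk_disable(parent);
			clk_unprepare(parent);
		}
		return ret;
	}

	/* success: the forced references now come off the old parent */
	if (clk->prepare_count) {
		clk_disable(clk);
		clk_disable(old_parent);
		clk_unprepare(old_parent);
	}
	return 0;
}

int main(void)
{
	struct clk pll_a = { "pll_a", 0, 0, 0 };
	struct clk pll_b = { "pll_b", 0, 0, 0 };
	struct clk mux   = { "mux", &pll_a, 0, 0 };

	clk_prepare(&mux);	/* consumer has the mux prepared */
	return set_parent_sketch(&mux, &pll_b);
}

The design point the sketch makes concrete: while clk is prepared, both clk and the new parent are held on across the topology update and the .set_parent callback, so a racing clk_enable() can only increment reference counts and never performs the actual hardware ungating mid-switch. That is what lets the patch delete the migrated_enable bookkeeping and the three-way rollback in the error path.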