Diffstat (limited to 'drivers/clk/clk.c'):
 drivers/clk/clk.c | 347 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 204 insertions(+), 143 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index d584004f7af7..820a939fb6bb 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -172,104 +172,6 @@ static bool clk_core_is_enabled(struct clk_core *core)
 	return core->ops->is_enabled(core->hw);
 }
 
-static void clk_unprepare_unused_subtree(struct clk_core *core)
-{
-	struct clk_core *child;
-
-	lockdep_assert_held(&prepare_lock);
-
-	hlist_for_each_entry(child, &core->children, child_node)
-		clk_unprepare_unused_subtree(child);
-
-	if (core->prepare_count)
-		return;
-
-	if (core->flags & CLK_IGNORE_UNUSED)
-		return;
-
-	if (clk_core_is_prepared(core)) {
-		trace_clk_unprepare(core);
-		if (core->ops->unprepare_unused)
-			core->ops->unprepare_unused(core->hw);
-		else if (core->ops->unprepare)
-			core->ops->unprepare(core->hw);
-		trace_clk_unprepare_complete(core);
-	}
-}
-
-static void clk_disable_unused_subtree(struct clk_core *core)
-{
-	struct clk_core *child;
-	unsigned long flags;
-
-	lockdep_assert_held(&prepare_lock);
-
-	hlist_for_each_entry(child, &core->children, child_node)
-		clk_disable_unused_subtree(child);
-
-	flags = clk_enable_lock();
-
-	if (core->enable_count)
-		goto unlock_out;
-
-	if (core->flags & CLK_IGNORE_UNUSED)
-		goto unlock_out;
-
-	/*
-	 * some gate clocks have special needs during the disable-unused
-	 * sequence. call .disable_unused if available, otherwise fall
-	 * back to .disable
-	 */
-	if (clk_core_is_enabled(core)) {
-		trace_clk_disable(core);
-		if (core->ops->disable_unused)
-			core->ops->disable_unused(core->hw);
-		else if (core->ops->disable)
-			core->ops->disable(core->hw);
-		trace_clk_disable_complete(core);
-	}
-
-unlock_out:
-	clk_enable_unlock(flags);
-}
-
-static bool clk_ignore_unused;
-static int __init clk_ignore_unused_setup(char *__unused)
-{
-	clk_ignore_unused = true;
-	return 1;
-}
-__setup("clk_ignore_unused", clk_ignore_unused_setup);
-
-static int clk_disable_unused(void)
-{
-	struct clk_core *core;
-
-	if (clk_ignore_unused) {
-		pr_warn("clk: Not disabling unused clocks\n");
-		return 0;
-	}
-
-	clk_prepare_lock();
-
-	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_disable_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_disable_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_unprepare_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_unprepare_unused_subtree(core);
-
-	clk_prepare_unlock();
-
-	return 0;
-}
-late_initcall_sync(clk_disable_unused);
-
 /*** helper functions ***/
 
 const char *__clk_get_name(const struct clk *clk)
@@ -591,6 +493,13 @@ static void clk_core_unprepare(struct clk_core *core)
 	clk_core_unprepare(core->parent);
 }
 
+static void clk_core_unprepare_lock(struct clk_core *core)
+{
+	clk_prepare_lock();
+	clk_core_unprepare(core);
+	clk_prepare_unlock();
+}
+
 /**
  * clk_unprepare - undo preparation of a clock source
  * @clk: the clk being unprepared
@@ -607,9 +516,7 @@ void clk_unprepare(struct clk *clk)
 	if (IS_ERR_OR_NULL(clk))
 		return;
 
-	clk_prepare_lock();
-	clk_core_unprepare(clk->core);
-	clk_prepare_unlock();
+	clk_core_unprepare_lock(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
@@ -645,6 +552,17 @@ static int clk_core_prepare(struct clk_core *core)
 	return 0;
 }
 
+static int clk_core_prepare_lock(struct clk_core *core)
+{
+	int ret;
+
+	clk_prepare_lock();
+	ret = clk_core_prepare(core);
+	clk_prepare_unlock();
+
+	return ret;
+}
+
 /**
  * clk_prepare - prepare a clock source
  * @clk: the clk being prepared
@@ -659,16 +577,10 @@ static int clk_core_prepare(struct clk_core *core)
  */
 int clk_prepare(struct clk *clk)
 {
-	int ret;
-
 	if (!clk)
 		return 0;
 
-	clk_prepare_lock();
-	ret = clk_core_prepare(clk->core);
-	clk_prepare_unlock();
-
-	return ret;
+	return clk_core_prepare_lock(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_prepare);
 
@@ -688,16 +600,25 @@ static void clk_core_disable(struct clk_core *core)
 	if (--core->enable_count > 0)
 		return;
 
-	trace_clk_disable(core);
+	trace_clk_disable_rcuidle(core);
 
 	if (core->ops->disable)
 		core->ops->disable(core->hw);
 
-	trace_clk_disable_complete(core);
+	trace_clk_disable_complete_rcuidle(core);
 
 	clk_core_disable(core->parent);
 }
 
+static void clk_core_disable_lock(struct clk_core *core)
+{
+	unsigned long flags;
+
+	flags = clk_enable_lock();
+	clk_core_disable(core);
+	clk_enable_unlock(flags);
+}
+
 /**
  * clk_disable - gate a clock
  * @clk: the clk being gated
@@ -712,14 +633,10 @@ static void clk_core_disable(struct clk_core *core)
  */
 void clk_disable(struct clk *clk)
 {
-	unsigned long flags;
-
 	if (IS_ERR_OR_NULL(clk))
 		return;
 
-	flags = clk_enable_lock();
-	clk_core_disable(clk->core);
-	clk_enable_unlock(flags);
+	clk_core_disable_lock(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
@@ -741,12 +658,12 @@ static int clk_core_enable(struct clk_core *core)
 	if (ret)
 		return ret;
 
-	trace_clk_enable(core);
+	trace_clk_enable_rcuidle(core);
 
 	if (core->ops->enable)
 		ret = core->ops->enable(core->hw);
 
-	trace_clk_enable_complete(core);
+	trace_clk_enable_complete_rcuidle(core);
 
 	if (ret) {
 		clk_core_disable(core->parent);
@@ -758,6 +675,18 @@ static int clk_core_enable(struct clk_core *core)
 	return 0;
 }
 
+static int clk_core_enable_lock(struct clk_core *core)
+{
+	unsigned long flags;
+	int ret;
+
+	flags = clk_enable_lock();
+	ret = clk_core_enable(core);
+	clk_enable_unlock(flags);
+
+	return ret;
+}
+
 /**
  * clk_enable - ungate a clock
  * @clk: the clk being ungated
@@ -773,19 +702,136 @@ static int clk_core_enable(struct clk_core *core)
  */
 int clk_enable(struct clk *clk)
 {
-	unsigned long flags;
-	int ret;
-
 	if (!clk)
 		return 0;
 
+	return clk_core_enable_lock(clk->core);
+}
+EXPORT_SYMBOL_GPL(clk_enable);
+
+static int clk_core_prepare_enable(struct clk_core *core)
+{
+	int ret;
+
+	ret = clk_core_prepare_lock(core);
+	if (ret)
+		return ret;
+
+	ret = clk_core_enable_lock(core);
+	if (ret)
+		clk_core_unprepare_lock(core);
+
+	return ret;
+}
+
+static void clk_core_disable_unprepare(struct clk_core *core)
+{
+	clk_core_disable_lock(core);
+	clk_core_unprepare_lock(core);
+}
+
+static void clk_unprepare_unused_subtree(struct clk_core *core)
+{
+	struct clk_core *child;
+
+	lockdep_assert_held(&prepare_lock);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_unprepare_unused_subtree(child);
+
+	if (core->prepare_count)
+		return;
+
+	if (core->flags & CLK_IGNORE_UNUSED)
+		return;
+
+	if (clk_core_is_prepared(core)) {
+		trace_clk_unprepare(core);
+		if (core->ops->unprepare_unused)
+			core->ops->unprepare_unused(core->hw);
+		else if (core->ops->unprepare)
+			core->ops->unprepare(core->hw);
+		trace_clk_unprepare_complete(core);
+	}
+}
+
+static void clk_disable_unused_subtree(struct clk_core *core)
+{
+	struct clk_core *child;
+	unsigned long flags;
+
+	lockdep_assert_held(&prepare_lock);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_disable_unused_subtree(child);
+
+	if (core->flags & CLK_OPS_PARENT_ENABLE)
+		clk_core_prepare_enable(core->parent);
+
 	flags = clk_enable_lock();
-	ret = clk_core_enable(clk->core);
+
+	if (core->enable_count)
+		goto unlock_out;
+
+	if (core->flags & CLK_IGNORE_UNUSED)
+		goto unlock_out;
+
+	/*
+	 * some gate clocks have special needs during the disable-unused
+	 * sequence. call .disable_unused if available, otherwise fall
+	 * back to .disable
+	 */
+	if (clk_core_is_enabled(core)) {
+		trace_clk_disable(core);
+		if (core->ops->disable_unused)
+			core->ops->disable_unused(core->hw);
+		else if (core->ops->disable)
+			core->ops->disable(core->hw);
+		trace_clk_disable_complete(core);
+	}
+
+unlock_out:
 	clk_enable_unlock(flags);
+	if (core->flags & CLK_OPS_PARENT_ENABLE)
+		clk_core_disable_unprepare(core->parent);
+}
 
-	return ret;
+static bool clk_ignore_unused;
+static int __init clk_ignore_unused_setup(char *__unused)
+{
+	clk_ignore_unused = true;
+	return 1;
 }
-EXPORT_SYMBOL_GPL(clk_enable);
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
+
+static int clk_disable_unused(void)
+{
+	struct clk_core *core;
+
+	if (clk_ignore_unused) {
+		pr_warn("clk: Not disabling unused clocks\n");
+		return 0;
+	}
+
+	clk_prepare_lock();
+
+	hlist_for_each_entry(core, &clk_root_list, child_node)
+		clk_disable_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_orphan_list, child_node)
+		clk_disable_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_root_list, child_node)
+		clk_unprepare_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_orphan_list, child_node)
+		clk_unprepare_unused_subtree(core);
+
+	clk_prepare_unlock();
+
+	return 0;
+}
+late_initcall_sync(clk_disable_unused);
 
 static int clk_core_round_rate_nolock(struct clk_core *core,
 				      struct clk_rate_request *req)
@@ -828,9 +874,7 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
 /**
  * __clk_determine_rate - get the closest rate actually supported by a clock
  * @hw: determine the rate of this clock
- * @rate: target rate
- * @min_rate: returned rate must be greater than this rate
- * @max_rate: returned rate must be less than this rate
+ * @req: target rate request
  *
  * Useful for clk_ops such as .set_rate and .determine_rate.
  */
@@ -1128,7 +1172,9 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *core,
 	struct clk_core *old_parent = core->parent;
 
 	/*
-	 * Migrate prepare state between parents and prevent race with
+	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
+	 *
+	 * 2. Migrate prepare state between parents and prevent race with
 	 * clk_enable().
 	 *
 	 * If the clock is not prepared, then a race with
@@ -1144,12 +1190,17 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *core,
 	 *
 	 * See also: Comment for clk_set_parent() below.
 	 */
+
+	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
+	if (core->flags & CLK_OPS_PARENT_ENABLE) {
+		clk_core_prepare_enable(old_parent);
+		clk_core_prepare_enable(parent);
+	}
+
+	/* migrate prepare count if > 0 */
 	if (core->prepare_count) {
-		clk_core_prepare(parent);
-		flags = clk_enable_lock();
-		clk_core_enable(parent);
-		clk_core_enable(core);
-		clk_enable_unlock(flags);
+		clk_core_prepare_enable(parent);
+		clk_core_enable_lock(core);
 	}
 
 	/* update the clk tree topology */
@@ -1164,18 +1215,19 @@ static void __clk_set_parent_after(struct clk_core *core,
 				   struct clk_core *parent,
 				   struct clk_core *old_parent)
 {
-	unsigned long flags;
-
 	/*
 	 * Finish the migration of prepare state and undo the changes done
 	 * for preventing a race with clk_enable().
 	 */
 	if (core->prepare_count) {
-		flags = clk_enable_lock();
-		clk_core_disable(core);
-		clk_core_disable(old_parent);
-		clk_enable_unlock(flags);
-		clk_core_unprepare(old_parent);
+		clk_core_disable_lock(core);
+		clk_core_disable_unprepare(old_parent);
+	}
+
+	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
+	if (core->flags & CLK_OPS_PARENT_ENABLE) {
+		clk_core_disable_unprepare(parent);
+		clk_core_disable_unprepare(old_parent);
 	}
 }
 
@@ -1422,13 +1474,17 @@ static void clk_change_rate(struct clk_core *core)
 	unsigned long best_parent_rate = 0;
 	bool skip_set_rate = false;
 	struct clk_core *old_parent;
+	struct clk_core *parent = NULL;
 
 	old_rate = core->rate;
 
-	if (core->new_parent)
+	if (core->new_parent) {
+		parent = core->new_parent;
 		best_parent_rate = core->new_parent->rate;
-	else if (core->parent)
+	} else if (core->parent) {
+		parent = core->parent;
 		best_parent_rate = core->parent->rate;
+	}
 
 	if (core->flags & CLK_SET_RATE_UNGATE) {
 		unsigned long flags;
@@ -1456,6 +1512,9 @@ static void clk_change_rate(struct clk_core *core)
 		__clk_set_parent_after(core, core->new_parent, old_parent);
 	}
 
+	if (core->flags & CLK_OPS_PARENT_ENABLE)
+		clk_core_prepare_enable(parent);
+
 	trace_clk_set_rate(core, core->new_rate);
 
 	if (!skip_set_rate && core->ops->set_rate)
@@ -1474,6 +1533,9 @@ static void clk_change_rate(struct clk_core *core)
 		clk_core_unprepare(core);
 	}
 
+	if (core->flags & CLK_OPS_PARENT_ENABLE)
+		clk_core_disable_unprepare(parent);
+
 	if (core->notifier_count && old_rate != core->rate)
 		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
 
@@ -1501,7 +1563,6 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 {
 	struct clk_core *top, *fail_clk;
 	unsigned long rate = req_rate;
-	int ret = 0;
 
 	if (!core)
 		return 0;
@@ -1532,7 +1593,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 
 	core->req_rate = req_rate;
 
-	return ret;
+	return 0;
 }
 
 /**
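
For context, the clk_core_prepare_enable()/clk_core_disable_unprepare() pair introduced above is the framework-internal analogue of the long-standing consumer API clk_prepare_enable()/clk_disable_unprepare() from <linux/clk.h>: prepare (sleepable) then enable (atomic), undone in reverse order. A minimal consumer-side sketch of that pairing; the driver name, probe signature, and "per" clock id are hypothetical:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int foo_probe(struct device *dev)
	{
		struct clk *clk;
		int ret;

		/* "per" is a hypothetical clock name from the device's binding */
		clk = devm_clk_get(dev, "per");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* prepare (may sleep), then enable (atomic) */
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		/* ... program the peripheral while its clock is running ... */

		/* undo in reverse order */
		clk_disable_unprepare(clk);
		return 0;
	}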