author     Mike Turquette <mturquette@linaro.org>    2013-03-28 16:59:01 -0400
committer  Mike Turquette <mturquette@linaro.org>    2013-04-02 13:23:08 -0400
commit     eab89f690ee0805c02017d7959f4f930379a8c46 (patch)
tree       ea9bf078de6212572fc4a095ab22540d62326d98 /drivers/clk/clk.c
parent     43c4120c0656692d08f5c005881cc0c4573ce3b5 (diff)
clk: abstract locking out into helper functions
Create locking helpers for the global mutex and global spinlock. The
definitions of these helpers will be expanded upon in the next patch, which
introduces reentrancy into the locking scheme.
Signed-off-by: Mike Turquette <mturquette@linaro.org>
Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
Cc: David Brown <davidb@codeaurora.org>
Tested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
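Editor's note: this patch only introduces pass-through wrappers; the reentrancy mentioned above is left to the next patch. Below is a minimal sketch of how clk_prepare_lock()/clk_prepare_unlock() could be extended to tolerate nested calls from the same task. The prepare_owner and prepare_refcnt variables are illustrative assumptions for this sketch, not code from this series.

static struct task_struct *prepare_owner;       /* assumption: task that currently holds prepare_lock */
static int prepare_refcnt;                      /* assumption: nesting depth for that task */

static void clk_prepare_lock(void)
{
        if (!mutex_trylock(&prepare_lock)) {
                /* lock is busy: if this task already owns it, just nest deeper */
                if (prepare_owner == current) {
                        prepare_refcnt++;
                        return;
                }
                mutex_lock(&prepare_lock);
        }
        WARN_ON_ONCE(prepare_owner != NULL);
        WARN_ON_ONCE(prepare_refcnt != 0);
        prepare_owner = current;
        prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
        WARN_ON_ONCE(prepare_owner != current);
        WARN_ON_ONCE(prepare_refcnt == 0);

        /* only the outermost unlock releases the mutex */
        if (--prepare_refcnt)
                return;
        prepare_owner = NULL;
        mutex_unlock(&prepare_lock);
}

The same owner/refcount idea would apply to clk_enable_lock()/clk_enable_unlock(), with the spinlock flags saved only for the outermost acquisition.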
Diffstat (limited to 'drivers/clk/clk.c')
-rw-r--r--  drivers/clk/clk.c | 99
1 file changed, 61 insertions(+), 38 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5e8ffff99362..0b5d61201ca1 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -27,6 +27,29 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/*** locking ***/
+static void clk_prepare_lock(void)
+{
+        mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+        mutex_unlock(&prepare_lock);
+}
+
+static unsigned long clk_enable_lock(void)
+{
+        unsigned long flags;
+        spin_lock_irqsave(&enable_lock, flags);
+        return flags;
+}
+
+static void clk_enable_unlock(unsigned long flags)
+{
+        spin_unlock_irqrestore(&enable_lock, flags);
+}
+
 /*** debugfs support ***/
 
 #ifdef CONFIG_COMMON_CLK_DEBUG
@@ -69,7 +92,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
         seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
         seq_printf(s, "---------------------------------------------------------------------\n");
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         hlist_for_each_entry(c, &clk_root_list, child_node)
                 clk_summary_show_subtree(s, c, 0);
@@ -77,7 +100,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
         hlist_for_each_entry(c, &clk_orphan_list, child_node)
                 clk_summary_show_subtree(s, c, 0);
 
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return 0;
 }
@@ -130,7 +153,7 @@ static int clk_dump(struct seq_file *s, void *data)
 
         seq_printf(s, "{");
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         hlist_for_each_entry(c, &clk_root_list, child_node) {
                 if (!first_node)
@@ -144,7 +167,7 @@ static int clk_dump(struct seq_file *s, void *data)
                 clk_dump_subtree(s, c, 0);
         }
 
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         seq_printf(s, "}");
         return 0;
@@ -316,7 +339,7 @@ static int __init clk_debug_init(void)
         if (!orphandir)
                 return -ENOMEM;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         hlist_for_each_entry(clk, &clk_root_list, child_node)
                 clk_debug_create_subtree(clk, rootdir);
@@ -326,7 +349,7 @@ static int __init clk_debug_init(void)
 
         inited = 1;
 
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return 0;
 }
@@ -372,7 +395,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
         hlist_for_each_entry(child, &clk->children, child_node)
                 clk_disable_unused_subtree(child);
 
-        spin_lock_irqsave(&enable_lock, flags);
+        flags = clk_enable_lock();
 
         if (clk->enable_count)
                 goto unlock_out;
@@ -393,7 +416,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
         }
 
 unlock_out:
-        spin_unlock_irqrestore(&enable_lock, flags);
+        clk_enable_unlock(flags);
 
 out:
         return;
@@ -403,7 +426,7 @@ static int clk_disable_unused(void)
 {
         struct clk *clk;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         hlist_for_each_entry(clk, &clk_root_list, child_node)
                 clk_disable_unused_subtree(clk);
@@ -417,7 +440,7 @@ static int clk_disable_unused(void)
         hlist_for_each_entry(clk, &clk_orphan_list, child_node)
                 clk_unprepare_unused_subtree(clk);
 
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return 0;
 }
@@ -600,9 +623,9 @@ void __clk_unprepare(struct clk *clk)
  */
 void clk_unprepare(struct clk *clk)
 {
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
         __clk_unprepare(clk);
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
@@ -648,9 +671,9 @@ int clk_prepare(struct clk *clk)
 {
         int ret;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
         ret = __clk_prepare(clk);
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -692,9 +715,9 @@ void clk_disable(struct clk *clk)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&enable_lock, flags);
+        flags = clk_enable_lock();
         __clk_disable(clk);
-        spin_unlock_irqrestore(&enable_lock, flags);
+        clk_enable_unlock(flags);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
@@ -745,9 +768,9 @@ int clk_enable(struct clk *clk)
         unsigned long flags;
         int ret;
 
-        spin_lock_irqsave(&enable_lock, flags);
+        flags = clk_enable_lock();
         ret = __clk_enable(clk);
-        spin_unlock_irqrestore(&enable_lock, flags);
+        clk_enable_unlock(flags);
 
         return ret;
 }
@@ -792,9 +815,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
         unsigned long ret;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
         ret = __clk_round_rate(clk, rate);
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -889,13 +912,13 @@ unsigned long clk_get_rate(struct clk *clk)
 {
         unsigned long rate;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
                 __clk_recalc_rates(clk, 0);
 
         rate = __clk_get_rate(clk);
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return rate;
 }
@@ -1100,7 +1123,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
         int ret = 0;
 
         /* prevent racing with updates to the clock topology */
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         /* bail early if nothing to do */
         if (rate == clk->rate)
@@ -1132,7 +1155,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
         clk_change_rate(top);
 
 out:
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -1148,9 +1171,9 @@ struct clk *clk_get_parent(struct clk *clk)
 {
         struct clk *parent;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
         parent = __clk_get_parent(clk);
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return parent;
 }
@@ -1294,19 +1317,19 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
                 __clk_prepare(parent);
 
         /* FIXME replace with clk_is_enabled(clk) someday */
-        spin_lock_irqsave(&enable_lock, flags);
+        flags = clk_enable_lock();
         if (clk->enable_count)
                 __clk_enable(parent);
-        spin_unlock_irqrestore(&enable_lock, flags);
+        clk_enable_unlock(flags);
 
         /* change clock input source */
         ret = clk->ops->set_parent(clk->hw, i);
 
         /* clean up old prepare and enable */
-        spin_lock_irqsave(&enable_lock, flags);
+        flags = clk_enable_lock();
         if (clk->enable_count)
                 __clk_disable(old_parent);
-        spin_unlock_irqrestore(&enable_lock, flags);
+        clk_enable_unlock(flags);
 
         if (clk->prepare_count)
                 __clk_unprepare(old_parent);
@@ -1338,7 +1361,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
                 return -ENOSYS;
 
         /* prevent racing with updates to the clock topology */
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         if (clk->parent == parent)
                 goto out;
@@ -1367,7 +1390,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
                 __clk_reparent(clk, parent);
 
 out:
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -1390,7 +1413,7 @@ int __clk_init(struct device *dev, struct clk *clk)
         if (!clk)
                 return -EINVAL;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         /* check to see if a clock with this name is already registered */
         if (__clk_lookup(clk->name)) {
@@ -1514,7 +1537,7 @@ int __clk_init(struct device *dev, struct clk *clk)
         clk_debug_register(clk);
 
 out:
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -1748,7 +1771,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
         if (!clk || !nb)
                 return -EINVAL;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         /* search the list of notifiers for this clk */
         list_for_each_entry(cn, &clk_notifier_list, node)
@@ -1772,7 +1795,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
         clk->notifier_count++;
 
 out:
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -1797,7 +1820,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
         if (!clk || !nb)
                 return -EINVAL;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         list_for_each_entry(cn, &clk_notifier_list, node)
                 if (cn->clk == clk)
@@ -1818,7 +1841,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
                 ret = -ENOENT;
         }
 
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
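Net effect of the conversion above: every critical section in clk.c now goes through one of two caller-side patterns instead of touching prepare_lock or enable_lock directly. Condensed from the clk_prepare() and clk_enable() hunks:

        /* sleepable paths serialize on the global prepare mutex */
        clk_prepare_lock();
        ret = __clk_prepare(clk);
        clk_prepare_unlock();

        /* atomic paths take the enable spinlock; the helper returns the saved IRQ flags */
        flags = clk_enable_lock();
        ret = __clk_enable(clk);
        clk_enable_unlock(flags);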