aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/clk/clk.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/clk/clk.c')
-rw-r--r--drivers/clk/clk.c383
1 files changed, 339 insertions, 44 deletions
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 2cf2ea6b77a1..c42e608af6bb 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -21,6 +21,8 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/sched.h> 22#include <linux/sched.h>
23 23
24#include "clk.h"
25
24static DEFINE_SPINLOCK(enable_lock); 26static DEFINE_SPINLOCK(enable_lock);
25static DEFINE_MUTEX(prepare_lock); 27static DEFINE_MUTEX(prepare_lock);
26 28
@@ -92,7 +94,7 @@ static void clk_enable_unlock(unsigned long flags)
92 94
93/*** debugfs support ***/ 95/*** debugfs support ***/
94 96
95#ifdef CONFIG_COMMON_CLK_DEBUG 97#ifdef CONFIG_DEBUG_FS
96#include <linux/debugfs.h> 98#include <linux/debugfs.h>
97 99
98static struct dentry *rootdir; 100static struct dentry *rootdir;
@@ -104,10 +106,11 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
104 if (!c) 106 if (!c)
105 return; 107 return;
106 108
107 seq_printf(s, "%*s%-*s %-11d %-12d %-10lu", 109 seq_printf(s, "%*s%-*s %-11d %-12d %-10lu %-11lu",
108 level * 3 + 1, "", 110 level * 3 + 1, "",
109 30 - level * 3, c->name, 111 30 - level * 3, c->name,
110 c->enable_count, c->prepare_count, clk_get_rate(c)); 112 c->enable_count, c->prepare_count, clk_get_rate(c),
113 clk_get_accuracy(c));
111 seq_printf(s, "\n"); 114 seq_printf(s, "\n");
112} 115}
113 116
@@ -129,8 +132,8 @@ static int clk_summary_show(struct seq_file *s, void *data)
129{ 132{
130 struct clk *c; 133 struct clk *c;
131 134
132 seq_printf(s, " clock enable_cnt prepare_cnt rate\n"); 135 seq_printf(s, " clock enable_cnt prepare_cnt rate accuracy\n");
133 seq_printf(s, "---------------------------------------------------------------------\n"); 136 seq_printf(s, "---------------------------------------------------------------------------------\n");
134 137
135 clk_prepare_lock(); 138 clk_prepare_lock();
136 139
@@ -167,6 +170,7 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
167 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 170 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
168 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 171 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
169 seq_printf(s, "\"rate\": %lu", clk_get_rate(c)); 172 seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
173 seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
170} 174}
171 175
172static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) 176static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
@@ -248,6 +252,11 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
248 if (!d) 252 if (!d)
249 goto err_out; 253 goto err_out;
250 254
255 d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
256 (u32 *)&clk->accuracy);
257 if (!d)
258 goto err_out;
259
251 d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry, 260 d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
252 (u32 *)&clk->flags); 261 (u32 *)&clk->flags);
253 if (!d) 262 if (!d)
@@ -272,7 +281,8 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
272 goto out; 281 goto out;
273 282
274err_out: 283err_out:
275 debugfs_remove(clk->dentry); 284 debugfs_remove_recursive(clk->dentry);
285 clk->dentry = NULL;
276out: 286out:
277 return ret; 287 return ret;
278} 288}
@@ -342,6 +352,21 @@ out:
342 return ret; 352 return ret;
343} 353}
344 354
355 /**
356 * clk_debug_unregister - remove a clk node from the debugfs clk tree
357 * @clk: the clk being removed from the debugfs clk tree
358 *
 359 * Dynamically removes a clk and all its children clk nodes from the
360 * debugfs clk tree if clk->dentry points to debugfs created by
361 * clk_debug_register in __clk_init.
362 *
363 * Caller must hold prepare_lock.
364 */
365static void clk_debug_unregister(struct clk *clk)
366{
367 debugfs_remove_recursive(clk->dentry);
368}
369
345/** 370/**
346 * clk_debug_reparent - reparent clk node in the debugfs clk tree 371 * clk_debug_reparent - reparent clk node in the debugfs clk tree
347 * @clk: the clk being reparented 372 * @clk: the clk being reparented
@@ -432,6 +457,9 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
432static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent) 457static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
433{ 458{
434} 459}
460static inline void clk_debug_unregister(struct clk *clk)
461{
462}
435#endif 463#endif
436 464
437/* caller must hold prepare_lock */ 465/* caller must hold prepare_lock */
@@ -547,16 +575,19 @@ struct clk_hw *__clk_get_hw(struct clk *clk)
547{ 575{
548 return !clk ? NULL : clk->hw; 576 return !clk ? NULL : clk->hw;
549} 577}
578EXPORT_SYMBOL_GPL(__clk_get_hw);
550 579
551u8 __clk_get_num_parents(struct clk *clk) 580u8 __clk_get_num_parents(struct clk *clk)
552{ 581{
553 return !clk ? 0 : clk->num_parents; 582 return !clk ? 0 : clk->num_parents;
554} 583}
584EXPORT_SYMBOL_GPL(__clk_get_num_parents);
555 585
556struct clk *__clk_get_parent(struct clk *clk) 586struct clk *__clk_get_parent(struct clk *clk)
557{ 587{
558 return !clk ? NULL : clk->parent; 588 return !clk ? NULL : clk->parent;
559} 589}
590EXPORT_SYMBOL_GPL(__clk_get_parent);
560 591
561struct clk *clk_get_parent_by_index(struct clk *clk, u8 index) 592struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
562{ 593{
@@ -570,6 +601,7 @@ struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
570 else 601 else
571 return clk->parents[index]; 602 return clk->parents[index];
572} 603}
604EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
573 605
574unsigned int __clk_get_enable_count(struct clk *clk) 606unsigned int __clk_get_enable_count(struct clk *clk)
575{ 607{
@@ -601,6 +633,15 @@ unsigned long __clk_get_rate(struct clk *clk)
601out: 633out:
602 return ret; 634 return ret;
603} 635}
636EXPORT_SYMBOL_GPL(__clk_get_rate);
637
638unsigned long __clk_get_accuracy(struct clk *clk)
639{
640 if (!clk)
641 return 0;
642
643 return clk->accuracy;
644}
604 645
605unsigned long __clk_get_flags(struct clk *clk) 646unsigned long __clk_get_flags(struct clk *clk)
606{ 647{
@@ -649,6 +690,7 @@ bool __clk_is_enabled(struct clk *clk)
649out: 690out:
650 return !!ret; 691 return !!ret;
651} 692}
693EXPORT_SYMBOL_GPL(__clk_is_enabled);
652 694
653static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk) 695static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
654{ 696{
@@ -740,6 +782,7 @@ out:
740 782
741 return best; 783 return best;
742} 784}
785EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
743 786
744/*** clk api ***/ 787/*** clk api ***/
745 788
@@ -1016,6 +1059,59 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
1016} 1059}
1017 1060
1018/** 1061/**
1062 * __clk_recalc_accuracies
1063 * @clk: first clk in the subtree
1064 *
1065 * Walks the subtree of clks starting with clk and recalculates accuracies as
1066 * it goes. Note that if a clk does not implement the .recalc_accuracy
 1067 * callback then it is assumed that the clock will take on the accuracy of its
1068 * parent.
1069 *
1070 * Caller must hold prepare_lock.
1071 */
1072static void __clk_recalc_accuracies(struct clk *clk)
1073{
1074 unsigned long parent_accuracy = 0;
1075 struct clk *child;
1076
1077 if (clk->parent)
1078 parent_accuracy = clk->parent->accuracy;
1079
1080 if (clk->ops->recalc_accuracy)
1081 clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
1082 parent_accuracy);
1083 else
1084 clk->accuracy = parent_accuracy;
1085
1086 hlist_for_each_entry(child, &clk->children, child_node)
1087 __clk_recalc_accuracies(child);
1088}
1089
1090/**
1091 * clk_get_accuracy - return the accuracy of clk
1092 * @clk: the clk whose accuracy is being returned
1093 *
1094 * Simply returns the cached accuracy of the clk, unless
 1095 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
1096 * issued.
1097 * If clk is NULL then returns 0.
1098 */
1099long clk_get_accuracy(struct clk *clk)
1100{
1101 unsigned long accuracy;
1102
1103 clk_prepare_lock();
1104 if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
1105 __clk_recalc_accuracies(clk);
1106
1107 accuracy = __clk_get_accuracy(clk);
1108 clk_prepare_unlock();
1109
1110 return accuracy;
1111}
1112EXPORT_SYMBOL_GPL(clk_get_accuracy);
1113
1114/**
1019 * __clk_recalc_rates 1115 * __clk_recalc_rates
1020 * @clk: first clk in the subtree 1116 * @clk: first clk in the subtree
1021 * @msg: notification type (see include/linux/clk.h) 1117 * @msg: notification type (see include/linux/clk.h)
@@ -1129,10 +1225,9 @@ static void clk_reparent(struct clk *clk, struct clk *new_parent)
1129 clk->parent = new_parent; 1225 clk->parent = new_parent;
1130} 1226}
1131 1227
1132static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) 1228static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
1133{ 1229{
1134 unsigned long flags; 1230 unsigned long flags;
1135 int ret = 0;
1136 struct clk *old_parent = clk->parent; 1231 struct clk *old_parent = clk->parent;
1137 1232
1138 /* 1233 /*
@@ -1163,6 +1258,34 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
1163 clk_reparent(clk, parent); 1258 clk_reparent(clk, parent);
1164 clk_enable_unlock(flags); 1259 clk_enable_unlock(flags);
1165 1260
1261 return old_parent;
1262}
1263
1264static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
1265 struct clk *old_parent)
1266{
1267 /*
1268 * Finish the migration of prepare state and undo the changes done
1269 * for preventing a race with clk_enable().
1270 */
1271 if (clk->prepare_count) {
1272 clk_disable(clk);
1273 clk_disable(old_parent);
1274 __clk_unprepare(old_parent);
1275 }
1276
1277 /* update debugfs with new clk tree topology */
1278 clk_debug_reparent(clk, parent);
1279}
1280
1281static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
1282{
1283 unsigned long flags;
1284 int ret = 0;
1285 struct clk *old_parent;
1286
1287 old_parent = __clk_set_parent_before(clk, parent);
1288
1166 /* change clock input source */ 1289 /* change clock input source */
1167 if (parent && clk->ops->set_parent) 1290 if (parent && clk->ops->set_parent)
1168 ret = clk->ops->set_parent(clk->hw, p_index); 1291 ret = clk->ops->set_parent(clk->hw, p_index);
@@ -1180,18 +1303,8 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
1180 return ret; 1303 return ret;
1181 } 1304 }
1182 1305
1183 /* 1306 __clk_set_parent_after(clk, parent, old_parent);
1184 * Finish the migration of prepare state and undo the changes done
1185 * for preventing a race with clk_enable().
1186 */
1187 if (clk->prepare_count) {
1188 clk_disable(clk);
1189 clk_disable(old_parent);
1190 __clk_unprepare(old_parent);
1191 }
1192 1307
1193 /* update debugfs with new clk tree topology */
1194 clk_debug_reparent(clk, parent);
1195 return 0; 1308 return 0;
1196} 1309}
1197 1310
@@ -1376,17 +1489,32 @@ static void clk_change_rate(struct clk *clk)
1376 struct clk *child; 1489 struct clk *child;
1377 unsigned long old_rate; 1490 unsigned long old_rate;
1378 unsigned long best_parent_rate = 0; 1491 unsigned long best_parent_rate = 0;
1492 bool skip_set_rate = false;
1493 struct clk *old_parent;
1379 1494
1380 old_rate = clk->rate; 1495 old_rate = clk->rate;
1381 1496
1382 /* set parent */ 1497 if (clk->new_parent)
1383 if (clk->new_parent && clk->new_parent != clk->parent) 1498 best_parent_rate = clk->new_parent->rate;
1384 __clk_set_parent(clk, clk->new_parent, clk->new_parent_index); 1499 else if (clk->parent)
1385
1386 if (clk->parent)
1387 best_parent_rate = clk->parent->rate; 1500 best_parent_rate = clk->parent->rate;
1388 1501
1389 if (clk->ops->set_rate) 1502 if (clk->new_parent && clk->new_parent != clk->parent) {
1503 old_parent = __clk_set_parent_before(clk, clk->new_parent);
1504
1505 if (clk->ops->set_rate_and_parent) {
1506 skip_set_rate = true;
1507 clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
1508 best_parent_rate,
1509 clk->new_parent_index);
1510 } else if (clk->ops->set_parent) {
1511 clk->ops->set_parent(clk->hw, clk->new_parent_index);
1512 }
1513
1514 __clk_set_parent_after(clk, clk->new_parent, old_parent);
1515 }
1516
1517 if (!skip_set_rate && clk->ops->set_rate)
1390 clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate); 1518 clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
1391 1519
1392 if (clk->ops->recalc_rate) 1520 if (clk->ops->recalc_rate)
@@ -1551,6 +1679,7 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
1551{ 1679{
1552 clk_reparent(clk, new_parent); 1680 clk_reparent(clk, new_parent);
1553 clk_debug_reparent(clk, new_parent); 1681 clk_debug_reparent(clk, new_parent);
1682 __clk_recalc_accuracies(clk);
1554 __clk_recalc_rates(clk, POST_RATE_CHANGE); 1683 __clk_recalc_rates(clk, POST_RATE_CHANGE);
1555} 1684}
1556 1685
@@ -1621,11 +1750,13 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
1621 /* do the re-parent */ 1750 /* do the re-parent */
1622 ret = __clk_set_parent(clk, parent, p_index); 1751 ret = __clk_set_parent(clk, parent, p_index);
1623 1752
 1624 /* propagate rate recalculation accordingly */ 1753 /* propagate rate and accuracy recalculation accordingly */
1625 if (ret) 1754 if (ret) {
1626 __clk_recalc_rates(clk, ABORT_RATE_CHANGE); 1755 __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
1627 else 1756 } else {
1628 __clk_recalc_rates(clk, POST_RATE_CHANGE); 1757 __clk_recalc_rates(clk, POST_RATE_CHANGE);
1758 __clk_recalc_accuracies(clk);
1759 }
1629 1760
1630out: 1761out:
1631 clk_prepare_unlock(); 1762 clk_prepare_unlock();
@@ -1678,6 +1809,14 @@ int __clk_init(struct device *dev, struct clk *clk)
1678 goto out; 1809 goto out;
1679 } 1810 }
1680 1811
1812 if (clk->ops->set_rate_and_parent &&
1813 !(clk->ops->set_parent && clk->ops->set_rate)) {
1814 pr_warn("%s: %s must implement .set_parent & .set_rate\n",
1815 __func__, clk->name);
1816 ret = -EINVAL;
1817 goto out;
1818 }
1819
1681 /* throw a WARN if any entries in parent_names are NULL */ 1820 /* throw a WARN if any entries in parent_names are NULL */
1682 for (i = 0; i < clk->num_parents; i++) 1821 for (i = 0; i < clk->num_parents; i++)
1683 WARN(!clk->parent_names[i], 1822 WARN(!clk->parent_names[i],
@@ -1730,6 +1869,21 @@ int __clk_init(struct device *dev, struct clk *clk)
1730 hlist_add_head(&clk->child_node, &clk_orphan_list); 1869 hlist_add_head(&clk->child_node, &clk_orphan_list);
1731 1870
1732 /* 1871 /*
1872 * Set clk's accuracy. The preferred method is to use
1873 * .recalc_accuracy. For simple clocks and lazy developers the default
1874 * fallback is to use the parent's accuracy. If a clock doesn't have a
1875 * parent (or is orphaned) then accuracy is set to zero (perfect
1876 * clock).
1877 */
1878 if (clk->ops->recalc_accuracy)
1879 clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
1880 __clk_get_accuracy(clk->parent));
1881 else if (clk->parent)
1882 clk->accuracy = clk->parent->accuracy;
1883 else
1884 clk->accuracy = 0;
1885
1886 /*
1733 * Set clk's rate. The preferred method is to use .recalc_rate. For 1887 * Set clk's rate. The preferred method is to use .recalc_rate. For
1734 * simple clocks and lazy developers the default fallback is to use the 1888 * simple clocks and lazy developers the default fallback is to use the
1735 * parent's rate. If a clock doesn't have a parent (or is orphaned) 1889 * parent's rate. If a clock doesn't have a parent (or is orphaned)
@@ -1743,6 +1897,7 @@ int __clk_init(struct device *dev, struct clk *clk)
1743 else 1897 else
1744 clk->rate = 0; 1898 clk->rate = 0;
1745 1899
1900 clk_debug_register(clk);
1746 /* 1901 /*
1747 * walk the list of orphan clocks and reparent any that are children of 1902 * walk the list of orphan clocks and reparent any that are children of
1748 * this clock 1903 * this clock
@@ -1773,8 +1928,7 @@ int __clk_init(struct device *dev, struct clk *clk)
1773 if (clk->ops->init) 1928 if (clk->ops->init)
1774 clk->ops->init(clk->hw); 1929 clk->ops->init(clk->hw);
1775 1930
1776 clk_debug_register(clk); 1931 kref_init(&clk->ref);
1777
1778out: 1932out:
1779 clk_prepare_unlock(); 1933 clk_prepare_unlock();
1780 1934
@@ -1810,6 +1964,10 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1810 clk->flags = hw->init->flags; 1964 clk->flags = hw->init->flags;
1811 clk->parent_names = hw->init->parent_names; 1965 clk->parent_names = hw->init->parent_names;
1812 clk->num_parents = hw->init->num_parents; 1966 clk->num_parents = hw->init->num_parents;
1967 if (dev && dev->driver)
1968 clk->owner = dev->driver->owner;
1969 else
1970 clk->owner = NULL;
1813 1971
1814 ret = __clk_init(dev, clk); 1972 ret = __clk_init(dev, clk);
1815 if (ret) 1973 if (ret)
@@ -1830,6 +1988,8 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
1830 goto fail_name; 1988 goto fail_name;
1831 } 1989 }
1832 clk->ops = hw->init->ops; 1990 clk->ops = hw->init->ops;
1991 if (dev && dev->driver)
1992 clk->owner = dev->driver->owner;
1833 clk->hw = hw; 1993 clk->hw = hw;
1834 clk->flags = hw->init->flags; 1994 clk->flags = hw->init->flags;
1835 clk->num_parents = hw->init->num_parents; 1995 clk->num_parents = hw->init->num_parents;
@@ -1904,13 +2064,104 @@ fail_out:
1904} 2064}
1905EXPORT_SYMBOL_GPL(clk_register); 2065EXPORT_SYMBOL_GPL(clk_register);
1906 2066
2067/*
2068 * Free memory allocated for a clock.
2069 * Caller must hold prepare_lock.
2070 */
2071static void __clk_release(struct kref *ref)
2072{
2073 struct clk *clk = container_of(ref, struct clk, ref);
2074 int i = clk->num_parents;
2075
2076 kfree(clk->parents);
2077 while (--i >= 0)
2078 kfree(clk->parent_names[i]);
2079
2080 kfree(clk->parent_names);
2081 kfree(clk->name);
2082 kfree(clk);
2083}
2084
2085/*
2086 * Empty clk_ops for unregistered clocks. These are used temporarily
2087 * after clk_unregister() was called on a clock and until last clock
2088 * consumer calls clk_put() and the struct clk object is freed.
2089 */
2090static int clk_nodrv_prepare_enable(struct clk_hw *hw)
2091{
2092 return -ENXIO;
2093}
2094
2095static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
2096{
2097 WARN_ON_ONCE(1);
2098}
2099
2100static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
2101 unsigned long parent_rate)
2102{
2103 return -ENXIO;
2104}
2105
2106static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
2107{
2108 return -ENXIO;
2109}
2110
2111static const struct clk_ops clk_nodrv_ops = {
2112 .enable = clk_nodrv_prepare_enable,
2113 .disable = clk_nodrv_disable_unprepare,
2114 .prepare = clk_nodrv_prepare_enable,
2115 .unprepare = clk_nodrv_disable_unprepare,
2116 .set_rate = clk_nodrv_set_rate,
2117 .set_parent = clk_nodrv_set_parent,
2118};
2119
1907/** 2120/**
1908 * clk_unregister - unregister a currently registered clock 2121 * clk_unregister - unregister a currently registered clock
1909 * @clk: clock to unregister 2122 * @clk: clock to unregister
1910 *
1911 * Currently unimplemented.
1912 */ 2123 */
1913void clk_unregister(struct clk *clk) {} 2124void clk_unregister(struct clk *clk)
2125{
2126 unsigned long flags;
2127
2128 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2129 return;
2130
2131 clk_prepare_lock();
2132
2133 if (clk->ops == &clk_nodrv_ops) {
2134 pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
2135 goto out;
2136 }
2137 /*
2138 * Assign empty clock ops for consumers that might still hold
2139 * a reference to this clock.
2140 */
2141 flags = clk_enable_lock();
2142 clk->ops = &clk_nodrv_ops;
2143 clk_enable_unlock(flags);
2144
2145 if (!hlist_empty(&clk->children)) {
2146 struct clk *child;
2147
2148 /* Reparent all children to the orphan list. */
2149 hlist_for_each_entry(child, &clk->children, child_node)
2150 clk_set_parent(child, NULL);
2151 }
2152
2153 clk_debug_unregister(clk);
2154
2155 hlist_del_init(&clk->child_node);
2156
2157 if (clk->prepare_count)
2158 pr_warn("%s: unregistering prepared clock: %s\n",
2159 __func__, clk->name);
2160
2161 kref_put(&clk->ref, __clk_release);
2162out:
2163 clk_prepare_unlock();
2164}
1914EXPORT_SYMBOL_GPL(clk_unregister); 2165EXPORT_SYMBOL_GPL(clk_unregister);
1915 2166
1916static void devm_clk_release(struct device *dev, void *res) 2167static void devm_clk_release(struct device *dev, void *res)
@@ -1970,6 +2221,32 @@ void devm_clk_unregister(struct device *dev, struct clk *clk)
1970} 2221}
1971EXPORT_SYMBOL_GPL(devm_clk_unregister); 2222EXPORT_SYMBOL_GPL(devm_clk_unregister);
1972 2223
2224/*
2225 * clkdev helpers
2226 */
2227int __clk_get(struct clk *clk)
2228{
2229 if (clk) {
2230 if (!try_module_get(clk->owner))
2231 return 0;
2232
2233 kref_get(&clk->ref);
2234 }
2235 return 1;
2236}
2237
2238void __clk_put(struct clk *clk)
2239{
2240 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2241 return;
2242
2243 clk_prepare_lock();
2244 kref_put(&clk->ref, __clk_release);
2245 clk_prepare_unlock();
2246
2247 module_put(clk->owner);
2248}
2249
1973/*** clk rate change notifiers ***/ 2250/*** clk rate change notifiers ***/
1974 2251
1975/** 2252/**
@@ -2104,13 +2381,22 @@ struct of_clk_provider {
2104 void *data; 2381 void *data;
2105}; 2382};
2106 2383
2107extern struct of_device_id __clk_of_table[];
2108
2109static const struct of_device_id __clk_of_table_sentinel 2384static const struct of_device_id __clk_of_table_sentinel
2110 __used __section(__clk_of_table_end); 2385 __used __section(__clk_of_table_end);
2111 2386
2112static LIST_HEAD(of_clk_providers); 2387static LIST_HEAD(of_clk_providers);
2113static DEFINE_MUTEX(of_clk_lock); 2388static DEFINE_MUTEX(of_clk_mutex);
2389
2390/* of_clk_provider list locking helpers */
2391void of_clk_lock(void)
2392{
2393 mutex_lock(&of_clk_mutex);
2394}
2395
2396void of_clk_unlock(void)
2397{
2398 mutex_unlock(&of_clk_mutex);
2399}
2114 2400
2115struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 2401struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
2116 void *data) 2402 void *data)
@@ -2154,9 +2440,9 @@ int of_clk_add_provider(struct device_node *np,
2154 cp->data = data; 2440 cp->data = data;
2155 cp->get = clk_src_get; 2441 cp->get = clk_src_get;
2156 2442
2157 mutex_lock(&of_clk_lock); 2443 mutex_lock(&of_clk_mutex);
2158 list_add(&cp->link, &of_clk_providers); 2444 list_add(&cp->link, &of_clk_providers);
2159 mutex_unlock(&of_clk_lock); 2445 mutex_unlock(&of_clk_mutex);
2160 pr_debug("Added clock from %s\n", np->full_name); 2446 pr_debug("Added clock from %s\n", np->full_name);
2161 2447
2162 return 0; 2448 return 0;
@@ -2171,7 +2457,7 @@ void of_clk_del_provider(struct device_node *np)
2171{ 2457{
2172 struct of_clk_provider *cp; 2458 struct of_clk_provider *cp;
2173 2459
2174 mutex_lock(&of_clk_lock); 2460 mutex_lock(&of_clk_mutex);
2175 list_for_each_entry(cp, &of_clk_providers, link) { 2461 list_for_each_entry(cp, &of_clk_providers, link) {
2176 if (cp->node == np) { 2462 if (cp->node == np) {
2177 list_del(&cp->link); 2463 list_del(&cp->link);
@@ -2180,24 +2466,33 @@ void of_clk_del_provider(struct device_node *np)
2180 break; 2466 break;
2181 } 2467 }
2182 } 2468 }
2183 mutex_unlock(&of_clk_lock); 2469 mutex_unlock(&of_clk_mutex);
2184} 2470}
2185EXPORT_SYMBOL_GPL(of_clk_del_provider); 2471EXPORT_SYMBOL_GPL(of_clk_del_provider);
2186 2472
2187struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) 2473struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
2188{ 2474{
2189 struct of_clk_provider *provider; 2475 struct of_clk_provider *provider;
2190 struct clk *clk = ERR_PTR(-ENOENT); 2476 struct clk *clk = ERR_PTR(-ENOENT);
2191 2477
2192 /* Check if we have such a provider in our array */ 2478 /* Check if we have such a provider in our array */
2193 mutex_lock(&of_clk_lock);
2194 list_for_each_entry(provider, &of_clk_providers, link) { 2479 list_for_each_entry(provider, &of_clk_providers, link) {
2195 if (provider->node == clkspec->np) 2480 if (provider->node == clkspec->np)
2196 clk = provider->get(clkspec, provider->data); 2481 clk = provider->get(clkspec, provider->data);
2197 if (!IS_ERR(clk)) 2482 if (!IS_ERR(clk))
2198 break; 2483 break;
2199 } 2484 }
2200 mutex_unlock(&of_clk_lock); 2485
2486 return clk;
2487}
2488
2489struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2490{
2491 struct clk *clk;
2492
2493 mutex_lock(&of_clk_mutex);
2494 clk = __of_clk_get_from_provider(clkspec);
2495 mutex_unlock(&of_clk_mutex);
2201 2496
2202 return clk; 2497 return clk;
2203} 2498}
@@ -2245,7 +2540,7 @@ void __init of_clk_init(const struct of_device_id *matches)
2245 struct device_node *np; 2540 struct device_node *np;
2246 2541
2247 if (!matches) 2542 if (!matches)
2248 matches = __clk_of_table; 2543 matches = &__clk_of_table;
2249 2544
2250 for_each_matching_node_and_match(np, matches, &match) { 2545 for_each_matching_node_and_match(np, matches, &match) {
2251 of_clk_init_cb_t clk_init_cb = match->data; 2546 of_clk_init_cb_t clk_init_cb = match->data;