diff options
author | Jerome Brunet <jbrunet@baylibre.com> | 2017-12-01 16:51:59 -0500 |
---|---|---|
committer | Michael Turquette <mturquette@baylibre.com> | 2017-12-19 19:35:34 -0500 |
commit | 55e9b8b7b806ec3f9a8817e13596682a5981c19c (patch) | |
tree | 9739ff95800ee2449e34a920ab174a0644edd189 | |
parent | c5ce26edb4fec178232b9cb37f334ec574931514 (diff) |
clk: add clk_rate_exclusive api
Using clock rate protection, we can now provide a way for clock consumers
to claim exclusive control over the rate of a producer.
So far, rate change operations have been a "last write wins" affair. This
change allows drivers to explicitly protect against this behavior, if
required.
Of course, if exclusivity over a producer is claimed more than once, the
rate is effectively locked as exclusivity cannot be preempted.
Tested-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Acked-by: Michael Turquette <mturquette@baylibre.com>
Signed-off-by: Jerome Brunet <jbrunet@baylibre.com>
Signed-off-by: Michael Turquette <mturquette@baylibre.com>
Link: lkml.kernel.org/r/20171201215200.23523-10-jbrunet@baylibre.com
-rw-r--r-- | drivers/clk/clk.c | 172 | ||||
-rw-r--r-- | include/linux/clk.h | 62 |
2 files changed, 234 insertions, 0 deletions
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index f6fe5e5595ca..8e728f395b54 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -87,6 +87,7 @@ struct clk { | |||
87 | const char *con_id; | 87 | const char *con_id; |
88 | unsigned long min_rate; | 88 | unsigned long min_rate; |
89 | unsigned long max_rate; | 89 | unsigned long max_rate; |
90 | unsigned int exclusive_count; | ||
90 | struct hlist_node clks_node; | 91 | struct hlist_node clks_node; |
91 | }; | 92 | }; |
92 | 93 | ||
@@ -565,6 +566,45 @@ static int clk_core_rate_nuke_protect(struct clk_core *core) | |||
565 | return ret; | 566 | return ret; |
566 | } | 567 | } |
567 | 568 | ||
569 | /** | ||
570 | * clk_rate_exclusive_put - release exclusivity over clock rate control | ||
571 | * @clk: the clk over which the exclusivity is released | ||
572 | * | ||
573 | * clk_rate_exclusive_put() completes a critical section during which a clock | ||
574 | * consumer cannot tolerate any other consumer making any operation on the | ||
575 | * clock which could result in a rate change or rate glitch. Exclusive clocks | ||
576 | * cannot have their rate changed, either directly or indirectly due to changes | ||
577 | * further up the parent chain of clocks. As a result, clocks up parent chain | ||
578 | * also get under exclusive control of the calling consumer. | ||
579 | * | ||
580 | * If exclusivity is claimed more than once on a clock, even by the same consumer, | ||
581 | * the rate effectively gets locked as exclusivity can't be preempted. | ||
582 | * | ||
583 | * Calls to clk_rate_exclusive_put() must be balanced with calls to | ||
584 | * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return | ||
585 | * error status. | ||
586 | */ | ||
587 | void clk_rate_exclusive_put(struct clk *clk) | ||
588 | { | ||
589 | if (!clk) | ||
590 | return; | ||
591 | |||
592 | clk_prepare_lock(); | ||
593 | |||
594 | /* | ||
595 | * if there is something wrong with this consumer protect count, stop | ||
596 | * here before messing with the provider | ||
597 | */ | ||
598 | if (WARN_ON(clk->exclusive_count <= 0)) | ||
599 | goto out; | ||
600 | |||
601 | clk_core_rate_unprotect(clk->core); | ||
602 | clk->exclusive_count--; | ||
603 | out: | ||
604 | clk_prepare_unlock(); | ||
605 | } | ||
606 | EXPORT_SYMBOL_GPL(clk_rate_exclusive_put); | ||
607 | |||
568 | static void clk_core_rate_protect(struct clk_core *core) | 608 | static void clk_core_rate_protect(struct clk_core *core) |
569 | { | 609 | { |
570 | lockdep_assert_held(&prepare_lock); | 610 | lockdep_assert_held(&prepare_lock); |
@@ -592,6 +632,38 @@ static void clk_core_rate_restore_protect(struct clk_core *core, int count) | |||
592 | core->protect_count = count; | 632 | core->protect_count = count; |
593 | } | 633 | } |
594 | 634 | ||
635 | /** | ||
636 | * clk_rate_exclusive_get - get exclusivity over the clk rate control | ||
637 | * @clk: the clk over which the exclusivity of rate control is requested | ||
638 | * | ||
639 | * clk_rate_exclusive_get() begins a critical section during which a clock | ||
640 | * consumer cannot tolerate any other consumer making any operation on the | ||
641 | * clock which could result in a rate change or rate glitch. Exclusive clocks | ||
642 | * cannot have their rate changed, either directly or indirectly due to changes | ||
643 | * further up the parent chain of clocks. As a result, clocks up parent chain | ||
644 | * also get under exclusive control of the calling consumer. | ||
645 | * | ||
646 | * If exclusivity is claimed more than once on a clock, even by the same consumer, | ||
647 | * the rate effectively gets locked as exclusivity can't be preempted. | ||
648 | * | ||
649 | * Calls to clk_rate_exclusive_get() should be balanced with calls to | ||
650 | * clk_rate_exclusive_put(). Calls to this function may sleep. | ||
651 | * Returns 0 on success, a negative errno otherwise | ||
652 | */ | ||
653 | int clk_rate_exclusive_get(struct clk *clk) | ||
654 | { | ||
655 | if (!clk) | ||
656 | return 0; | ||
657 | |||
658 | clk_prepare_lock(); | ||
659 | clk_core_rate_protect(clk->core); | ||
660 | clk->exclusive_count++; | ||
661 | clk_prepare_unlock(); | ||
662 | |||
663 | return 0; | ||
664 | } | ||
665 | EXPORT_SYMBOL_GPL(clk_rate_exclusive_get); | ||
666 | |||
595 | static void clk_core_unprepare(struct clk_core *core) | 667 | static void clk_core_unprepare(struct clk_core *core) |
596 | { | 668 | { |
597 | lockdep_assert_held(&prepare_lock); | 669 | lockdep_assert_held(&prepare_lock); |
@@ -988,6 +1060,12 @@ static int clk_core_determine_round_nolock(struct clk_core *core, | |||
988 | if (!core) | 1060 | if (!core) |
989 | return 0; | 1061 | return 0; |
990 | 1062 | ||
1063 | /* | ||
1064 | * At this point, core protection will be disabled if | ||
1065 | * - if the provider is not protected at all | ||
1066 | * - if the calling consumer is the only one which has exclusivity | ||
1067 | * over the provider | ||
1068 | */ | ||
991 | if (clk_core_rate_is_protected(core)) { | 1069 | if (clk_core_rate_is_protected(core)) { |
992 | req->rate = core->rate; | 1070 | req->rate = core->rate; |
993 | } else if (core->ops->determine_rate) { | 1071 | } else if (core->ops->determine_rate) { |
@@ -1104,10 +1182,17 @@ long clk_round_rate(struct clk *clk, unsigned long rate) | |||
1104 | 1182 | ||
1105 | clk_prepare_lock(); | 1183 | clk_prepare_lock(); |
1106 | 1184 | ||
1185 | if (clk->exclusive_count) | ||
1186 | clk_core_rate_unprotect(clk->core); | ||
1187 | |||
1107 | clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); | 1188 | clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); |
1108 | req.rate = rate; | 1189 | req.rate = rate; |
1109 | 1190 | ||
1110 | ret = clk_core_round_rate_nolock(clk->core, &req); | 1191 | ret = clk_core_round_rate_nolock(clk->core, &req); |
1192 | |||
1193 | if (clk->exclusive_count) | ||
1194 | clk_core_rate_protect(clk->core); | ||
1195 | |||
1111 | clk_prepare_unlock(); | 1196 | clk_prepare_unlock(); |
1112 | 1197 | ||
1113 | if (ret) | 1198 | if (ret) |
@@ -1843,8 +1928,14 @@ int clk_set_rate(struct clk *clk, unsigned long rate) | |||
1843 | /* prevent racing with updates to the clock topology */ | 1928 | /* prevent racing with updates to the clock topology */ |
1844 | clk_prepare_lock(); | 1929 | clk_prepare_lock(); |
1845 | 1930 | ||
1931 | if (clk->exclusive_count) | ||
1932 | clk_core_rate_unprotect(clk->core); | ||
1933 | |||
1846 | ret = clk_core_set_rate_nolock(clk->core, rate); | 1934 | ret = clk_core_set_rate_nolock(clk->core, rate); |
1847 | 1935 | ||
1936 | if (clk->exclusive_count) | ||
1937 | clk_core_rate_protect(clk->core); | ||
1938 | |||
1848 | clk_prepare_unlock(); | 1939 | clk_prepare_unlock(); |
1849 | 1940 | ||
1850 | return ret; | 1941 | return ret; |
@@ -1852,6 +1943,53 @@ int clk_set_rate(struct clk *clk, unsigned long rate) | |||
1852 | EXPORT_SYMBOL_GPL(clk_set_rate); | 1943 | EXPORT_SYMBOL_GPL(clk_set_rate); |
1853 | 1944 | ||
1854 | /** | 1945 | /** |
1946 | * clk_set_rate_exclusive - specify a new rate get exclusive control | ||
1947 | * @clk: the clk whose rate is being changed | ||
1948 | * @rate: the new rate for clk | ||
1949 | * | ||
1950 | * This is a combination of clk_set_rate() and clk_rate_exclusive_get() | ||
1951 | * within a critical section | ||
1952 | * | ||
1953 | * This can be used initially to ensure that at least 1 consumer is | ||
1954 | * satisfied when several consumers are competing for exclusivity over the | ||
1955 | * same clock provider. | ||
1956 | * | ||
1957 | * The exclusivity is not applied if setting the rate failed. | ||
1958 | * | ||
1959 | * Calls to clk_rate_exclusive_get() should be balanced with calls to | ||
1960 | * clk_rate_exclusive_put(). | ||
1961 | * | ||
1962 | * Returns 0 on success, a negative errno otherwise. | ||
1963 | */ | ||
1964 | int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) | ||
1965 | { | ||
1966 | int ret; | ||
1967 | |||
1968 | if (!clk) | ||
1969 | return 0; | ||
1970 | |||
1971 | /* prevent racing with updates to the clock topology */ | ||
1972 | clk_prepare_lock(); | ||
1973 | |||
1974 | /* | ||
1975 | * The temporary protection removal is not here, on purpose | ||
1976 | * This function is meant to be used instead of clk_rate_protect, | ||
1977 | * so before the consumer code path protect the clock provider | ||
1978 | */ | ||
1979 | |||
1980 | ret = clk_core_set_rate_nolock(clk->core, rate); | ||
1981 | if (!ret) { | ||
1982 | clk_core_rate_protect(clk->core); | ||
1983 | clk->exclusive_count++; | ||
1984 | } | ||
1985 | |||
1986 | clk_prepare_unlock(); | ||
1987 | |||
1988 | return ret; | ||
1989 | } | ||
1990 | EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); | ||
1991 | |||
1992 | /** | ||
1855 | * clk_set_rate_range - set a rate range for a clock source | 1993 | * clk_set_rate_range - set a rate range for a clock source |
1856 | * @clk: clock source | 1994 | * @clk: clock source |
1857 | * @min: desired minimum clock rate in Hz, inclusive | 1995 | * @min: desired minimum clock rate in Hz, inclusive |
@@ -1875,12 +2013,18 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) | |||
1875 | 2013 | ||
1876 | clk_prepare_lock(); | 2014 | clk_prepare_lock(); |
1877 | 2015 | ||
2016 | if (clk->exclusive_count) | ||
2017 | clk_core_rate_unprotect(clk->core); | ||
2018 | |||
1878 | if (min != clk->min_rate || max != clk->max_rate) { | 2019 | if (min != clk->min_rate || max != clk->max_rate) { |
1879 | clk->min_rate = min; | 2020 | clk->min_rate = min; |
1880 | clk->max_rate = max; | 2021 | clk->max_rate = max; |
1881 | ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate); | 2022 | ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate); |
1882 | } | 2023 | } |
1883 | 2024 | ||
2025 | if (clk->exclusive_count) | ||
2026 | clk_core_rate_protect(clk->core); | ||
2027 | |||
1884 | clk_prepare_unlock(); | 2028 | clk_prepare_unlock(); |
1885 | 2029 | ||
1886 | return ret; | 2030 | return ret; |
@@ -2091,8 +2235,16 @@ int clk_set_parent(struct clk *clk, struct clk *parent) | |||
2091 | return 0; | 2235 | return 0; |
2092 | 2236 | ||
2093 | clk_prepare_lock(); | 2237 | clk_prepare_lock(); |
2238 | |||
2239 | if (clk->exclusive_count) | ||
2240 | clk_core_rate_unprotect(clk->core); | ||
2241 | |||
2094 | ret = clk_core_set_parent_nolock(clk->core, | 2242 | ret = clk_core_set_parent_nolock(clk->core, |
2095 | parent ? parent->core : NULL); | 2243 | parent ? parent->core : NULL); |
2244 | |||
2245 | if (clk->exclusive_count) | ||
2246 | clk_core_rate_protect(clk->core); | ||
2247 | |||
2096 | clk_prepare_unlock(); | 2248 | clk_prepare_unlock(); |
2097 | 2249 | ||
2098 | return ret; | 2250 | return ret; |
@@ -2154,7 +2306,15 @@ int clk_set_phase(struct clk *clk, int degrees) | |||
2154 | degrees += 360; | 2306 | degrees += 360; |
2155 | 2307 | ||
2156 | clk_prepare_lock(); | 2308 | clk_prepare_lock(); |
2309 | |||
2310 | if (clk->exclusive_count) | ||
2311 | clk_core_rate_unprotect(clk->core); | ||
2312 | |||
2157 | ret = clk_core_set_phase_nolock(clk->core, degrees); | 2313 | ret = clk_core_set_phase_nolock(clk->core, degrees); |
2314 | |||
2315 | if (clk->exclusive_count) | ||
2316 | clk_core_rate_protect(clk->core); | ||
2317 | |||
2158 | clk_prepare_unlock(); | 2318 | clk_prepare_unlock(); |
2159 | 2319 | ||
2160 | return ret; | 2320 | return ret; |
@@ -3175,6 +3335,18 @@ void __clk_put(struct clk *clk) | |||
3175 | 3335 | ||
3176 | clk_prepare_lock(); | 3336 | clk_prepare_lock(); |
3177 | 3337 | ||
3338 | /* | ||
3339 | * Before calling clk_put, all calls to clk_rate_exclusive_get() from a | ||
3340 | * given user should be balanced with calls to clk_rate_exclusive_put() | ||
3341 | * and by that same consumer | ||
3342 | */ | ||
3343 | if (WARN_ON(clk->exclusive_count)) { | ||
3344 | /* We voiced our concern, let's sanitize the situation */ | ||
3345 | clk->core->protect_count -= (clk->exclusive_count - 1); | ||
3346 | clk_core_rate_unprotect(clk->core); | ||
3347 | clk->exclusive_count = 0; | ||
3348 | } | ||
3349 | |||
3178 | hlist_del(&clk->clks_node); | 3350 | hlist_del(&clk->clks_node); |
3179 | if (clk->min_rate > clk->core->req_rate || | 3351 | if (clk->min_rate > clk->core->req_rate || |
3180 | clk->max_rate < clk->core->req_rate) | 3352 | clk->max_rate < clk->core->req_rate) |
diff --git a/include/linux/clk.h b/include/linux/clk.h index 12c96d94d1fa..4c4ef9f34db3 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h | |||
@@ -331,6 +331,38 @@ struct clk *devm_clk_get(struct device *dev, const char *id); | |||
331 | */ | 331 | */ |
332 | struct clk *devm_get_clk_from_child(struct device *dev, | 332 | struct clk *devm_get_clk_from_child(struct device *dev, |
333 | struct device_node *np, const char *con_id); | 333 | struct device_node *np, const char *con_id); |
334 | /** | ||
335 | * clk_rate_exclusive_get - get exclusivity over the rate control of a | ||
336 | * producer | ||
337 | * @clk: clock source | ||
338 | * | ||
339 | * This function allows drivers to get exclusive control over the rate of a | ||
340 | * provider. It prevents any other consumer from executing, even indirectly, | ||
341 | * an operation which could alter the rate of the provider or cause glitches | ||
342 | * | ||
343 | * If exclusivity is claimed more than once on a clock, even by the same driver, | ||
344 | * the rate effectively gets locked as exclusivity can't be preempted. | ||
345 | * | ||
346 | * Must not be called from within atomic context. | ||
347 | * | ||
348 | * Returns success (0) or negative errno. | ||
349 | */ | ||
350 | int clk_rate_exclusive_get(struct clk *clk); | ||
351 | |||
352 | /** | ||
353 | * clk_rate_exclusive_put - release exclusivity over the rate control of a | ||
354 | * producer | ||
355 | * @clk: clock source | ||
356 | * | ||
357 | * This function allows drivers to release the exclusivity they previously got | ||
358 | * from clk_rate_exclusive_get() | ||
359 | * | ||
360 | * The caller must balance the number of clk_rate_exclusive_get() and | ||
361 | * clk_rate_exclusive_put() calls. | ||
362 | * | ||
363 | * Must not be called from within atomic context. | ||
364 | */ | ||
365 | void clk_rate_exclusive_put(struct clk *clk); | ||
334 | 366 | ||
335 | /** | 367 | /** |
336 | * clk_enable - inform the system when the clock source should be running. | 368 | * clk_enable - inform the system when the clock source should be running. |
@@ -473,6 +505,23 @@ long clk_round_rate(struct clk *clk, unsigned long rate); | |||
473 | int clk_set_rate(struct clk *clk, unsigned long rate); | 505 | int clk_set_rate(struct clk *clk, unsigned long rate); |
474 | 506 | ||
475 | /** | 507 | /** |
508 | * clk_set_rate_exclusive- set the clock rate and claim exclusivity over | ||
509 | * clock source | ||
510 | * @clk: clock source | ||
511 | * @rate: desired clock rate in Hz | ||
512 | * | ||
513 | * This helper function allows drivers to atomically set the rate of a producer | ||
514 | * and claim exclusivity over the rate control of the producer. | ||
515 | * | ||
516 | * It is essentially a combination of clk_set_rate() and | ||
517 | * clk_rate_exclusive_get(). Caller must balance this call with a call to | ||
518 | * clk_rate_exclusive_put() | ||
519 | * | ||
520 | * Returns success (0) or negative errno. | ||
521 | */ | ||
522 | int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); | ||
523 | |||
524 | /** | ||
476 | * clk_has_parent - check if a clock is a possible parent for another | 525 | * clk_has_parent - check if a clock is a possible parent for another |
477 | * @clk: clock source | 526 | * @clk: clock source |
478 | * @parent: parent clock source | 527 | * @parent: parent clock source |
@@ -583,6 +632,14 @@ static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} | |||
583 | 632 | ||
584 | static inline void devm_clk_put(struct device *dev, struct clk *clk) {} | 633 | static inline void devm_clk_put(struct device *dev, struct clk *clk) {} |
585 | 634 | ||
635 | |||
636 | static inline int clk_rate_exclusive_get(struct clk *clk) | ||
637 | { | ||
638 | return 0; | ||
639 | } | ||
640 | |||
641 | static inline void clk_rate_exclusive_put(struct clk *clk) {} | ||
642 | |||
586 | static inline int clk_enable(struct clk *clk) | 643 | static inline int clk_enable(struct clk *clk) |
587 | { | 644 | { |
588 | return 0; | 645 | return 0; |
@@ -609,6 +666,11 @@ static inline int clk_set_rate(struct clk *clk, unsigned long rate) | |||
609 | return 0; | 666 | return 0; |
610 | } | 667 | } |
611 | 668 | ||
669 | static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) | ||
670 | { | ||
671 | return 0; | ||
672 | } | ||
673 | |||
612 | static inline long clk_round_rate(struct clk *clk, unsigned long rate) | 674 | static inline long clk_round_rate(struct clk *clk, unsigned long rate) |
613 | { | 675 | { |
614 | return 0; | 676 | return 0; |