authorLinus Torvalds <torvalds@linux-foundation.org>2012-03-28 15:38:06 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-03-28 15:38:06 -0400
commit735e941caa9a35f933297af0ab1e0ad6447411c4 (patch)
treead05f19a0e748061d743f9f4d99cde9923b192c2
parent09893ee84591b0417a9186a7e7cf1503ccf99ac2 (diff)
parent8fb61e33507e5d76b69467b4f96290338e96b733 (diff)
Merge tag 'common-clk-api' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull "drivers/clk: common clock framework" from Olof Johansson: "This branch contains patches from Mike Turquette adding a common clock framework to be shared across platforms. This is part of the work towards building a common zImage for several ARM platforms." * tag 'common-clk-api' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: clk: make CONFIG_COMMON_CLK invisible clk: basic clock hardware types clk: introduce the common clock framework Documentation: common clk API
-rw-r--r--Documentation/clk.txt233
-rw-r--r--drivers/clk/Kconfig37
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/clk-divider.c200
-rw-r--r--drivers/clk/clk-fixed-rate.c82
-rw-r--r--drivers/clk/clk-gate.c150
-rw-r--r--drivers/clk/clk-mux.c116
-rw-r--r--drivers/clk/clk.c1461
-rw-r--r--include/linux/clk-private.h196
-rw-r--r--include/linux/clk-provider.h300
-rw-r--r--include/linux/clk.h68
11 files changed, 2840 insertions, 5 deletions
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
new file mode 100644
index 000000000000..1943fae014fd
--- /dev/null
+++ b/Documentation/clk.txt
@@ -0,0 +1,233 @@
1 The Common Clk Framework
2 Mike Turquette <mturquette@ti.com>
3
4This document endeavours to explain the common clk framework details,
5and how to port a platform over to this framework. It is not yet a
6detailed explanation of the clock api in include/linux/clk.h, but
7perhaps someday it will include that information.
8
9 Part 1 - introduction and interface split
10
11The common clk framework is an interface to control the clock nodes
12available on various devices today. This may come in the form of clock
13gating, rate adjustment, muxing or other operations. This framework is
14enabled with the CONFIG_COMMON_CLK option.
15
16The interface itself is divided into two halves, each shielded from the
17details of its counterpart. First is the common definition of struct
18clk which unifies the framework-level accounting and infrastructure that
19has traditionally been duplicated across a variety of platforms. Second
20is a common implementation of the clk.h api, defined in
21drivers/clk/clk.c. Finally there is struct clk_ops, whose operations
22are invoked by the clk api implementation.
23
24The second half of the interface is comprised of the hardware-specific
25callbacks registered with struct clk_ops and the corresponding
26hardware-specific structures needed to model a particular clock. For
27the remainder of this document any reference to a callback in struct
28clk_ops, such as .enable or .set_rate, implies the hardware-specific
29implementation of that code. Likewise, references to struct clk_foo
30serve as a convenient shorthand for the implementation of the
31hardware-specific bits for the hypothetical "foo" hardware.
32
33Tying the two halves of this interface together is struct clk_hw, which
34is defined in struct clk_foo and pointed to within struct clk. This
35allows for easy navigation between the two discrete halves of the common
36clock interface.
37
38 Part 2 - common data structures and api
39
40Below is the common struct clk definition from
41include/linux/clk-private.h, modified for brevity:
42
43 struct clk {
44 const char *name;
45 const struct clk_ops *ops;
46 struct clk_hw *hw;
47 char **parent_names;
48 struct clk **parents;
49 struct clk *parent;
50 struct hlist_head children;
51 struct hlist_node child_node;
52 ...
53 };
54
55The members above make up the core of the clk tree topology. The clk
56api itself defines several driver-facing functions which operate on
57struct clk. That api is documented in include/linux/clk.h.
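
As a quick, hedged illustration of the consumer side only (the dev pointer
and the "uart_fck" clock name below are invented, not taken from this
patch), a driver typically does something like:

	struct clk *clk;

	clk = clk_get(dev, "uart_fck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_prepare(clk);		/* may sleep */
	clk_enable(clk);		/* must not sleep */

	pr_debug("rate is %lu\n", clk_get_rate(clk));
	clk_set_rate(clk, 48000000);	/* only if the clk can change rate */

	clk_disable(clk);
	clk_unprepare(clk);
	clk_put(clk);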
58
59Platforms and devices utilizing the common struct clk use the struct
60clk_ops pointer in struct clk to perform the hardware-specific parts of
61the operations defined in clk.h:
62
63 struct clk_ops {
64 int (*prepare)(struct clk_hw *hw);
65 void (*unprepare)(struct clk_hw *hw);
66 int (*enable)(struct clk_hw *hw);
67 void (*disable)(struct clk_hw *hw);
68 int (*is_enabled)(struct clk_hw *hw);
69 unsigned long (*recalc_rate)(struct clk_hw *hw,
70 unsigned long parent_rate);
71 long (*round_rate)(struct clk_hw *hw, unsigned long,
72 unsigned long *);
73 int (*set_parent)(struct clk_hw *hw, u8 index);
74 u8 (*get_parent)(struct clk_hw *hw);
75 int (*set_rate)(struct clk_hw *hw, unsigned long);
76 void (*init)(struct clk_hw *hw);
77 };
78
79 Part 3 - hardware clk implementations
80
81The strength of the common struct clk comes from its .ops and .hw pointers
82which abstract the details of struct clk from the hardware-specific bits, and
83vice versa. To illustrate, consider the simple gateable clk implementation in
84drivers/clk/clk-gate.c:
85
86struct clk_gate {
87 struct clk_hw hw;
88 void __iomem *reg;
89 u8 bit_idx;
90 ...
91};
92
93struct clk_gate contains struct clk_hw hw as well as hardware-specific
94knowledge about which register and bit controls this clk's gating.
95Nothing about clock topology or accounting, such as enable_count or
96notifier_count, is needed here. That is all handled by the common
97framework code and struct clk.
98
99Let's walk through enabling this clk from driver code:
100
101 struct clk *clk;
102 clk = clk_get(NULL, "my_gateable_clk");
103
104 clk_prepare(clk);
105 clk_enable(clk);
106
107The call graph for clk_enable is very simple:
108
109clk_enable(clk);
110 clk->ops->enable(clk->hw);
111 [resolves to...]
112 clk_gate_enable(hw);
113 [resolves struct clk_gate with to_clk_gate(hw)]
114 clk_gate_set_bit(gate);
115
116And the definition of clk_gate_set_bit:
117
118static void clk_gate_set_bit(struct clk_gate *gate)
119{
120 u32 reg;
121
122 reg = readl(gate->reg);
123 reg |= BIT(gate->bit_idx);
124 writel(reg, gate->reg);
125}
126
127Note that to_clk_gate is defined as:
128
129#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
130
131This pattern of abstraction is used for every clock hardware
132representation.
133
134 Part 4 - supporting your own clk hardware
135
136When implementing support for a new type of clock it is only necessary to
137include the following header:
138
139#include <linux/clk-provider.h>
140
141include/linux/clk.h is included within that header and clk-private.h
142must never be included from the code which implements the operations for
143a clock. More on that below in Part 5.
144
145To construct a clk hardware structure for your platform you must define
146the following:
147
148struct clk_foo {
149 struct clk_hw hw;
150 ... hardware specific data goes here ...
151};
152
153To take advantage of your data you'll need to support valid operations
154for your clk:
155
156struct clk_ops clk_foo_ops = {
157	.enable = clk_foo_enable,
158	.disable = clk_foo_disable,
159};
160
161Implement the above functions using container_of:
162
163#define to_clk_foo(_hw) container_of(_hw, struct clk_foo, hw)
164
165int clk_foo_enable(struct clk_hw *hw)
166{
167 struct clk_foo *foo;
168
169 foo = to_clk_foo(hw);
170
171 ... perform magic on foo ...
172
173 return 0;
174}
175
176Below is a matrix detailing which clk_ops are mandatory based upon the
177hardware capabilities of that clock. A cell marked as "y" means
178mandatory, a cell marked as "n" implies that either including that
179callback is invalid or otherwise unnecessary. Empty cells are either
180optional or must be evaluated on a case-by-case basis.
181
182 clock hardware characteristics
183 -----------------------------------------------------------
184 | gate | change rate | single parent | multiplexer | root |
185 |------|-------------|---------------|-------------|------|
186.prepare | | | | | |
187.unprepare | | | | | |
188 | | | | | |
189.enable | y | | | | |
190.disable | y | | | | |
191.is_enabled | y | | | | |
192 | | | | | |
193.recalc_rate | | y | | | |
194.round_rate | | y | | | |
195.set_rate | | y | | | |
196 | | | | | |
197.set_parent | | | n | y | n |
198.get_parent | | | n | y | n |
199 | | | | | |
200.init | | | | | |
201 -----------------------------------------------------------
202
203Finally, register your clock at run-time with a hardware-specific
204registration function. This function simply populates struct clk_foo's
205data and then passes the common struct clk parameters to the framework
206with a call to:
207
208clk_register(...)
209
210See the basic clock types in drivers/clk/clk-*.c for examples.
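
For reference only, a bare-bones registration helper for the hypothetical
"foo" hardware might look roughly like the sketch below. It is modelled on
clk_register_gate and clk_register_mux from this series; the clk_foo names
are invented:

struct clk *clk_register_foo(struct device *dev, const char *name,
		char **parent_names, u8 num_parents, unsigned long flags)
{
	struct clk_foo *foo;

	foo = kzalloc(sizeof(struct clk_foo), GFP_KERNEL);
	if (!foo) {
		pr_err("%s: could not allocate foo clk\n", __func__);
		return NULL;
	}

	/* fill in the hardware-specific members of struct clk_foo here */

	return clk_register(dev, name, &clk_foo_ops, &foo->hw,
			parent_names, num_parents, flags);
}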
211
212 Part 5 - static initialization of clock data
213
214For platforms with many clocks (often numbering into the hundreds) it
215may be desirable to statically initialize some clock data. This
216presents a problem since the definition of struct clk should be hidden
217from everyone except for the clock core in drivers/clk/clk.c.
218
219To get around this problem struct clk's definition is exposed in
220include/linux/clk-private.h along with some macros for more easily
221initializing instances of the basic clock types. These clocks must
222still be initialized with the common clock framework via a call to
223__clk_init.
224
225clk-private.h must NEVER be included by code which implements struct
226clk_ops callbacks, nor must it be included by any logic which pokes
227around inside of struct clk at run-time. To do so is a layering
228violation.
229
230To better enforce this policy, always follow this simple rule: any
231statically initialized clock data MUST be defined in a separate file
232from the logic that implements its ops. Basically separate the logic
233from the data and all is well.
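
As a rough illustration of that split (a sketch only: it fills in struct clk
fields by hand using the members excerpted in Part 2 rather than the
initializer macros mentioned above, and the "osc" clock, its rate and the
foo_clocks_init function are all invented), the data-only file might contain:

	/* foo-clock-data.c: data only, may include clk-private.h */
	#include <linux/clk-private.h>

	static struct clk_fixed_rate osc_hw = {
		.fixed_rate	= 32768,
	};

	static struct clk osc = {
		.name	= "osc",
		.ops	= &clk_fixed_rate_ops,
		.hw	= &osc_hw.hw,
		.flags	= CLK_IS_ROOT,
	};

	void __init foo_clocks_init(void)
	{
		osc_hw.hw.clk = &osc;	/* link hw back to the clk */
		__clk_init(NULL, &osc);
	}

while the logic implementing any custom clk_ops lives in a separate file
that includes only clk-provider.h.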
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 9b3cd08cd0ed..165e1febae53 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -8,3 +8,40 @@ config HAVE_CLK_PREPARE
8 8
9 9config HAVE_MACH_CLKDEV
10 10	bool
11
12config COMMON_CLK
13 bool
14 select HAVE_CLK_PREPARE
15 ---help---
16 The common clock framework is a single definition of struct
17 clk, useful across many platforms, as well as an
18 implementation of the clock API in include/linux/clk.h.
19 Architectures utilizing the common struct clk should select
20 this option.
21
22menu "Common Clock Framework"
23 depends on COMMON_CLK
24
25config COMMON_CLK_DISABLE_UNUSED
26	bool "Disable unused clocks at boot"
27 depends on COMMON_CLK
28 ---help---
29 Traverses the entire clock tree and disables any clocks that are
30 enabled in hardware but have not been enabled by any device drivers.
31 This saves power and keeps the software model of the clock in line
32 with reality.
33
34 If in doubt, say "N".
35
36config COMMON_CLK_DEBUG
37 bool "DebugFS representation of clock tree"
38 depends on COMMON_CLK
39 select DEBUG_FS
40 ---help---
41 Creates a directory hierarchy in debugfs for visualizing the clk
42 tree structure. Each directory contains read-only members
43 that export information specific to that clk node: clk_rate,
44 clk_flags, clk_prepare_count, clk_enable_count &
45 clk_notifier_count.
46
47endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 07613fa172c9..1f736bc11c4b 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,2 +1,4 @@
1 1
2 2obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
3obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
4 clk-mux.o clk-divider.o
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
new file mode 100644
index 000000000000..d5ac6a75ea57
--- /dev/null
+++ b/drivers/clk/clk-divider.c
@@ -0,0 +1,200 @@
1/*
2 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
3 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
4 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Adjustable divider clock implementation
11 */
12
13#include <linux/clk-provider.h>
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/err.h>
18#include <linux/string.h>
19
20/*
21 * DOC: basic adjustable divider clock that cannot gate
22 *
23 * Traits of this clock:
24 * prepare - clk_prepare only ensures that parents are prepared
25 * enable - clk_enable only ensures that parents are enabled
26 * rate - rate is adjustable. clk->rate = parent->rate / divisor
27 * parent - fixed parent. No clk_set_parent support
28 */
29
30#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
31
32#define div_mask(d) ((1 << (d->width)) - 1)
33
34static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
35 unsigned long parent_rate)
36{
37 struct clk_divider *divider = to_clk_divider(hw);
38 unsigned int div;
39
40 div = readl(divider->reg) >> divider->shift;
41 div &= div_mask(divider);
42
43 if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
44 div++;
45
46 return parent_rate / div;
47}
48EXPORT_SYMBOL_GPL(clk_divider_recalc_rate);
49
50/*
51 * The reverse of DIV_ROUND_UP: The maximum number which
52 * divided by m is r
53 */
54#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
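/*
 * Worked example: for r = 3 and m = 4, MULT_ROUND_UP(3, 4) = 15;
 * 15 / 4 = 3 while 16 / 4 = 4, so 15 is the largest value whose
 * integer division by 4 still yields 3.
 */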
55
56static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
57 unsigned long *best_parent_rate)
58{
59 struct clk_divider *divider = to_clk_divider(hw);
60 int i, bestdiv = 0;
61 unsigned long parent_rate, best = 0, now, maxdiv;
62
63 if (!rate)
64 rate = 1;
65
66 maxdiv = (1 << divider->width);
67
68 if (divider->flags & CLK_DIVIDER_ONE_BASED)
69 maxdiv--;
70
71 if (!best_parent_rate) {
72 parent_rate = __clk_get_rate(__clk_get_parent(hw->clk));
73 bestdiv = DIV_ROUND_UP(parent_rate, rate);
74 bestdiv = bestdiv == 0 ? 1 : bestdiv;
75 bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
76 return bestdiv;
77 }
78
79 /*
80 * The maximum divider we can use without overflowing
81 * unsigned long in rate * i below
82 */
83 maxdiv = min(ULONG_MAX / rate, maxdiv);
84
85 for (i = 1; i <= maxdiv; i++) {
86 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
87 MULT_ROUND_UP(rate, i));
88 now = parent_rate / i;
89 if (now <= rate && now > best) {
90 bestdiv = i;
91 best = now;
92 *best_parent_rate = parent_rate;
93 }
94 }
95
96 if (!bestdiv) {
97 bestdiv = (1 << divider->width);
98 if (divider->flags & CLK_DIVIDER_ONE_BASED)
99 bestdiv--;
100 *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
101 }
102
103 return bestdiv;
104}
105
106static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
107 unsigned long *prate)
108{
109 int div;
110 div = clk_divider_bestdiv(hw, rate, prate);
111
112 if (prate)
113 return *prate / div;
114 else {
115 unsigned long r;
116 r = __clk_get_rate(__clk_get_parent(hw->clk));
117 return r / div;
118 }
119}
120EXPORT_SYMBOL_GPL(clk_divider_round_rate);
121
122static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
123{
124 struct clk_divider *divider = to_clk_divider(hw);
125 unsigned int div;
126 unsigned long flags = 0;
127 u32 val;
128
129 div = __clk_get_rate(__clk_get_parent(hw->clk)) / rate;
130
131 if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
132 div--;
133
134 if (div > div_mask(divider))
135 div = div_mask(divider);
136
137 if (divider->lock)
138 spin_lock_irqsave(divider->lock, flags);
139
140 val = readl(divider->reg);
141 val &= ~(div_mask(divider) << divider->shift);
142 val |= div << divider->shift;
143 writel(val, divider->reg);
144
145 if (divider->lock)
146 spin_unlock_irqrestore(divider->lock, flags);
147
148 return 0;
149}
150EXPORT_SYMBOL_GPL(clk_divider_set_rate);
151
152struct clk_ops clk_divider_ops = {
153 .recalc_rate = clk_divider_recalc_rate,
154 .round_rate = clk_divider_round_rate,
155 .set_rate = clk_divider_set_rate,
156};
157EXPORT_SYMBOL_GPL(clk_divider_ops);
158
159struct clk *clk_register_divider(struct device *dev, const char *name,
160 const char *parent_name, unsigned long flags,
161 void __iomem *reg, u8 shift, u8 width,
162 u8 clk_divider_flags, spinlock_t *lock)
163{
164 struct clk_divider *div;
165 struct clk *clk;
166
167 div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
168
169 if (!div) {
170 pr_err("%s: could not allocate divider clk\n", __func__);
171 return NULL;
172 }
173
174 /* struct clk_divider assignments */
175 div->reg = reg;
176 div->shift = shift;
177 div->width = width;
178 div->flags = clk_divider_flags;
179 div->lock = lock;
180
181 if (parent_name) {
182 div->parent[0] = kstrdup(parent_name, GFP_KERNEL);
183 if (!div->parent[0])
184 goto out;
185 }
186
187 clk = clk_register(dev, name,
188 &clk_divider_ops, &div->hw,
189 div->parent,
190 (parent_name ? 1 : 0),
191 flags);
192 if (clk)
193 return clk;
194
195out:
196 kfree(div->parent[0]);
197 kfree(div);
198
199 return NULL;
200}
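
To see the divider in use, platform code hands clk_register_divider an
__iomem pointer plus the shift and width of the divider field, for example
(a hedged sketch; the "foo" names, the base pointer and the register offset
are invented):

	static DEFINE_SPINLOCK(foo_div_lock);

	struct clk *clk;

	clk = clk_register_divider(NULL, "foo_div", "foo_parent", 0,
				   base + 0x10, 4, 3, 0, &foo_div_lock);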
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
new file mode 100644
index 000000000000..90c79fb5d1bd
--- /dev/null
+++ b/drivers/clk/clk-fixed-rate.c
@@ -0,0 +1,82 @@
1/*
2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Fixed rate clock implementation
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/io.h>
16#include <linux/err.h>
17
18/*
19 * DOC: basic fixed-rate clock that cannot gate
20 *
21 * Traits of this clock:
22 * prepare - clk_(un)prepare only ensures parents are prepared
23 * enable - clk_enable only ensures parents are enabled
24 * rate - rate is always a fixed value. No clk_set_rate support
25 * parent - fixed parent. No clk_set_parent support
26 */
27
28#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
29
30static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
31 unsigned long parent_rate)
32{
33 return to_clk_fixed_rate(hw)->fixed_rate;
34}
35EXPORT_SYMBOL_GPL(clk_fixed_rate_recalc_rate);
36
37struct clk_ops clk_fixed_rate_ops = {
38 .recalc_rate = clk_fixed_rate_recalc_rate,
39};
40EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
41
42struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
43 const char *parent_name, unsigned long flags,
44 unsigned long fixed_rate)
45{
46 struct clk_fixed_rate *fixed;
47 char **parent_names = NULL;
48 u8 len;
49
50 fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
51
52 if (!fixed) {
53 pr_err("%s: could not allocate fixed clk\n", __func__);
54 return ERR_PTR(-ENOMEM);
55 }
56
57 /* struct clk_fixed_rate assignments */
58 fixed->fixed_rate = fixed_rate;
59
60 if (parent_name) {
61 parent_names = kmalloc(sizeof(char *), GFP_KERNEL);
62
63 if (! parent_names)
64 goto out;
65
66 len = sizeof(char) * strlen(parent_name);
67
68 parent_names[0] = kmalloc(len, GFP_KERNEL);
69
70 if (!parent_names[0])
71 goto out;
72
73 strncpy(parent_names[0], parent_name, len);
74 }
75
76out:
77 return clk_register(dev, name,
78 &clk_fixed_rate_ops, &fixed->hw,
79 parent_names,
80 (parent_name ? 1 : 0),
81 flags);
82}
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
new file mode 100644
index 000000000000..b5902e2ef2fd
--- /dev/null
+++ b/drivers/clk/clk-gate.c
@@ -0,0 +1,150 @@
1/*
2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Gated clock implementation
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/io.h>
16#include <linux/err.h>
17#include <linux/string.h>
18
19/**
20 * DOC: basic gatable clock which can gate and ungate its output
21 *
22 * Traits of this clock:
23 * prepare - clk_(un)prepare only ensures parent is (un)prepared
24 * enable - clk_enable and clk_disable are functional & control gating
25 * rate - inherits rate from parent. No clk_set_rate support
26 * parent - fixed parent. No clk_set_parent support
27 */
28
29#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
30
31static void clk_gate_set_bit(struct clk_gate *gate)
32{
33 u32 reg;
34 unsigned long flags = 0;
35
36 if (gate->lock)
37 spin_lock_irqsave(gate->lock, flags);
38
39 reg = readl(gate->reg);
40 reg |= BIT(gate->bit_idx);
41 writel(reg, gate->reg);
42
43 if (gate->lock)
44 spin_unlock_irqrestore(gate->lock, flags);
45}
46
47static void clk_gate_clear_bit(struct clk_gate *gate)
48{
49 u32 reg;
50 unsigned long flags = 0;
51
52 if (gate->lock)
53 spin_lock_irqsave(gate->lock, flags);
54
55 reg = readl(gate->reg);
56 reg &= ~BIT(gate->bit_idx);
57 writel(reg, gate->reg);
58
59 if (gate->lock)
60 spin_unlock_irqrestore(gate->lock, flags);
61}
62
63static int clk_gate_enable(struct clk_hw *hw)
64{
65 struct clk_gate *gate = to_clk_gate(hw);
66
67 if (gate->flags & CLK_GATE_SET_TO_DISABLE)
68 clk_gate_clear_bit(gate);
69 else
70 clk_gate_set_bit(gate);
71
72 return 0;
73}
74EXPORT_SYMBOL_GPL(clk_gate_enable);
75
76static void clk_gate_disable(struct clk_hw *hw)
77{
78 struct clk_gate *gate = to_clk_gate(hw);
79
80 if (gate->flags & CLK_GATE_SET_TO_DISABLE)
81 clk_gate_set_bit(gate);
82 else
83 clk_gate_clear_bit(gate);
84}
85EXPORT_SYMBOL_GPL(clk_gate_disable);
86
87static int clk_gate_is_enabled(struct clk_hw *hw)
88{
89 u32 reg;
90 struct clk_gate *gate = to_clk_gate(hw);
91
92 reg = readl(gate->reg);
93
94 /* if a set bit disables this clk, flip it before masking */
95 if (gate->flags & CLK_GATE_SET_TO_DISABLE)
96 reg ^= BIT(gate->bit_idx);
97
98 reg &= BIT(gate->bit_idx);
99
100 return reg ? 1 : 0;
101}
102EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
103
104struct clk_ops clk_gate_ops = {
105 .enable = clk_gate_enable,
106 .disable = clk_gate_disable,
107 .is_enabled = clk_gate_is_enabled,
108};
109EXPORT_SYMBOL_GPL(clk_gate_ops);
110
111struct clk *clk_register_gate(struct device *dev, const char *name,
112 const char *parent_name, unsigned long flags,
113 void __iomem *reg, u8 bit_idx,
114 u8 clk_gate_flags, spinlock_t *lock)
115{
116 struct clk_gate *gate;
117 struct clk *clk;
118
119 gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
120
121 if (!gate) {
122 pr_err("%s: could not allocate gated clk\n", __func__);
123 return NULL;
124 }
125
126 /* struct clk_gate assignments */
127 gate->reg = reg;
128 gate->bit_idx = bit_idx;
129 gate->flags = clk_gate_flags;
130 gate->lock = lock;
131
132 if (parent_name) {
133 gate->parent[0] = kstrdup(parent_name, GFP_KERNEL);
134 if (!gate->parent[0])
135 goto out;
136 }
137
138 clk = clk_register(dev, name,
139 &clk_gate_ops, &gate->hw,
140 gate->parent,
141 (parent_name ? 1 : 0),
142 flags);
143 if (clk)
144 return clk;
145out:
146 kfree(gate->parent[0]);
147 kfree(gate);
148
149 return NULL;
150}
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
new file mode 100644
index 000000000000..c71ad1f41a97
--- /dev/null
+++ b/drivers/clk/clk-mux.c
@@ -0,0 +1,116 @@
1/*
2 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
3 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
4 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Simple multiplexer clock implementation
11 */
12
13#include <linux/clk.h>
14#include <linux/clk-provider.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/io.h>
18#include <linux/err.h>
19
20/*
21 * DOC: basic adjustable multiplexer clock that cannot gate
22 *
23 * Traits of this clock:
24 * prepare - clk_prepare only ensures that parents are prepared
25 * enable - clk_enable only ensures that parents are enabled
26 * rate - rate is only affected by parent switching. No clk_set_rate support
27 * parent - parent is adjustable through clk_set_parent
28 */
29
30#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
31
32static u8 clk_mux_get_parent(struct clk_hw *hw)
33{
34 struct clk_mux *mux = to_clk_mux(hw);
35 u32 val;
36
37 /*
38 * FIXME need a mux-specific flag to determine if val is bitwise or numeric
39 * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
40 * to 0x7 (index starts at one)
41 * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
42 * val = 0x4 really means "bit 2, index starts at bit 0"
43 */
44 val = readl(mux->reg) >> mux->shift;
45 val &= (1 << mux->width) - 1;
46
47 if (val && (mux->flags & CLK_MUX_INDEX_BIT))
48 val = ffs(val) - 1;
49
50 if (val && (mux->flags & CLK_MUX_INDEX_ONE))
51 val--;
52
53 if (val >= __clk_get_num_parents(hw->clk))
54 return -EINVAL;
55
56 return val;
57}
58EXPORT_SYMBOL_GPL(clk_mux_get_parent);
59
60static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
61{
62 struct clk_mux *mux = to_clk_mux(hw);
63 u32 val;
64 unsigned long flags = 0;
65
66 if (mux->flags & CLK_MUX_INDEX_BIT)
67 index = (1 << ffs(index));
68
69 if (mux->flags & CLK_MUX_INDEX_ONE)
70 index++;
71
72 if (mux->lock)
73 spin_lock_irqsave(mux->lock, flags);
74
75 val = readl(mux->reg);
76 val &= ~(((1 << mux->width) - 1) << mux->shift);
77 val |= index << mux->shift;
78 writel(val, mux->reg);
79
80 if (mux->lock)
81 spin_unlock_irqrestore(mux->lock, flags);
82
83 return 0;
84}
85EXPORT_SYMBOL_GPL(clk_mux_set_parent);
86
87struct clk_ops clk_mux_ops = {
88 .get_parent = clk_mux_get_parent,
89 .set_parent = clk_mux_set_parent,
90};
91EXPORT_SYMBOL_GPL(clk_mux_ops);
92
93struct clk *clk_register_mux(struct device *dev, const char *name,
94 char **parent_names, u8 num_parents, unsigned long flags,
95 void __iomem *reg, u8 shift, u8 width,
96 u8 clk_mux_flags, spinlock_t *lock)
97{
98 struct clk_mux *mux;
99
100 mux = kmalloc(sizeof(struct clk_mux), GFP_KERNEL);
101
102 if (!mux) {
103 pr_err("%s: could not allocate mux clk\n", __func__);
104 return ERR_PTR(-ENOMEM);
105 }
106
107 /* struct clk_mux assignments */
108 mux->reg = reg;
109 mux->shift = shift;
110 mux->width = width;
111 mux->flags = clk_mux_flags;
112 mux->lock = lock;
113
114 return clk_register(dev, name, &clk_mux_ops, &mux->hw,
115 parent_names, num_parents, flags);
116}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
new file mode 100644
index 000000000000..9cf6f59e3e19
--- /dev/null
+++ b/drivers/clk/clk.c
@@ -0,0 +1,1461 @@
1/*
2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Standard functionality for the common clock API. See Documentation/clk.txt
10 */
11
12#include <linux/clk-private.h>
13#include <linux/module.h>
14#include <linux/mutex.h>
15#include <linux/spinlock.h>
16#include <linux/err.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19
20static DEFINE_SPINLOCK(enable_lock);
21static DEFINE_MUTEX(prepare_lock);
22
23static HLIST_HEAD(clk_root_list);
24static HLIST_HEAD(clk_orphan_list);
25static LIST_HEAD(clk_notifier_list);
26
27/*** debugfs support ***/
28
29#ifdef CONFIG_COMMON_CLK_DEBUG
30#include <linux/debugfs.h>
31
32static struct dentry *rootdir;
33static struct dentry *orphandir;
34static int inited = 0;
35
36/* caller must hold prepare_lock */
37static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
38{
39 struct dentry *d;
40 int ret = -ENOMEM;
41
42 if (!clk || !pdentry) {
43 ret = -EINVAL;
44 goto out;
45 }
46
47 d = debugfs_create_dir(clk->name, pdentry);
48 if (!d)
49 goto out;
50
51 clk->dentry = d;
52
53 d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
54 (u32 *)&clk->rate);
55 if (!d)
56 goto err_out;
57
58 d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
59 (u32 *)&clk->flags);
60 if (!d)
61 goto err_out;
62
63 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
64 (u32 *)&clk->prepare_count);
65 if (!d)
66 goto err_out;
67
68 d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
69 (u32 *)&clk->enable_count);
70 if (!d)
71 goto err_out;
72
73 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
74 (u32 *)&clk->notifier_count);
75 if (!d)
76 goto err_out;
77
78 ret = 0;
79 goto out;
80
81err_out:
82 debugfs_remove(clk->dentry);
83out:
84 return ret;
85}
86
87/* caller must hold prepare_lock */
88static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
89{
90 struct clk *child;
91 struct hlist_node *tmp;
92 int ret = -EINVAL;
93
94 if (!clk || !pdentry)
95 goto out;
96
97 ret = clk_debug_create_one(clk, pdentry);
98
99 if (ret)
100 goto out;
101
102 hlist_for_each_entry(child, tmp, &clk->children, child_node)
103 clk_debug_create_subtree(child, clk->dentry);
104
105 ret = 0;
106out:
107 return ret;
108}
109
110/**
111 * clk_debug_register - add a clk node to the debugfs clk tree
112 * @clk: the clk being added to the debugfs clk tree
113 *
114 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
115 * initialized. Otherwise it bails out early since the debugfs clk tree
116 * will be created lazily by clk_debug_init as part of a late_initcall.
117 *
118 * Caller must hold prepare_lock. Only __clk_init calls this function (so
119 * far), so this is taken care of.
120 */
121static int clk_debug_register(struct clk *clk)
122{
123 struct clk *parent;
124 struct dentry *pdentry;
125 int ret = 0;
126
127 if (!inited)
128 goto out;
129
130 parent = clk->parent;
131
132 /*
133 * Check to see if a clk is a root clk. Also check that it is
134 * safe to add this clk to debugfs
135 */
136 if (!parent)
137 if (clk->flags & CLK_IS_ROOT)
138 pdentry = rootdir;
139 else
140 pdentry = orphandir;
141 else
142 if (parent->dentry)
143 pdentry = parent->dentry;
144 else
145 goto out;
146
147 ret = clk_debug_create_subtree(clk, pdentry);
148
149out:
150 return ret;
151}
152
153/**
154 * clk_debug_init - lazily create the debugfs clk tree visualization
155 *
156 * clks are often initialized very early during boot before memory can
157 * be dynamically allocated and well before debugfs is setup.
158 * clk_debug_init walks the clk tree hierarchy while holding
159 * prepare_lock and creates the topology as part of a late_initcall,
160 * thus ensuring that clks initialized very early will still be
161 * represented in the debugfs clk tree. This function should only be
162 * called once at boot-time, and all other clks added dynamically will
163 * be done so with clk_debug_register.
164 */
165static int __init clk_debug_init(void)
166{
167 struct clk *clk;
168 struct hlist_node *tmp;
169
170 rootdir = debugfs_create_dir("clk", NULL);
171
172 if (!rootdir)
173 return -ENOMEM;
174
175 orphandir = debugfs_create_dir("orphans", rootdir);
176
177 if (!orphandir)
178 return -ENOMEM;
179
180 mutex_lock(&prepare_lock);
181
182 hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
183 clk_debug_create_subtree(clk, rootdir);
184
185 hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
186 clk_debug_create_subtree(clk, orphandir);
187
188 inited = 1;
189
190 mutex_unlock(&prepare_lock);
191
192 return 0;
193}
194late_initcall(clk_debug_init);
195#else
196static inline int clk_debug_register(struct clk *clk) { return 0; }
197#endif /* CONFIG_COMMON_CLK_DEBUG */
198
199#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
200/* caller must hold prepare_lock */
201static void clk_disable_unused_subtree(struct clk *clk)
202{
203 struct clk *child;
204 struct hlist_node *tmp;
205 unsigned long flags;
206
207 if (!clk)
208 goto out;
209
210 hlist_for_each_entry(child, tmp, &clk->children, child_node)
211 clk_disable_unused_subtree(child);
212
213 spin_lock_irqsave(&enable_lock, flags);
214
215 if (clk->enable_count)
216 goto unlock_out;
217
218 if (clk->flags & CLK_IGNORE_UNUSED)
219 goto unlock_out;
220
221 if (__clk_is_enabled(clk) && clk->ops->disable)
222 clk->ops->disable(clk->hw);
223
224unlock_out:
225 spin_unlock_irqrestore(&enable_lock, flags);
226
227out:
228 return;
229}
230
231static int clk_disable_unused(void)
232{
233 struct clk *clk;
234 struct hlist_node *tmp;
235
236 mutex_lock(&prepare_lock);
237
238 hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
239 clk_disable_unused_subtree(clk);
240
241 hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
242 clk_disable_unused_subtree(clk);
243
244 mutex_unlock(&prepare_lock);
245
246 return 0;
247}
248late_initcall(clk_disable_unused);
249#else
250static inline int clk_disable_unused(struct clk *clk) { return 0; }
251#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */
252
253/*** helper functions ***/
254
255inline const char *__clk_get_name(struct clk *clk)
256{
257 return !clk ? NULL : clk->name;
258}
259
260inline struct clk_hw *__clk_get_hw(struct clk *clk)
261{
262 return !clk ? NULL : clk->hw;
263}
264
265inline u8 __clk_get_num_parents(struct clk *clk)
266{
267 return !clk ? -EINVAL : clk->num_parents;
268}
269
270inline struct clk *__clk_get_parent(struct clk *clk)
271{
272 return !clk ? NULL : clk->parent;
273}
274
275inline int __clk_get_enable_count(struct clk *clk)
276{
277 return !clk ? -EINVAL : clk->enable_count;
278}
279
280inline int __clk_get_prepare_count(struct clk *clk)
281{
282 return !clk ? -EINVAL : clk->prepare_count;
283}
284
285unsigned long __clk_get_rate(struct clk *clk)
286{
287 unsigned long ret;
288
289 if (!clk) {
290 ret = -EINVAL;
291 goto out;
292 }
293
294 ret = clk->rate;
295
296 if (clk->flags & CLK_IS_ROOT)
297 goto out;
298
299 if (!clk->parent)
300 ret = -ENODEV;
301
302out:
303 return ret;
304}
305
306inline unsigned long __clk_get_flags(struct clk *clk)
307{
308 return !clk ? -EINVAL : clk->flags;
309}
310
311int __clk_is_enabled(struct clk *clk)
312{
313 int ret;
314
315 if (!clk)
316 return -EINVAL;
317
318 /*
319 * .is_enabled is only mandatory for clocks that gate
320 * fall back to software usage counter if .is_enabled is missing
321 */
322 if (!clk->ops->is_enabled) {
323 ret = clk->enable_count ? 1 : 0;
324 goto out;
325 }
326
327 ret = clk->ops->is_enabled(clk->hw);
328out:
329 return ret;
330}
331
332static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
333{
334 struct clk *child;
335 struct clk *ret;
336 struct hlist_node *tmp;
337
338 if (!strcmp(clk->name, name))
339 return clk;
340
341 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
342 ret = __clk_lookup_subtree(name, child);
343 if (ret)
344 return ret;
345 }
346
347 return NULL;
348}
349
350struct clk *__clk_lookup(const char *name)
351{
352 struct clk *root_clk;
353 struct clk *ret;
354 struct hlist_node *tmp;
355
356 if (!name)
357 return NULL;
358
359 /* search the 'proper' clk tree first */
360 hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
361 ret = __clk_lookup_subtree(name, root_clk);
362 if (ret)
363 return ret;
364 }
365
366 /* if not found, then search the orphan tree */
367 hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
368 ret = __clk_lookup_subtree(name, root_clk);
369 if (ret)
370 return ret;
371 }
372
373 return NULL;
374}
375
376/*** clk api ***/
377
378void __clk_unprepare(struct clk *clk)
379{
380 if (!clk)
381 return;
382
383 if (WARN_ON(clk->prepare_count == 0))
384 return;
385
386 if (--clk->prepare_count > 0)
387 return;
388
389 WARN_ON(clk->enable_count > 0);
390
391 if (clk->ops->unprepare)
392 clk->ops->unprepare(clk->hw);
393
394 __clk_unprepare(clk->parent);
395}
396
397/**
398 * clk_unprepare - undo preparation of a clock source
399 * @clk: the clk being unprepared
400 *
401 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
402 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
403 * if the operation may sleep. One example is a clk which is accessed over
404 * I2C. In the complex case a clk gate operation may require a fast and a slow
405 * part. It is for this reason that clk_unprepare and clk_disable are not
406 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
407 */
408void clk_unprepare(struct clk *clk)
409{
410 mutex_lock(&prepare_lock);
411 __clk_unprepare(clk);
412 mutex_unlock(&prepare_lock);
413}
414EXPORT_SYMBOL_GPL(clk_unprepare);
415
416int __clk_prepare(struct clk *clk)
417{
418 int ret = 0;
419
420 if (!clk)
421 return 0;
422
423 if (clk->prepare_count == 0) {
424 ret = __clk_prepare(clk->parent);
425 if (ret)
426 return ret;
427
428 if (clk->ops->prepare) {
429 ret = clk->ops->prepare(clk->hw);
430 if (ret) {
431 __clk_unprepare(clk->parent);
432 return ret;
433 }
434 }
435 }
436
437 clk->prepare_count++;
438
439 return 0;
440}
441
442/**
443 * clk_prepare - prepare a clock source
444 * @clk: the clk being prepared
445 *
446 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
447 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
448 * operation may sleep. One example is a clk which is accessed over I2C. In
449 * the complex case a clk ungate operation may require a fast and a slow part.
450 * It is for this reason that clk_prepare and clk_enable are not mutually
451 * exclusive. In fact clk_prepare must be called before clk_enable.
452 * Returns 0 on success, -EERROR otherwise.
453 */
454int clk_prepare(struct clk *clk)
455{
456 int ret;
457
458 mutex_lock(&prepare_lock);
459 ret = __clk_prepare(clk);
460 mutex_unlock(&prepare_lock);
461
462 return ret;
463}
464EXPORT_SYMBOL_GPL(clk_prepare);
465
466static void __clk_disable(struct clk *clk)
467{
468 if (!clk)
469 return;
470
471 if (WARN_ON(clk->enable_count == 0))
472 return;
473
474 if (--clk->enable_count > 0)
475 return;
476
477 if (clk->ops->disable)
478 clk->ops->disable(clk->hw);
479
480 __clk_disable(clk->parent);
481}
482
483/**
484 * clk_disable - gate a clock
485 * @clk: the clk being gated
486 *
487 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
488 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
489 * clk if the operation is fast and will never sleep. One example is a
490 * SoC-internal clk which is controlled via simple register writes. In the
491 * complex case a clk gate operation may require a fast and a slow part. It is
492 * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
493 * In fact clk_disable must be called before clk_unprepare.
494 */
495void clk_disable(struct clk *clk)
496{
497 unsigned long flags;
498
499 spin_lock_irqsave(&enable_lock, flags);
500 __clk_disable(clk);
501 spin_unlock_irqrestore(&enable_lock, flags);
502}
503EXPORT_SYMBOL_GPL(clk_disable);
504
505static int __clk_enable(struct clk *clk)
506{
507 int ret = 0;
508
509 if (!clk)
510 return 0;
511
512 if (WARN_ON(clk->prepare_count == 0))
513 return -ESHUTDOWN;
514
515 if (clk->enable_count == 0) {
516 ret = __clk_enable(clk->parent);
517
518 if (ret)
519 return ret;
520
521 if (clk->ops->enable) {
522 ret = clk->ops->enable(clk->hw);
523 if (ret) {
524 __clk_disable(clk->parent);
525 return ret;
526 }
527 }
528 }
529
530 clk->enable_count++;
531 return 0;
532}
533
534/**
535 * clk_enable - ungate a clock
536 * @clk: the clk being ungated
537 *
538 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
539 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
540 * if the operation will never sleep. One example is a SoC-internal clk which
541 * is controlled via simple register writes. In the complex case a clk ungate
542 * operation may require a fast and a slow part. It is for this reason that
543 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
544 * must be called before clk_enable. Returns 0 on success, -EERROR
545 * otherwise.
546 */
547int clk_enable(struct clk *clk)
548{
549 unsigned long flags;
550 int ret;
551
552 spin_lock_irqsave(&enable_lock, flags);
553 ret = __clk_enable(clk);
554 spin_unlock_irqrestore(&enable_lock, flags);
555
556 return ret;
557}
558EXPORT_SYMBOL_GPL(clk_enable);
559
560/**
561 * clk_get_rate - return the rate of clk
562 * @clk: the clk whose rate is being returned
563 *
564 * Simply returns the cached rate of the clk. Does not query the hardware. If
565 * clk is NULL then returns -EINVAL.
566 */
567unsigned long clk_get_rate(struct clk *clk)
568{
569 unsigned long rate;
570
571 mutex_lock(&prepare_lock);
572 rate = __clk_get_rate(clk);
573 mutex_unlock(&prepare_lock);
574
575 return rate;
576}
577EXPORT_SYMBOL_GPL(clk_get_rate);
578
579/**
580 * __clk_round_rate - round the given rate for a clk
581 * @clk: round the rate of this clock
582 *
583 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
584 */
585unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
586{
587 unsigned long unused;
588
589 if (!clk)
590 return -EINVAL;
591
592 if (!clk->ops->round_rate)
593 return clk->rate;
594
595 if (clk->flags & CLK_SET_RATE_PARENT)
596 return clk->ops->round_rate(clk->hw, rate, &unused);
597 else
598 return clk->ops->round_rate(clk->hw, rate, NULL);
599}
600
601/**
602 * clk_round_rate - round the given rate for a clk
603 * @clk: the clk for which we are rounding a rate
604 * @rate: the rate which is to be rounded
605 *
606 * Takes in a rate as input and rounds it to a rate that the clk can actually
607 * use which is then returned. If clk doesn't support round_rate operation
608 * then the current rate of the clk itself is returned.
609 */
610long clk_round_rate(struct clk *clk, unsigned long rate)
611{
612 unsigned long ret;
613
614 mutex_lock(&prepare_lock);
615 ret = __clk_round_rate(clk, rate);
616 mutex_unlock(&prepare_lock);
617
618 return ret;
619}
620EXPORT_SYMBOL_GPL(clk_round_rate);
621
622/**
623 * __clk_notify - call clk notifier chain
624 * @clk: struct clk * that is changing rate
625 * @msg: clk notifier type (see include/linux/clk.h)
626 * @old_rate: old clk rate
627 * @new_rate: new clk rate
628 *
629 * Triggers a notifier call chain on the clk rate-change notification
630 * for 'clk'. Passes a pointer to the struct clk and the previous
631 * and current rates to the notifier callback. Intended to be called by
632 * internal clock code only. Returns NOTIFY_DONE from the last driver
633 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
634 * a driver returns that.
635 */
636static int __clk_notify(struct clk *clk, unsigned long msg,
637 unsigned long old_rate, unsigned long new_rate)
638{
639 struct clk_notifier *cn;
640 struct clk_notifier_data cnd;
641 int ret = NOTIFY_DONE;
642
643 cnd.clk = clk;
644 cnd.old_rate = old_rate;
645 cnd.new_rate = new_rate;
646
647 list_for_each_entry(cn, &clk_notifier_list, node) {
648 if (cn->clk == clk) {
649 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
650 &cnd);
651 break;
652 }
653 }
654
655 return ret;
656}
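/*
 * Illustrative sketch, not part of this patch: a driver can subscribe to the
 * rate-change messages delivered above with the clk_notifier_register()
 * helper declared in include/linux/clk.h (the "foo" names below are
 * invented; check clk.h for the exact prototype):
 *
 *	static int foo_rate_cb(struct notifier_block *nb,
 *			       unsigned long msg, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (msg == PRE_RATE_CHANGE)
 *			pr_debug("%lu -> %lu\n", cnd->old_rate, cnd->new_rate);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_rate_cb };
 *
 *	clk_notifier_register(clk, &foo_nb);
 */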
657
658/**
659 * __clk_recalc_rates
660 * @clk: first clk in the subtree
661 * @msg: notification type (see include/linux/clk.h)
662 *
663 * Walks the subtree of clks starting with clk and recalculates rates as it
664 * goes. Note that if a clk does not implement the .recalc_rate callback then
665 * it is assumed that the clock will take on the rate of it's parent.
666 *
667 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
668 * if necessary.
669 *
670 * Caller must hold prepare_lock.
671 */
672static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
673{
674 unsigned long old_rate;
675 unsigned long parent_rate = 0;
676 struct hlist_node *tmp;
677 struct clk *child;
678
679 old_rate = clk->rate;
680
681 if (clk->parent)
682 parent_rate = clk->parent->rate;
683
684 if (clk->ops->recalc_rate)
685 clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
686 else
687 clk->rate = parent_rate;
688
689 /*
690 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
691 * & ABORT_RATE_CHANGE notifiers
692 */
693 if (clk->notifier_count && msg)
694 __clk_notify(clk, msg, old_rate, clk->rate);
695
696 hlist_for_each_entry(child, tmp, &clk->children, child_node)
697 __clk_recalc_rates(child, msg);
698}
699
700/**
701 * __clk_speculate_rates
702 * @clk: first clk in the subtree
703 * @parent_rate: the "future" rate of clk's parent
704 *
705 * Walks the subtree of clks starting with clk, speculating rates as it
706 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
707 *
708 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
709 * pre-rate change notifications and returns early if no clks in the
710 * subtree have subscribed to the notifications. Note that if a clk does not
711 * implement the .recalc_rate callback then it is assumed that the clock will
712 * take on the rate of its parent.
713 *
714 * Caller must hold prepare_lock.
715 */
716static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
717{
718 struct hlist_node *tmp;
719 struct clk *child;
720 unsigned long new_rate;
721 int ret = NOTIFY_DONE;
722
723 if (clk->ops->recalc_rate)
724 new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
725 else
726 new_rate = parent_rate;
727
728 /* abort the rate change if a driver returns NOTIFY_BAD */
729 if (clk->notifier_count)
730 ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
731
732 if (ret == NOTIFY_BAD)
733 goto out;
734
735 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
736 ret = __clk_speculate_rates(child, new_rate);
737 if (ret == NOTIFY_BAD)
738 break;
739 }
740
741out:
742 return ret;
743}
744
745static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
746{
747 struct clk *child;
748 struct hlist_node *tmp;
749
750 clk->new_rate = new_rate;
751
752 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
753 if (child->ops->recalc_rate)
754 child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
755 else
756 child->new_rate = new_rate;
757 clk_calc_subtree(child, child->new_rate);
758 }
759}
760
761/*
762 * calculate the new rates returning the topmost clock that has to be
763 * changed.
764 */
765static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
766{
767 struct clk *top = clk;
768 unsigned long best_parent_rate = clk->parent->rate;
769 unsigned long new_rate;
770
771 if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) {
772 clk->new_rate = clk->rate;
773 return NULL;
774 }
775
776 if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) {
777 top = clk_calc_new_rates(clk->parent, rate);
778 new_rate = clk->new_rate = clk->parent->new_rate;
779
780 goto out;
781 }
782
783 if (clk->flags & CLK_SET_RATE_PARENT)
784 new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
785 else
786 new_rate = clk->ops->round_rate(clk->hw, rate, NULL);
787
788 if (best_parent_rate != clk->parent->rate) {
789 top = clk_calc_new_rates(clk->parent, best_parent_rate);
790
791 goto out;
792 }
793
794out:
795 clk_calc_subtree(clk, new_rate);
796
797 return top;
798}
799
800/*
801 * Notify about rate changes in a subtree. Always walk down the whole tree
802 * so that in case of an error we can walk down the whole tree again and
803 * abort the change.
804 */
805static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
806{
807 struct hlist_node *tmp;
808 struct clk *child, *fail_clk = NULL;
809 int ret = NOTIFY_DONE;
810
811 if (clk->rate == clk->new_rate)
812 return 0;
813
814 if (clk->notifier_count) {
815 ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
816 if (ret == NOTIFY_BAD)
817 fail_clk = clk;
818 }
819
820 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
821 clk = clk_propagate_rate_change(child, event);
822 if (clk)
823 fail_clk = clk;
824 }
825
826 return fail_clk;
827}
828
829/*
830 * walk down a subtree and set the new rates notifying the rate
831 * change on the way
832 */
833static void clk_change_rate(struct clk *clk)
834{
835 struct clk *child;
836 unsigned long old_rate;
837 struct hlist_node *tmp;
838
839 old_rate = clk->rate;
840
841 if (clk->ops->set_rate)
842 clk->ops->set_rate(clk->hw, clk->new_rate);
843
844 if (clk->ops->recalc_rate)
845 clk->rate = clk->ops->recalc_rate(clk->hw,
846 clk->parent->rate);
847 else
848 clk->rate = clk->parent->rate;
849
850 if (clk->notifier_count && old_rate != clk->rate)
851 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
852
853 hlist_for_each_entry(child, tmp, &clk->children, child_node)
854 clk_change_rate(child);
855}
856
857/**
858 * clk_set_rate - specify a new rate for clk
859 * @clk: the clk whose rate is being changed
860 * @rate: the new rate for clk
861 *
862 * In the simplest case clk_set_rate will only change the rate of clk.
863 *
864 * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
865 * will fail; only when the clk is disabled will it be able to change
866 * its rate.
867 *
868 * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
869 * recursively propagate up to clk's parent; whether or not this happens
870 * depends on the outcome of clk's .round_rate implementation. If
871 * *parent_rate is 0 after calling .round_rate then upstream parent
872 * propagation is ignored. If *parent_rate comes back with a new rate
873 * for clk's parent then we propagate up to clk's parent and set its
874 * rate. Upward propagation will continue until either a clk does not
875 * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
876 * changes to clk's parent_rate. If there is a failure during upstream
877 * propagation then clk_set_rate will unwind and restore each clk's rate
878 * that had been successfully changed. Afterwards a rate change abort
879 * notification will be propagated downstream, starting from the clk
880 * that failed.
881 *
882 * At the end of all of the rate setting, clk_set_rate internally calls
883 * __clk_recalc_rates and propagates the rate changes downstream,
884 * starting from the highest clk whose rate was changed. This has the
885 * added benefit of propagating post-rate change notifiers.
886 *
887 * Note that while post-rate change and rate change abort notifications
888 * are guaranteed to be sent to a clk only once per call to
889 * clk_set_rate, pre-change notifications will be sent for every clk
890 * whose rate is changed. Stacking pre-change notifications is noisy
891 * for the drivers subscribed to them, but this allows drivers to react
892 * to intermediate clk rate changes up until the point where the final
893 * rate is achieved at the end of upstream propagation.
894 *
895 * Returns 0 on success, -EERROR otherwise.
896 */
897int clk_set_rate(struct clk *clk, unsigned long rate)
898{
899 struct clk *top, *fail_clk;
900 int ret = 0;
901
902 /* prevent racing with updates to the clock topology */
903 mutex_lock(&prepare_lock);
904
905 /* bail early if nothing to do */
906 if (rate == clk->rate)
907 goto out;
908
909 /* calculate new rates and get the topmost changed clock */
910 top = clk_calc_new_rates(clk, rate);
911 if (!top) {
912 ret = -EINVAL;
913 goto out;
914 }
915
916 /* notify that we are about to change rates */
917 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
918 if (fail_clk) {
919 pr_warn("%s: failed to set %s rate\n", __func__,
920 fail_clk->name);
921 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
922 ret = -EBUSY;
923 goto out;
924 }
925
926 /* change the rates */
927 clk_change_rate(top);
928
929 mutex_unlock(&prepare_lock);
930
931 return 0;
932out:
933 mutex_unlock(&prepare_lock);
934
935 return ret;
936}
937EXPORT_SYMBOL_GPL(clk_set_rate);
938
939/**
940 * clk_get_parent - return the parent of a clk
941 * @clk: the clk whose parent gets returned
942 *
943 * Simply returns clk->parent. Returns NULL if clk is NULL.
944 */
945struct clk *clk_get_parent(struct clk *clk)
946{
947 struct clk *parent;
948
949 mutex_lock(&prepare_lock);
950 parent = __clk_get_parent(clk);
951 mutex_unlock(&prepare_lock);
952
953 return parent;
954}
955EXPORT_SYMBOL_GPL(clk_get_parent);
956
957/*
958 * .get_parent is mandatory for clocks with multiple possible parents. It is
959 * optional for single-parent clocks. Always call .get_parent if it is
960 * available and WARN if it is missing for multi-parent clocks.
961 *
962 * For single-parent clocks without .get_parent, first check to see if the
963 * .parents array exists, and if so use it to avoid an expensive tree
964 * traversal. If .parents does not exist then walk the tree with __clk_lookup.
965 */
966static struct clk *__clk_init_parent(struct clk *clk)
967{
968 struct clk *ret = NULL;
969 u8 index;
970
971 /* handle the trivial cases */
972
973 if (!clk->num_parents)
974 goto out;
975
976 if (clk->num_parents == 1) {
977 if (IS_ERR_OR_NULL(clk->parent))
978 ret = clk->parent = __clk_lookup(clk->parent_names[0]);
979 ret = clk->parent;
980 goto out;
981 }
982
983 if (!clk->ops->get_parent) {
984 WARN(!clk->ops->get_parent,
985 "%s: multi-parent clocks must implement .get_parent\n",
986 __func__);
987 goto out;
988 };
989
990 /*
991 * Do our best to cache parent clocks in clk->parents. This prevents
992 * unnecessary and expensive calls to __clk_lookup. We don't set
993 * clk->parent here; that is done by the calling function
994 */
995
996 index = clk->ops->get_parent(clk->hw);
997
998 if (!clk->parents)
999 clk->parents =
1000 kmalloc((sizeof(struct clk*) * clk->num_parents),
1001 GFP_KERNEL);
1002
1003 if (!clk->parents)
1004 ret = __clk_lookup(clk->parent_names[index]);
1005 else if (!clk->parents[index])
1006 ret = clk->parents[index] =
1007 __clk_lookup(clk->parent_names[index]);
1008 else
1009 ret = clk->parents[index];
1010
1011out:
1012 return ret;
1013}
1014
1015void __clk_reparent(struct clk *clk, struct clk *new_parent)
1016{
1017#ifdef CONFIG_COMMON_CLK_DEBUG
1018 struct dentry *d;
1019 struct dentry *new_parent_d;
1020#endif
1021
1022 if (!clk || !new_parent)
1023 return;
1024
1025 hlist_del(&clk->child_node);
1026
1027 if (new_parent)
1028 hlist_add_head(&clk->child_node, &new_parent->children);
1029 else
1030 hlist_add_head(&clk->child_node, &clk_orphan_list);
1031
1032#ifdef CONFIG_COMMON_CLK_DEBUG
1033 if (!inited)
1034 goto out;
1035
1036 if (new_parent)
1037 new_parent_d = new_parent->dentry;
1038 else
1039 new_parent_d = orphandir;
1040
1041 d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
1042 new_parent_d, clk->name);
1043 if (d)
1044 clk->dentry = d;
1045 else
1046 pr_debug("%s: failed to rename debugfs entry for %s\n",
1047 __func__, clk->name);
1048out:
1049#endif
1050
1051 clk->parent = new_parent;
1052
1053 __clk_recalc_rates(clk, POST_RATE_CHANGE);
1054}
1055
1056static int __clk_set_parent(struct clk *clk, struct clk *parent)
1057{
1058 struct clk *old_parent;
1059 unsigned long flags;
1060 int ret = -EINVAL;
1061 u8 i;
1062
1063 old_parent = clk->parent;
1064
1065 /* find index of new parent clock using cached parent ptrs */
1066 for (i = 0; i < clk->num_parents; i++)
1067 if (clk->parents[i] == parent)
1068 break;
1069
1070 /*
1071 * find index of new parent clock using string name comparison
1072 * also try to cache the parent to avoid future calls to __clk_lookup
1073 */
1074 if (i == clk->num_parents)
1075 for (i = 0; i < clk->num_parents; i++)
1076 if (!strcmp(clk->parent_names[i], parent->name)) {
1077 clk->parents[i] = __clk_lookup(parent->name);
1078 break;
1079 }
1080
1081 if (i == clk->num_parents) {
1082 pr_debug("%s: clock %s is not a possible parent of clock %s\n",
1083 __func__, parent->name, clk->name);
1084 goto out;
1085 }
1086
1087 /* migrate prepare and enable */
1088 if (clk->prepare_count)
1089 __clk_prepare(parent);
1090
1091 /* FIXME replace with clk_is_enabled(clk) someday */
1092 spin_lock_irqsave(&enable_lock, flags);
1093 if (clk->enable_count)
1094 __clk_enable(parent);
1095 spin_unlock_irqrestore(&enable_lock, flags);
1096
1097 /* change clock input source */
1098 ret = clk->ops->set_parent(clk->hw, i);
1099
1100 /* clean up old prepare and enable */
1101 spin_lock_irqsave(&enable_lock, flags);
1102 if (clk->enable_count)
1103 __clk_disable(old_parent);
1104 spin_unlock_irqrestore(&enable_lock, flags);
1105
1106 if (clk->prepare_count)
1107 __clk_unprepare(old_parent);
1108
1109out:
1110 return ret;
1111}
1112
1113/**
1114 * clk_set_parent - switch the parent of a mux clk
1115 * @clk: the mux clk whose input we are switching
1116 * @parent: the new input to clk
1117 *
1118 * Re-parent clk to use parent as its new input source. If clk has the
1119 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
1120 * operation to succeed. After successfully changing clk's parent
1121 * clk_set_parent will update the clk topology, debugfs topology and
1122 * propagate rate recalculation via __clk_recalc_rates. Returns 0 on
1123 * success, or a negative error code on failure.
1124 */
1125int clk_set_parent(struct clk *clk, struct clk *parent)
1126{
1127 int ret = 0;
1128
1129 if (!clk || !clk->ops)
1130 return -EINVAL;
1131
1132 if (!clk->ops->set_parent)
1133 return -ENOSYS;
1134
1135 /* prevent racing with updates to the clock topology */
1136 mutex_lock(&prepare_lock);
1137
1138 if (clk->parent == parent)
1139 goto out;
1140
1141 /* propagate PRE_RATE_CHANGE notifications */
1142 if (clk->notifier_count)
1143 ret = __clk_speculate_rates(clk, parent->rate);
1144
1145 /* abort if a driver objects */
1146 if (ret == NOTIFY_STOP)
1147 goto out;
1148
1149 /* only re-parent if the clock is not in use */
1150 if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
1151 ret = -EBUSY;
1152 else
1153 ret = __clk_set_parent(clk, parent);
1154
1155 /* propagate ABORT_RATE_CHANGE if .set_parent failed */
1156 if (ret) {
1157 __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
1158 goto out;
1159 }
1160
1161 /* propagate rate recalculation downstream */
1162 __clk_reparent(clk, parent);
1163
1164out:
1165 mutex_unlock(&prepare_lock);
1166
1167 return ret;
1168}
1169EXPORT_SYMBOL_GPL(clk_set_parent);
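
For illustration, a minimal consumer-side sketch of how clk_set_parent might be used; the device handle and the clock names "mux_ck" and "pll2_ck" are hypothetical, chosen only to show the clk_get/clk_set_parent flow:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int foo_switch_parent(struct device *dev)
{
	struct clk *mux, *pll2;
	int ret;

	/* hypothetical clock names; real names come from the platform */
	mux = clk_get(dev, "mux_ck");
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	pll2 = clk_get(dev, "pll2_ck");
	if (IS_ERR(pll2)) {
		clk_put(mux);
		return PTR_ERR(pll2);
	}

	/* fails with -EBUSY if mux has CLK_SET_PARENT_GATE set and is prepared */
	ret = clk_set_parent(mux, pll2);

	clk_put(pll2);
	clk_put(mux);
	return ret;
}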
1170
1171/**
1172 * __clk_init - initialize the data structures in a struct clk
1173 * @dev: device initializing this clk, placeholder for now
1174 * @clk: clk being initialized
1175 *
1176 * Initializes the lists in struct clk, queries the hardware for the
1177 * parent and rate and sets them both.
1178 *
1179 * Any struct clk passed into __clk_init must have the following members
1180 * populated:
1181 * .name
1182 * .ops
1183 * .hw
1184 * .parent_names
1185 * .num_parents
1186 * .flags
1187 *
1188 * Essentially, everything that would normally be passed into clk_register is
1189 * assumed to be initialized already in __clk_init. The other members may be
1190 * populated, but are optional.
1191 *
1192 * __clk_init is only exposed via clk-private.h and is intended for use with
1193 * very large numbers of clocks that need to be statically initialized. It is
1194 * a layering violation to include clk-private.h from any code which implements
1195 * a clock's .ops; as such any statically initialized clock data MUST be in a
1196 * separate C file from the logic that implements its operations.
1197 */
1198void __clk_init(struct device *dev, struct clk *clk)
1199{
1200 int i;
1201 struct clk *orphan;
1202 struct hlist_node *tmp, *tmp2;
1203
1204 if (!clk)
1205 return;
1206
1207 mutex_lock(&prepare_lock);
1208
1209 /* check to see if a clock with this name is already registered */
1210 if (__clk_lookup(clk->name))
1211 goto out;
1212
1213 /* throw a WARN if any entries in parent_names are NULL */
1214 for (i = 0; i < clk->num_parents; i++)
1215 WARN(!clk->parent_names[i],
1216 "%s: invalid NULL in %s's .parent_names\n",
1217 __func__, clk->name);
1218
1219 /*
1220 * Allocate an array of struct clk *'s to avoid unnecessary string
1221 * look-ups of clk's possible parents. This can fail for clocks passed
1222 * in to clk_init during early boot; thus any access to clk->parents[]
1223 * must always check for a NULL pointer and try to populate it if
1224 * necessary.
1225 *
1226 * If clk->parents is not NULL we skip this entire block. This allows
1227 * for clock drivers to statically initialize clk->parents.
1228 */
1229 if (clk->num_parents && !clk->parents) {
1230 clk->parents = kmalloc((sizeof(struct clk*) * clk->num_parents),
1231 GFP_KERNEL);
1232 /*
1233 * __clk_lookup returns NULL for parents that have not been
1234 * clk_init'd; thus any access to clk->parents[] must check
1235 * for a NULL pointer. We can always perform lazy lookups for
1236 * missing parents later on.
1237 */
1238 if (clk->parents)
1239 for (i = 0; i < clk->num_parents; i++)
1240 clk->parents[i] =
1241 __clk_lookup(clk->parent_names[i]);
1242 }
1243
1244 clk->parent = __clk_init_parent(clk);
1245
1246 /*
1247 * Populate clk->parent if parent has already been __clk_init'd. If
1248 * parent has not yet been __clk_init'd then place clk in the orphan
1249 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
1250 * clk list.
1251 *
1252 * Every time a new clk is clk_init'd then we walk the list of orphan
1253 * clocks and re-parent any that are children of the clock currently
1254 * being clk_init'd.
1255 */
1256 if (clk->parent)
1257 hlist_add_head(&clk->child_node,
1258 &clk->parent->children);
1259 else if (clk->flags & CLK_IS_ROOT)
1260 hlist_add_head(&clk->child_node, &clk_root_list);
1261 else
1262 hlist_add_head(&clk->child_node, &clk_orphan_list);
1263
1264 /*
1265 * Set clk's rate. The preferred method is to use .recalc_rate. For
1266 * simple clocks and lazy developers the default fallback is to use the
1267 * parent's rate. If a clock doesn't have a parent (or is orphaned)
1268 * then rate is set to zero.
1269 */
1270 if (clk->ops->recalc_rate)
1271 clk->rate = clk->ops->recalc_rate(clk->hw,
1272 __clk_get_rate(clk->parent));
1273 else if (clk->parent)
1274 clk->rate = clk->parent->rate;
1275 else
1276 clk->rate = 0;
1277
1278 /*
1279 * walk the list of orphan clocks and reparent any that are children of
1280 * this clock
1281 */
1282 hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
1283 for (i = 0; i < orphan->num_parents; i++)
1284 if (!strcmp(clk->name, orphan->parent_names[i])) {
1285 __clk_reparent(orphan, clk);
1286 break;
1287 }
1288
1289 /*
1290 * optional platform-specific magic
1291 *
1292 * The .init callback is not used by any of the basic clock types, but
1293 * exists for weird hardware that must perform initialization magic.
1294 * Please consider other ways of solving initialization problems before
1295 * using this callback, as its use is discouraged.
1296 */
1297 if (clk->ops->init)
1298 clk->ops->init(clk->hw);
1299
1300 clk_debug_register(clk);
1301
1302out:
1303 mutex_unlock(&prepare_lock);
1304
1305 return;
1306}
1307
1308/**
1309 * clk_register - allocate a new clock, register it and return an opaque cookie
1310 * @dev: device that is registering this clock
1311 * @name: clock name
1312 * @ops: operations this clock supports
1313 * @hw: link to hardware-specific clock data
1314 * @parent_names: array of string names for all possible parents
1315 * @num_parents: number of possible parents
1316 * @flags: framework-level hints and quirks
1317 *
1318 * clk_register is the primary interface for populating the clock tree with new
1319 * clock nodes. It returns a pointer to the newly allocated struct clk which
1320 * cannot be dereferenced by driver code but may be used in conjunction with the
1321 * rest of the clock API.
1322 */
1323struct clk *clk_register(struct device *dev, const char *name,
1324 const struct clk_ops *ops, struct clk_hw *hw,
1325 char **parent_names, u8 num_parents, unsigned long flags)
1326{
1327 struct clk *clk;
1328
1329 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
1330 if (!clk)
1331 return NULL;
1332
1333 clk->name = name;
1334 clk->ops = ops;
1335 clk->hw = hw;
1336 clk->flags = flags;
1337 clk->parent_names = parent_names;
1338 clk->num_parents = num_parents;
1339 hw->clk = clk;
1340
1341 __clk_init(dev, clk);
1342
1343 return clk;
1344}
1345EXPORT_SYMBOL_GPL(clk_register);
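
As a hedged sketch of provider-side usage, a hypothetical platform might wrap clk_register like this; struct clk_foo, foo_clk_ops, the clock name "foo_ck" and the parent names are all made-up stand-ins for real hardware-specific code:

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>

/* hypothetical hardware-specific clock type */
struct clk_foo {
	struct clk_hw hw;
	void __iomem *reg;
};

/* callbacks elided; a two-parent clock must at least provide .get_parent */
static const struct clk_ops foo_clk_ops;

static char *foo_parent_names[] = { "osc", "pll1" };

struct clk *foo_clk_register(struct device *dev, void __iomem *reg)
{
	struct clk_foo *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return NULL;

	foo->reg = reg;

	/* clk_register fills in foo->hw.clk and hooks the clock into the tree */
	return clk_register(dev, "foo_ck", &foo_clk_ops, &foo->hw,
			    foo_parent_names, ARRAY_SIZE(foo_parent_names), 0);
}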
1346
1347/*** clk rate change notifiers ***/
1348
1349/**
1350 * clk_notifier_register - add a clk rate change notifier
1351 * @clk: struct clk * to watch
1352 * @nb: struct notifier_block * with callback info
1353 *
1354 * Request notification when clk's rate changes. This uses an SRCU
1355 * notifier because we want it to block and notifier unregistrations are
1356 * uncommon. The callbacks associated with the notifier must not
1357 * re-enter into the clk framework by calling any top-level clk APIs;
1358 * this would recursively take the prepare_lock mutex and deadlock.
1359 *
1360 * Pre-change notifier callbacks will be passed the current, pre-change
1361 * rate of the clk via struct clk_notifier_data.old_rate. The new,
1362 * post-change rate of the clk is passed via struct
1363 * clk_notifier_data.new_rate.
1364 *
1365 * Post-change notifiers will pass the now-current, post-change rate of
1366 * the clk in both struct clk_notifier_data.old_rate and struct
1367 * clk_notifier_data.new_rate.
1368 *
1369 * Abort-change notifiers are effectively the opposite of pre-change
1370 * notifiers: the original pre-change clk rate is passed in via struct
1371 * clk_notifier_data.new_rate and the failed post-change rate is passed
1372 * in via struct clk_notifier_data.old_rate.
1373 *
1374 * clk_notifier_register() must be called from non-atomic context.
1375 * Returns -EINVAL if called with null arguments, -ENOMEM upon
1376 * allocation failure; otherwise, passes along the return value of
1377 * srcu_notifier_chain_register().
1378 */
1379int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
1380{
1381 struct clk_notifier *cn;
1382 int ret = -ENOMEM;
1383
1384 if (!clk || !nb)
1385 return -EINVAL;
1386
1387 mutex_lock(&prepare_lock);
1388
1389 /* search the list of notifiers for this clk */
1390 list_for_each_entry(cn, &clk_notifier_list, node)
1391 if (cn->clk == clk)
1392 break;
1393
1394 /* if clk wasn't in the notifier list, allocate new clk_notifier */
1395 if (cn->clk != clk) {
1396 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
1397 if (!cn)
1398 goto out;
1399
1400 cn->clk = clk;
1401 srcu_init_notifier_head(&cn->notifier_head);
1402
1403 list_add(&cn->node, &clk_notifier_list);
1404 }
1405
1406 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
1407
1408 clk->notifier_count++;
1409
1410out:
1411 mutex_unlock(&prepare_lock);
1412
1413 return ret;
1414}
1415EXPORT_SYMBOL_GPL(clk_notifier_register);
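
To make the notifier semantics above concrete, here is a sketch of a hypothetical driver callback that vetoes rates above an assumed limit at PRE_RATE_CHANGE time; the FOO_* names are illustrative only:

#include <linux/clk.h>
#include <linux/notifier.h>

#define FOO_MAX_RATE	100000000	/* hypothetical 100 MHz limit */

static int foo_clk_rate_cb(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	struct clk_notifier_data *cnd = data;

	/* veto the change before it happens if the new rate is out of range */
	if (event == PRE_RATE_CHANGE && cnd->new_rate > FOO_MAX_RATE)
		return NOTIFY_STOP;

	return NOTIFY_DONE;
}

static struct notifier_block foo_clk_nb = {
	.notifier_call = foo_clk_rate_cb,
};

/* typically called from probe(), with clk obtained via clk_get() */
static int foo_register_rate_notifier(struct clk *clk)
{
	return clk_notifier_register(clk, &foo_clk_nb);
}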
1416
1417/**
1418 * clk_notifier_unregister - remove a clk rate change notifier
1419 * @clk: struct clk *
1420 * @nb: struct notifier_block * with callback info
1421 *
1422 * Requests no further notification for changes to 'clk' and frees memory
1423 * allocated in clk_notifier_register.
1424 *
1425 * Returns -EINVAL if called with null arguments; otherwise, passes
1426 * along the return value of srcu_notifier_chain_unregister().
1427 */
1428int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
1429{
1430 struct clk_notifier *cn = NULL;
1431 int ret = -EINVAL;
1432
1433 if (!clk || !nb)
1434 return -EINVAL;
1435
1436 mutex_lock(&prepare_lock);
1437
1438 list_for_each_entry(cn, &clk_notifier_list, node)
1439 if (cn->clk == clk)
1440 break;
1441
1442 if (cn->clk == clk) {
1443 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
1444
1445 clk->notifier_count--;
1446
1447 /* XXX the notifier code should handle this better */
1448 if (!cn->notifier_head.head) {
1449 srcu_cleanup_notifier_head(&cn->notifier_head);
1450 kfree(cn);
1451 }
1452
1453 } else {
1454 ret = -ENOENT;
1455 }
1456
1457 mutex_unlock(&prepare_lock);
1458
1459 return ret;
1460}
1461EXPORT_SYMBOL_GPL(clk_notifier_unregister);
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
new file mode 100644
index 000000000000..5e4312b6f5cc
--- /dev/null
+++ b/include/linux/clk-private.h
@@ -0,0 +1,196 @@
1/*
2 * linux/include/linux/clk-private.h
3 *
4 * Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
5 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef __LINUX_CLK_PRIVATE_H
12#define __LINUX_CLK_PRIVATE_H
13
14#include <linux/clk-provider.h>
15#include <linux/list.h>
16
17/*
18 * WARNING: Do not include clk-private.h from any file that implements struct
19 * clk_ops. Doing so is a layering violation!
20 *
21 * This header exists only to allow for statically initialized clock data. Any
22 * static clock data must be defined in a separate file from the logic that
23 * implements the clock operations for that same data.
24 */
25
26#ifdef CONFIG_COMMON_CLK
27
28struct clk {
29 const char *name;
30 const struct clk_ops *ops;
31 struct clk_hw *hw;
32 struct clk *parent;
33 char **parent_names;
34 struct clk **parents;
35 u8 num_parents;
36 unsigned long rate;
37 unsigned long new_rate;
38 unsigned long flags;
39 unsigned int enable_count;
40 unsigned int prepare_count;
41 struct hlist_head children;
42 struct hlist_node child_node;
43 unsigned int notifier_count;
44#ifdef CONFIG_COMMON_CLK_DEBUG
45 struct dentry *dentry;
46#endif
47};
48
49/*
50 * DOC: Basic clock implementations common to many platforms
51 *
 52 * Each basic clock hardware type consists of a structure describing the
53 * clock hardware, implementations of the relevant callbacks in struct clk_ops,
54 * unique flags for that hardware type, a registration function and an
55 * alternative macro for static initialization
56 */
57
58extern struct clk_ops clk_fixed_rate_ops;
59
60#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \
61 _fixed_rate_flags) \
62 static struct clk _name; \
63 static char *_name##_parent_names[] = {}; \
64 static struct clk_fixed_rate _name##_hw = { \
65 .hw = { \
66 .clk = &_name, \
67 }, \
68 .fixed_rate = _rate, \
69 .flags = _fixed_rate_flags, \
70 }; \
71 static struct clk _name = { \
72 .name = #_name, \
73 .ops = &clk_fixed_rate_ops, \
74 .hw = &_name##_hw.hw, \
75 .parent_names = _name##_parent_names, \
76 .num_parents = \
77 ARRAY_SIZE(_name##_parent_names), \
78 .flags = _flags, \
79 };
80
81extern struct clk_ops clk_gate_ops;
82
83#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr, \
84 _flags, _reg, _bit_idx, \
85 _gate_flags, _lock) \
86 static struct clk _name; \
87 static char *_name##_parent_names[] = { \
88 _parent_name, \
89 }; \
90 static struct clk *_name##_parents[] = { \
91 _parent_ptr, \
92 }; \
93 static struct clk_gate _name##_hw = { \
94 .hw = { \
95 .clk = &_name, \
96 }, \
97 .reg = _reg, \
98 .bit_idx = _bit_idx, \
99 .flags = _gate_flags, \
100 .lock = _lock, \
101 }; \
102 static struct clk _name = { \
103 .name = #_name, \
104 .ops = &clk_gate_ops, \
105 .hw = &_name##_hw.hw, \
106 .parent_names = _name##_parent_names, \
107 .num_parents = \
108 ARRAY_SIZE(_name##_parent_names), \
109 .parents = _name##_parents, \
110 .flags = _flags, \
111 };
112
113extern struct clk_ops clk_divider_ops;
114
115#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
116 _flags, _reg, _shift, _width, \
117 _divider_flags, _lock) \
118 static struct clk _name; \
119 static char *_name##_parent_names[] = { \
120 _parent_name, \
121 }; \
122 static struct clk *_name##_parents[] = { \
123 _parent_ptr, \
124 }; \
125 static struct clk_divider _name##_hw = { \
126 .hw = { \
127 .clk = &_name, \
128 }, \
129 .reg = _reg, \
130 .shift = _shift, \
131 .width = _width, \
132 .flags = _divider_flags, \
133 .lock = _lock, \
134 }; \
135 static struct clk _name = { \
136 .name = #_name, \
137 .ops = &clk_divider_ops, \
138 .hw = &_name##_hw.hw, \
139 .parent_names = _name##_parent_names, \
140 .num_parents = \
141 ARRAY_SIZE(_name##_parent_names), \
142 .parents = _name##_parents, \
143 .flags = _flags, \
144 };
145
146extern struct clk_ops clk_mux_ops;
147
148#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
149 _reg, _shift, _width, \
150 _mux_flags, _lock) \
151 static struct clk _name; \
152 static struct clk_mux _name##_hw = { \
153 .hw = { \
154 .clk = &_name, \
155 }, \
156 .reg = _reg, \
157 .shift = _shift, \
158 .width = _width, \
159 .flags = _mux_flags, \
160 .lock = _lock, \
161 }; \
162 static struct clk _name = { \
163 .name = #_name, \
164 .ops = &clk_mux_ops, \
165 .hw = &_name##_hw.hw, \
166 .parent_names = _parent_names, \
167 .num_parents = \
168 ARRAY_SIZE(_parent_names), \
169 .parents = _parents, \
170 .flags = _flags, \
171 };
172
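A hypothetical sketch of statically initialized clock data built with the macros above; the register address, clock names and init hook are assumptions, and per the warning at the top of this header such data belongs in a file that implements no clk_ops:

#include <linux/clk-private.h>
#include <linux/init.h>
#include <linux/spinlock.h>

/* hypothetical, already-ioremapped gate register */
#define FOO_UART_GATE_REG	((void __iomem *)0xfeed0000)

static DEFINE_SPINLOCK(foo_clk_lock);

/* 24 MHz root oscillator */
DEFINE_CLK_FIXED_RATE(osc, CLK_IS_ROOT, 24000000, 0);

/* gate for a UART, parented to "osc", controlled by bit 3 of the register */
DEFINE_CLK_GATE(uart_ck, "osc", &osc, 0,
		FOO_UART_GATE_REG, 3, 0, &foo_clk_lock);

/* per the __clk_init() kerneldoc, no clk_register() call is needed here */
void __init foo_clk_init(void)
{
	__clk_init(NULL, &osc);
	__clk_init(NULL, &uart_ck);
}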
173/**
174 * __clk_init - initialize the data structures in a struct clk
175 * @dev: device initializing this clk, placeholder for now
176 * @clk: clk being initialized
177 *
178 * Initializes the lists in struct clk, queries the hardware for the
179 * parent and rate and sets them both.
180 *
181 * Any struct clk passed into __clk_init must have the following members
182 * populated:
183 * .name
184 * .ops
185 * .hw
186 * .parent_names
187 * .num_parents
188 * .flags
189 *
190 * It is not necessary to call clk_register if __clk_init is used directly with
191 * statically initialized clock data.
192 */
193void __clk_init(struct device *dev, struct clk *clk);
194
195#endif /* CONFIG_COMMON_CLK */
196#endif /* __LINUX_CLK_PRIVATE_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
new file mode 100644
index 000000000000..5508897ad376
--- /dev/null
+++ b/include/linux/clk-provider.h
@@ -0,0 +1,300 @@
1/*
2 * linux/include/linux/clk-provider.h
3 *
4 * Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
5 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef __LINUX_CLK_PROVIDER_H
12#define __LINUX_CLK_PROVIDER_H
13
14#include <linux/clk.h>
15
16#ifdef CONFIG_COMMON_CLK
17
18/**
19 * struct clk_hw - handle for traversing from a struct clk to its corresponding
20 * hardware-specific structure. struct clk_hw should be declared within struct
21 * clk_foo and then referenced by the struct clk instance that uses struct
22 * clk_foo's clk_ops
23 *
 24 * @clk: pointer to the struct clk instance that points back to this struct
25 * clk_hw instance
26 */
27struct clk_hw {
28 struct clk *clk;
29};
30
31/*
32 * flags used across common struct clk. these flags should only affect the
33 * top-level framework. custom flags for dealing with hardware specifics
34 * belong in struct clk_foo
35 */
36#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */
37#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
38#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
39#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
40#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */
41
42/**
43 * struct clk_ops - Callback operations for hardware clocks; these are to
44 * be provided by the clock implementation, and will be called by drivers
45 * through the clk_* api.
46 *
47 * @prepare: Prepare the clock for enabling. This must not return until
48 * the clock is fully prepared, and it's safe to call clk_enable.
49 * This callback is intended to allow clock implementations to
50 * do any initialisation that may sleep. Called with
51 * prepare_lock held.
52 *
53 * @unprepare: Release the clock from its prepared state. This will typically
54 * undo any work done in the @prepare callback. Called with
55 * prepare_lock held.
56 *
57 * @enable: Enable the clock atomically. This must not return until the
58 * clock is generating a valid clock signal, usable by consumer
59 * devices. Called with enable_lock held. This function must not
60 * sleep.
61 *
62 * @disable: Disable the clock atomically. Called with enable_lock held.
63 * This function must not sleep.
64 *
 65 * @recalc_rate:	Recalculate the rate of this clock, by querying hardware. The
 66 *		parent rate is an input parameter. It is up to the caller to
 67 *		ensure that the prepare_lock is held across this call.
68 * Returns the calculated rate. Optional, but recommended - if
69 * this op is not set then clock rate will be initialized to 0.
70 *
71 * @round_rate: Given a target rate as input, returns the closest rate actually
72 * supported by the clock.
73 *
74 * @get_parent: Queries the hardware to determine the parent of a clock. The
75 * return value is a u8 which specifies the index corresponding to
76 * the parent clock. This index can be applied to either the
77 * .parent_names or .parents arrays. In short, this function
78 * translates the parent value read from hardware into an array
79 * index. Currently only called when the clock is initialized by
80 * __clk_init. This callback is mandatory for clocks with
81 * multiple parents. It is optional (and unnecessary) for clocks
82 * with 0 or 1 parents.
83 *
84 * @set_parent: Change the input source of this clock; for clocks with multiple
85 * possible parents specify a new parent by passing in the index
86 * as a u8 corresponding to the parent in either the .parent_names
 87 *		or .parents arrays. This function in effect translates an
 88 *		array index into the value programmed into the hardware.
 89 *		Returns 0 on success, or a negative error code on failure.
90 *
91 * @set_rate: Change the rate of this clock. If this callback returns
92 * CLK_SET_RATE_PARENT, the rate change will be propagated to the
93 * parent clock (which may propagate again if the parent clock
94 * also sets this flag). The requested rate of the parent is
95 * passed back from the callback in the second 'unsigned long *'
96 * argument. Note that it is up to the hardware clock's set_rate
 97 *		implementation to ensure that clocks do not run out of spec
 98 *		when propagating the call to set_rate up to the parent. One way
 99 *		to do this is to gate the clock (via clk_disable and/or
 100 *		clk_unprepare) before calling clk_set_rate, then ungating it
 101 *		afterward. If your clock also has the CLK_SET_RATE_GATE flag
 102 *		set then this will ensure safety. Returns 0 on success, or a
 103 *		negative error code on failure.
104 *
105 * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
106 * implementations to split any work between atomic (enable) and sleepable
107 * (prepare) contexts. If enabling a clock requires code that might sleep,
108 * this must be done in clk_prepare. Clock enable code that will never be
 109 * called in a sleepable context may be implemented in clk_enable.
110 *
111 * Typically, drivers will call clk_prepare when a clock may be needed later
112 * (eg. when a device is opened), and clk_enable when the clock is actually
113 * required (eg. from an interrupt). Note that clk_prepare MUST have been
114 * called before clk_enable.
115 */
116struct clk_ops {
117 int (*prepare)(struct clk_hw *hw);
118 void (*unprepare)(struct clk_hw *hw);
119 int (*enable)(struct clk_hw *hw);
120 void (*disable)(struct clk_hw *hw);
121 int (*is_enabled)(struct clk_hw *hw);
122 unsigned long (*recalc_rate)(struct clk_hw *hw,
123 unsigned long parent_rate);
124 long (*round_rate)(struct clk_hw *hw, unsigned long,
125 unsigned long *);
126 int (*set_parent)(struct clk_hw *hw, u8 index);
127 u8 (*get_parent)(struct clk_hw *hw);
128 int (*set_rate)(struct clk_hw *hw, unsigned long);
129 void (*init)(struct clk_hw *hw);
130};
131
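A hedged sketch of how a hardware-specific clock might wire its callbacks into struct clk_ops; struct clk_foo, its register layout and the fixed divide-by-two are assumptions. The ops table defined here is what would be handed to clk_register, as in the earlier registration sketch:

#include <linux/clk-provider.h>
#include <linux/io.h>

/* hypothetical gate-with-fixed-divider hardware */
struct clk_foo {
	struct clk_hw hw;
	void __iomem *reg;	/* bit 0 gates the output */
};

#define to_clk_foo(_hw) container_of(_hw, struct clk_foo, hw)

/* .enable runs with enable_lock held and must not sleep */
static int clk_foo_enable(struct clk_hw *hw)
{
	struct clk_foo *foo = to_clk_foo(hw);

	writel(readl(foo->reg) | BIT(0), foo->reg);
	return 0;
}

static void clk_foo_disable(struct clk_hw *hw)
{
	struct clk_foo *foo = to_clk_foo(hw);

	writel(readl(foo->reg) & ~BIT(0), foo->reg);
}

static unsigned long clk_foo_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	return parent_rate / 2;	/* hardware divides its parent by two */
}

static const struct clk_ops clk_foo_ops = {
	.enable		= clk_foo_enable,
	.disable	= clk_foo_disable,
	.recalc_rate	= clk_foo_recalc_rate,
};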
132/*
133 * DOC: Basic clock implementations common to many platforms
134 *
 135 * Each basic clock hardware type consists of a structure describing the
136 * clock hardware, implementations of the relevant callbacks in struct clk_ops,
137 * unique flags for that hardware type, a registration function and an
138 * alternative macro for static initialization
139 */
140
141/**
142 * struct clk_fixed_rate - fixed-rate clock
143 * @hw: handle between common and hardware-specific interfaces
144 * @fixed_rate: constant frequency of clock
145 */
146struct clk_fixed_rate {
147 struct clk_hw hw;
148 unsigned long fixed_rate;
149 u8 flags;
150};
151
152struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
153 const char *parent_name, unsigned long flags,
154 unsigned long fixed_rate);
155
156/**
157 * struct clk_gate - gating clock
158 *
159 * @hw: handle between common and hardware-specific interfaces
160 * @reg: register controlling gate
161 * @bit_idx: single bit controlling gate
162 * @flags: hardware-specific flags
163 * @lock: register lock
164 *
165 * Clock which can gate its output. Implements .enable & .disable
166 *
167 * Flags:
 168 * CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to
 169 * enable the clock. Setting this flag does the opposite: setting the bit
 170 * disables the clock and clearing it enables the clock
171 */
172struct clk_gate {
173 struct clk_hw hw;
174 void __iomem *reg;
175 u8 bit_idx;
176 u8 flags;
177 spinlock_t *lock;
178 char *parent[1];
179};
180
181#define CLK_GATE_SET_TO_DISABLE BIT(0)
182
183struct clk *clk_register_gate(struct device *dev, const char *name,
184 const char *parent_name, unsigned long flags,
185 void __iomem *reg, u8 bit_idx,
186 u8 clk_gate_flags, spinlock_t *lock);
187
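For example, platform code might register a basic gate roughly as follows; the clock names, register offset and spinlock are assumptions, and the returned struct clk * would normally be saved for later use:

#include <linux/clk-provider.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_gate_lock);

/* foo_base is assumed to be an already-ioremapped clock controller */
static void foo_register_uart_gate(void __iomem *foo_base)
{
	/* "uart_ck" gates on bit 5 of the register and is a child of "per_ck" */
	clk_register_gate(NULL, "uart_ck", "per_ck", 0,
			  foo_base + 0x10, 5, 0, &foo_gate_lock);
}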
188/**
189 * struct clk_divider - adjustable divider clock
190 *
191 * @hw: handle between common and hardware-specific interfaces
192 * @reg: register containing the divider
193 * @shift: shift to the divider bit field
194 * @width: width of the divider bit field
195 * @lock: register lock
196 *
197 * Clock with an adjustable divider affecting its output frequency. Implements
198 * .recalc_rate, .set_rate and .round_rate
199 *
200 * Flags:
201 * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
202 * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
203 * the raw value read from the register, with the value of zero considered
204 * invalid
205 * CLK_DIVIDER_POWER_OF_TWO - clock divisor is 2 raised to the value read from
206 * the hardware register
207 */
208struct clk_divider {
209 struct clk_hw hw;
210 void __iomem *reg;
211 u8 shift;
212 u8 width;
213 u8 flags;
214 spinlock_t *lock;
215 char *parent[1];
216};
217
218#define CLK_DIVIDER_ONE_BASED BIT(0)
219#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
220
221struct clk *clk_register_divider(struct device *dev, const char *name,
222 const char *parent_name, unsigned long flags,
223 void __iomem *reg, u8 shift, u8 width,
224 u8 clk_divider_flags, spinlock_t *lock);
225
226/**
227 * struct clk_mux - multiplexer clock
228 *
229 * @hw: handle between common and hardware-specific interfaces
230 * @reg: register controlling multiplexer
231 * @shift: shift to multiplexer bit field
 232 * @width: width of multiplexer bit field
 233 * @flags: hardware-specific flags
234 * @lock: register lock
235 *
236 * Clock with multiple selectable parents. Implements .get_parent, .set_parent
237 * and .recalc_rate
238 *
239 * Flags:
240 * CLK_MUX_INDEX_ONE - register index starts at 1, not 0
 241 * CLK_MUX_INDEX_BIT - register index is a single bit (power of two)
242 */
243struct clk_mux {
244 struct clk_hw hw;
245 void __iomem *reg;
246 u8 shift;
247 u8 width;
248 u8 flags;
249 spinlock_t *lock;
250};
251
252#define CLK_MUX_INDEX_ONE BIT(0)
253#define CLK_MUX_INDEX_BIT BIT(1)
254
255struct clk *clk_register_mux(struct device *dev, const char *name,
256 char **parent_names, u8 num_parents, unsigned long flags,
257 void __iomem *reg, u8 shift, u8 width,
258 u8 clk_mux_flags, spinlock_t *lock);
259
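Similarly, a sketch of registering a mux with three assumed parents and a two-bit select field; the names and register offset are hypothetical:

#include <linux/clk-provider.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_mux_lock);

static char *foo_mux_parents[] = { "osc", "pll1", "pll2" };

static void foo_register_cpu_mux(void __iomem *foo_base)
{
	/* two bits starting at bit 8 select among the three parents */
	clk_register_mux(NULL, "cpu_mux_ck", foo_mux_parents,
			 ARRAY_SIZE(foo_mux_parents), 0,
			 foo_base + 0x20, 8, 2, 0, &foo_mux_lock);
}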
260/**
261 * clk_register - allocate a new clock, register it and return an opaque cookie
262 * @dev: device that is registering this clock
263 * @name: clock name
264 * @ops: operations this clock supports
265 * @hw: link to hardware-specific clock data
266 * @parent_names: array of string names for all possible parents
267 * @num_parents: number of possible parents
268 * @flags: framework-level hints and quirks
269 *
270 * clk_register is the primary interface for populating the clock tree with new
271 * clock nodes. It returns a pointer to the newly allocated struct clk which
 272 * cannot be dereferenced by driver code but may be used in conjunction with the
273 * rest of the clock API.
274 */
275struct clk *clk_register(struct device *dev, const char *name,
276 const struct clk_ops *ops, struct clk_hw *hw,
277 char **parent_names, u8 num_parents, unsigned long flags);
278
279/* helper functions */
280const char *__clk_get_name(struct clk *clk);
281struct clk_hw *__clk_get_hw(struct clk *clk);
282u8 __clk_get_num_parents(struct clk *clk);
283struct clk *__clk_get_parent(struct clk *clk);
284int __clk_get_enable_count(struct clk *clk);
285int __clk_get_prepare_count(struct clk *clk);
286unsigned long __clk_get_rate(struct clk *clk);
287unsigned long __clk_get_flags(struct clk *clk);
288int __clk_is_enabled(struct clk *clk);
289struct clk *__clk_lookup(const char *name);
290
291/*
292 * FIXME clock api without lock protection
293 */
294int __clk_prepare(struct clk *clk);
295void __clk_unprepare(struct clk *clk);
296void __clk_reparent(struct clk *clk, struct clk *new_parent);
297unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
298
299#endif /* CONFIG_COMMON_CLK */
300#endif /* __LINUX_CLK_PROVIDER_H */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index b9d46fa154b4..b0252726df61 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) 2004 ARM Limited. 4 * Copyright (C) 2004 ARM Limited.
5 * Written by Deep Blue Solutions Limited. 5 * Written by Deep Blue Solutions Limited.
6 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -12,18 +13,75 @@
12#define __LINUX_CLK_H 13#define __LINUX_CLK_H
13 14
14#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/notifier.h>
15 17
16struct device; 18struct device;
17 19
18/* 20struct clk;
19 * The base API. 21
22#ifdef CONFIG_COMMON_CLK
23
24/**
25 * DOC: clk notifier callback types
26 *
27 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
28 * to indicate that the rate change will proceed. Drivers must
29 * immediately terminate any operations that will be affected by the
30 * rate change. Callbacks may either return NOTIFY_DONE or
31 * NOTIFY_STOP.
32 *
33 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
34 * after PRE_RATE_CHANGE. In this case, all registered notifiers on
35 * the clk will be called with ABORT_RATE_CHANGE. Callbacks must
36 * always return NOTIFY_DONE.
37 *
38 * POST_RATE_CHANGE - called after the clk rate change has successfully
39 * completed. Callbacks must always return NOTIFY_DONE.
40 *
20 */ 41 */
42#define PRE_RATE_CHANGE BIT(0)
43#define POST_RATE_CHANGE BIT(1)
44#define ABORT_RATE_CHANGE BIT(2)
21 45
46/**
47 * struct clk_notifier - associate a clk with a notifier
48 * @clk: struct clk * to associate the notifier with
49 * @notifier_head: an srcu_notifier_head for this clk
50 * @node: linked list pointers
51 *
52 * A list of struct clk_notifier is maintained by the notifier code.
53 * An entry is created whenever code registers the first notifier on a
54 * particular @clk. Future notifiers on that @clk are added to the
55 * @notifier_head.
56 */
57struct clk_notifier {
58 struct clk *clk;
59 struct srcu_notifier_head notifier_head;
60 struct list_head node;
61};
22 62
23/* 63/**
24 * struct clk - an machine class defined object / cookie. 64 * struct clk_notifier_data - rate data to pass to the notifier callback
65 * @clk: struct clk * being changed
66 * @old_rate: previous rate of this clk
67 * @new_rate: new rate of this clk
68 *
69 * For a pre-notifier, old_rate is the clk's rate before this rate
70 * change, and new_rate is what the rate will be in the future. For a
71 * post-notifier, old_rate and new_rate are both set to the clk's
72 * current rate (this was done to optimize the implementation).
25 */ 73 */
26struct clk; 74struct clk_notifier_data {
75 struct clk *clk;
76 unsigned long old_rate;
77 unsigned long new_rate;
78};
79
80int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
81
82int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
83
84#endif /* CONFIG_COMMON_CLK */
27 85
28/** 86/**
29 * clk_get - lookup and obtain a reference to a clock producer. 87 * clk_get - lookup and obtain a reference to a clock producer.