aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/kernel/cpu/clock.c
diff options
context:
space:
mode:
authorMagnus Damm <damm@igel.co.jp>2009-05-08 04:23:29 -0400
committerPaul Mundt <lethal@linux-sh.org>2009-05-08 04:46:22 -0400
commit4f5ecaa05493dfddf155b40224b951592bfce325 (patch)
tree8f37268849bc0fa6599e35e39cf01b01961ccb51 /arch/sh/kernel/cpu/clock.c
parent7d170b1bc540a1d83098a9f27cf4939e026fda81 (diff)
sh: clock framework update, fix count and kill off kref
This patch updates the clock framework use count code. With this patch the enable() and disable() callbacks only get called when counting from and to zero. While at it the kref stuff gets replaced with an int. Signed-off-by: Magnus Damm <damm@igel.co.jp> Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/kernel/cpu/clock.c')
-rw-r--r--arch/sh/kernel/cpu/clock.c69
1 file changed, 37 insertions, 32 deletions
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index 099373ae57d8..133dbe403341 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -19,7 +19,6 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/kref.h>
23#include <linux/kobject.h> 22#include <linux/kobject.h>
24#include <linux/sysdev.h> 23#include <linux/sysdev.h>
25#include <linux/seq_file.h> 24#include <linux/seq_file.h>
@@ -90,7 +89,7 @@ static void propagate_rate(struct clk *clk)
90 } 89 }
91} 90}
92 91
93static int __clk_enable(struct clk *clk) 92static void __clk_init(struct clk *clk)
94{ 93{
95 /* 94 /*
96 * See if this is the first time we're enabling the clock, some 95 * See if this is the first time we're enabling the clock, some
@@ -100,19 +99,33 @@ static int __clk_enable(struct clk *clk)
100 * divisors to use before it can effectively recalc. 99 * divisors to use before it can effectively recalc.
101 */ 100 */
102 101
103 if (clk->flags & CLK_ALWAYS_ENABLED) { 102 if (clk->flags & CLK_NEEDS_INIT) {
104 kref_get(&clk->kref);
105 return 0;
106 }
107
108 if (unlikely(atomic_read(&clk->kref.refcount) == 1))
109 if (clk->ops && clk->ops->init) 103 if (clk->ops && clk->ops->init)
110 clk->ops->init(clk); 104 clk->ops->init(clk);
111 105
112 kref_get(&clk->kref); 106 clk->flags &= ~CLK_NEEDS_INIT;
107 }
108}
109
110static int __clk_enable(struct clk *clk)
111{
112 if (!clk)
113 return -EINVAL;
114
115 clk->usecount++;
116
117 /* nothing to do if always enabled */
118 if (clk->flags & CLK_ALWAYS_ENABLED)
119 return 0;
120
121 if (clk->usecount == 1) {
122 __clk_init(clk);
113 123
114 if (likely(clk->ops && clk->ops->enable)) 124 __clk_enable(clk->parent);
115 clk->ops->enable(clk); 125
126 if (clk->ops && clk->ops->enable)
127 clk->ops->enable(clk);
128 }
116 129
117 return 0; 130 return 0;
118} 131}
@@ -122,11 +135,6 @@ int clk_enable(struct clk *clk)
122 unsigned long flags; 135 unsigned long flags;
123 int ret; 136 int ret;
124 137
125 if (!clk)
126 return -EINVAL;
127
128 clk_enable(clk->parent);
129
130 spin_lock_irqsave(&clock_lock, flags); 138 spin_lock_irqsave(&clock_lock, flags);
131 ret = __clk_enable(clk); 139 ret = __clk_enable(clk);
132 spin_unlock_irqrestore(&clock_lock, flags); 140 spin_unlock_irqrestore(&clock_lock, flags);
@@ -135,21 +143,23 @@ int clk_enable(struct clk *clk)
135} 143}
136EXPORT_SYMBOL_GPL(clk_enable); 144EXPORT_SYMBOL_GPL(clk_enable);
137 145
138static void clk_kref_release(struct kref *kref)
139{
140 /* Nothing to do */
141}
142
143static void __clk_disable(struct clk *clk) 146static void __clk_disable(struct clk *clk)
144{ 147{
145 int count = kref_put(&clk->kref, clk_kref_release); 148 if (!clk)
149 return;
150
151 clk->usecount--;
152
153 WARN_ON(clk->usecount < 0);
146 154
147 if (clk->flags & CLK_ALWAYS_ENABLED) 155 if (clk->flags & CLK_ALWAYS_ENABLED)
148 return; 156 return;
149 157
150 if (!count) { /* count reaches zero, disable the clock */ 158 if (clk->usecount == 0) {
151 if (likely(clk->ops && clk->ops->disable)) 159 if (likely(clk->ops && clk->ops->disable))
152 clk->ops->disable(clk); 160 clk->ops->disable(clk);
161
162 __clk_disable(clk->parent);
153 } 163 }
154} 164}
155 165
@@ -157,14 +167,9 @@ void clk_disable(struct clk *clk)
157{ 167{
158 unsigned long flags; 168 unsigned long flags;
159 169
160 if (!clk)
161 return;
162
163 spin_lock_irqsave(&clock_lock, flags); 170 spin_lock_irqsave(&clock_lock, flags);
164 __clk_disable(clk); 171 __clk_disable(clk);
165 spin_unlock_irqrestore(&clock_lock, flags); 172 spin_unlock_irqrestore(&clock_lock, flags);
166
167 clk_disable(clk->parent);
168} 173}
169EXPORT_SYMBOL_GPL(clk_disable); 174EXPORT_SYMBOL_GPL(clk_disable);
170 175
@@ -173,14 +178,14 @@ int clk_register(struct clk *clk)
173 mutex_lock(&clock_list_sem); 178 mutex_lock(&clock_list_sem);
174 179
175 list_add(&clk->node, &clock_list); 180 list_add(&clk->node, &clock_list);
176 kref_init(&clk->kref); 181 clk->usecount = 0;
182 clk->flags |= CLK_NEEDS_INIT;
177 183
178 mutex_unlock(&clock_list_sem); 184 mutex_unlock(&clock_list_sem);
179 185
180 if (clk->flags & CLK_ALWAYS_ENABLED) { 186 if (clk->flags & CLK_ALWAYS_ENABLED) {
187 __clk_init(clk);
181 pr_debug( "Clock '%s' is ALWAYS_ENABLED\n", clk->name); 188 pr_debug( "Clock '%s' is ALWAYS_ENABLED\n", clk->name);
182 if (clk->ops && clk->ops->init)
183 clk->ops->init(clk);
184 if (clk->ops && clk->ops->enable) 189 if (clk->ops && clk->ops->enable)
185 clk->ops->enable(clk); 190 clk->ops->enable(clk);
186 pr_debug( "Enabled."); 191 pr_debug( "Enabled.");
@@ -356,7 +361,7 @@ static int show_clocks(char *buf, char **start, off_t off,
356 p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name, 361 p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
357 rate / 1000000, (rate % 1000000) / 10000, 362 rate / 1000000, (rate % 1000000) / 10000,
358 ((clk->flags & CLK_ALWAYS_ENABLED) || 363 ((clk->flags & CLK_ALWAYS_ENABLED) ||
359 (atomic_read(&clk->kref.refcount) != 1)) ? 364 clk->usecount > 0) ?
360 "enabled" : "disabled"); 365 "enabled" : "disabled");
361 } 366 }
362 367