author	Paul Walmsley <paul@pwsan.com>	2011-02-16 17:38:38 -0500
committer	Paul Walmsley <paul@pwsan.com>	2011-03-07 22:19:39 -0500
commit	e07f469d284ca3d1f5dcf5438c22982be98bc071 (patch)
tree	6ad643524090dadaa0e14a9dcf64f4efa6576f78 /arch/arm/plat-omap/clock.c
parent	19c1c0ce9ddc45fe8f84b6cf12ba9dbecd7b1aa1 (diff)
OMAP: clock: bail out early if arch_clock functions not implemented

Bail out before we take the clockfw_lock spinlock if the corresponding
OMAP1 or OMAP2+ clock function is not defined. The intention is to
reduce and simplify the work that is done inside the spinlock.

Signed-off-by: Paul Walmsley <paul@pwsan.com>
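The patch applies one pattern to every clk_* entry point that dispatches through arch_clock: the NULL checks on arch_clock and on the specific function pointer are hoisted out of the critical section, so the routine returns before spin_lock_irqsave() is taken when the operation is unavailable, and the indirect call under the lock no longer needs a guard. (As a side effect, the now-unconditional assignments make the ret = 0 initializers dead, and clk_set_rate()/clk_set_parent() lose their open-coded clk->recalc step, leaving only propagate_rate() on success.) A minimal before/after sketch of the pattern, reduced from the clk_enable() hunk below; the _before/_after suffixes are only for side-by-side illustration:

/* Before: function pointer checked while holding clockfw_lock */
int clk_enable_before(struct clk *clk)
{
	unsigned long flags;
	int ret = 0;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_enable)
		ret = arch_clock->clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}

/* After: bail out early; the lock is only taken when there is work to do */
int clk_enable_after(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (!arch_clock || !arch_clock->clk_enable)
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = arch_clock->clk_enable(clk);	/* pointer known valid here */
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}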
Diffstat (limited to 'arch/arm/plat-omap/clock.c')
-rw-r--r--	arch/arm/plat-omap/clock.c	66
1 file changed, 38 insertions(+), 28 deletions(-)
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 2770dddd72c0..c9122dd6ee8d 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -37,14 +37,16 @@ static struct clk_functions *arch_clock;
 int clk_enable(struct clk *clk)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	if (clk == NULL || IS_ERR(clk))
 		return -EINVAL;
 
+	if (!arch_clock || !arch_clock->clk_enable)
+		return -EINVAL;
+
 	spin_lock_irqsave(&clockfw_lock, flags);
-	if (arch_clock->clk_enable)
-		ret = arch_clock->clk_enable(clk);
+	ret = arch_clock->clk_enable(clk);
 	spin_unlock_irqrestore(&clockfw_lock, flags);
 
 	return ret;
@@ -58,6 +60,9 @@ void clk_disable(struct clk *clk)
 	if (clk == NULL || IS_ERR(clk))
 		return;
 
+	if (!arch_clock || !arch_clock->clk_disable)
+		return;
+
 	spin_lock_irqsave(&clockfw_lock, flags);
 	if (clk->usecount == 0) {
 		pr_err("Trying disable clock %s with 0 usecount\n",
@@ -66,8 +71,7 @@ void clk_disable(struct clk *clk)
 		goto out;
 	}
 
-	if (arch_clock->clk_disable)
-		arch_clock->clk_disable(clk);
+	arch_clock->clk_disable(clk);
 
 out:
 	spin_unlock_irqrestore(&clockfw_lock, flags);
@@ -77,7 +81,7 @@ EXPORT_SYMBOL(clk_disable);
 unsigned long clk_get_rate(struct clk *clk)
 {
 	unsigned long flags;
-	unsigned long ret = 0;
+	unsigned long ret;
 
 	if (clk == NULL || IS_ERR(clk))
 		return 0;
@@ -97,14 +101,16 @@ EXPORT_SYMBOL(clk_get_rate);
 long clk_round_rate(struct clk *clk, unsigned long rate)
 {
 	unsigned long flags;
-	long ret = 0;
+	long ret;
 
 	if (clk == NULL || IS_ERR(clk))
-		return ret;
+		return 0;
+
+	if (!arch_clock || !arch_clock->clk_round_rate)
+		return 0;
 
 	spin_lock_irqsave(&clockfw_lock, flags);
-	if (arch_clock->clk_round_rate)
-		ret = arch_clock->clk_round_rate(clk, rate);
+	ret = arch_clock->clk_round_rate(clk, rate);
 	spin_unlock_irqrestore(&clockfw_lock, flags);
 
 	return ret;
@@ -119,14 +125,13 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 	if (clk == NULL || IS_ERR(clk))
 		return ret;
 
+	if (!arch_clock || !arch_clock->clk_set_rate)
+		return ret;
+
 	spin_lock_irqsave(&clockfw_lock, flags);
-	if (arch_clock->clk_set_rate)
-		ret = arch_clock->clk_set_rate(clk, rate);
-	if (ret == 0) {
-		if (clk->recalc)
-			clk->rate = clk->recalc(clk);
+	ret = arch_clock->clk_set_rate(clk, rate);
+	if (ret == 0)
 		propagate_rate(clk);
-	}
 	spin_unlock_irqrestore(&clockfw_lock, flags);
 
 	return ret;
@@ -141,15 +146,14 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
 	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
 		return ret;
 
+	if (!arch_clock || !arch_clock->clk_set_parent)
+		return ret;
+
 	spin_lock_irqsave(&clockfw_lock, flags);
 	if (clk->usecount == 0) {
-		if (arch_clock->clk_set_parent)
-			ret = arch_clock->clk_set_parent(clk, parent);
-		if (ret == 0) {
-			if (clk->recalc)
-				clk->rate = clk->recalc(clk);
+		ret = arch_clock->clk_set_parent(clk, parent);
+		if (ret == 0)
 			propagate_rate(clk);
-		}
 	} else
 		ret = -EBUSY;
 	spin_unlock_irqrestore(&clockfw_lock, flags);
@@ -399,9 +403,11 @@ void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
 {
 	unsigned long flags;
 
+	if (!arch_clock || !arch_clock->clk_init_cpufreq_table)
+		return;
+
 	spin_lock_irqsave(&clockfw_lock, flags);
-	if (arch_clock->clk_init_cpufreq_table)
-		arch_clock->clk_init_cpufreq_table(table);
+	arch_clock->clk_init_cpufreq_table(table);
 	spin_unlock_irqrestore(&clockfw_lock, flags);
 }
 
@@ -409,9 +415,11 @@ void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
 {
 	unsigned long flags;
 
+	if (!arch_clock || !arch_clock->clk_exit_cpufreq_table)
+		return;
+
 	spin_lock_irqsave(&clockfw_lock, flags);
-	if (arch_clock->clk_exit_cpufreq_table)
-		arch_clock->clk_exit_cpufreq_table(table);
+	arch_clock->clk_exit_cpufreq_table(table);
 	spin_unlock_irqrestore(&clockfw_lock, flags);
 }
 #endif
@@ -429,6 +437,9 @@ static int __init clk_disable_unused(void)
 	struct clk *ck;
 	unsigned long flags;
 
+	if (!arch_clock || !arch_clock->clk_disable_unused)
+		return 0;
+
 	pr_info("clock: disabling unused clocks to save power\n");
 	list_for_each_entry(ck, &clocks, node) {
 		if (ck->ops == &clkops_null)
@@ -438,8 +449,7 @@ static int __init clk_disable_unused(void)
 			continue;
 
 		spin_lock_irqsave(&clockfw_lock, flags);
-		if (arch_clock->clk_disable_unused)
-			arch_clock->clk_disable_unused(ck);
+		arch_clock->clk_disable_unused(ck);
 		spin_unlock_irqrestore(&clockfw_lock, flags);
 	}
 
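For callers nothing changes apart from the earlier return: clk_enable() still reports -EINVAL when no arch_clock operation is available, just without the lock round-trip, and the void entry points simply return. A hypothetical caller sketch; omap_example_power_on() and the fck argument are illustrative, not from the patch:

/* Hypothetical usage: error handling is identical before and after the patch */
static int omap_example_power_on(struct clk *fck)
{
	int ret;

	ret = clk_enable(fck);	/* -EINVAL if arch_clock or its clk_enable op is missing */
	if (ret)
		return ret;

	/* ... use the device ... */

	clk_disable(fck);	/* returns silently if the clk_disable op is missing */
	return 0;
}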