author	Rafael J. Wysocki <rjw@sisk.pl>	2011-08-24 15:40:56 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2011-08-24 15:40:56 -0400
commit	b7ab83edba2d50583bc9520431618489379718b2 (patch)
tree	25b780542b1b58451345a1352d221a32a3b86e9a /drivers/base/power
parent	a53e77fa57145cef6e2a63925db3d426128b2335 (diff)
PM: Use spinlock instead of mutex in clock management functions
The lock member of struct pm_clk_data is of type struct mutex, which is a problem, because the suspend and resume routines defined in drivers/base/power/clock_ops.c cannot be executed with interrupts disabled for this reason. Modify struct pm_clk_data so that its lock member is a spinlock.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Magnus Damm <damm@opensource.se>
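For context only (this sketch is not part of the commit): the struct and function names below (example_data, example_late_suspend) are hypothetical stand-ins for the pm_clk_data code; only spinlock_t, spin_lock_irqsave() and spin_unlock_irqrestore() are the kernel primitives the patch switches to. The point it illustrates is that mutex_lock() may sleep and therefore cannot be taken from code running with interrupts disabled, whereas an irqsave spinlock can be taken in any context and restores the caller's interrupt state.

/*
 * Hypothetical sketch, not from the kernel tree: why the per-device clock
 * list must be protected by a spinlock rather than a mutex when it is
 * walked from suspend/resume code that may run with interrupts off.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_data {                   /* stand-in for struct pm_clk_data */
        struct list_head entries;
        spinlock_t lock;                /* was: struct mutex lock */
};

static int example_late_suspend(struct example_data *ed)
{
        struct list_head *pos;
        unsigned long flags;

        /* Safe even if the caller already has interrupts disabled. */
        spin_lock_irqsave(&ed->lock, flags);
        list_for_each(pos, &ed->entries) {
                /* per-entry work that must not sleep, e.g. clk_disable() */
        }
        spin_unlock_irqrestore(&ed->lock, flags);

        return 0;
}

Note the split visible in the diff below: the process-context paths (pm_clk_add, pm_clk_remove, pm_clk_destroy) use the plain spin_lock_irq()/spin_unlock_irq() pair, while the suspend and resume callbacks, which may be entered with interrupts already off, use the irqsave/irqrestore variants with a local flags variable.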
Diffstat (limited to 'drivers/base/power')
-rw-r--r--	drivers/base/power/clock_ops.c	40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index a846b2f95cfb..2c18d584066d 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -19,7 +19,7 @@
 
 struct pm_clk_data {
         struct list_head clock_list;
-        struct mutex lock;
+        spinlock_t lock;
 };
 
 enum pce_status {
@@ -73,9 +73,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
                 }
         }
 
-        mutex_lock(&pcd->lock);
+        spin_lock_irq(&pcd->lock);
         list_add_tail(&ce->node, &pcd->clock_list);
-        mutex_unlock(&pcd->lock);
+        spin_unlock_irq(&pcd->lock);
         return 0;
 }
 
@@ -83,8 +83,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
  * __pm_clk_remove - Destroy PM clock entry.
  * @ce: PM clock entry to destroy.
  *
- * This routine must be called under the mutex protecting the PM list of clocks
- * corresponding the the @ce's device.
+ * This routine must be called under the spinlock protecting the PM list of
+ * clocks corresponding the the @ce's device.
  */
 static void __pm_clk_remove(struct pm_clock_entry *ce)
 {
@@ -123,7 +123,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
         if (!pcd)
                 return;
 
-        mutex_lock(&pcd->lock);
+        spin_lock_irq(&pcd->lock);
 
         list_for_each_entry(ce, &pcd->clock_list, node) {
                 if (!con_id && !ce->con_id) {
@@ -137,7 +137,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
                 }
         }
 
-        mutex_unlock(&pcd->lock);
+        spin_unlock_irq(&pcd->lock);
 }
 
 /**
@@ -158,7 +158,7 @@ int pm_clk_init(struct device *dev)
         }
 
         INIT_LIST_HEAD(&pcd->clock_list);
-        mutex_init(&pcd->lock);
+        spin_lock_init(&pcd->lock);
         dev->power.subsys_data = pcd;
         return 0;
 }
@@ -181,12 +181,12 @@ void pm_clk_destroy(struct device *dev)
 
         dev->power.subsys_data = NULL;
 
-        mutex_lock(&pcd->lock);
+        spin_lock_irq(&pcd->lock);
 
         list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
                 __pm_clk_remove(ce);
 
-        mutex_unlock(&pcd->lock);
+        spin_unlock_irq(&pcd->lock);
 
         kfree(pcd);
 }
@@ -220,13 +220,14 @@ int pm_clk_suspend(struct device *dev)
 {
         struct pm_clk_data *pcd = __to_pcd(dev);
         struct pm_clock_entry *ce;
+        unsigned long flags;
 
         dev_dbg(dev, "%s()\n", __func__);
 
         if (!pcd)
                 return 0;
 
-        mutex_lock(&pcd->lock);
+        spin_lock_irqsave(&pcd->lock, flags);
 
         list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
                 if (ce->status == PCE_STATUS_NONE)
@@ -238,7 +239,7 @@ int pm_clk_suspend(struct device *dev)
                 }
         }
 
-        mutex_unlock(&pcd->lock);
+        spin_unlock_irqrestore(&pcd->lock, flags);
 
         return 0;
 }
@@ -251,13 +252,14 @@ int pm_clk_resume(struct device *dev)
 {
         struct pm_clk_data *pcd = __to_pcd(dev);
         struct pm_clock_entry *ce;
+        unsigned long flags;
 
         dev_dbg(dev, "%s()\n", __func__);
 
         if (!pcd)
                 return 0;
 
-        mutex_lock(&pcd->lock);
+        spin_lock_irqsave(&pcd->lock, flags);
 
         list_for_each_entry(ce, &pcd->clock_list, node) {
                 if (ce->status == PCE_STATUS_NONE)
@@ -269,7 +271,7 @@ int pm_clk_resume(struct device *dev)
                 }
         }
 
-        mutex_unlock(&pcd->lock);
+        spin_unlock_irqrestore(&pcd->lock, flags);
 
         return 0;
 }
@@ -344,6 +346,7 @@ int pm_clk_suspend(struct device *dev)
 {
         struct pm_clk_data *pcd = __to_pcd(dev);
         struct pm_clock_entry *ce;
+        unsigned long flags;
 
         dev_dbg(dev, "%s()\n", __func__);
 
@@ -351,12 +354,12 @@ int pm_clk_suspend(struct device *dev)
         if (!pcd || !dev->driver)
                 return 0;
 
-        mutex_lock(&pcd->lock);
+        spin_lock_irqsave(&pcd->lock, flags);
 
         list_for_each_entry_reverse(ce, &pcd->clock_list, node)
                 clk_disable(ce->clk);
 
-        mutex_unlock(&pcd->lock);
+        spin_unlock_irqrestore(&pcd->lock, flags);
 
         return 0;
 }
@@ -369,6 +372,7 @@ int pm_clk_resume(struct device *dev)
 {
         struct pm_clk_data *pcd = __to_pcd(dev);
         struct pm_clock_entry *ce;
+        unsigned long flags;
 
         dev_dbg(dev, "%s()\n", __func__);
 
@@ -376,12 +380,12 @@ int pm_clk_resume(struct device *dev)
         if (!pcd || !dev->driver)
                 return 0;
 
-        mutex_lock(&pcd->lock);
+        spin_lock_irqsave(&pcd->lock, flags);
 
         list_for_each_entry(ce, &pcd->clock_list, node)
                 clk_enable(ce->clk);
 
-        mutex_unlock(&pcd->lock);
+        spin_unlock_irqrestore(&pcd->lock, flags);
 
         return 0;
 }