aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/base/power
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-11-13 23:41:48 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-11-13 23:41:48 -0500
commitf9300eaaac1ca300083ad41937923a90cc3a2394 (patch)
tree724b72ad729a8b85c09d2d54f8ca7d8ba22d774f /drivers/base/power
parent7f2dc5c4bcbff035b0d03f7aa78a182664b21e47 (diff)
parentfaddf2f5d278f1656e9444961bdd8d9db4deb5bf (diff)
Merge tag 'pm+acpi-3.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management updates from Rafael J Wysocki: - New power capping framework and the Intel Running Average Power Limit (RAPL) driver using it from Srinivas Pandruvada and Jacob Pan. - Addition of the in-kernel switching feature to the arm_big_little cpufreq driver from Viresh Kumar and Nicolas Pitre. - cpufreq support for iMac G5 from Aaro Koskinen. - Baytrail processors support for intel_pstate from Dirk Brandewie. - cpufreq support for Midway/ECX-2000 from Mark Langsdorf. - ARM vexpress/TC2 cpufreq support from Sudeep KarkadaNagesha. - ACPI power management support for the I2C and SPI bus types from Mika Westerberg and Lv Zheng. - cpufreq core fixes and cleanups from Viresh Kumar, Srivatsa S Bhat, Stratos Karafotis, Xiaoguang Chen, Lan Tianyu. - cpufreq drivers updates (mostly fixes and cleanups) from Viresh Kumar, Aaro Koskinen, Jungseok Lee, Sudeep KarkadaNagesha, Lukasz Majewski, Manish Badarkhe, Hans-Christian Egtvedt, Evgeny Kapaev. - intel_pstate updates from Dirk Brandewie and Adrian Huang. - ACPICA update to version 20130927 including fixes and cleanups and some reduction of divergences between the ACPICA code in the kernel and ACPICA upstream in order to improve the automatic ACPICA patch generation process. From Bob Moore, Lv Zheng, Tomasz Nowicki, Naresh Bhat, Bjorn Helgaas, David E Box. - ACPI IPMI driver fixes and cleanups from Lv Zheng. - ACPI hotplug fixes and cleanups from Bjorn Helgaas, Toshi Kani, Zhang Yanfei, Rafael J Wysocki. - Conversion of the ACPI AC driver to the platform bus type and multiple driver fixes and cleanups related to ACPI from Zhang Rui. - ACPI processor driver fixes and cleanups from Hanjun Guo, Jiang Liu, Bartlomiej Zolnierkiewicz, Mathieu Rhéaume, Rafael J Wysocki. - Fixes and cleanups and new blacklist entries related to the ACPI video support from Aaron Lu, Felipe Contreras, Lennart Poettering, Kirill Tkhai. - cpuidle core cleanups from Viresh Kumar and Lorenzo Pieralisi. 
- cpuidle drivers fixes and cleanups from Daniel Lezcano, Jingoo Han, Bartlomiej Zolnierkiewicz, Prarit Bhargava. - devfreq updates from Sachin Kamat, Dan Carpenter, Manish Badarkhe. - Operating Performance Points (OPP) core updates from Nishanth Menon. - Runtime power management core fix from Rafael J Wysocki and update from Ulf Hansson. - Hibernation fixes from Aaron Lu and Rafael J Wysocki. - Device suspend/resume lockup detection mechanism from Benoit Goby. - Removal of unused proc directories created for various ACPI drivers from Lan Tianyu. - ACPI LPSS driver fix and new device IDs for the ACPI platform scan handler from Heikki Krogerus and Jarkko Nikula. - New ACPI _OSI blacklist entry for Toshiba NB100 from Levente Kurusa. - Assorted fixes and cleanups related to ACPI from Andy Shevchenko, Al Stone, Bartlomiej Zolnierkiewicz, Colin Ian King, Dan Carpenter, Felipe Contreras, Jianguo Wu, Lan Tianyu, Yinghai Lu, Mathias Krause, Liu Chuansheng. - Assorted PM fixes and cleanups from Andy Shevchenko, Thierry Reding, Jean-Christophe Plagniol-Villard. 
* tag 'pm+acpi-3.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (386 commits) cpufreq: conservative: fix requested_freq reduction issue ACPI / hotplug: Consolidate deferred execution of ACPI hotplug routines PM / runtime: Use pm_runtime_put_sync() in __device_release_driver() ACPI / event: remove unneeded NULL pointer check Revert "ACPI / video: Ignore BIOS initial backlight value for HP 250 G1" ACPI / video: Quirk initial backlight level 0 ACPI / video: Fix initial level validity test intel_pstate: skip the driver if ACPI has power mgmt option PM / hibernate: Avoid overflow in hibernate_preallocate_memory() ACPI / hotplug: Do not execute "insert in progress" _OST ACPI / hotplug: Carry out PCI root eject directly ACPI / hotplug: Merge device hot-removal routines ACPI / hotplug: Make acpi_bus_hot_remove_device() internal ACPI / hotplug: Simplify device ejection routines ACPI / hotplug: Fix handle_root_bridge_removal() ACPI / hotplug: Refuse to hot-remove all objects with disabled hotplug ACPI / scan: Start matching drivers after trying scan handlers ACPI: Remove acpi_pci_slot_init() headers from internal.h ACPI / blacklist: fix name of ThinkPad Edge E530 PowerCap: Fix build error with option -Werror=format-security ... Conflicts: arch/arm/mach-omap2/opp.c drivers/Kconfig drivers/spi/spi.c
Diffstat (limited to 'drivers/base/power')
-rw-r--r--drivers/base/power/main.c73
-rw-r--r--drivers/base/power/opp.c115
-rw-r--r--drivers/base/power/runtime.c5
3 files changed, 135 insertions, 58 deletions
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9f098a82cf04..ee039afe9078 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -30,6 +30,8 @@
30#include <linux/suspend.h> 30#include <linux/suspend.h>
31#include <trace/events/power.h> 31#include <trace/events/power.h>
32#include <linux/cpuidle.h> 32#include <linux/cpuidle.h>
33#include <linux/timer.h>
34
33#include "../base.h" 35#include "../base.h"
34#include "power.h" 36#include "power.h"
35 37
@@ -390,6 +392,71 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
390 return error; 392 return error;
391} 393}
392 394
395#ifdef CONFIG_DPM_WATCHDOG
396struct dpm_watchdog {
397 struct device *dev;
398 struct task_struct *tsk;
399 struct timer_list timer;
400};
401
402#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
403 struct dpm_watchdog wd
404
405/**
406 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
407 * @data: Watchdog object address.
408 *
409 * Called when a driver has timed out suspending or resuming.
410 * There's not much we can do here to recover so panic() to
411 * capture a crash-dump in pstore.
412 */
413static void dpm_watchdog_handler(unsigned long data)
414{
415 struct dpm_watchdog *wd = (void *)data;
416
417 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
418 show_stack(wd->tsk, NULL);
419 panic("%s %s: unrecoverable failure\n",
420 dev_driver_string(wd->dev), dev_name(wd->dev));
421}
422
423/**
424 * dpm_watchdog_set - Enable pm watchdog for given device.
425 * @wd: Watchdog. Must be allocated on the stack.
426 * @dev: Device to handle.
427 */
428static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
429{
430 struct timer_list *timer = &wd->timer;
431
432 wd->dev = dev;
433 wd->tsk = current;
434
435 init_timer_on_stack(timer);
436 /* use same timeout value for both suspend and resume */
437 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
438 timer->function = dpm_watchdog_handler;
439 timer->data = (unsigned long)wd;
440 add_timer(timer);
441}
442
443/**
444 * dpm_watchdog_clear - Disable suspend/resume watchdog.
445 * @wd: Watchdog to disable.
446 */
447static void dpm_watchdog_clear(struct dpm_watchdog *wd)
448{
449 struct timer_list *timer = &wd->timer;
450
451 del_timer_sync(timer);
452 destroy_timer_on_stack(timer);
453}
454#else
455#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
456#define dpm_watchdog_set(x, y)
457#define dpm_watchdog_clear(x)
458#endif
459
393/*------------------------- Resume routines -------------------------*/ 460/*------------------------- Resume routines -------------------------*/
394 461
395/** 462/**
@@ -576,6 +643,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
576 pm_callback_t callback = NULL; 643 pm_callback_t callback = NULL;
577 char *info = NULL; 644 char *info = NULL;
578 int error = 0; 645 int error = 0;
646 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
579 647
580 TRACE_DEVICE(dev); 648 TRACE_DEVICE(dev);
581 TRACE_RESUME(0); 649 TRACE_RESUME(0);
@@ -584,6 +652,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
584 goto Complete; 652 goto Complete;
585 653
586 dpm_wait(dev->parent, async); 654 dpm_wait(dev->parent, async);
655 dpm_watchdog_set(&wd, dev);
587 device_lock(dev); 656 device_lock(dev);
588 657
589 /* 658 /*
@@ -642,6 +711,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
642 711
643 Unlock: 712 Unlock:
644 device_unlock(dev); 713 device_unlock(dev);
714 dpm_watchdog_clear(&wd);
645 715
646 Complete: 716 Complete:
647 complete_all(&dev->power.completion); 717 complete_all(&dev->power.completion);
@@ -1060,6 +1130,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1060 pm_callback_t callback = NULL; 1130 pm_callback_t callback = NULL;
1061 char *info = NULL; 1131 char *info = NULL;
1062 int error = 0; 1132 int error = 0;
1133 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1063 1134
1064 dpm_wait_for_children(dev, async); 1135 dpm_wait_for_children(dev, async);
1065 1136
@@ -1083,6 +1154,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1083 if (dev->power.syscore) 1154 if (dev->power.syscore)
1084 goto Complete; 1155 goto Complete;
1085 1156
1157 dpm_watchdog_set(&wd, dev);
1086 device_lock(dev); 1158 device_lock(dev);
1087 1159
1088 if (dev->pm_domain) { 1160 if (dev->pm_domain) {
@@ -1139,6 +1211,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1139 } 1211 }
1140 1212
1141 device_unlock(dev); 1213 device_unlock(dev);
1214 dpm_watchdog_clear(&wd);
1142 1215
1143 Complete: 1216 Complete:
1144 complete_all(&dev->power.completion); 1217 complete_all(&dev->power.completion);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ef89897c6043..fa4187418440 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -21,7 +21,7 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/rculist.h> 22#include <linux/rculist.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/opp.h> 24#include <linux/pm_opp.h>
25#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/export.h> 26#include <linux/export.h>
27 27
@@ -42,7 +42,7 @@
42 */ 42 */
43 43
44/** 44/**
45 * struct opp - Generic OPP description structure 45 * struct dev_pm_opp - Generic OPP description structure
46 * @node: opp list node. The nodes are maintained throughout the lifetime 46 * @node: opp list node. The nodes are maintained throughout the lifetime
47 * of boot. It is expected only an optimal set of OPPs are 47 * of boot. It is expected only an optimal set of OPPs are
48 * added to the library by the SoC framework. 48 * added to the library by the SoC framework.
@@ -59,7 +59,7 @@
59 * 59 *
60 * This structure stores the OPP information for a given device. 60 * This structure stores the OPP information for a given device.
61 */ 61 */
62struct opp { 62struct dev_pm_opp {
63 struct list_head node; 63 struct list_head node;
64 64
65 bool available; 65 bool available;
@@ -136,7 +136,7 @@ static struct device_opp *find_device_opp(struct device *dev)
136} 136}
137 137
138/** 138/**
139 * opp_get_voltage() - Gets the voltage corresponding to an available opp 139 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
140 * @opp: opp for which voltage has to be returned for 140 * @opp: opp for which voltage has to be returned for
141 * 141 *
142 * Return voltage in micro volt corresponding to the opp, else 142 * Return voltage in micro volt corresponding to the opp, else
@@ -150,9 +150,9 @@ static struct device_opp *find_device_opp(struct device *dev)
150 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the 150 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
151 * pointer. 151 * pointer.
152 */ 152 */
153unsigned long opp_get_voltage(struct opp *opp) 153unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
154{ 154{
155 struct opp *tmp_opp; 155 struct dev_pm_opp *tmp_opp;
156 unsigned long v = 0; 156 unsigned long v = 0;
157 157
158 tmp_opp = rcu_dereference(opp); 158 tmp_opp = rcu_dereference(opp);
@@ -163,10 +163,10 @@ unsigned long opp_get_voltage(struct opp *opp)
163 163
164 return v; 164 return v;
165} 165}
166EXPORT_SYMBOL_GPL(opp_get_voltage); 166EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
167 167
168/** 168/**
169 * opp_get_freq() - Gets the frequency corresponding to an available opp 169 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
170 * @opp: opp for which frequency has to be returned for 170 * @opp: opp for which frequency has to be returned for
171 * 171 *
172 * Return frequency in hertz corresponding to the opp, else 172 * Return frequency in hertz corresponding to the opp, else
@@ -180,9 +180,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage);
180 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the 180 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
181 * pointer. 181 * pointer.
182 */ 182 */
183unsigned long opp_get_freq(struct opp *opp) 183unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
184{ 184{
185 struct opp *tmp_opp; 185 struct dev_pm_opp *tmp_opp;
186 unsigned long f = 0; 186 unsigned long f = 0;
187 187
188 tmp_opp = rcu_dereference(opp); 188 tmp_opp = rcu_dereference(opp);
@@ -193,10 +193,10 @@ unsigned long opp_get_freq(struct opp *opp)
193 193
194 return f; 194 return f;
195} 195}
196EXPORT_SYMBOL_GPL(opp_get_freq); 196EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
197 197
198/** 198/**
199 * opp_get_opp_count() - Get number of opps available in the opp list 199 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
200 * @dev: device for which we do this operation 200 * @dev: device for which we do this operation
201 * 201 *
202 * This function returns the number of available opps if there are any, 202 * This function returns the number of available opps if there are any,
@@ -206,10 +206,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq);
206 * internally references two RCU protected structures: device_opp and opp which 206 * internally references two RCU protected structures: device_opp and opp which
207 * are safe as long as we are under a common RCU locked section. 207 * are safe as long as we are under a common RCU locked section.
208 */ 208 */
209int opp_get_opp_count(struct device *dev) 209int dev_pm_opp_get_opp_count(struct device *dev)
210{ 210{
211 struct device_opp *dev_opp; 211 struct device_opp *dev_opp;
212 struct opp *temp_opp; 212 struct dev_pm_opp *temp_opp;
213 int count = 0; 213 int count = 0;
214 214
215 dev_opp = find_device_opp(dev); 215 dev_opp = find_device_opp(dev);
@@ -226,10 +226,10 @@ int opp_get_opp_count(struct device *dev)
226 226
227 return count; 227 return count;
228} 228}
229EXPORT_SYMBOL_GPL(opp_get_opp_count); 229EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
230 230
231/** 231/**
232 * opp_find_freq_exact() - search for an exact frequency 232 * dev_pm_opp_find_freq_exact() - search for an exact frequency
233 * @dev: device for which we do this operation 233 * @dev: device for which we do this operation
234 * @freq: frequency to search for 234 * @freq: frequency to search for
235 * @available: true/false - match for available opp 235 * @available: true/false - match for available opp
@@ -254,11 +254,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count);
254 * under the locked area. The pointer returned must be used prior to unlocking 254 * under the locked area. The pointer returned must be used prior to unlocking
255 * with rcu_read_unlock() to maintain the integrity of the pointer. 255 * with rcu_read_unlock() to maintain the integrity of the pointer.
256 */ 256 */
257struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, 257struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
258 bool available) 258 unsigned long freq,
259 bool available)
259{ 260{
260 struct device_opp *dev_opp; 261 struct device_opp *dev_opp;
261 struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); 262 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
262 263
263 dev_opp = find_device_opp(dev); 264 dev_opp = find_device_opp(dev);
264 if (IS_ERR(dev_opp)) { 265 if (IS_ERR(dev_opp)) {
@@ -277,10 +278,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
277 278
278 return opp; 279 return opp;
279} 280}
280EXPORT_SYMBOL_GPL(opp_find_freq_exact); 281EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
281 282
282/** 283/**
283 * opp_find_freq_ceil() - Search for an rounded ceil freq 284 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
284 * @dev: device for which we do this operation 285 * @dev: device for which we do this operation
285 * @freq: Start frequency 286 * @freq: Start frequency
286 * 287 *
@@ -300,10 +301,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact);
300 * under the locked area. The pointer returned must be used prior to unlocking 301 * under the locked area. The pointer returned must be used prior to unlocking
301 * with rcu_read_unlock() to maintain the integrity of the pointer. 302 * with rcu_read_unlock() to maintain the integrity of the pointer.
302 */ 303 */
303struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) 304struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
305 unsigned long *freq)
304{ 306{
305 struct device_opp *dev_opp; 307 struct device_opp *dev_opp;
306 struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); 308 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
307 309
308 if (!dev || !freq) { 310 if (!dev || !freq) {
309 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); 311 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -324,10 +326,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
324 326
325 return opp; 327 return opp;
326} 328}
327EXPORT_SYMBOL_GPL(opp_find_freq_ceil); 329EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
328 330
329/** 331/**
330 * opp_find_freq_floor() - Search for a rounded floor freq 332 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
331 * @dev: device for which we do this operation 333 * @dev: device for which we do this operation
332 * @freq: Start frequency 334 * @freq: Start frequency
333 * 335 *
@@ -347,10 +349,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
347 * under the locked area. The pointer returned must be used prior to unlocking 349 * under the locked area. The pointer returned must be used prior to unlocking
348 * with rcu_read_unlock() to maintain the integrity of the pointer. 350 * with rcu_read_unlock() to maintain the integrity of the pointer.
349 */ 351 */
350struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) 352struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
353 unsigned long *freq)
351{ 354{
352 struct device_opp *dev_opp; 355 struct device_opp *dev_opp;
353 struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); 356 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
354 357
355 if (!dev || !freq) { 358 if (!dev || !freq) {
356 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); 359 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -375,17 +378,17 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
375 378
376 return opp; 379 return opp;
377} 380}
378EXPORT_SYMBOL_GPL(opp_find_freq_floor); 381EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
379 382
380/** 383/**
381 * opp_add() - Add an OPP table from a table definitions 384 * dev_pm_opp_add() - Add an OPP table from a table definitions
382 * @dev: device for which we do this operation 385 * @dev: device for which we do this operation
383 * @freq: Frequency in Hz for this OPP 386 * @freq: Frequency in Hz for this OPP
384 * @u_volt: Voltage in uVolts for this OPP 387 * @u_volt: Voltage in uVolts for this OPP
385 * 388 *
386 * This function adds an opp definition to the opp list and returns status. 389 * This function adds an opp definition to the opp list and returns status.
387 * The opp is made available by default and it can be controlled using 390 * The opp is made available by default and it can be controlled using
388 * opp_enable/disable functions. 391 * dev_pm_opp_enable/disable functions.
389 * 392 *
390 * Locking: The internal device_opp and opp structures are RCU protected. 393 * Locking: The internal device_opp and opp structures are RCU protected.
391 * Hence this function internally uses RCU updater strategy with mutex locks 394 * Hence this function internally uses RCU updater strategy with mutex locks
@@ -393,14 +396,14 @@ EXPORT_SYMBOL_GPL(opp_find_freq_floor);
393 * that this function is *NOT* called under RCU protection or in contexts where 396 * that this function is *NOT* called under RCU protection or in contexts where
394 * mutex cannot be locked. 397 * mutex cannot be locked.
395 */ 398 */
396int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) 399int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
397{ 400{
398 struct device_opp *dev_opp = NULL; 401 struct device_opp *dev_opp = NULL;
399 struct opp *opp, *new_opp; 402 struct dev_pm_opp *opp, *new_opp;
400 struct list_head *head; 403 struct list_head *head;
401 404
402 /* allocate new OPP node */ 405 /* allocate new OPP node */
403 new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); 406 new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
404 if (!new_opp) { 407 if (!new_opp) {
405 dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); 408 dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
406 return -ENOMEM; 409 return -ENOMEM;
@@ -460,7 +463,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
460 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); 463 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
461 return 0; 464 return 0;
462} 465}
463EXPORT_SYMBOL_GPL(opp_add); 466EXPORT_SYMBOL_GPL(dev_pm_opp_add);
464 467
465/** 468/**
466 * opp_set_availability() - helper to set the availability of an opp 469 * opp_set_availability() - helper to set the availability of an opp
@@ -485,11 +488,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
485 bool availability_req) 488 bool availability_req)
486{ 489{
487 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); 490 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
488 struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); 491 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
489 int r = 0; 492 int r = 0;
490 493
491 /* keep the node allocated */ 494 /* keep the node allocated */
492 new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); 495 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
493 if (!new_opp) { 496 if (!new_opp) {
494 dev_warn(dev, "%s: Unable to create OPP\n", __func__); 497 dev_warn(dev, "%s: Unable to create OPP\n", __func__);
495 return -ENOMEM; 498 return -ENOMEM;
@@ -552,13 +555,13 @@ unlock:
552} 555}
553 556
554/** 557/**
555 * opp_enable() - Enable a specific OPP 558 * dev_pm_opp_enable() - Enable a specific OPP
556 * @dev: device for which we do this operation 559 * @dev: device for which we do this operation
557 * @freq: OPP frequency to enable 560 * @freq: OPP frequency to enable
558 * 561 *
559 * Enables a provided opp. If the operation is valid, this returns 0, else the 562 * Enables a provided opp. If the operation is valid, this returns 0, else the
560 * corresponding error value. It is meant to be used for users an OPP available 563 * corresponding error value. It is meant to be used for users an OPP available
561 * after being temporarily made unavailable with opp_disable. 564 * after being temporarily made unavailable with dev_pm_opp_disable.
562 * 565 *
563 * Locking: The internal device_opp and opp structures are RCU protected. 566 * Locking: The internal device_opp and opp structures are RCU protected.
564 * Hence this function indirectly uses RCU and mutex locks to keep the 567 * Hence this function indirectly uses RCU and mutex locks to keep the
@@ -566,21 +569,21 @@ unlock:
566 * this function is *NOT* called under RCU protection or in contexts where 569 * this function is *NOT* called under RCU protection or in contexts where
567 * mutex locking or synchronize_rcu() blocking calls cannot be used. 570 * mutex locking or synchronize_rcu() blocking calls cannot be used.
568 */ 571 */
569int opp_enable(struct device *dev, unsigned long freq) 572int dev_pm_opp_enable(struct device *dev, unsigned long freq)
570{ 573{
571 return opp_set_availability(dev, freq, true); 574 return opp_set_availability(dev, freq, true);
572} 575}
573EXPORT_SYMBOL_GPL(opp_enable); 576EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
574 577
575/** 578/**
576 * opp_disable() - Disable a specific OPP 579 * dev_pm_opp_disable() - Disable a specific OPP
577 * @dev: device for which we do this operation 580 * @dev: device for which we do this operation
578 * @freq: OPP frequency to disable 581 * @freq: OPP frequency to disable
579 * 582 *
580 * Disables a provided opp. If the operation is valid, this returns 583 * Disables a provided opp. If the operation is valid, this returns
581 * 0, else the corresponding error value. It is meant to be a temporary 584 * 0, else the corresponding error value. It is meant to be a temporary
582 * control by users to make this OPP not available until the circumstances are 585 * control by users to make this OPP not available until the circumstances are
583 * right to make it available again (with a call to opp_enable). 586 * right to make it available again (with a call to dev_pm_opp_enable).
584 * 587 *
585 * Locking: The internal device_opp and opp structures are RCU protected. 588 * Locking: The internal device_opp and opp structures are RCU protected.
586 * Hence this function indirectly uses RCU and mutex locks to keep the 589 * Hence this function indirectly uses RCU and mutex locks to keep the
@@ -588,15 +591,15 @@ EXPORT_SYMBOL_GPL(opp_enable);
588 * this function is *NOT* called under RCU protection or in contexts where 591 * this function is *NOT* called under RCU protection or in contexts where
589 * mutex locking or synchronize_rcu() blocking calls cannot be used. 592 * mutex locking or synchronize_rcu() blocking calls cannot be used.
590 */ 593 */
591int opp_disable(struct device *dev, unsigned long freq) 594int dev_pm_opp_disable(struct device *dev, unsigned long freq)
592{ 595{
593 return opp_set_availability(dev, freq, false); 596 return opp_set_availability(dev, freq, false);
594} 597}
595EXPORT_SYMBOL_GPL(opp_disable); 598EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
596 599
597#ifdef CONFIG_CPU_FREQ 600#ifdef CONFIG_CPU_FREQ
598/** 601/**
599 * opp_init_cpufreq_table() - create a cpufreq table for a device 602 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
600 * @dev: device for which we do this operation 603 * @dev: device for which we do this operation
601 * @table: Cpufreq table returned back to caller 604 * @table: Cpufreq table returned back to caller
602 * 605 *
@@ -619,11 +622,11 @@ EXPORT_SYMBOL_GPL(opp_disable);
619 * Callers should ensure that this function is *NOT* called under RCU protection 622 * Callers should ensure that this function is *NOT* called under RCU protection
620 * or in contexts where mutex locking cannot be used. 623 * or in contexts where mutex locking cannot be used.
621 */ 624 */
622int opp_init_cpufreq_table(struct device *dev, 625int dev_pm_opp_init_cpufreq_table(struct device *dev,
623 struct cpufreq_frequency_table **table) 626 struct cpufreq_frequency_table **table)
624{ 627{
625 struct device_opp *dev_opp; 628 struct device_opp *dev_opp;
626 struct opp *opp; 629 struct dev_pm_opp *opp;
627 struct cpufreq_frequency_table *freq_table; 630 struct cpufreq_frequency_table *freq_table;
628 int i = 0; 631 int i = 0;
629 632
@@ -639,7 +642,7 @@ int opp_init_cpufreq_table(struct device *dev,
639 } 642 }
640 643
641 freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * 644 freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
642 (opp_get_opp_count(dev) + 1), GFP_KERNEL); 645 (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
643 if (!freq_table) { 646 if (!freq_table) {
644 mutex_unlock(&dev_opp_list_lock); 647 mutex_unlock(&dev_opp_list_lock);
645 dev_warn(dev, "%s: Unable to allocate frequency table\n", 648 dev_warn(dev, "%s: Unable to allocate frequency table\n",
@@ -663,16 +666,16 @@ int opp_init_cpufreq_table(struct device *dev,
663 666
664 return 0; 667 return 0;
665} 668}
666EXPORT_SYMBOL_GPL(opp_init_cpufreq_table); 669EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
667 670
668/** 671/**
669 * opp_free_cpufreq_table() - free the cpufreq table 672 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
670 * @dev: device for which we do this operation 673 * @dev: device for which we do this operation
671 * @table: table to free 674 * @table: table to free
672 * 675 *
673 * Free up the table allocated by opp_init_cpufreq_table 676 * Free up the table allocated by dev_pm_opp_init_cpufreq_table
674 */ 677 */
675void opp_free_cpufreq_table(struct device *dev, 678void dev_pm_opp_free_cpufreq_table(struct device *dev,
676 struct cpufreq_frequency_table **table) 679 struct cpufreq_frequency_table **table)
677{ 680{
678 if (!table) 681 if (!table)
@@ -681,14 +684,14 @@ void opp_free_cpufreq_table(struct device *dev,
681 kfree(*table); 684 kfree(*table);
682 *table = NULL; 685 *table = NULL;
683} 686}
684EXPORT_SYMBOL_GPL(opp_free_cpufreq_table); 687EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
685#endif /* CONFIG_CPU_FREQ */ 688#endif /* CONFIG_CPU_FREQ */
686 689
687/** 690/**
688 * opp_get_notifier() - find notifier_head of the device with opp 691 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
689 * @dev: device pointer used to lookup device OPPs. 692 * @dev: device pointer used to lookup device OPPs.
690 */ 693 */
691struct srcu_notifier_head *opp_get_notifier(struct device *dev) 694struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
692{ 695{
693 struct device_opp *dev_opp = find_device_opp(dev); 696 struct device_opp *dev_opp = find_device_opp(dev);
694 697
@@ -732,7 +735,7 @@ int of_init_opp_table(struct device *dev)
732 unsigned long freq = be32_to_cpup(val++) * 1000; 735 unsigned long freq = be32_to_cpup(val++) * 1000;
733 unsigned long volt = be32_to_cpup(val++); 736 unsigned long volt = be32_to_cpup(val++);
734 737
735 if (opp_add(dev, freq, volt)) { 738 if (dev_pm_opp_add(dev, freq, volt)) {
736 dev_warn(dev, "%s: Failed to add OPP %ld\n", 739 dev_warn(dev, "%s: Failed to add OPP %ld\n",
737 __func__, freq); 740 __func__, freq);
738 continue; 741 continue;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 268a35097578..72e00e66ecc5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -258,7 +258,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
258 * Check if the device's runtime PM status allows it to be suspended. If 258 * Check if the device's runtime PM status allows it to be suspended. If
259 * another idle notification has been started earlier, return immediately. If 259 * another idle notification has been started earlier, return immediately. If
260 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise 260 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
261 * run the ->runtime_idle() callback directly. 261 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
262 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
262 * 263 *
263 * This function must be called under dev->power.lock with interrupts disabled. 264 * This function must be called under dev->power.lock with interrupts disabled.
264 */ 265 */
@@ -331,7 +332,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
331 332
332 out: 333 out:
333 trace_rpm_return_int(dev, _THIS_IP_, retval); 334 trace_rpm_return_int(dev, _THIS_IP_, retval);
334 return retval ? retval : rpm_suspend(dev, rpmflags); 335 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
335} 336}
336 337
337/** 338/**