author    Linus Torvalds <torvalds@linux-foundation.org>  2013-05-16 18:12:34 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-05-16 18:12:34 -0400
commit    d5fe85af85367d5892e4308f596de4e2a5fb9017
tree      8f31a641a0e24c3ecb38dc1396665149ac4b4746
parent    896821657479905b95d5193595b81679155ce199
parent    49a9e4315d40e1ba1d3258ea33f3948254038455
Merge tag 'pm+acpi-3.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management and ACPI fixes from Rafael Wysocki:
- intel_pstate driver fixes and cleanups from Dirk Brandewie and Wei
Yongjun.
- cpufreq fixes related to ARM big.LITTLE support and the cpufreq-cpu0
driver from Viresh Kumar.
- Assorted cpufreq fixes from Srivatsa S Bhat, Borislav Petkov, Wolfram
Sang, Alexander Shiyan, and Nishanth Menon.
- Assorted ACPI fixes from Catalin Marinas, Lan Tianyu, Alex Hung,
Jan-Simon Möller, and Rafael J Wysocki.
- Fix for a kfree() under spinlock in the PM core from Shuah Khan.
- PM documentation updates from Borislav Petkov and Zhang Rui.
* tag 'pm+acpi-3.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (30 commits)
cpufreq: Preserve sysfs files across suspend/resume
ACPI / scan: Fix memory leak on acpi_scan_init_hotplug() error path
PM / hibernate: Correct documentation
PM / Documentation: remove inaccurate suspend/hibernate transition lantency statement
PM: Documentation update for freeze state
cpufreq / intel_pstate: use vzalloc() instead of vmalloc()/memset(0)
cpufreq, ondemand: Remove leftover debug line
PM: Avoid calling kfree() under spinlock in dev_pm_put_subsys_data()
cpufreq / kirkwood: don't check resource with devm_ioremap_resource
cpufreq / intel_pstate: remove #ifdef MODULE compile fence
cpufreq / intel_pstate: Remove idle mode PID
cpufreq / intel_pstate: fix ffmpeg regression
cpufreq / intel_pstate: use lowest requested max performance
cpufreq / intel_pstate: remove idle time and duration from sample and calculations
cpufreq: Fix incorrect dependecies for ARM SA11xx drivers
cpufreq: ARM big LITTLE: Fix Kconfig entries
cpufreq: cpufreq-cpu0: Free parent node for error cases
cpufreq: cpufreq-cpu0: defer probe when regulator is not ready
cpufreq: Issue CPUFREQ_GOV_POLICY_EXIT notifier before dropping policy refcount
cpufreq: governors: Fix CPUFREQ_GOV_POLICY_{INIT|EXIT} notifiers
...
26 files changed, 190 insertions, 195 deletions
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 504dfe4d52eb..a66c9821b5ce 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -268,7 +268,7 @@ situations. | |||
268 | System Power Management Phases | 268 | System Power Management Phases |
269 | ------------------------------ | 269 | ------------------------------ |
270 | Suspending or resuming the system is done in several phases. Different phases | 270 | Suspending or resuming the system is done in several phases. Different phases |
271 | are used for standby or memory sleep states ("suspend-to-RAM") and the | 271 | are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the |
272 | hibernation state ("suspend-to-disk"). Each phase involves executing callbacks | 272 | hibernation state ("suspend-to-disk"). Each phase involves executing callbacks |
273 | for every device before the next phase begins. Not all busses or classes | 273 | for every device before the next phase begins. Not all busses or classes |
274 | support all these callbacks and not all drivers use all the callbacks. The | 274 | support all these callbacks and not all drivers use all the callbacks. The |
@@ -309,7 +309,8 @@ execute the corresponding method from dev->driver->pm instead if there is one. | |||
309 | 309 | ||
310 | Entering System Suspend | 310 | Entering System Suspend |
311 | ----------------------- | 311 | ----------------------- |
312 | When the system goes into the standby or memory sleep state, the phases are: | 312 | When the system goes into the freeze, standby or memory sleep state, |
313 | the phases are: | ||
313 | 314 | ||
314 | prepare, suspend, suspend_late, suspend_noirq. | 315 | prepare, suspend, suspend_late, suspend_noirq. |
315 | 316 | ||
@@ -368,7 +369,7 @@ the devices that were suspended. | |||
368 | 369 | ||
369 | Leaving System Suspend | 370 | Leaving System Suspend |
370 | ---------------------- | 371 | ---------------------- |
371 | When resuming from standby or memory sleep, the phases are: | 372 | When resuming from freeze, standby or memory sleep, the phases are: |
372 | 373 | ||
373 | resume_noirq, resume_early, resume, complete. | 374 | resume_noirq, resume_early, resume, complete. |
374 | 375 | ||
@@ -433,8 +434,8 @@ the system log. | |||
433 | 434 | ||
434 | Entering Hibernation | 435 | Entering Hibernation |
435 | -------------------- | 436 | -------------------- |
436 | Hibernating the system is more complicated than putting it into the standby or | 437 | Hibernating the system is more complicated than putting it into the other |
437 | memory sleep state, because it involves creating and saving a system image. | 438 | sleep states, because it involves creating and saving a system image. |
438 | Therefore there are more phases for hibernation, with a different set of | 439 | Therefore there are more phases for hibernation, with a different set of |
439 | callbacks. These phases always run after tasks have been frozen and memory has | 440 | callbacks. These phases always run after tasks have been frozen and memory has |
440 | been freed. | 441 | been freed. |
@@ -485,8 +486,8 @@ image forms an atomic snapshot of the system state. | |||
485 | 486 | ||
486 | At this point the system image is saved, and the devices then need to be | 487 | At this point the system image is saved, and the devices then need to be |
487 | prepared for the upcoming system shutdown. This is much like suspending them | 488 | prepared for the upcoming system shutdown. This is much like suspending them |
488 | before putting the system into the standby or memory sleep state, and the phases | 489 | before putting the system into the freeze, standby or memory sleep state, |
489 | are similar. | 490 | and the phases are similar. |
490 | 491 | ||
491 | 9. The prepare phase is discussed above. | 492 | 9. The prepare phase is discussed above. |
492 | 493 | ||
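The documentation above walks through the prepare/suspend/suspend_late/suspend_noirq and resume_noirq/resume_early/resume/complete phases. As a rough illustration (not part of this series; the "foo" driver and its callbacks are hypothetical), a driver exposes handlers for those phases through its dev_pm_ops:

/*
 * Illustrative sketch only: a hypothetical driver's dev_pm_ops showing
 * where the phases named in devices.txt hook in.
 */
#include <linux/pm.h>

static int foo_prepare(struct device *dev)       { return 0; } /* before suspend */
static int foo_suspend(struct device *dev)       { return 0; } /* quiesce I/O */
static int foo_suspend_late(struct device *dev)  { return 0; } /* after suspend */
static int foo_suspend_noirq(struct device *dev) { return 0; } /* device IRQs off */
static int foo_resume_noirq(struct device *dev)  { return 0; }
static int foo_resume_early(struct device *dev)  { return 0; }
static int foo_resume(struct device *dev)        { return 0; }
static void foo_complete(struct device *dev)     { }

static const struct dev_pm_ops foo_pm_ops = {
	.prepare       = foo_prepare,
	.suspend       = foo_suspend,
	.suspend_late  = foo_suspend_late,
	.suspend_noirq = foo_suspend_noirq,
	.resume_noirq  = foo_resume_noirq,
	.resume_early  = foo_resume_early,
	.resume        = foo_resume,
	.complete      = foo_complete,
};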
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt
index c537834af005..f1f0f59a7c47 100644
--- a/Documentation/power/interface.txt
+++ b/Documentation/power/interface.txt
@@ -7,8 +7,8 @@ running. The interface exists in /sys/power/ directory (assuming sysfs | |||
7 | is mounted at /sys). | 7 | is mounted at /sys). |
8 | 8 | ||
9 | /sys/power/state controls system power state. Reading from this file | 9 | /sys/power/state controls system power state. Reading from this file |
10 | returns what states are supported, which is hard-coded to 'standby' | 10 | returns what states are supported, which is hard-coded to 'freeze', |
11 | (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk' | 11 | 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk' |
12 | (Suspend-to-Disk). | 12 | (Suspend-to-Disk). |
13 | 13 | ||
14 | Writing to this file one of those strings causes the system to | 14 | Writing to this file one of those strings causes the system to |
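The interface.txt change adds 'freeze' to the strings accepted by /sys/power/state. A minimal userspace sketch of using that interface (illustrative only; it needs root privileges and platform support, and it will actually suspend the machine):

/* Illustrative userspace sketch: request the "freeze" state via sysfs. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/power/state", O_WRONLY);

	if (fd < 0) {
		perror("open /sys/power/state");
		return 1;
	}
	/* "standby", "mem" or "disk" work the same way where supported. */
	if (write(fd, "freeze", 6) != 6)
		perror("write");
	close(fd);
	return 0;
}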
diff --git a/Documentation/power/notifiers.txt b/Documentation/power/notifiers.txt
index c2a4a346c0d9..a81fa254303d 100644
--- a/Documentation/power/notifiers.txt
+++ b/Documentation/power/notifiers.txt
@@ -15,8 +15,10 @@ A suspend/hibernation notifier may be used for this purpose. | |||
15 | The subsystems or drivers having such needs can register suspend notifiers that | 15 | The subsystems or drivers having such needs can register suspend notifiers that |
16 | will be called upon the following events by the PM core: | 16 | will be called upon the following events by the PM core: |
17 | 17 | ||
18 | PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will | 18 | PM_HIBERNATION_PREPARE The system is going to hibernate, tasks will be frozen |
19 | be frozen immediately. | 19 | immediately. This is different from PM_SUSPEND_PREPARE |
20 | below because here we do additional work between notifiers | ||
21 | and drivers freezing. | ||
20 | 22 | ||
21 | PM_POST_HIBERNATION The system memory state has been restored from a | 23 | PM_POST_HIBERNATION The system memory state has been restored from a |
22 | hibernation image or an error occurred during | 24 | hibernation image or an error occurred during |
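The notifiers.txt change clarifies when PM_HIBERNATION_PREPARE fires relative to PM_SUSPEND_PREPARE. For context, a bare-bones sketch of registering such a notifier (the handler and its actions are hypothetical):

#include <linux/notifier.h>
#include <linux/suspend.h>

static int foo_pm_notify(struct notifier_block *nb, unsigned long event,
			 void *unused)
{
	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* e.g. preallocate memory, stop accepting new work */
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		/* undo the above after resume or on error */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_pm_nb = {
	.notifier_call = foo_pm_notify,
};

/* typically called from the driver's init path */
static int __init foo_init(void)
{
	return register_pm_notifier(&foo_pm_nb);
}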
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 4416b28630df..442d43df9b25 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -2,12 +2,26 @@ | |||
2 | System Power Management States | 2 | System Power Management States |
3 | 3 | ||
4 | 4 | ||
5 | The kernel supports three power management states generically, though | 5 | The kernel supports four power management states generically, though |
6 | each is dependent on platform support code to implement the low-level | 6 | one is generic and the other three are dependent on platform support |
7 | details for each state. This file describes each state, what they are | 7 | code to implement the low-level details for each state. |
8 | This file describes each state, what they are | ||
8 | commonly called, what ACPI state they map to, and what string to write | 9 | commonly called, what ACPI state they map to, and what string to write |
9 | to /sys/power/state to enter that state | 10 | to /sys/power/state to enter that state |
10 | 11 | ||
12 | state: Freeze / Low-Power Idle | ||
13 | ACPI state: S0 | ||
14 | String: "freeze" | ||
15 | |||
16 | This state is a generic, pure software, light-weight, low-power state. | ||
17 | It allows more energy to be saved relative to idle by freezing user | ||
18 | space and putting all I/O devices into low-power states (possibly | ||
19 | lower-power than available at run time), such that the processors can | ||
20 | spend more time in their idle states. | ||
21 | This state can be used for platforms without Standby/Suspend-to-RAM | ||
22 | support, or it can be used in addition to Suspend-to-RAM (memory sleep) | ||
23 | to provide reduced resume latency. | ||
24 | |||
11 | 25 | ||
12 | State: Standby / Power-On Suspend | 26 | State: Standby / Power-On Suspend |
13 | ACPI State: S1 | 27 | ACPI State: S1 |
@@ -22,9 +36,6 @@ We try to put devices in a low-power state equivalent to D1, which | |||
22 | also offers low power savings, but low resume latency. Not all devices | 36 | also offers low power savings, but low resume latency. Not all devices |
23 | support D1, and those that don't are left on. | 37 | support D1, and those that don't are left on. |
24 | 38 | ||
25 | A transition from Standby to the On state should take about 1-2 | ||
26 | seconds. | ||
27 | |||
28 | 39 | ||
29 | State: Suspend-to-RAM | 40 | State: Suspend-to-RAM |
30 | ACPI State: S3 | 41 | ACPI State: S3 |
@@ -42,9 +53,6 @@ transition back to the On state. | |||
42 | For at least ACPI, STR requires some minimal boot-strapping code to | 53 | For at least ACPI, STR requires some minimal boot-strapping code to |
43 | resume the system from STR. This may be true on other platforms. | 54 | resume the system from STR. This may be true on other platforms. |
44 | 55 | ||
45 | A transition from Suspend-to-RAM to the On state should take about | ||
46 | 3-5 seconds. | ||
47 | |||
48 | 56 | ||
49 | State: Suspend-to-disk | 57 | State: Suspend-to-disk |
50 | ACPI State: S4 | 58 | ACPI State: S4 |
@@ -74,7 +82,3 @@ low-power state (like ACPI S4), or it may simply power down. Powering | |||
74 | down offers greater savings, and allows this mechanism to work on any | 82 | down offers greater savings, and allows this mechanism to work on any |
75 | system. However, entering a real low-power state allows the user to | 83 | system. However, entering a real low-power state allows the user to |
76 | trigger wake up events (e.g. pressing a key or opening a laptop lid). | 84 | trigger wake up events (e.g. pressing a key or opening a laptop lid). |
77 | |||
78 | A transition from Suspend-to-Disk to the On state should take about 30 | ||
79 | seconds, though it's typically a bit more with the current | ||
80 | implementation. | ||
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 00d2efd674df..4f4e741d34b2 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/dmi.h> | ||
32 | #include <linux/delay.h> | ||
31 | #ifdef CONFIG_ACPI_PROCFS_POWER | 33 | #ifdef CONFIG_ACPI_PROCFS_POWER |
32 | #include <linux/proc_fs.h> | 34 | #include <linux/proc_fs.h> |
33 | #include <linux/seq_file.h> | 35 | #include <linux/seq_file.h> |
@@ -74,6 +76,8 @@ static int acpi_ac_resume(struct device *dev); | |||
74 | #endif | 76 | #endif |
75 | static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); | 77 | static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); |
76 | 78 | ||
79 | static int ac_sleep_before_get_state_ms; | ||
80 | |||
77 | static struct acpi_driver acpi_ac_driver = { | 81 | static struct acpi_driver acpi_ac_driver = { |
78 | .name = "ac", | 82 | .name = "ac", |
79 | .class = ACPI_AC_CLASS, | 83 | .class = ACPI_AC_CLASS, |
@@ -252,6 +256,16 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) | |||
252 | case ACPI_AC_NOTIFY_STATUS: | 256 | case ACPI_AC_NOTIFY_STATUS: |
253 | case ACPI_NOTIFY_BUS_CHECK: | 257 | case ACPI_NOTIFY_BUS_CHECK: |
254 | case ACPI_NOTIFY_DEVICE_CHECK: | 258 | case ACPI_NOTIFY_DEVICE_CHECK: |
259 | /* | ||
260 | * A buggy BIOS may notify AC first and then sleep for | ||
261 | * a specific time before doing actual operations in the | ||
262 | * EC event handler (_Qxx). This will cause the AC state | ||
263 | * reported by the ACPI event to be incorrect, so wait for a | ||
264 | * specific time for the EC event handler to make progress. | ||
265 | */ | ||
266 | if (ac_sleep_before_get_state_ms > 0) | ||
267 | msleep(ac_sleep_before_get_state_ms); | ||
268 | |||
255 | acpi_ac_get_state(ac); | 269 | acpi_ac_get_state(ac); |
256 | acpi_bus_generate_proc_event(device, event, (u32) ac->state); | 270 | acpi_bus_generate_proc_event(device, event, (u32) ac->state); |
257 | acpi_bus_generate_netlink_event(device->pnp.device_class, | 271 | acpi_bus_generate_netlink_event(device->pnp.device_class, |
@@ -264,6 +278,24 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) | |||
264 | return; | 278 | return; |
265 | } | 279 | } |
266 | 280 | ||
281 | static int thinkpad_e530_quirk(const struct dmi_system_id *d) | ||
282 | { | ||
283 | ac_sleep_before_get_state_ms = 1000; | ||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | static struct dmi_system_id ac_dmi_table[] = { | ||
288 | { | ||
289 | .callback = thinkpad_e530_quirk, | ||
290 | .ident = "thinkpad e530", | ||
291 | .matches = { | ||
292 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
293 | DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"), | ||
294 | }, | ||
295 | }, | ||
296 | {}, | ||
297 | }; | ||
298 | |||
267 | static int acpi_ac_add(struct acpi_device *device) | 299 | static int acpi_ac_add(struct acpi_device *device) |
268 | { | 300 | { |
269 | int result = 0; | 301 | int result = 0; |
@@ -312,6 +344,7 @@ static int acpi_ac_add(struct acpi_device *device) | |||
312 | kfree(ac); | 344 | kfree(ac); |
313 | } | 345 | } |
314 | 346 | ||
347 | dmi_check_system(ac_dmi_table); | ||
315 | return result; | 348 | return result; |
316 | } | 349 | } |
317 | 350 | ||
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d45b2871d33b..edc00818c803 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) | |||
223 | static int ec_poll(struct acpi_ec *ec) | 223 | static int ec_poll(struct acpi_ec *ec) |
224 | { | 224 | { |
225 | unsigned long flags; | 225 | unsigned long flags; |
226 | int repeat = 2; /* number of command restarts */ | 226 | int repeat = 5; /* number of command restarts */ |
227 | while (repeat--) { | 227 | while (repeat--) { |
228 | unsigned long delay = jiffies + | 228 | unsigned long delay = jiffies + |
229 | msecs_to_jiffies(ec_delay); | 229 | msecs_to_jiffies(ec_delay); |
@@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec) | |||
241 | } | 241 | } |
242 | advance_transaction(ec, acpi_ec_read_status(ec)); | 242 | advance_transaction(ec, acpi_ec_read_status(ec)); |
243 | } while (time_before(jiffies, delay)); | 243 | } while (time_before(jiffies, delay)); |
244 | if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) | ||
245 | break; | ||
246 | pr_debug(PREFIX "controller reset, restart transaction\n"); | 244 | pr_debug(PREFIX "controller reset, restart transaction\n"); |
247 | spin_lock_irqsave(&ec->lock, flags); | 245 | spin_lock_irqsave(&ec->lock, flags); |
248 | start_transaction(ec); | 246 | start_transaction(ec); |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index bec717ffd25f..c266cdc11784 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -95,9 +95,6 @@ static const struct acpi_device_id processor_device_ids[] = { | |||
95 | }; | 95 | }; |
96 | MODULE_DEVICE_TABLE(acpi, processor_device_ids); | 96 | MODULE_DEVICE_TABLE(acpi, processor_device_ids); |
97 | 97 | ||
98 | static SIMPLE_DEV_PM_OPS(acpi_processor_pm, | ||
99 | acpi_processor_suspend, acpi_processor_resume); | ||
100 | |||
101 | static struct acpi_driver acpi_processor_driver = { | 98 | static struct acpi_driver acpi_processor_driver = { |
102 | .name = "processor", | 99 | .name = "processor", |
103 | .class = ACPI_PROCESSOR_CLASS, | 100 | .class = ACPI_PROCESSOR_CLASS, |
@@ -107,7 +104,6 @@ static struct acpi_driver acpi_processor_driver = { | |||
107 | .remove = acpi_processor_remove, | 104 | .remove = acpi_processor_remove, |
108 | .notify = acpi_processor_notify, | 105 | .notify = acpi_processor_notify, |
109 | }, | 106 | }, |
110 | .drv.pm = &acpi_processor_pm, | ||
111 | }; | 107 | }; |
112 | 108 | ||
113 | #define INSTALL_NOTIFY_HANDLER 1 | 109 | #define INSTALL_NOTIFY_HANDLER 1 |
@@ -934,6 +930,8 @@ static int __init acpi_processor_init(void) | |||
934 | if (result < 0) | 930 | if (result < 0) |
935 | return result; | 931 | return result; |
936 | 932 | ||
933 | acpi_processor_syscore_init(); | ||
934 | |||
937 | acpi_processor_install_hotplug_notify(); | 935 | acpi_processor_install_hotplug_notify(); |
938 | 936 | ||
939 | acpi_thermal_cpufreq_init(); | 937 | acpi_thermal_cpufreq_init(); |
@@ -956,6 +954,8 @@ static void __exit acpi_processor_exit(void) | |||
956 | 954 | ||
957 | acpi_processor_uninstall_hotplug_notify(); | 955 | acpi_processor_uninstall_hotplug_notify(); |
958 | 956 | ||
957 | acpi_processor_syscore_exit(); | ||
958 | |||
959 | acpi_bus_unregister_driver(&acpi_processor_driver); | 959 | acpi_bus_unregister_driver(&acpi_processor_driver); |
960 | 960 | ||
961 | return; | 961 | return; |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f0df2c9434d2..eb133c77aadb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/sched.h> /* need_resched() */ | 34 | #include <linux/sched.h> /* need_resched() */ |
35 | #include <linux/clockchips.h> | 35 | #include <linux/clockchips.h> |
36 | #include <linux/cpuidle.h> | 36 | #include <linux/cpuidle.h> |
37 | #include <linux/syscore_ops.h> | ||
37 | 38 | ||
38 | /* | 39 | /* |
39 | * Include the apic definitions for x86 to have the APIC timer related defines | 40 | * Include the apic definitions for x86 to have the APIC timer related defines |
@@ -210,33 +211,41 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr, | |||
210 | 211 | ||
211 | #endif | 212 | #endif |
212 | 213 | ||
214 | #ifdef CONFIG_PM_SLEEP | ||
213 | static u32 saved_bm_rld; | 215 | static u32 saved_bm_rld; |
214 | 216 | ||
215 | static void acpi_idle_bm_rld_save(void) | 217 | int acpi_processor_suspend(void) |
216 | { | 218 | { |
217 | acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); | 219 | acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); |
220 | return 0; | ||
218 | } | 221 | } |
219 | static void acpi_idle_bm_rld_restore(void) | 222 | |
223 | void acpi_processor_resume(void) | ||
220 | { | 224 | { |
221 | u32 resumed_bm_rld; | 225 | u32 resumed_bm_rld; |
222 | 226 | ||
223 | acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); | 227 | acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); |
228 | if (resumed_bm_rld == saved_bm_rld) | ||
229 | return; | ||
224 | 230 | ||
225 | if (resumed_bm_rld != saved_bm_rld) | 231 | acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); |
226 | acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); | ||
227 | } | 232 | } |
228 | 233 | ||
229 | int acpi_processor_suspend(struct device *dev) | 234 | static struct syscore_ops acpi_processor_syscore_ops = { |
235 | .suspend = acpi_processor_suspend, | ||
236 | .resume = acpi_processor_resume, | ||
237 | }; | ||
238 | |||
239 | void acpi_processor_syscore_init(void) | ||
230 | { | 240 | { |
231 | acpi_idle_bm_rld_save(); | 241 | register_syscore_ops(&acpi_processor_syscore_ops); |
232 | return 0; | ||
233 | } | 242 | } |
234 | 243 | ||
235 | int acpi_processor_resume(struct device *dev) | 244 | void acpi_processor_syscore_exit(void) |
236 | { | 245 | { |
237 | acpi_idle_bm_rld_restore(); | 246 | unregister_syscore_ops(&acpi_processor_syscore_ops); |
238 | return 0; | ||
239 | } | 247 | } |
248 | #endif /* CONFIG_PM_SLEEP */ | ||
240 | 249 | ||
241 | #if defined(CONFIG_X86) | 250 | #if defined(CONFIG_X86) |
242 | static void tsc_check_state(int state) | 251 | static void tsc_check_state(int state) |
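The processor_idle.c change moves the BM_RLD save/restore from dev_pm_ops callbacks to syscore operations, which run very late in suspend with interrupts disabled on one CPU and therefore must not sleep. The general shape of that interface, with hypothetical names:

#include <linux/syscore_ops.h>

/* Hypothetical example of the syscore pattern used above. */
static int foo_syscore_suspend(void)
{
	/* save a small amount of global hardware state; must not sleep */
	return 0;
}

static void foo_syscore_resume(void)
{
	/* restore the state saved in foo_syscore_suspend() */
}

static struct syscore_ops foo_syscore_ops = {
	.suspend = foo_syscore_suspend,
	.resume  = foo_syscore_resume,
};

static int __init foo_init(void)
{
	register_syscore_ops(&foo_syscore_ops);
	return 0;
}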
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fe158fd4f1df..c1bc608339a6 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1785,7 +1785,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) | |||
1785 | acpi_set_pnp_ids(handle, &pnp, type); | 1785 | acpi_set_pnp_ids(handle, &pnp, type); |
1786 | 1786 | ||
1787 | if (!pnp.type.hardware_id) | 1787 | if (!pnp.type.hardware_id) |
1788 | return; | 1788 | goto out; |
1789 | 1789 | ||
1790 | /* | 1790 | /* |
1791 | * This relies on the fact that acpi_install_notify_handler() will not | 1791 | * This relies on the fact that acpi_install_notify_handler() will not |
@@ -1800,6 +1800,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) | |||
1800 | } | 1800 | } |
1801 | } | 1801 | } |
1802 | 1802 | ||
1803 | out: | ||
1803 | acpi_free_pnp_ids(&pnp); | 1804 | acpi_free_pnp_ids(&pnp); |
1804 | } | 1805 | } |
1805 | 1806 | ||
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index c3932d0876e0..5b32e15a65ce 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -456,6 +456,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
456 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), | 456 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), |
457 | }, | 457 | }, |
458 | }, | 458 | }, |
459 | { | ||
460 | .callback = video_ignore_initial_backlight, | ||
461 | .ident = "HP 1000 Notebook PC", | ||
462 | .matches = { | ||
463 | DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), | ||
464 | DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"), | ||
465 | }, | ||
466 | }, | ||
459 | {} | 467 | {} |
460 | }; | 468 | }; |
461 | 469 | ||
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 39c32529b833..5da914041305 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -61,24 +61,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); | |||
61 | int dev_pm_put_subsys_data(struct device *dev) | 61 | int dev_pm_put_subsys_data(struct device *dev) |
62 | { | 62 | { |
63 | struct pm_subsys_data *psd; | 63 | struct pm_subsys_data *psd; |
64 | int ret = 0; | 64 | int ret = 1; |
65 | 65 | ||
66 | spin_lock_irq(&dev->power.lock); | 66 | spin_lock_irq(&dev->power.lock); |
67 | 67 | ||
68 | psd = dev_to_psd(dev); | 68 | psd = dev_to_psd(dev); |
69 | if (!psd) { | 69 | if (!psd) |
70 | ret = -EINVAL; | ||
71 | goto out; | 70 | goto out; |
72 | } | ||
73 | 71 | ||
74 | if (--psd->refcount == 0) { | 72 | if (--psd->refcount == 0) { |
75 | dev->power.subsys_data = NULL; | 73 | dev->power.subsys_data = NULL; |
76 | kfree(psd); | 74 | } else { |
77 | ret = 1; | 75 | psd = NULL; |
76 | ret = 0; | ||
78 | } | 77 | } |
79 | 78 | ||
80 | out: | 79 | out: |
81 | spin_unlock_irq(&dev->power.lock); | 80 | spin_unlock_irq(&dev->power.lock); |
81 | kfree(psd); | ||
82 | 82 | ||
83 | return ret; | 83 | return ret; |
84 | } | 84 | } |
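The common.c fix detaches the pm_subsys_data pointer while the lock is held and calls kfree() only after the spinlock has been released, keeping the critical section short. A stripped-down sketch of the same pattern with made-up types:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_data { int refcount; };
struct foo_dev  { spinlock_t lock; struct foo_data *data; };

/* Hypothetical illustration of "detach under the lock, free after unlocking". */
static void foo_release(struct foo_dev *fdev)
{
	struct foo_data *data = NULL;

	spin_lock_irq(&fdev->lock);
	if (fdev->data && --fdev->data->refcount == 0) {
		data = fdev->data;	/* detach while still locked */
		fdev->data = NULL;
	}
	spin_unlock_irq(&fdev->lock);

	kfree(data);			/* kfree(NULL) is a no-op */
}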
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a1488f58f6ca..534fcb825153 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS | |||
47 | 47 | ||
48 | choice | 48 | choice |
49 | prompt "Default CPUFreq governor" | 49 | prompt "Default CPUFreq governor" |
50 | default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110 | 50 | default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ |
51 | default CPU_FREQ_DEFAULT_GOV_PERFORMANCE | 51 | default CPU_FREQ_DEFAULT_GOV_PERFORMANCE |
52 | help | 52 | help |
53 | This option sets which CPUFreq governor shall be loaded at | 53 | This option sets which CPUFreq governor shall be loaded at |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index f3af18b9acc5..6e57543fe0b9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,16 +3,17 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | config ARM_BIG_LITTLE_CPUFREQ | 5 | config ARM_BIG_LITTLE_CPUFREQ |
6 | tristate | 6 | tristate "Generic ARM big LITTLE CPUfreq driver" |
7 | depends on ARM_CPU_TOPOLOGY | 7 | depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK |
8 | help | ||
9 | This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. | ||
8 | 10 | ||
9 | config ARM_DT_BL_CPUFREQ | 11 | config ARM_DT_BL_CPUFREQ |
10 | tristate "Generic ARM big LITTLE CPUfreq driver probed via DT" | 12 | tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver" |
11 | select ARM_BIG_LITTLE_CPUFREQ | 13 | depends on ARM_BIG_LITTLE_CPUFREQ && OF |
12 | depends on OF && HAVE_CLK | ||
13 | help | 14 | help |
14 | This enables the Generic CPUfreq driver for ARM big.LITTLE platform. | 15 | This enables probing via DT for Generic CPUfreq driver for ARM |
15 | This gets frequency tables from DT. | 16 | big.LITTLE platform. This gets frequency tables from DT. |
16 | 17 | ||
17 | config ARM_EXYNOS_CPUFREQ | 18 | config ARM_EXYNOS_CPUFREQ |
18 | bool "SAMSUNG EXYNOS SoCs" | 19 | bool "SAMSUNG EXYNOS SoCs" |
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index dbdf677d2f36..5d7f53fcd6f5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS]; | |||
40 | static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; | 40 | static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; |
41 | static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; | 41 | static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; |
42 | 42 | ||
43 | static int cpu_to_cluster(int cpu) | ||
44 | { | ||
45 | return topology_physical_package_id(cpu); | ||
46 | } | ||
47 | |||
48 | static unsigned int bL_cpufreq_get(unsigned int cpu) | 43 | static unsigned int bL_cpufreq_get(unsigned int cpu) |
49 | { | 44 | { |
50 | u32 cur_cluster = cpu_to_cluster(cpu); | 45 | u32 cur_cluster = cpu_to_cluster(cpu); |
@@ -192,7 +187,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) | |||
192 | 187 | ||
193 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); | 188 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); |
194 | 189 | ||
195 | dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu); | 190 | dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); |
196 | return 0; | 191 | return 0; |
197 | } | 192 | } |
198 | 193 | ||
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 70f18fc12d4a..79b2ce17884d 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops { | |||
34 | int (*init_opp_table)(struct device *cpu_dev); | 34 | int (*init_opp_table)(struct device *cpu_dev); |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static inline int cpu_to_cluster(int cpu) | ||
38 | { | ||
39 | return topology_physical_package_id(cpu); | ||
40 | } | ||
41 | |||
37 | int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); | 42 | int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); |
38 | void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); | 43 | void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); |
39 | 44 | ||
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 44be3115375c..173ed059d95f 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -66,8 +66,8 @@ static int dt_get_transition_latency(struct device *cpu_dev) | |||
66 | 66 | ||
67 | parent = of_find_node_by_path("/cpus"); | 67 | parent = of_find_node_by_path("/cpus"); |
68 | if (!parent) { | 68 | if (!parent) { |
69 | pr_err("failed to find OF /cpus\n"); | 69 | pr_info("Failed to find OF /cpus. Use CPUFREQ_ETERNAL transition latency\n"); |
70 | return -ENOENT; | 70 | return CPUFREQ_ETERNAL; |
71 | } | 71 | } |
72 | 72 | ||
73 | for_each_child_of_node(parent, np) { | 73 | for_each_child_of_node(parent, np) { |
@@ -78,10 +78,11 @@ static int dt_get_transition_latency(struct device *cpu_dev) | |||
78 | of_node_put(np); | 78 | of_node_put(np); |
79 | of_node_put(parent); | 79 | of_node_put(parent); |
80 | 80 | ||
81 | return 0; | 81 | return transition_latency; |
82 | } | 82 | } |
83 | 83 | ||
84 | return -ENODEV; | 84 | pr_info("clock-latency isn't found, use CPUFREQ_ETERNAL transition latency\n"); |
85 | return CPUFREQ_ETERNAL; | ||
85 | } | 86 | } |
86 | 87 | ||
87 | static struct cpufreq_arm_bL_ops dt_bL_ops = { | 88 | static struct cpufreq_arm_bL_ops dt_bL_ops = { |
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 3ab8294eab04..a64eb8b70444 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -189,12 +189,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
189 | 189 | ||
190 | if (!np) { | 190 | if (!np) { |
191 | pr_err("failed to find cpu0 node\n"); | 191 | pr_err("failed to find cpu0 node\n"); |
192 | return -ENOENT; | 192 | ret = -ENOENT; |
193 | goto out_put_parent; | ||
193 | } | 194 | } |
194 | 195 | ||
195 | cpu_dev = &pdev->dev; | 196 | cpu_dev = &pdev->dev; |
196 | cpu_dev->of_node = np; | 197 | cpu_dev->of_node = np; |
197 | 198 | ||
199 | cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); | ||
200 | if (IS_ERR(cpu_reg)) { | ||
201 | /* | ||
202 | * If cpu0 regulator supply node is present, but regulator is | ||
203 | * not yet registered, we should try defering probe. | ||
204 | */ | ||
205 | if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { | ||
206 | dev_err(cpu_dev, "cpu0 regulator not ready, retry\n"); | ||
207 | ret = -EPROBE_DEFER; | ||
208 | goto out_put_node; | ||
209 | } | ||
210 | pr_warn("failed to get cpu0 regulator: %ld\n", | ||
211 | PTR_ERR(cpu_reg)); | ||
212 | cpu_reg = NULL; | ||
213 | } | ||
214 | |||
198 | cpu_clk = devm_clk_get(cpu_dev, NULL); | 215 | cpu_clk = devm_clk_get(cpu_dev, NULL); |
199 | if (IS_ERR(cpu_clk)) { | 216 | if (IS_ERR(cpu_clk)) { |
200 | ret = PTR_ERR(cpu_clk); | 217 | ret = PTR_ERR(cpu_clk); |
@@ -202,12 +219,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
202 | goto out_put_node; | 219 | goto out_put_node; |
203 | } | 220 | } |
204 | 221 | ||
205 | cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); | ||
206 | if (IS_ERR(cpu_reg)) { | ||
207 | pr_warn("failed to get cpu0 regulator\n"); | ||
208 | cpu_reg = NULL; | ||
209 | } | ||
210 | |||
211 | ret = of_init_opp_table(cpu_dev); | 222 | ret = of_init_opp_table(cpu_dev); |
212 | if (ret) { | 223 | if (ret) { |
213 | pr_err("failed to init OPP table: %d\n", ret); | 224 | pr_err("failed to init OPP table: %d\n", ret); |
@@ -264,6 +275,8 @@ out_free_table: | |||
264 | opp_free_cpufreq_table(cpu_dev, &freq_table); | 275 | opp_free_cpufreq_table(cpu_dev, &freq_table); |
265 | out_put_node: | 276 | out_put_node: |
266 | of_node_put(np); | 277 | of_node_put(np); |
278 | out_put_parent: | ||
279 | of_node_put(parent); | ||
267 | return ret; | 280 | return ret; |
268 | } | 281 | } |
269 | 282 | ||
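The cpufreq-cpu0 change requests the cpu0 regulator before the clock and turns -EPROBE_DEFER into a deferred probe instead of silently continuing without the supply. A condensed sketch of that idiom (the "vdd" supply name and "foo" driver are hypothetical):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int foo_probe(struct platform_device *pdev)
{
	struct regulator *reg;

	reg = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(reg)) {
		/*
		 * The supply exists but its driver is not bound yet:
		 * ask the driver core to retry this probe later.
		 */
		if (PTR_ERR(reg) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		/* Otherwise treat the regulator as optional. */
		reg = NULL;
	}
	/* ... continue with clocks, OPP table, cpufreq registration ... */
	return 0;
}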
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1b8a48eaf90f..4b8c7f297d74 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif | |||
1075 | __func__, cpu_dev->id, cpu); | 1075 | __func__, cpu_dev->id, cpu); |
1076 | } | 1076 | } |
1077 | 1077 | ||
1078 | if ((cpus == 1) && (cpufreq_driver->target)) | ||
1079 | __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); | ||
1080 | |||
1078 | pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); | 1081 | pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); |
1079 | cpufreq_cpu_put(data); | 1082 | cpufreq_cpu_put(data); |
1080 | 1083 | ||
1081 | /* If cpu is last user of policy, free policy */ | 1084 | /* If cpu is last user of policy, free policy */ |
1082 | if (cpus == 1) { | 1085 | if (cpus == 1) { |
1083 | if (cpufreq_driver->target) | ||
1084 | __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); | ||
1085 | |||
1086 | lock_policy_rwsem_read(cpu); | 1086 | lock_policy_rwsem_read(cpu); |
1087 | kobj = &data->kobj; | 1087 | kobj = &data->kobj; |
1088 | cmp = &data->kobj_unregister; | 1088 | cmp = &data->kobj_unregister; |
@@ -1832,15 +1832,13 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, | |||
1832 | if (dev) { | 1832 | if (dev) { |
1833 | switch (action) { | 1833 | switch (action) { |
1834 | case CPU_ONLINE: | 1834 | case CPU_ONLINE: |
1835 | case CPU_ONLINE_FROZEN: | ||
1836 | cpufreq_add_dev(dev, NULL); | 1835 | cpufreq_add_dev(dev, NULL); |
1837 | break; | 1836 | break; |
1838 | case CPU_DOWN_PREPARE: | 1837 | case CPU_DOWN_PREPARE: |
1839 | case CPU_DOWN_PREPARE_FROZEN: | 1838 | case CPU_UP_CANCELED_FROZEN: |
1840 | __cpufreq_remove_dev(dev, NULL); | 1839 | __cpufreq_remove_dev(dev, NULL); |
1841 | break; | 1840 | break; |
1842 | case CPU_DOWN_FAILED: | 1841 | case CPU_DOWN_FAILED: |
1843 | case CPU_DOWN_FAILED_FROZEN: | ||
1844 | cpufreq_add_dev(dev, NULL); | 1842 | cpufreq_add_dev(dev, NULL); |
1845 | break; | 1843 | break; |
1846 | } | 1844 | } |
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 443442df113b..5af40ad82d23 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -255,6 +255,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
255 | if (have_governor_per_policy()) { | 255 | if (have_governor_per_policy()) { |
256 | WARN_ON(dbs_data); | 256 | WARN_ON(dbs_data); |
257 | } else if (dbs_data) { | 257 | } else if (dbs_data) { |
258 | dbs_data->usage_count++; | ||
258 | policy->governor_data = dbs_data; | 259 | policy->governor_data = dbs_data; |
259 | return 0; | 260 | return 0; |
260 | } | 261 | } |
@@ -266,6 +267,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
266 | } | 267 | } |
267 | 268 | ||
268 | dbs_data->cdata = cdata; | 269 | dbs_data->cdata = cdata; |
270 | dbs_data->usage_count = 1; | ||
269 | rc = cdata->init(dbs_data); | 271 | rc = cdata->init(dbs_data); |
270 | if (rc) { | 272 | if (rc) { |
271 | pr_err("%s: POLICY_INIT: init() failed\n", __func__); | 273 | pr_err("%s: POLICY_INIT: init() failed\n", __func__); |
@@ -294,7 +296,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
294 | set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, | 296 | set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, |
295 | latency * LATENCY_MULTIPLIER)); | 297 | latency * LATENCY_MULTIPLIER)); |
296 | 298 | ||
297 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { | 299 | if ((cdata->governor == GOV_CONSERVATIVE) && |
300 | (!policy->governor->initialized)) { | ||
298 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; | 301 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; |
299 | 302 | ||
300 | cpufreq_register_notifier(cs_ops->notifier_block, | 303 | cpufreq_register_notifier(cs_ops->notifier_block, |
@@ -306,12 +309,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
306 | 309 | ||
307 | return 0; | 310 | return 0; |
308 | case CPUFREQ_GOV_POLICY_EXIT: | 311 | case CPUFREQ_GOV_POLICY_EXIT: |
309 | if ((policy->governor->initialized == 1) || | 312 | if (!--dbs_data->usage_count) { |
310 | have_governor_per_policy()) { | ||
311 | sysfs_remove_group(get_governor_parent_kobj(policy), | 313 | sysfs_remove_group(get_governor_parent_kobj(policy), |
312 | get_sysfs_attr(dbs_data)); | 314 | get_sysfs_attr(dbs_data)); |
313 | 315 | ||
314 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { | 316 | if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && |
317 | (policy->governor->initialized == 1)) { | ||
315 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; | 318 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; |
316 | 319 | ||
317 | cpufreq_unregister_notifier(cs_ops->notifier_block, | 320 | cpufreq_unregister_notifier(cs_ops->notifier_block, |
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 8ac33538d0bd..e16a96130cb3 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -211,6 +211,7 @@ struct common_dbs_data { | |||
211 | struct dbs_data { | 211 | struct dbs_data { |
212 | struct common_dbs_data *cdata; | 212 | struct common_dbs_data *cdata; |
213 | unsigned int min_sampling_rate; | 213 | unsigned int min_sampling_rate; |
214 | int usage_count; | ||
214 | void *tuners; | 215 | void *tuners; |
215 | 216 | ||
216 | /* dbs_mutex protects dbs_enable in governor start/stop */ | 217 | /* dbs_mutex protects dbs_enable in governor start/stop */ |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index b0ffef96bf77..4b9bb5def6f1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -547,7 +547,6 @@ static int od_init(struct dbs_data *dbs_data) | |||
547 | tuners->io_is_busy = should_io_be_busy(); | 547 | tuners->io_is_busy = should_io_be_busy(); |
548 | 548 | ||
549 | dbs_data->tuners = tuners; | 549 | dbs_data->tuners = tuners; |
550 | pr_info("%s: tuners %p\n", __func__, tuners); | ||
551 | mutex_init(&dbs_data->mutex); | 550 | mutex_init(&dbs_data->mutex); |
552 | return 0; | 551 | return 0; |
553 | } | 552 | } |
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index bfd6273fd873..fb65decffa28 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -349,15 +349,16 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, | |||
349 | 349 | ||
350 | switch (action) { | 350 | switch (action) { |
351 | case CPU_ONLINE: | 351 | case CPU_ONLINE: |
352 | case CPU_ONLINE_FROZEN: | ||
353 | cpufreq_update_policy(cpu); | 352 | cpufreq_update_policy(cpu); |
354 | break; | 353 | break; |
355 | case CPU_DOWN_PREPARE: | 354 | case CPU_DOWN_PREPARE: |
356 | case CPU_DOWN_PREPARE_FROZEN: | ||
357 | cpufreq_stats_free_sysfs(cpu); | 355 | cpufreq_stats_free_sysfs(cpu); |
358 | break; | 356 | break; |
359 | case CPU_DEAD: | 357 | case CPU_DEAD: |
360 | case CPU_DEAD_FROZEN: | 358 | cpufreq_stats_free_table(cpu); |
359 | break; | ||
360 | case CPU_UP_CANCELED_FROZEN: | ||
361 | cpufreq_stats_free_sysfs(cpu); | ||
361 | cpufreq_stats_free_table(cpu); | 362 | cpufreq_stats_free_table(cpu); |
362 | break; | 363 | break; |
363 | } | 364 | } |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cc3a8e6c92be..9c36ace92a39 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y) | |||
48 | } | 48 | } |
49 | 49 | ||
50 | struct sample { | 50 | struct sample { |
51 | ktime_t start_time; | ||
52 | ktime_t end_time; | ||
53 | int core_pct_busy; | 51 | int core_pct_busy; |
54 | int pstate_pct_busy; | ||
55 | u64 duration_us; | ||
56 | u64 idletime_us; | ||
57 | u64 aperf; | 52 | u64 aperf; |
58 | u64 mperf; | 53 | u64 mperf; |
59 | int freq; | 54 | int freq; |
@@ -86,13 +81,9 @@ struct cpudata { | |||
86 | struct pstate_adjust_policy *pstate_policy; | 81 | struct pstate_adjust_policy *pstate_policy; |
87 | struct pstate_data pstate; | 82 | struct pstate_data pstate; |
88 | struct _pid pid; | 83 | struct _pid pid; |
89 | struct _pid idle_pid; | ||
90 | 84 | ||
91 | int min_pstate_count; | 85 | int min_pstate_count; |
92 | int idle_mode; | ||
93 | 86 | ||
94 | ktime_t prev_sample; | ||
95 | u64 prev_idle_time_us; | ||
96 | u64 prev_aperf; | 87 | u64 prev_aperf; |
97 | u64 prev_mperf; | 88 | u64 prev_mperf; |
98 | int sample_ptr; | 89 | int sample_ptr; |
@@ -124,6 +115,8 @@ struct perf_limits { | |||
124 | int min_perf_pct; | 115 | int min_perf_pct; |
125 | int32_t max_perf; | 116 | int32_t max_perf; |
126 | int32_t min_perf; | 117 | int32_t min_perf; |
118 | int max_policy_pct; | ||
119 | int max_sysfs_pct; | ||
127 | }; | 120 | }; |
128 | 121 | ||
129 | static struct perf_limits limits = { | 122 | static struct perf_limits limits = { |
@@ -132,6 +125,8 @@ static struct perf_limits limits = { | |||
132 | .max_perf = int_tofp(1), | 125 | .max_perf = int_tofp(1), |
133 | .min_perf_pct = 0, | 126 | .min_perf_pct = 0, |
134 | .min_perf = 0, | 127 | .min_perf = 0, |
128 | .max_policy_pct = 100, | ||
129 | .max_sysfs_pct = 100, | ||
135 | }; | 130 | }; |
136 | 131 | ||
137 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, | 132 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, |
@@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) | |||
202 | 0); | 197 | 0); |
203 | } | 198 | } |
204 | 199 | ||
205 | static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu) | ||
206 | { | ||
207 | pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct); | ||
208 | pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct); | ||
209 | pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct); | ||
210 | |||
211 | pid_reset(&cpu->idle_pid, | ||
212 | 75, | ||
213 | 50, | ||
214 | cpu->pstate_policy->deadband, | ||
215 | 0); | ||
216 | } | ||
217 | |||
218 | static inline void intel_pstate_reset_all_pid(void) | 200 | static inline void intel_pstate_reset_all_pid(void) |
219 | { | 201 | { |
220 | unsigned int cpu; | 202 | unsigned int cpu; |
@@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, | |||
302 | if (ret != 1) | 284 | if (ret != 1) |
303 | return -EINVAL; | 285 | return -EINVAL; |
304 | 286 | ||
305 | limits.max_perf_pct = clamp_t(int, input, 0 , 100); | 287 | limits.max_sysfs_pct = clamp_t(int, input, 0 , 100); |
288 | limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); | ||
306 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); | 289 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); |
307 | return count; | 290 | return count; |
308 | } | 291 | } |
@@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) | |||
408 | if (pstate == cpu->pstate.current_pstate) | 391 | if (pstate == cpu->pstate.current_pstate) |
409 | return; | 392 | return; |
410 | 393 | ||
411 | #ifndef MODULE | ||
412 | trace_cpu_frequency(pstate * 100000, cpu->cpu); | 394 | trace_cpu_frequency(pstate * 100000, cpu->cpu); |
413 | #endif | 395 | |
414 | cpu->pstate.current_pstate = pstate; | 396 | cpu->pstate.current_pstate = pstate; |
415 | wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); | 397 | wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); |
416 | 398 | ||
@@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu, | |||
450 | struct sample *sample) | 432 | struct sample *sample) |
451 | { | 433 | { |
452 | u64 core_pct; | 434 | u64 core_pct; |
453 | sample->pstate_pct_busy = 100 - div64_u64( | ||
454 | sample->idletime_us * 100, | ||
455 | sample->duration_us); | ||
456 | core_pct = div64_u64(sample->aperf * 100, sample->mperf); | 435 | core_pct = div64_u64(sample->aperf * 100, sample->mperf); |
457 | sample->freq = cpu->pstate.max_pstate * core_pct * 1000; | 436 | sample->freq = cpu->pstate.max_pstate * core_pct * 1000; |
458 | 437 | ||
459 | sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), | 438 | sample->core_pct_busy = core_pct; |
460 | 100); | ||
461 | } | 439 | } |
462 | 440 | ||
463 | static inline void intel_pstate_sample(struct cpudata *cpu) | 441 | static inline void intel_pstate_sample(struct cpudata *cpu) |
464 | { | 442 | { |
465 | ktime_t now; | ||
466 | u64 idle_time_us; | ||
467 | u64 aperf, mperf; | 443 | u64 aperf, mperf; |
468 | 444 | ||
469 | now = ktime_get(); | ||
470 | idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL); | ||
471 | |||
472 | rdmsrl(MSR_IA32_APERF, aperf); | 445 | rdmsrl(MSR_IA32_APERF, aperf); |
473 | rdmsrl(MSR_IA32_MPERF, mperf); | 446 | rdmsrl(MSR_IA32_MPERF, mperf); |
474 | /* for the first sample, don't actually record a sample, just | 447 | cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; |
475 | * set the baseline */ | 448 | cpu->samples[cpu->sample_ptr].aperf = aperf; |
476 | if (cpu->prev_idle_time_us > 0) { | 449 | cpu->samples[cpu->sample_ptr].mperf = mperf; |
477 | cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; | 450 | cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; |
478 | cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample; | 451 | cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; |
479 | cpu->samples[cpu->sample_ptr].end_time = now; | 452 | |
480 | cpu->samples[cpu->sample_ptr].duration_us = | 453 | intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); |
481 | ktime_us_delta(now, cpu->prev_sample); | ||
482 | cpu->samples[cpu->sample_ptr].idletime_us = | ||
483 | idle_time_us - cpu->prev_idle_time_us; | ||
484 | |||
485 | cpu->samples[cpu->sample_ptr].aperf = aperf; | ||
486 | cpu->samples[cpu->sample_ptr].mperf = mperf; | ||
487 | cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; | ||
488 | cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; | ||
489 | |||
490 | intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); | ||
491 | } | ||
492 | 454 | ||
493 | cpu->prev_sample = now; | ||
494 | cpu->prev_idle_time_us = idle_time_us; | ||
495 | cpu->prev_aperf = aperf; | 455 | cpu->prev_aperf = aperf; |
496 | cpu->prev_mperf = mperf; | 456 | cpu->prev_mperf = mperf; |
497 | } | 457 | } |
@@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | |||
505 | mod_timer_pinned(&cpu->timer, jiffies + delay); | 465 | mod_timer_pinned(&cpu->timer, jiffies + delay); |
506 | } | 466 | } |
507 | 467 | ||
508 | static inline void intel_pstate_idle_mode(struct cpudata *cpu) | ||
509 | { | ||
510 | cpu->idle_mode = 1; | ||
511 | } | ||
512 | |||
513 | static inline void intel_pstate_normal_mode(struct cpudata *cpu) | ||
514 | { | ||
515 | cpu->idle_mode = 0; | ||
516 | } | ||
517 | |||
518 | static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) | 468 | static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) |
519 | { | 469 | { |
520 | int32_t busy_scaled; | 470 | int32_t busy_scaled; |
@@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) | |||
547 | intel_pstate_pstate_decrease(cpu, steps); | 497 | intel_pstate_pstate_decrease(cpu, steps); |
548 | } | 498 | } |
549 | 499 | ||
550 | static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu) | ||
551 | { | ||
552 | int busy_scaled; | ||
553 | struct _pid *pid; | ||
554 | int ctl = 0; | ||
555 | int steps; | ||
556 | |||
557 | pid = &cpu->idle_pid; | ||
558 | |||
559 | busy_scaled = intel_pstate_get_scaled_busy(cpu); | ||
560 | |||
561 | ctl = pid_calc(pid, 100 - busy_scaled); | ||
562 | |||
563 | steps = abs(ctl); | ||
564 | if (ctl < 0) | ||
565 | intel_pstate_pstate_decrease(cpu, steps); | ||
566 | else | ||
567 | intel_pstate_pstate_increase(cpu, steps); | ||
568 | |||
569 | if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) | ||
570 | intel_pstate_normal_mode(cpu); | ||
571 | } | ||
572 | |||
573 | static void intel_pstate_timer_func(unsigned long __data) | 500 | static void intel_pstate_timer_func(unsigned long __data) |
574 | { | 501 | { |
575 | struct cpudata *cpu = (struct cpudata *) __data; | 502 | struct cpudata *cpu = (struct cpudata *) __data; |
576 | 503 | ||
577 | intel_pstate_sample(cpu); | 504 | intel_pstate_sample(cpu); |
505 | intel_pstate_adjust_busy_pstate(cpu); | ||
578 | 506 | ||
579 | if (!cpu->idle_mode) | ||
580 | intel_pstate_adjust_busy_pstate(cpu); | ||
581 | else | ||
582 | intel_pstate_adjust_idle_pstate(cpu); | ||
583 | |||
584 | #if defined(XPERF_FIX) | ||
585 | if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { | 507 | if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { |
586 | cpu->min_pstate_count++; | 508 | cpu->min_pstate_count++; |
587 | if (!(cpu->min_pstate_count % 5)) { | 509 | if (!(cpu->min_pstate_count % 5)) { |
588 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); | 510 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); |
589 | intel_pstate_idle_mode(cpu); | ||
590 | } | 511 | } |
591 | } else | 512 | } else |
592 | cpu->min_pstate_count = 0; | 513 | cpu->min_pstate_count = 0; |
593 | #endif | 514 | |
594 | intel_pstate_set_sample_time(cpu); | 515 | intel_pstate_set_sample_time(cpu); |
595 | } | 516 | } |
596 | 517 | ||
@@ -631,7 +552,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) | |||
631 | (unsigned long)cpu; | 552 | (unsigned long)cpu; |
632 | cpu->timer.expires = jiffies + HZ/100; | 553 | cpu->timer.expires = jiffies + HZ/100; |
633 | intel_pstate_busy_pid_reset(cpu); | 554 | intel_pstate_busy_pid_reset(cpu); |
634 | intel_pstate_idle_pid_reset(cpu); | ||
635 | intel_pstate_sample(cpu); | 555 | intel_pstate_sample(cpu); |
636 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); | 556 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); |
637 | 557 | ||
@@ -675,8 +595,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
675 | limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); | 595 | limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); |
676 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); | 596 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); |
677 | 597 | ||
678 | limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq; | 598 | limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq; |
679 | limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100); | 599 | limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100); |
600 | limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); | ||
680 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); | 601 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); |
681 | 602 | ||
682 | return 0; | 603 | return 0; |
@@ -788,10 +709,9 @@ static int __init intel_pstate_init(void) | |||
788 | 709 | ||
789 | pr_info("Intel P-state driver initializing.\n"); | 710 | pr_info("Intel P-state driver initializing.\n"); |
790 | 711 | ||
791 | all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); | 712 | all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); |
792 | if (!all_cpu_data) | 713 | if (!all_cpu_data) |
793 | return -ENOMEM; | 714 | return -ENOMEM; |
794 | memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus()); | ||
795 | 715 | ||
796 | rc = cpufreq_register_driver(&intel_pstate_driver); | 716 | rc = cpufreq_register_driver(&intel_pstate_driver); |
797 | if (rc) | 717 | if (rc) |
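With the idle-time bookkeeping removed, intel_pstate's busy figure comes purely from the APERF/MPERF deltas between samples. Reduced to a standalone helper (hypothetical, but the arithmetic mirrors the new intel_pstate_calc_busy() path above), the calculation is:

#include <linux/math64.h>

struct busy_sample { u64 aperf; u64 mperf; };

/*
 * Same arithmetic as the new sample path, stripped down: core_pct_busy is
 * the APERF/MPERF ratio over the sampling window (in percent) and freq
 * scales the maximum P-state by that ratio, giving kHz. Assumes a non-zero
 * MPERF delta.
 */
static void calc_busy(const struct busy_sample *prev,
		      const struct busy_sample *cur,
		      int max_pstate, int *core_pct_busy, int *freq)
{
	u64 core_pct = div64_u64((cur->aperf - prev->aperf) * 100,
				 cur->mperf - prev->mperf);

	*core_pct_busy = (int)core_pct;
	*freq = max_pstate * (int)core_pct * 1000;
}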
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index d36ea8dc96eb..b2644af985ec 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) | |||
171 | priv.dev = &pdev->dev; | 171 | priv.dev = &pdev->dev; |
172 | 172 | ||
173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
174 | if (!res) { | ||
175 | dev_err(&pdev->dev, "Cannot get memory resource\n"); | ||
176 | return -ENODEV; | ||
177 | } | ||
178 | priv.base = devm_ioremap_resource(&pdev->dev, res); | 174 | priv.base = devm_ioremap_resource(&pdev->dev, res); |
179 | if (IS_ERR(priv.base)) | 175 | if (IS_ERR(priv.base)) |
180 | return PTR_ERR(priv.base); | 176 | return PTR_ERR(priv.base); |
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 5b3d2bd4813a..64b8c7639520 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -77,7 +77,7 @@ struct acpi_signal_fatal_info { | |||
77 | /* | 77 | /* |
78 | * OSL Initialization and shutdown primitives | 78 | * OSL Initialization and shutdown primitives |
79 | */ | 79 | */ |
80 | acpi_status __initdata acpi_os_initialize(void); | 80 | acpi_status __init acpi_os_initialize(void); |
81 | 81 | ||
82 | acpi_status acpi_os_terminate(void); | 82 | acpi_status acpi_os_terminate(void); |
83 | 83 | ||
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index b327b5a9296d..ea69367fdd3b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -329,10 +329,16 @@ int acpi_processor_power_init(struct acpi_processor *pr); | |||
329 | int acpi_processor_power_exit(struct acpi_processor *pr); | 329 | int acpi_processor_power_exit(struct acpi_processor *pr); |
330 | int acpi_processor_cst_has_changed(struct acpi_processor *pr); | 330 | int acpi_processor_cst_has_changed(struct acpi_processor *pr); |
331 | int acpi_processor_hotplug(struct acpi_processor *pr); | 331 | int acpi_processor_hotplug(struct acpi_processor *pr); |
332 | int acpi_processor_suspend(struct device *dev); | ||
333 | int acpi_processor_resume(struct device *dev); | ||
334 | extern struct cpuidle_driver acpi_idle_driver; | 332 | extern struct cpuidle_driver acpi_idle_driver; |
335 | 333 | ||
334 | #ifdef CONFIG_PM_SLEEP | ||
335 | void acpi_processor_syscore_init(void); | ||
336 | void acpi_processor_syscore_exit(void); | ||
337 | #else | ||
338 | static inline void acpi_processor_syscore_init(void) {} | ||
339 | static inline void acpi_processor_syscore_exit(void) {} | ||
340 | #endif | ||
341 | |||
336 | /* in processor_thermal.c */ | 342 | /* in processor_thermal.c */ |
337 | int acpi_processor_get_limit_info(struct acpi_processor *pr); | 343 | int acpi_processor_get_limit_info(struct acpi_processor *pr); |
338 | extern const struct thermal_cooling_device_ops processor_cooling_ops; | 344 | extern const struct thermal_cooling_device_ops processor_cooling_ops; |