 Documentation/ABI/testing/sysfs-devices-system-cpu         |  11
 Documentation/cpu-freq/boost.txt                           |  93
 Documentation/cpuidle/sysfs.txt                            |  10
 Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt |  55
 Documentation/devicetree/bindings/power/opp.txt            |  25
 arch/arm/kernel/smp.c                                      |  54
 arch/arm/mach-shmobile/Makefile                            |   2
 arch/arm/mach-shmobile/board-ap4evb.c                      |  21
 arch/arm/mach-shmobile/board-armadillo800eva.c             |   6
 arch/arm/mach-shmobile/board-mackerel.c                    |  34
 arch/arm/mach-shmobile/common.c                            |  24
 arch/arm/mach-shmobile/cpuidle.c                           |  39
 arch/arm/mach-shmobile/include/mach/common.h               |  14
 arch/arm/mach-shmobile/include/mach/pm-rmobile.h           |  35
 arch/arm/mach-shmobile/include/mach/r8a7740.h              |   6
 arch/arm/mach-shmobile/include/mach/r8a7779.h              |  12
 arch/arm/mach-shmobile/include/mach/sh7372.h               |  20
 arch/arm/mach-shmobile/pm-r8a7740.c                        |  42
 arch/arm/mach-shmobile/pm-r8a7779.c                        |  71
 arch/arm/mach-shmobile/pm-rmobile.c                        |  33
 arch/arm/mach-shmobile/pm-sh7372.c                         | 283
 arch/arm/mach-shmobile/setup-r8a7740.c                     |  27
 arch/arm/mach-shmobile/setup-r8a7779.c                     |   5
 arch/arm/mach-shmobile/setup-sh7372.c                      |  69
 arch/x86/include/asm/msr-index.h                           |   3
 drivers/acpi/processor_driver.c                            |   8
 drivers/acpi/processor_idle.c                              |  40
 drivers/acpi/processor_perflib.c                           |  30
 drivers/base/platform.c                                    |   2
 drivers/base/power/domain.c                                | 244
 drivers/base/power/main.c                                  |  66
 drivers/base/power/opp.c                                   |  47
 drivers/base/power/power.h                                 |  36
 drivers/base/power/runtime.c                               |   3
 drivers/base/power/wakeup.c                                |  46
 drivers/clocksource/sh_cmt.c                               |  71
 drivers/clocksource/sh_mtu2.c                              |  41
 drivers/clocksource/sh_tmu.c                               | 112
 drivers/cpufreq/Kconfig                                    |  11
 drivers/cpufreq/Kconfig.x86                                |  18
 drivers/cpufreq/Makefile                                   |   4
 drivers/cpufreq/acpi-cpufreq.c                             | 272
 drivers/cpufreq/cpufreq-cpu0.c                             | 269
 drivers/cpufreq/cpufreq_conservative.c                     |   2
 drivers/cpufreq/cpufreq_ondemand.c                         |   1
 drivers/cpufreq/longhaul.h                                 |  26
 drivers/cpufreq/omap-cpufreq.c                             |  39
 drivers/cpufreq/powernow-k8.c                              | 406
 drivers/cpufreq/powernow-k8.h                              |  32
 drivers/cpuidle/driver.c                                   |  18
 drivers/cpuidle/governors/ladder.c                         |   6
 drivers/pci/pci-driver.c                                   |  17
 drivers/xen/xen-acpi-processor.c                           |   1
 include/acpi/processor.h                                   |   9
 include/linux/clockchips.h                                 |   8
 include/linux/device.h                                     |   7
 include/linux/opp.h                                        |   8
 include/linux/pm.h                                         |   2
 include/linux/pm_domain.h                                  |  92
 kernel/power/Kconfig                                       |   4
 kernel/power/poweroff.c                                    |   2
 kernel/power/process.c                                     |   2
 kernel/power/qos.c                                         |   1
 kernel/time/clockevents.c                                  |  24
 kernel/time/timekeeping.c                                  |   2
 65 files changed, 2061 insertions(+), 962 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 5dab36448b44..6943133afcb8 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -176,3 +176,14 @@ Description: Disable L3 cache indices
                 All AMD processors with L3 caches provide this functionality.
                 For details, see BKDGs at
                 http://developer.amd.com/documentation/guides/Pages/default.aspx
+
+
+What:           /sys/devices/system/cpu/cpufreq/boost
+Date:           August 2012
+Contact:        Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:    Processor frequency boosting control
+
+                This switch controls the boost setting for the whole system.
+                Boosting allows the CPU and the firmware to run at a frequency
+                beyond its nominal limit.
+                More details can be found in Documentation/cpu-freq/boost.txt
diff --git a/Documentation/cpu-freq/boost.txt b/Documentation/cpu-freq/boost.txt
new file mode 100644
index 000000000000..9b4edfcf486f
--- /dev/null
+++ b/Documentation/cpu-freq/boost.txt
@@ -0,0 +1,93 @@
+Processor boosting control
+
+	- information for users -
+
+Quick guide for the impatient:
+--------------------
+/sys/devices/system/cpu/cpufreq/boost
+controls the boost setting for the whole system. You can read and write
+that file with either "0" (boosting disabled) or "1" (boosting allowed).
+Reading or writing "1" does not mean that the system is boosting at this
+very moment, but only that the CPU _may_ raise the frequency at its
+discretion.
+--------------------
+
+Introduction
+-------------
+Some CPUs support a functionality to raise the operating frequency of
+some cores in a multi-core package if certain conditions apply, mostly
+if the whole chip is not fully utilized and below its intended thermal
+budget. This is done without operating system control by a combination
+of hardware and firmware.
+On Intel CPUs this is called "Turbo Boost", AMD calls it "Turbo-Core",
+and technical documentation often refers to it as "Core performance
+boost". In Linux we use the term "boost" for convenience.
+
+Rationale for disable switch
+----------------------------
+
+Though the idea is to just give better performance without any user
+intervention, sometimes the need arises to disable this functionality.
+Most systems offer a switch in the (BIOS) firmware to disable the
+functionality entirely, but a more fine-grained and dynamic control would
+be desirable:
+1. While running benchmarks, reproducible results are important. Since
+   the boosting functionality depends on the load of the whole package,
+   single thread performance can vary. By explicitly disabling the boost
+   functionality at least for the benchmark's run-time the system will run
+   at a fixed frequency and results are reproducible again.
+2. To examine the impact of the boosting functionality it is helpful
+   to do tests with and without boosting.
+3. Boosting means overclocking the processor, though under controlled
+   conditions. By raising the frequency and the voltage the processor
+   will consume more power than without the boosting, which may be
+   undesirable for instance for mobile users. Disabling boosting may
+   save power here, though this depends on the workload.
+
+
+User controlled switch
+----------------------
+
+To allow the user to toggle the boosting functionality, the acpi-cpufreq
+driver exports a sysfs knob to disable it. There is a file:
+/sys/devices/system/cpu/cpufreq/boost
+which can either read "0" (boosting disabled) or "1" (boosting enabled).
+Reading the file is always supported, even if the processor does not
+support boosting. In this case the file will be read-only and always
+reads as "0". Explicitly changing the permissions and writing to that
+file anyway will return EINVAL.
+
+On supported CPUs one can write either a "0" or a "1" into this file.
+This will either disable the boost functionality on all cores in the
+whole system (0) or will allow the hardware to boost at will (1).
+
+Writing a "1" does not explicitly boost the system, but just allows the
+CPU (and the firmware) to boost at their discretion. Some implementations
+take external factors like the chip's temperature into account, so
+boosting once does not necessarily mean that it will occur every time
+even using the exact same software setup.
+
+
+AMD legacy cpb switch
+---------------------
+The AMD powernow-k8 driver used to support a very similar switch to
+disable or enable the "Core Performance Boost" feature of some AMD CPUs.
+This switch was instantiated in each CPU's cpufreq directory
+(/sys/devices/system/cpu[0-9]*/cpufreq) and was called "cpb".
+Though the per-CPU existence hints at more fine-grained control, the
+actual implementation only supported system-global switch semantics,
+which were simply reflected into each CPU's file. Writing a 0 or 1 into it
+would pull the other CPUs to the same state.
+For compatibility reasons this file and its behavior is still supported
+on AMD CPUs, though it is now protected by a config switch
+(X86_ACPI_CPUFREQ_CPB). On Intel CPUs this file will never be created,
+even with the config option set.
+This functionality is considered legacy and will be removed in some future
+kernel version.
+
+More fine grained boosting control
+----------------------------------
+
+Technically it is possible to switch the boosting functionality at least
+on a per-package basis, for some CPUs even per core. Currently the driver
+does not support it, but this may be implemented in the future.
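
For illustration, toggling the knob documented above is a single sysfs write. A minimal user-space sketch in C (the path is the one from boost.txt; error handling is trimmed, and writing requires root):

/* Toggle the system-wide boost knob exported by acpi-cpufreq. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static const char *boost_path = "/sys/devices/system/cpu/cpufreq/boost";

static int set_boost(int enable)
{
        int fd = open(boost_path, O_WRONLY);

        if (fd < 0)
                return -1;      /* no boost support or no permission */
        if (write(fd, enable ? "1" : "0", 1) != 1) {
                close(fd);
                return -1;      /* EINVAL on read-only (unsupported) CPUs */
        }
        return close(fd);
}

int main(void)
{
        /* disable boost for a reproducible benchmark run */
        return set_boost(0) ? 1 : 0;
}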
diff --git a/Documentation/cpuidle/sysfs.txt b/Documentation/cpuidle/sysfs.txt
index 9d28a3406e74..b6f44f490ed7 100644
--- a/Documentation/cpuidle/sysfs.txt
+++ b/Documentation/cpuidle/sysfs.txt
@@ -76,9 +76,17 @@ total 0


 * desc : Small description about the idle state (string)
-* disable : Option to disable this idle state (bool)
+* disable : Option to disable this idle state (bool) -> see note below
 * latency : Latency to exit out of this idle state (in microseconds)
 * name : Name of the idle state (string)
 * power : Power consumed while in this idle state (in milliwatts)
 * time : Total time spent in this idle state (in microseconds)
 * usage : Number of times this state was entered (count)
+
+Note:
+The behavior and the effect of the disable variable depend on the
+implementation of a particular governor. In the ladder governor, for
+example, it is not coherent, i.e. if one disables a light state, then
+all deeper states are disabled as well, but the disable variable does
+not reflect it. Likewise, if one enables a deep state while a lighter
+state is still disabled, then this has no effect.
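
Since "disable" is an ordinary per-state, per-CPU sysfs file, a test harness can switch a state off before taking measurements. A minimal user-space sketch, assuming the usual /sys/devices/system/cpu/cpuN/cpuidle/stateM layout and keeping the ladder-governor caveat above in mind:

/* Disable cpuidle state 2 on CPU 0; mind the ladder-governor caveat above. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpuidle/state2/disable", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fputs("1", f);  /* writing "0" re-enables the state */
        fclose(f);
        return 0;
}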
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
new file mode 100644
index 000000000000..4416ccc33472
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
@@ -0,0 +1,55 @@
+Generic CPU0 cpufreq driver
+
+It is a generic cpufreq driver for CPU0 frequency management.  It
+supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+systems which share a clock and voltage across all CPUs.
+
+Both required and optional properties listed below must be defined
+under node /cpus/cpu@0.
+
+Required properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
+  for details
+
+Optional properties:
+- clock-latency: Specify the possible maximum transition latency for the
+  clock, in units of nanoseconds.
+- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
+
+Examples:
+
+cpus {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        cpu@0 {
+                compatible = "arm,cortex-a9";
+                reg = <0>;
+                next-level-cache = <&L2>;
+                operating-points = <
+                        /* kHz    uV */
+                        792000  1100000
+                        396000   950000
+                        198000   850000
+                >;
+                clock-latency = <61036>; /* two CLK32 periods */
+        };
+
+        cpu@1 {
+                compatible = "arm,cortex-a9";
+                reg = <1>;
+                next-level-cache = <&L2>;
+        };
+
+        cpu@2 {
+                compatible = "arm,cortex-a9";
+                reg = <2>;
+                next-level-cache = <&L2>;
+        };
+
+        cpu@3 {
+                compatible = "arm,cortex-a9";
+                reg = <3>;
+                next-level-cache = <&L2>;
+        };
+};
diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/power/opp.txt
new file mode 100644
index 000000000000..74499e5033fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/opp.txt
@@ -0,0 +1,25 @@
+* Generic OPP Interface
+
+SoCs have a standard set of tuples consisting of frequency and
+voltage pairs that the device will support per voltage domain. These
+are called Operating Performance Points or OPPs.
+
+Properties:
+- operating-points: An array of 2-tuples; each item consists of a
+  frequency and a voltage, as <freq-kHz vol-uV>.
+  freq: clock frequency in kHz
+  vol: voltage in microvolts
+
+Examples:
+
+cpu@0 {
+        compatible = "arm,cortex-a9";
+        reg = <0>;
+        next-level-cache = <&L2>;
+        operating-points = <
+                /* kHz    uV */
+                792000  1100000
+                396000   950000
+                198000   850000
+        >;
+};
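
For illustration, a driver consumes this property by walking the cells in pairs. The following is a condensed kernel-style sketch of that parsing, close in spirit to what the OPP library's device-tree helper does in this series (validation and error paths are trimmed):

/*
 * Walk the "operating-points" cells of a device node and register each
 * <kHz uV> pair with the OPP library.  Condensed sketch; real code also
 * validates the property length and handles allocation failures.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/opp.h>

static int init_opp_table(struct device *dev)
{
        struct property *prop;
        const __be32 *val;
        int nr;

        prop = of_find_property(dev->of_node, "operating-points", NULL);
        if (!prop || !prop->value)
                return -ENODEV;

        nr = prop->length / sizeof(u32);
        if (nr % 2)     /* must be pairs of <frequency voltage> */
                return -EINVAL;

        val = prop->value;
        while (nr) {
                unsigned long freq = be32_to_cpup(val++) * 1000; /* kHz -> Hz */
                unsigned long volt = be32_to_cpup(val++);        /* uV */

                opp_add(dev, freq, volt);
                nr -= 2;
        }
        return 0;
}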
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index aa4ffe6e5ecf..dea7a925c7e2 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -24,6 +24,7 @@
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
+#include <linux/cpufreq.h>

 #include <linux/atomic.h>
 #include <asm/smp.h>
@@ -650,3 +651,56 @@ int setup_profiling_timer(unsigned int multiplier)
 {
         return -EINVAL;
 }
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+                            unsigned long val, void *data)
+{
+        struct cpufreq_freqs *freq = data;
+        int cpu = freq->cpu;
+
+        if (freq->flags & CPUFREQ_CONST_LOOPS)
+                return NOTIFY_OK;
+
+        if (!per_cpu(l_p_j_ref, cpu)) {
+                per_cpu(l_p_j_ref, cpu) =
+                        per_cpu(cpu_data, cpu).loops_per_jiffy;
+                per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+                if (!global_l_p_j_ref) {
+                        global_l_p_j_ref = loops_per_jiffy;
+                        global_l_p_j_ref_freq = freq->old;
+                }
+        }
+
+        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+                                                global_l_p_j_ref_freq,
+                                                freq->new);
+                per_cpu(cpu_data, cpu).loops_per_jiffy =
+                        cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+                                      per_cpu(l_p_j_ref_freq, cpu),
+                                      freq->new);
+        }
+        return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+        .notifier_call = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+        return cpufreq_register_notifier(&cpufreq_notifier,
+                                         CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif
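
The notifier above keeps loops_per_jiffy calibrated across frequency transitions by scaling the reference value linearly with frequency, which is essentially all cpufreq_scale() does (plus overflow guards). A stand-alone sketch of the arithmetic, with hypothetical calibration numbers:

/*
 * Minimal user-space sketch of the loops_per_jiffy rescaling done by the
 * notifier above: lpj scales linearly with CPU frequency, so udelay()
 * loops stay calibrated across transitions.  This stand-alone version
 * just uses a 64-bit intermediate instead of the kernel's overflow guards.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long scale_lpj(unsigned long lpj_ref,
                               unsigned long ref_freq, /* kHz */
                               unsigned long new_freq) /* kHz */
{
        return (unsigned long)((uint64_t)lpj_ref * new_freq / ref_freq);
}

int main(void)
{
        /* reference calibration taken at 396 MHz (hypothetical values) */
        unsigned long lpj = scale_lpj(1980000, 396000, 792000);

        printf("loops_per_jiffy at 792 MHz: %lu\n", lpj); /* 3960000 */
        return 0;
}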
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 0df5ae6740c6..fe2c97c179d1 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
 #

 # Common objects
-obj-y                           := timer.o console.o clock.o common.o
+obj-y                           := timer.o console.o clock.o

 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)       += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index bc3b5da59e25..790dc68c4312 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1231,6 +1231,15 @@ static struct i2c_board_info i2c1_devices[] = {
 #define USCCR1 IOMEM(0xE6058144)
 static void __init ap4evb_init(void)
 {
+        struct pm_domain_device domain_devices[] = {
+                { "A4LC", &lcdc1_device, },
+                { "A4LC", &lcdc_device, },
+                { "A4MP", &fsi_device, },
+                { "A3SP", &sh_mmcif_device, },
+                { "A3SP", &sdhi0_device, },
+                { "A3SP", &sdhi1_device, },
+                { "A4R", &ceu_device, },
+        };
         u32 srcr4;
         struct clk *clk;

@@ -1463,14 +1472,8 @@ static void __init ap4evb_init(void)

         platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));

-        rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc1_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
-
-        rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
+        rmobile_add_devices_to_domains(domain_devices,
+                                       ARRAY_SIZE(domain_devices));

         hdmi_init_pm_clock();
         fsi_init_pm_clock();
@@ -1485,6 +1488,6 @@ MACHINE_START(AP4EVB, "ap4evb")
         .init_irq       = sh7372_init_irq,
         .handle_irq     = shmobile_handle_irq_intc,
         .init_machine   = ap4evb_init,
-        .init_late      = shmobile_init_late,
+        .init_late      = sh7372_pm_init_late,
         .timer          = &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index c6593d394273..2912eab3b967 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1209,10 +1209,10 @@ static void __init eva_init(void)

         eva_clock_init();

-        rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &lcdc0_device);
-        rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &hdmi_lcdc_device);
+        rmobile_add_device_to_domain("A4LC", &lcdc0_device);
+        rmobile_add_device_to_domain("A4LC", &hdmi_lcdc_device);
         if (usb)
-                rmobile_add_device_to_domain(&r8a7740_pd_a3sp, usb);
+                rmobile_add_device_to_domain("A3SP", usb);
 }

 static void __init eva_earlytimer_init(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 62783b5d8813..0c27c810cf99 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1412,6 +1412,22 @@ static struct i2c_board_info i2c1_devices[] = {
 #define USCCR1 IOMEM(0xE6058144)
 static void __init mackerel_init(void)
 {
+        struct pm_domain_device domain_devices[] = {
+                { "A4LC", &lcdc_device, },
+                { "A4LC", &hdmi_lcdc_device, },
+                { "A4LC", &meram_device, },
+                { "A4MP", &fsi_device, },
+                { "A3SP", &usbhs0_device, },
+                { "A3SP", &usbhs1_device, },
+                { "A3SP", &nand_flash_device, },
+                { "A3SP", &sh_mmcif_device, },
+                { "A3SP", &sdhi0_device, },
+#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
+                { "A3SP", &sdhi1_device, },
+#endif
+                { "A3SP", &sdhi2_device, },
+                { "A4R", &ceu_device, },
+        };
         u32 srcr4;
         struct clk *clk;

@@ -1626,20 +1642,8 @@ static void __init mackerel_init(void)

         platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));

-        rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a4lc, &hdmi_lcdc_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a4lc, &meram_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs0_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs1_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a3sp, &nand_flash_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
-#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
-        rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
-#endif
-        rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi2_device);
-        rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
+        rmobile_add_devices_to_domains(domain_devices,
+                                       ARRAY_SIZE(domain_devices));

         hdmi_init_pm_clock();
         sh7372_pm_init();
@@ -1653,6 +1657,6 @@ MACHINE_START(MACKEREL, "mackerel")
         .init_irq       = sh7372_init_irq,
         .handle_irq     = shmobile_handle_irq_intc,
         .init_machine   = mackerel_init,
-        .init_late      = shmobile_init_late,
+        .init_late      = sh7372_pm_init_late,
         .timer          = &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/common.c b/arch/arm/mach-shmobile/common.c
deleted file mode 100644
index 608aba9d60d7..000000000000
--- a/arch/arm/mach-shmobile/common.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <mach/common.h>
-
-void __init shmobile_init_late(void)
-{
-        shmobile_suspend_init();
-        shmobile_cpuidle_init();
-}
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 7b541e911ab4..9e050268cde4 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -16,51 +16,38 @@
 #include <asm/cpuidle.h>
 #include <asm/io.h>

-static void shmobile_enter_wfi(void)
+int shmobile_enter_wfi(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+                       int index)
 {
         cpu_do_idle();
-}
-
-void (*shmobile_cpuidle_modes[CPUIDLE_STATE_MAX])(void) = {
-        shmobile_enter_wfi, /* regular sleep mode */
-};
-
-static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
-                                  struct cpuidle_driver *drv,
-                                  int index)
-{
-        shmobile_cpuidle_modes[index]();
-
-        return index;
+        return 0;
 }

 static struct cpuidle_device shmobile_cpuidle_dev;
-static struct cpuidle_driver shmobile_cpuidle_driver = {
+static struct cpuidle_driver shmobile_cpuidle_default_driver = {
         .name                   = "shmobile_cpuidle",
         .owner                  = THIS_MODULE,
         .en_core_tk_irqen       = 1,
         .states[0]              = ARM_CPUIDLE_WFI_STATE,
+        .states[0].enter        = shmobile_enter_wfi,
         .safe_state_index       = 0, /* C1 */
         .state_count            = 1,
 };

-void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
+static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver;
+
+void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv)
+{
+        cpuidle_drv = drv;
+}

 int shmobile_cpuidle_init(void)
 {
         struct cpuidle_device *dev = &shmobile_cpuidle_dev;
-        struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
-        int i;
-
-        for (i = 0; i < CPUIDLE_STATE_MAX; i++)
-                drv->states[i].enter = shmobile_cpuidle_enter;
-
-        if (shmobile_cpuidle_setup)
-                shmobile_cpuidle_setup(drv);

-        cpuidle_register_driver(drv);
+        cpuidle_register_driver(cpuidle_drv);

-        dev->state_count = drv->state_count;
+        dev->state_count = cpuidle_drv->state_count;
         cpuidle_register_device(dev);

         return 0;
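
With this rework, a platform that wants more than plain WFI registers a complete cpuidle driver of its own instead of patching a shared state table at init time, as pm-sh7372.c does later in this patch. A minimal sketch of that pattern; every "myplat_" name and all latency numbers are illustrative placeholders, not real hardware data:

/*
 * Sketch: a platform with one extra C-state replaces the default
 * WFI-only driver before shmobile_cpuidle_init() runs.
 */
#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
#include <mach/common.h>

static int myplat_enter_deep(struct cpuidle_device *dev,
                             struct cpuidle_driver *drv, int index)
{
        cpu_do_idle();          /* stand-in for a real deep-sleep entry */
        return index;
}

static struct cpuidle_driver myplat_cpuidle_driver = {
        .name                   = "myplat_cpuidle",
        .owner                  = THIS_MODULE,
        .en_core_tk_irqen       = 1,
        .state_count            = 2,
        .safe_state_index       = 0, /* C1 */
        .states[0]              = ARM_CPUIDLE_WFI_STATE,
        .states[0].enter        = shmobile_enter_wfi,
        .states[1] = {
                .name                   = "C2",
                .desc                   = "Hypothetical deep sleep",
                .exit_latency           = 50,
                .target_residency       = 100,
                .flags                  = CPUIDLE_FLAG_TIME_VALID,
                .enter                  = myplat_enter_deep,
        },
};

static void __init myplat_cpuidle_setup(void)
{
        /* must run before shmobile_cpuidle_init() registers the driver */
        shmobile_cpuidle_set_driver(&myplat_cpuidle_driver);
}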
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index f80f9c549393..ed77ab8c9143 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -13,8 +13,10 @@ extern int shmobile_clk_init(void);
 extern void shmobile_handle_irq_intc(struct pt_regs *);
 extern struct platform_suspend_ops shmobile_suspend_ops;
 struct cpuidle_driver;
-extern void (*shmobile_cpuidle_modes[])(void);
-extern void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
+struct cpuidle_device;
+extern int shmobile_enter_wfi(struct cpuidle_device *dev,
+                              struct cpuidle_driver *drv, int index);
+extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv);

 extern void sh7367_init_irq(void);
 extern void sh7367_map_io(void);
@@ -75,8 +77,6 @@ extern void r8a7740_meram_workaround(void);

 extern void r8a7779_register_twd(void);

-extern void shmobile_init_late(void);
-
 #ifdef CONFIG_SUSPEND
 int shmobile_suspend_init(void);
 #else
@@ -100,4 +100,10 @@ static inline int shmobile_cpu_is_dead(unsigned int cpu) { return 1; }

 extern void shmobile_smp_init_cpus(unsigned int ncores);

+static inline void shmobile_init_late(void)
+{
+        shmobile_suspend_init();
+        shmobile_cpuidle_init();
+}
+
 #endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h
index 5a402840fe28..690553a06887 100644
--- a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h
+++ b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h
@@ -12,6 +12,8 @@

 #include <linux/pm_domain.h>

+#define DEFAULT_DEV_LATENCY_NS  250000
+
 struct platform_device;

 struct rmobile_pm_domain {
@@ -29,16 +31,33 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
         return container_of(d, struct rmobile_pm_domain, genpd);
 }

+struct pm_domain_device {
+        const char *domain_name;
+        struct platform_device *pdev;
+};
+
 #ifdef CONFIG_PM
-extern void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd);
-extern void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
-                                         struct platform_device *pdev);
-extern void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
-                                     struct rmobile_pm_domain *rmobile_sd);
+extern void rmobile_init_domains(struct rmobile_pm_domain domains[], int num);
+extern void rmobile_add_device_to_domain_td(const char *domain_name,
+                                            struct platform_device *pdev,
+                                            struct gpd_timing_data *td);
+
+static inline void rmobile_add_device_to_domain(const char *domain_name,
+                                                struct platform_device *pdev)
+{
+        rmobile_add_device_to_domain_td(domain_name, pdev, NULL);
+}
+
+extern void rmobile_add_devices_to_domains(struct pm_domain_device data[],
+                                           int size);
 #else
-#define rmobile_init_pm_domain(pd) do { } while (0)
-#define rmobile_add_device_to_domain(pd, pdev) do { } while (0)
-#define rmobile_pm_add_subdomain(pd, sd) do { } while (0)
+
+#define rmobile_init_domains(domains, num) do { } while (0)
+#define rmobile_add_device_to_domain_td(name, pdev, td) do { } while (0)
+#define rmobile_add_device_to_domain(name, pdev) do { } while (0)
+
+static inline void rmobile_add_devices_to_domains(struct pm_domain_device d[],
+                                                  int size) {}
 #endif /* CONFIG_PM */

 #endif /* PM_RMOBILE_H */
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7740.h b/arch/arm/mach-shmobile/include/mach/r8a7740.h
index 7143147780df..59d252f4cf97 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7740.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7740.h
@@ -607,9 +607,9 @@ enum {
 };

 #ifdef CONFIG_PM
-extern struct rmobile_pm_domain r8a7740_pd_a4s;
-extern struct rmobile_pm_domain r8a7740_pd_a3sp;
-extern struct rmobile_pm_domain r8a7740_pd_a4lc;
+extern void __init r8a7740_init_pm_domains(void);
+#else
+static inline void r8a7740_init_pm_domains(void) {}
 #endif /* CONFIG_PM */

 #endif /* __ASM_R8A7740_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7779.h b/arch/arm/mach-shmobile/include/mach/r8a7779.h
index f504c5e81b47..499f52d2a4a1 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7779.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7779.h
@@ -347,17 +347,9 @@ extern int r8a7779_sysc_power_down(struct r8a7779_pm_ch *r8a7779_ch);
 extern int r8a7779_sysc_power_up(struct r8a7779_pm_ch *r8a7779_ch);

 #ifdef CONFIG_PM
-extern struct r8a7779_pm_domain r8a7779_sh4a;
-extern struct r8a7779_pm_domain r8a7779_sgx;
-extern struct r8a7779_pm_domain r8a7779_vdp1;
-extern struct r8a7779_pm_domain r8a7779_impx3;
-
-extern void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd);
-extern void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd,
-                                         struct platform_device *pdev);
+extern void __init r8a7779_init_pm_domains(void);
 #else
-#define r8a7779_init_pm_domain(pd) do { } while (0)
-#define r8a7779_add_device_to_domain(pd, pdev) do { } while (0)
+static inline void r8a7779_init_pm_domains(void) {}
 #endif /* CONFIG_PM */

 extern struct smp_operations r8a7779_smp_ops;
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index b59048e6d8fd..eb98b45c5089 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -478,21 +478,17 @@ extern struct clk sh7372_fsibck_clk;
 extern struct clk sh7372_fsidiva_clk;
 extern struct clk sh7372_fsidivb_clk;

-#ifdef CONFIG_PM
-extern struct rmobile_pm_domain sh7372_pd_a4lc;
-extern struct rmobile_pm_domain sh7372_pd_a4mp;
-extern struct rmobile_pm_domain sh7372_pd_d4;
-extern struct rmobile_pm_domain sh7372_pd_a4r;
-extern struct rmobile_pm_domain sh7372_pd_a3rv;
-extern struct rmobile_pm_domain sh7372_pd_a3ri;
-extern struct rmobile_pm_domain sh7372_pd_a4s;
-extern struct rmobile_pm_domain sh7372_pd_a3sp;
-extern struct rmobile_pm_domain sh7372_pd_a3sg;
-#endif /* CONFIG_PM */
-
 extern void sh7372_intcs_suspend(void);
 extern void sh7372_intcs_resume(void);
 extern void sh7372_intca_suspend(void);
 extern void sh7372_intca_resume(void);

+#ifdef CONFIG_PM
+extern void __init sh7372_init_pm_domains(void);
+#else
+static inline void sh7372_init_pm_domains(void) {}
+#endif
+
+extern void __init sh7372_pm_init_late(void);
+
 #endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c
index 893504d012a6..21e5316d2d88 100644
--- a/arch/arm/mach-shmobile/pm-r8a7740.c
+++ b/arch/arm/mach-shmobile/pm-r8a7740.c
@@ -21,14 +21,6 @@ static int r8a7740_pd_a4s_suspend(void)
         return -EBUSY;
 }

-struct rmobile_pm_domain r8a7740_pd_a4s = {
-        .genpd.name = "A4S",
-        .bit_shift = 10,
-        .gov = &pm_domain_always_on_gov,
-        .no_debug = true,
-        .suspend = r8a7740_pd_a4s_suspend,
-};
-
 static int r8a7740_pd_a3sp_suspend(void)
 {
         /*
@@ -38,17 +30,31 @@ static int r8a7740_pd_a3sp_suspend(void)
         return console_suspend_enabled ? 0 : -EBUSY;
 }

-struct rmobile_pm_domain r8a7740_pd_a3sp = {
-        .genpd.name = "A3SP",
-        .bit_shift = 11,
-        .gov = &pm_domain_always_on_gov,
-        .no_debug = true,
-        .suspend = r8a7740_pd_a3sp_suspend,
+static struct rmobile_pm_domain r8a7740_pm_domains[] = {
+        {
+                .genpd.name = "A4S",
+                .bit_shift = 10,
+                .gov = &pm_domain_always_on_gov,
+                .no_debug = true,
+                .suspend = r8a7740_pd_a4s_suspend,
+        },
+        {
+                .genpd.name = "A3SP",
+                .bit_shift = 11,
+                .gov = &pm_domain_always_on_gov,
+                .no_debug = true,
+                .suspend = r8a7740_pd_a3sp_suspend,
+        },
+        {
+                .genpd.name = "A4LC",
+                .bit_shift = 1,
+        },
 };

-struct rmobile_pm_domain r8a7740_pd_a4lc = {
-        .genpd.name = "A4LC",
-        .bit_shift = 1,
-};
+void __init r8a7740_init_pm_domains(void)
+{
+        rmobile_init_domains(r8a7740_pm_domains, ARRAY_SIZE(r8a7740_pm_domains));
+        pm_genpd_add_subdomain_names("A4S", "A3SP");
+}

 #endif /* CONFIG_PM */
diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c
index a18a4ae16d2b..d50a8e9b94a4 100644
--- a/arch/arm/mach-shmobile/pm-r8a7779.c
+++ b/arch/arm/mach-shmobile/pm-r8a7779.c
@@ -183,7 +183,7 @@ static bool pd_active_wakeup(struct device *dev)
         return true;
 }

-void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
+static void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
 {
         struct generic_pm_domain *genpd = &r8a7779_pd->genpd;

@@ -199,43 +199,44 @@ void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
         pd_power_up(&r8a7779_pd->genpd);
 }

-void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd,
-                                  struct platform_device *pdev)
-{
-        struct device *dev = &pdev->dev;
-
-        pm_genpd_add_device(&r8a7779_pd->genpd, dev);
-        if (pm_clk_no_clocks(dev))
-                pm_clk_add(dev, NULL);
-}
-
-struct r8a7779_pm_domain r8a7779_sh4a = {
-        .ch = {
-                .chan_offs = 0x80, /* PWRSR1 .. PWRER1 */
-                .isr_bit = 16, /* SH4A */
-        }
-};
-
-struct r8a7779_pm_domain r8a7779_sgx = {
-        .ch = {
-                .chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */
-                .isr_bit = 20, /* SGX */
-        }
+static struct r8a7779_pm_domain r8a7779_pm_domains[] = {
+        {
+                .genpd.name = "SH4A",
+                .ch = {
+                        .chan_offs = 0x80, /* PWRSR1 .. PWRER1 */
+                        .isr_bit = 16, /* SH4A */
+                },
+        },
+        {
+                .genpd.name = "SGX",
+                .ch = {
+                        .chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */
+                        .isr_bit = 20, /* SGX */
+                },
+        },
+        {
+                .genpd.name = "VDP1",
+                .ch = {
+                        .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */
+                        .isr_bit = 21, /* VDP */
+                },
+        },
+        {
+                .genpd.name = "IMPX3",
+                .ch = {
+                        .chan_offs = 0x140, /* PWRSR4 .. PWRER4 */
+                        .isr_bit = 24, /* IMP */
+                },
+        },
 };

-struct r8a7779_pm_domain r8a7779_vdp1 = {
-        .ch = {
-                .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */
-                .isr_bit = 21, /* VDP */
-        }
-};
+void __init r8a7779_init_pm_domains(void)
+{
+        int j;

-struct r8a7779_pm_domain r8a7779_impx3 = {
-        .ch = {
-                .chan_offs = 0x140, /* PWRSR4 .. PWRER4 */
-                .isr_bit = 24, /* IMP */
-        }
-};
+        for (j = 0; j < ARRAY_SIZE(r8a7779_pm_domains); j++)
+                r8a7779_init_pm_domain(&r8a7779_pm_domains[j]);
+}

 #endif /* CONFIG_PM */

diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c
index 32e177275e47..1fc05d9453d0 100644
--- a/arch/arm/mach-shmobile/pm-rmobile.c
+++ b/arch/arm/mach-shmobile/pm-rmobile.c
@@ -134,7 +134,7 @@ static int rmobile_pd_start_dev(struct device *dev)
         return ret;
 }

-void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
+static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
 {
         struct generic_pm_domain *genpd = &rmobile_pd->genpd;
         struct dev_power_governor *gov = rmobile_pd->gov;
@@ -149,19 +149,38 @@ void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
         __rmobile_pd_power_up(rmobile_pd, false);
 }

-void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
-                                  struct platform_device *pdev)
+void rmobile_init_domains(struct rmobile_pm_domain domains[], int num)
+{
+        int j;
+
+        for (j = 0; j < num; j++)
+                rmobile_init_pm_domain(&domains[j]);
+}
+
+void rmobile_add_device_to_domain_td(const char *domain_name,
+                                     struct platform_device *pdev,
+                                     struct gpd_timing_data *td)
 {
         struct device *dev = &pdev->dev;

-        pm_genpd_add_device(&rmobile_pd->genpd, dev);
+        __pm_genpd_name_add_device(domain_name, dev, td);
         if (pm_clk_no_clocks(dev))
                 pm_clk_add(dev, NULL);
 }

-void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
-                              struct rmobile_pm_domain *rmobile_sd)
+void rmobile_add_devices_to_domains(struct pm_domain_device data[],
+                                    int size)
 {
-        pm_genpd_add_subdomain(&rmobile_pd->genpd, &rmobile_sd->genpd);
+        struct gpd_timing_data latencies = {
+                .stop_latency_ns = DEFAULT_DEV_LATENCY_NS,
+                .start_latency_ns = DEFAULT_DEV_LATENCY_NS,
+                .save_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
+                .restore_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
+        };
+        int j;
+
+        for (j = 0; j < size; j++)
+                rmobile_add_device_to_domain_td(data[j].domain_name,
+                                                data[j].pdev, &latencies);
 }
 #endif /* CONFIG_PM */
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 162121842a2b..a0826a48dd08 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -21,6 +21,7 @@
 #include <linux/irq.h>
 #include <linux/bitrev.h>
 #include <linux/console.h>
+#include <asm/cpuidle.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/suspend.h>
@@ -72,20 +73,7 @@

 #ifdef CONFIG_PM

-struct rmobile_pm_domain sh7372_pd_a4lc = {
-        .genpd.name = "A4LC",
-        .bit_shift = 1,
-};
-
-struct rmobile_pm_domain sh7372_pd_a4mp = {
-        .genpd.name = "A4MP",
-        .bit_shift = 2,
-};
-
-struct rmobile_pm_domain sh7372_pd_d4 = {
-        .genpd.name = "D4",
-        .bit_shift = 3,
-};
+#define PM_DOMAIN_ON_OFF_LATENCY_NS     250000

 static int sh7372_a4r_pd_suspend(void)
 {
@@ -94,39 +82,25 @@ static int sh7372_a4r_pd_suspend(void)
         return 0;
 }

-struct rmobile_pm_domain sh7372_pd_a4r = {
-        .genpd.name = "A4R",
-        .bit_shift = 5,
-        .suspend = sh7372_a4r_pd_suspend,
-        .resume = sh7372_intcs_resume,
-};
+static bool a4s_suspend_ready;

-struct rmobile_pm_domain sh7372_pd_a3rv = {
-        .genpd.name = "A3RV",
-        .bit_shift = 6,
-};
-
-struct rmobile_pm_domain sh7372_pd_a3ri = {
-        .genpd.name = "A3RI",
-        .bit_shift = 8,
-};
-
-static int sh7372_pd_a4s_suspend(void)
+static int sh7372_a4s_pd_suspend(void)
 {
         /*
          * The A4S domain contains the CPU core and therefore it should
-         * only be turned off if the CPU is in use.
+         * only be turned off if the CPU is not in use.  This may happen
+         * during system suspend, when SYSC is going to be used for generating
+         * resume signals and a4s_suspend_ready is set to let
+         * sh7372_enter_suspend() know that it can turn A4S off.
          */
+        a4s_suspend_ready = true;
         return -EBUSY;
 }

-struct rmobile_pm_domain sh7372_pd_a4s = {
-        .genpd.name = "A4S",
-        .bit_shift = 10,
-        .gov = &pm_domain_always_on_gov,
-        .no_debug = true,
-        .suspend = sh7372_pd_a4s_suspend,
-};
+static void sh7372_a4s_pd_resume(void)
+{
+        a4s_suspend_ready = false;
+}

 static int sh7372_a3sp_pd_suspend(void)
 {
@@ -137,18 +111,80 @@ static int sh7372_a3sp_pd_suspend(void)
         return console_suspend_enabled ? 0 : -EBUSY;
 }

-struct rmobile_pm_domain sh7372_pd_a3sp = {
-        .genpd.name = "A3SP",
-        .bit_shift = 11,
-        .gov = &pm_domain_always_on_gov,
-        .no_debug = true,
-        .suspend = sh7372_a3sp_pd_suspend,
+static struct rmobile_pm_domain sh7372_pm_domains[] = {
+        {
+                .genpd.name = "A4LC",
+                .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .bit_shift = 1,
+        },
+        {
+                .genpd.name = "A4MP",
+                .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .bit_shift = 2,
+        },
+        {
+                .genpd.name = "D4",
+                .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .bit_shift = 3,
+        },
+        {
+                .genpd.name = "A4R",
+                .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .bit_shift = 5,
+                .suspend = sh7372_a4r_pd_suspend,
+                .resume = sh7372_intcs_resume,
+        },
+        {
+                .genpd.name = "A3RV",
+                .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .bit_shift = 6,
+        },
+        {
+                .genpd.name = "A3RI",
+                .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .bit_shift = 8,
+        },
+        {
+                .genpd.name = "A4S",
+                .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .bit_shift = 10,
+                .gov = &pm_domain_always_on_gov,
+                .no_debug = true,
+                .suspend = sh7372_a4s_pd_suspend,
+                .resume = sh7372_a4s_pd_resume,
+        },
+        {
+                .genpd.name = "A3SP",
+                .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .bit_shift = 11,
+                .gov = &pm_domain_always_on_gov,
+                .no_debug = true,
+                .suspend = sh7372_a3sp_pd_suspend,
+        },
+        {
+                .genpd.name = "A3SG",
+                .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+                .bit_shift = 13,
+        },
 };

-struct rmobile_pm_domain sh7372_pd_a3sg = {
-        .genpd.name = "A3SG",
-        .bit_shift = 13,
-};
+void __init sh7372_init_pm_domains(void)
+{
+        rmobile_init_domains(sh7372_pm_domains, ARRAY_SIZE(sh7372_pm_domains));
+        pm_genpd_add_subdomain_names("A4LC", "A3RV");
+        pm_genpd_add_subdomain_names("A4R", "A4LC");
+        pm_genpd_add_subdomain_names("A4S", "A3SG");
+        pm_genpd_add_subdomain_names("A4S", "A3SP");
+}

 #endif /* CONFIG_PM */

@@ -304,6 +340,21 @@ static void sh7372_enter_a3sm_common(int pllc0_on)
         sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
         sh7372_enter_sysc(pllc0_on, 1 << 12);
 }
+
+static void sh7372_enter_a4s_common(int pllc0_on)
+{
+        sh7372_intca_suspend();
+        sh7372_set_reset_vector(SMFRAM);
+        sh7372_enter_sysc(pllc0_on, 1 << 10);
+        sh7372_intca_resume();
+}
+
+static void sh7372_pm_setup_smfram(void)
+{
+        memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
+}
+#else
+static inline void sh7372_pm_setup_smfram(void) {}
 #endif /* CONFIG_SUSPEND || CONFIG_CPU_IDLE */

 #ifdef CONFIG_CPU_IDLE
@@ -313,7 +364,8 @@ static int sh7372_do_idle_core_standby(unsigned long unused)
         return 0;
 }

-static void sh7372_enter_core_standby(void)
+static int sh7372_enter_core_standby(struct cpuidle_device *dev,
+                                     struct cpuidle_driver *drv, int index)
 {
         sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));

@@ -324,83 +376,102 @@

         /* disable reset vector translation */
         __raw_writel(0, SBAR);
+
+        return 1;
 }

-static void sh7372_enter_a3sm_pll_on(void)
+static int sh7372_enter_a3sm_pll_on(struct cpuidle_device *dev,
+                                    struct cpuidle_driver *drv, int index)
 {
         sh7372_enter_a3sm_common(1);
+        return 2;
 }

-static void sh7372_enter_a3sm_pll_off(void)
+static int sh7372_enter_a3sm_pll_off(struct cpuidle_device *dev,
+                                     struct cpuidle_driver *drv, int index)
 {
         sh7372_enter_a3sm_common(0);
+        return 3;
 }

-static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
+static int sh7372_enter_a4s(struct cpuidle_device *dev,
+                            struct cpuidle_driver *drv, int index)
 {
-        struct cpuidle_state *state = &drv->states[drv->state_count];
+        unsigned long msk, msk2;

-        snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
-        strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
-        state->exit_latency = 10;
-        state->target_residency = 20 + 10;
-        state->flags = CPUIDLE_FLAG_TIME_VALID;
-        shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;
-        drv->state_count++;
+        if (!sh7372_sysc_valid(&msk, &msk2))
+                return sh7372_enter_a3sm_pll_off(dev, drv, index);

-        state = &drv->states[drv->state_count];
-        snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
-        strncpy(state->desc, "A3SM PLL ON", CPUIDLE_DESC_LEN);
-        state->exit_latency = 20;
-        state->target_residency = 30 + 20;
-        state->flags = CPUIDLE_FLAG_TIME_VALID;
-        shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_on;
-        drv->state_count++;
-
-        state = &drv->states[drv->state_count];
-        snprintf(state->name, CPUIDLE_NAME_LEN, "C4");
-        strncpy(state->desc, "A3SM PLL OFF", CPUIDLE_DESC_LEN);
-        state->exit_latency = 120;
-        state->target_residency = 30 + 120;
-        state->flags = CPUIDLE_FLAG_TIME_VALID;
-        shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_off;
-        drv->state_count++;
+        sh7372_setup_sysc(msk, msk2);
+        sh7372_enter_a4s_common(0);
+        return 4;
 }

+static struct cpuidle_driver sh7372_cpuidle_driver = {
+        .name                   = "sh7372_cpuidle",
+        .owner                  = THIS_MODULE,
+        .en_core_tk_irqen       = 1,
+        .state_count            = 5,
+        .safe_state_index       = 0, /* C1 */
+        .states[0]              = ARM_CPUIDLE_WFI_STATE,
+        .states[0].enter        = shmobile_enter_wfi,
+        .states[1] = {
+                .name = "C2",
+                .desc = "Core Standby Mode",
+                .exit_latency = 10,
+                .target_residency = 20 + 10,
+                .flags = CPUIDLE_FLAG_TIME_VALID,
+                .enter = sh7372_enter_core_standby,
+        },
+        .states[2] = {
+                .name = "C3",
+                .desc = "A3SM PLL ON",
+                .exit_latency = 20,
+                .target_residency = 30 + 20,
+                .flags = CPUIDLE_FLAG_TIME_VALID,
+                .enter = sh7372_enter_a3sm_pll_on,
+        },
+        .states[3] = {
+                .name = "C4",
+                .desc = "A3SM PLL OFF",
+                .exit_latency = 120,
+                .target_residency = 30 + 120,
+                .flags = CPUIDLE_FLAG_TIME_VALID,
+                .enter = sh7372_enter_a3sm_pll_off,
+        },
+        .states[4] = {
+                .name = "C5",
+                .desc = "A4S PLL OFF",
+                .exit_latency = 240,
+                .target_residency = 30 + 240,
+                .flags = CPUIDLE_FLAG_TIME_VALID,
+                .enter = sh7372_enter_a4s,
+                .disabled = true,
+        },
+};
+
 static void sh7372_cpuidle_init(void)
 {
-        shmobile_cpuidle_setup = sh7372_cpuidle_setup;
+        shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver);
 }
 #else
 static void sh7372_cpuidle_init(void) {}
 #endif

 #ifdef CONFIG_SUSPEND
-static void sh7372_enter_a4s_common(int pllc0_on)
-{
-        sh7372_intca_suspend();
-        memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
-        sh7372_set_reset_vector(SMFRAM);
-        sh7372_enter_sysc(pllc0_on, 1 << 10);
-        sh7372_intca_resume();
-}
-
 static int sh7372_enter_suspend(suspend_state_t suspend_state)
 {
         unsigned long msk, msk2;

         /* check active clocks to determine potential wakeup sources */
-        if (sh7372_sysc_valid(&msk, &msk2)) {
-                if (!console_suspend_enabled &&
-                    sh7372_pd_a4s.genpd.status == GPD_STATE_POWER_OFF) {
-                        /* convert INTC mask/sense to SYSC mask/sense */
-                        sh7372_setup_sysc(msk, msk2);
-
-                        /* enter A4S sleep with PLLC0 off */
-                        pr_debug("entering A4S\n");
-                        sh7372_enter_a4s_common(0);
-                        return 0;
-                }
+        if (sh7372_sysc_valid(&msk, &msk2) && a4s_suspend_ready) {
+                /* convert INTC mask/sense to SYSC mask/sense */
+                sh7372_setup_sysc(msk, msk2);
+
+                /* enter A4S sleep with PLLC0 off */
+                pr_debug("entering A4S\n");
+                sh7372_enter_a4s_common(0);
+                return 0;
         }

         /* default to enter A3SM sleep with PLLC0 off */
@@ -426,7 +497,7 @@ static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
          * executed during system suspend and resume, respectively, so
          * that those functions don't crash while accessing the INTCS.
          */
-        pm_genpd_poweron(&sh7372_pd_a4r.genpd);
+        pm_genpd_name_poweron("A4R");
         break;
 case PM_POST_SUSPEND:
         pm_genpd_poweroff_unused();
@@ -455,6 +526,14 @@
         /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
         __raw_writel(0, PDNSEL);

+        sh7372_pm_setup_smfram();
+
         sh7372_suspend_init();
         sh7372_cpuidle_init();
 }
+
+void __init sh7372_pm_init_late(void)
+{
+        shmobile_init_late();
+        pm_genpd_name_attach_cpuidle("A4S", 4);
+}
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 78948a9dba0e..11bb1d984197 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -673,12 +673,7 @@ void __init r8a7740_add_standard_devices(void)
         r8a7740_i2c_workaround(&i2c0_device);
         r8a7740_i2c_workaround(&i2c1_device);

-        /* PM domain */
-        rmobile_init_pm_domain(&r8a7740_pd_a4s);
-        rmobile_init_pm_domain(&r8a7740_pd_a3sp);
-        rmobile_init_pm_domain(&r8a7740_pd_a4lc);
-
-        rmobile_pm_add_subdomain(&r8a7740_pd_a4s, &r8a7740_pd_a3sp);
+        r8a7740_init_pm_domains();

         /* add devices */
         platform_add_devices(r8a7740_early_devices,
@@ -688,16 +683,16 @@ void __init r8a7740_add_standard_devices(void)

         /* add devices to PM domain */

-        rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif0_device);
-        rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif1_device);
-        rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif2_device);
-        rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif3_device);
-        rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif4_device);
-        rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif5_device);
-        rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif6_device);
-        rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif7_device);
-        rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scifb_device);
-        rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &i2c1_device);
+        rmobile_add_device_to_domain("A3SP", &scif0_device);
+        rmobile_add_device_to_domain("A3SP", &scif1_device);
+        rmobile_add_device_to_domain("A3SP", &scif2_device);
+        rmobile_add_device_to_domain("A3SP", &scif3_device);
+        rmobile_add_device_to_domain("A3SP", &scif4_device);
+        rmobile_add_device_to_domain("A3SP", &scif5_device);
+        rmobile_add_device_to_domain("A3SP", &scif6_device);
+        rmobile_add_device_to_domain("A3SP", &scif7_device);
+        rmobile_add_device_to_domain("A3SP", &scifb_device);
+        rmobile_add_device_to_domain("A3SP", &i2c1_device);
 }

 static void __init r8a7740_earlytimer_init(void)
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index e98e46f6cf55..2917668f0091 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -251,10 +251,7 @@ void __init r8a7779_add_standard_devices(void)
 #endif
         r8a7779_pm_init();

-        r8a7779_init_pm_domain(&r8a7779_sh4a);
-        r8a7779_init_pm_domain(&r8a7779_sgx);
-        r8a7779_init_pm_domain(&r8a7779_vdp1);
-        r8a7779_init_pm_domain(&r8a7779_impx3);
+        r8a7779_init_pm_domains();

         platform_add_devices(r8a7779_early_devices,
                              ARRAY_SIZE(r8a7779_early_devices));
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 838a87be1d5c..a07954fbcd22 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -1001,21 +1001,34 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
1001 1001
1002void __init sh7372_add_standard_devices(void) 1002void __init sh7372_add_standard_devices(void)
1003{ 1003{
1004 rmobile_init_pm_domain(&sh7372_pd_a4lc); 1004 struct pm_domain_device domain_devices[] = {
1005 rmobile_init_pm_domain(&sh7372_pd_a4mp); 1005 { "A3RV", &vpu_device, },
1006 rmobile_init_pm_domain(&sh7372_pd_d4); 1006 { "A4MP", &spu0_device, },
1007 rmobile_init_pm_domain(&sh7372_pd_a4r); 1007 { "A4MP", &spu1_device, },
1008 rmobile_init_pm_domain(&sh7372_pd_a3rv); 1008 { "A3SP", &scif0_device, },
1009 rmobile_init_pm_domain(&sh7372_pd_a3ri); 1009 { "A3SP", &scif1_device, },
1010 rmobile_init_pm_domain(&sh7372_pd_a4s); 1010 { "A3SP", &scif2_device, },
1011 rmobile_init_pm_domain(&sh7372_pd_a3sp); 1011 { "A3SP", &scif3_device, },
1012 rmobile_init_pm_domain(&sh7372_pd_a3sg); 1012 { "A3SP", &scif4_device, },
1013 1013 { "A3SP", &scif5_device, },
1014 rmobile_pm_add_subdomain(&sh7372_pd_a4lc, &sh7372_pd_a3rv); 1014 { "A3SP", &scif6_device, },
1015 rmobile_pm_add_subdomain(&sh7372_pd_a4r, &sh7372_pd_a4lc); 1015 { "A3SP", &iic1_device, },
1016 1016 { "A3SP", &dma0_device, },
1017 rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sg); 1017 { "A3SP", &dma1_device, },
1018 rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sp); 1018 { "A3SP", &dma2_device, },
1019 { "A3SP", &usb_dma0_device, },
1020 { "A3SP", &usb_dma1_device, },
1021 { "A4R", &iic0_device, },
1022 { "A4R", &veu0_device, },
1023 { "A4R", &veu1_device, },
1024 { "A4R", &veu2_device, },
1025 { "A4R", &veu3_device, },
1026 { "A4R", &jpu_device, },
1027 { "A4R", &tmu00_device, },
1028 { "A4R", &tmu01_device, },
1029 };
1030
1031 sh7372_init_pm_domains();
1019 1032
1020 platform_add_devices(sh7372_early_devices, 1033 platform_add_devices(sh7372_early_devices,
1021 ARRAY_SIZE(sh7372_early_devices)); 1034 ARRAY_SIZE(sh7372_early_devices));
@@ -1023,30 +1036,8 @@ void __init sh7372_add_standard_devices(void)
1023 platform_add_devices(sh7372_late_devices, 1036 platform_add_devices(sh7372_late_devices,
1024 ARRAY_SIZE(sh7372_late_devices)); 1037 ARRAY_SIZE(sh7372_late_devices));
1025 1038
1026 rmobile_add_device_to_domain(&sh7372_pd_a3rv, &vpu_device); 1039 rmobile_add_devices_to_domains(domain_devices,
1027 rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu0_device); 1040 ARRAY_SIZE(domain_devices));
1028 rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu1_device);
1029 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif0_device);
1030 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif1_device);
1031 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif2_device);
1032 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif3_device);
1033 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif4_device);
1034 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif5_device);
1035 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif6_device);
1036 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &iic1_device);
1037 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma0_device);
1038 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma1_device);
1039 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma2_device);
1040 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma0_device);
1041 rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma1_device);
1042 rmobile_add_device_to_domain(&sh7372_pd_a4r, &iic0_device);
1043 rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu0_device);
1044 rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu1_device);
1045 rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu2_device);
1046 rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu3_device);
1047 rmobile_add_device_to_domain(&sh7372_pd_a4r, &jpu_device);
1048 rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu00_device);
1049 rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu01_device);
1050} 1041}
1051 1042
1052static void __init sh7372_earlytimer_init(void) 1043static void __init sh7372_earlytimer_init(void)
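
[Annotation] With the device-to-domain assignments expressed as data, rmobile_add_devices_to_domains() presumably just walks the table; a sketch under that assumption, with the struct layout inferred from the initializers above:

	struct pm_domain_device {
		const char *domain_name;
		struct platform_device *pdev;
	};

	void rmobile_add_devices_to_domains(struct pm_domain_device data[],
					    int size)
	{
		int i;

		/* Attach each device to its named domain, in table order. */
		for (i = 0; i < size; i++)
			rmobile_add_device_to_domain(data[i].domain_name,
						     data[i].pdev);
	}
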
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 957ec87385af..fbee9714d9ab 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -248,6 +248,9 @@
248 248
249#define MSR_IA32_PERF_STATUS 0x00000198 249#define MSR_IA32_PERF_STATUS 0x00000198
250#define MSR_IA32_PERF_CTL 0x00000199 250#define MSR_IA32_PERF_CTL 0x00000199
251#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
252#define MSR_AMD_PERF_STATUS 0xc0010063
253#define MSR_AMD_PERF_CTL 0xc0010062
251 254
252#define MSR_IA32_MPERF 0x000000e7 255#define MSR_IA32_MPERF 0x000000e7
253#define MSR_IA32_APERF 0x000000e8 256#define MSR_IA32_APERF 0x000000e8
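
[Annotation] The three new MSRs expose AMD's hardware P-state interface, which the cpufreq changes in this series use in place of I/O-port based transitions. A hedged sketch of how a driver would read and request P-states with them; the helper names and the 3-bit status mask are assumptions, not the exact driver code:

	#include <asm/msr.h>

	static u32 amd_get_current_pstate(void)
	{
		u32 lo, hi;

		rdmsr(MSR_AMD_PERF_STATUS, lo, hi);
		return lo & 0x7;	/* assumed: current P-state in bits 2:0 */
	}

	static void amd_request_pstate(u32 pstate)
	{
		/* Writing the P-state number to PERF_CTL starts the transition. */
		wrmsr(MSR_AMD_PERF_CTL, pstate, 0);
	}
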
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index bfc31cb0dd3e..e78c2a52ea46 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -475,7 +475,7 @@ static __ref int acpi_processor_start(struct acpi_processor *pr)
475 acpi_processor_get_limit_info(pr); 475 acpi_processor_get_limit_info(pr);
476 476
477 if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) 477 if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
478 acpi_processor_power_init(pr, device); 478 acpi_processor_power_init(pr);
479 479
480 pr->cdev = thermal_cooling_device_register("Processor", device, 480 pr->cdev = thermal_cooling_device_register("Processor", device,
481 &processor_cooling_ops); 481 &processor_cooling_ops);
@@ -509,7 +509,7 @@ err_remove_sysfs_thermal:
509err_thermal_unregister: 509err_thermal_unregister:
510 thermal_cooling_device_unregister(pr->cdev); 510 thermal_cooling_device_unregister(pr->cdev);
511err_power_exit: 511err_power_exit:
512 acpi_processor_power_exit(pr, device); 512 acpi_processor_power_exit(pr);
513 513
514 return result; 514 return result;
515} 515}
@@ -620,7 +620,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
620 return -EINVAL; 620 return -EINVAL;
621 } 621 }
622 622
623 acpi_processor_power_exit(pr, device); 623 acpi_processor_power_exit(pr);
624 624
625 sysfs_remove_link(&device->dev.kobj, "sysdev"); 625 sysfs_remove_link(&device->dev.kobj, "sysdev");
626 626
@@ -905,8 +905,6 @@ static int __init acpi_processor_init(void)
905 if (acpi_disabled) 905 if (acpi_disabled)
906 return 0; 906 return 0;
907 907
908 memset(&errata, 0, sizeof(errata));
909
910 result = acpi_bus_register_driver(&acpi_processor_driver); 908 result = acpi_bus_register_driver(&acpi_processor_driver);
911 if (result < 0) 909 if (result < 0)
912 return result; 910 return result;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ad3730b4038b..3655ab923812 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -79,6 +79,8 @@ module_param(bm_check_disable, uint, 0000);
79static unsigned int latency_factor __read_mostly = 2; 79static unsigned int latency_factor __read_mostly = 2;
80module_param(latency_factor, uint, 0644); 80module_param(latency_factor, uint, 0644);
81 81
82static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
83
82static int disabled_by_idle_boot_param(void) 84static int disabled_by_idle_boot_param(void)
83{ 85{
84 return boot_option_idle_override == IDLE_POLL || 86 return boot_option_idle_override == IDLE_POLL ||
@@ -483,8 +485,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
483 if (obj->type != ACPI_TYPE_INTEGER) 485 if (obj->type != ACPI_TYPE_INTEGER)
484 continue; 486 continue;
485 487
486 cx.power = obj->integer.value;
487
488 current_count++; 488 current_count++;
489 memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); 489 memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
490 490
@@ -1000,7 +1000,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
1000 int i, count = CPUIDLE_DRIVER_STATE_START; 1000 int i, count = CPUIDLE_DRIVER_STATE_START;
1001 struct acpi_processor_cx *cx; 1001 struct acpi_processor_cx *cx;
1002 struct cpuidle_state_usage *state_usage; 1002 struct cpuidle_state_usage *state_usage;
1003 struct cpuidle_device *dev = &pr->power.dev; 1003 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
1004 1004
1005 if (!pr->flags.power_setup_done) 1005 if (!pr->flags.power_setup_done)
1006 return -EINVAL; 1006 return -EINVAL;
@@ -1132,6 +1132,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
1132int acpi_processor_hotplug(struct acpi_processor *pr) 1132int acpi_processor_hotplug(struct acpi_processor *pr)
1133{ 1133{
1134 int ret = 0; 1134 int ret = 0;
1135 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
1135 1136
1136 if (disabled_by_idle_boot_param()) 1137 if (disabled_by_idle_boot_param())
1137 return 0; 1138 return 0;
@@ -1147,11 +1148,11 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
1147 return -ENODEV; 1148 return -ENODEV;
1148 1149
1149 cpuidle_pause_and_lock(); 1150 cpuidle_pause_and_lock();
1150 cpuidle_disable_device(&pr->power.dev); 1151 cpuidle_disable_device(dev);
1151 acpi_processor_get_power_info(pr); 1152 acpi_processor_get_power_info(pr);
1152 if (pr->flags.power) { 1153 if (pr->flags.power) {
1153 acpi_processor_setup_cpuidle_cx(pr); 1154 acpi_processor_setup_cpuidle_cx(pr);
1154 ret = cpuidle_enable_device(&pr->power.dev); 1155 ret = cpuidle_enable_device(dev);
1155 } 1156 }
1156 cpuidle_resume_and_unlock(); 1157 cpuidle_resume_and_unlock();
1157 1158
@@ -1162,6 +1163,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1162{ 1163{
1163 int cpu; 1164 int cpu;
1164 struct acpi_processor *_pr; 1165 struct acpi_processor *_pr;
1166 struct cpuidle_device *dev;
1165 1167
1166 if (disabled_by_idle_boot_param()) 1168 if (disabled_by_idle_boot_param())
1167 return 0; 1169 return 0;
@@ -1192,7 +1194,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1192 _pr = per_cpu(processors, cpu); 1194 _pr = per_cpu(processors, cpu);
1193 if (!_pr || !_pr->flags.power_setup_done) 1195 if (!_pr || !_pr->flags.power_setup_done)
1194 continue; 1196 continue;
1195 cpuidle_disable_device(&_pr->power.dev); 1197 dev = per_cpu(acpi_cpuidle_device, cpu);
1198 cpuidle_disable_device(dev);
1196 } 1199 }
1197 1200
1198 /* Populate Updated C-state information */ 1201 /* Populate Updated C-state information */
@@ -1206,7 +1209,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1206 acpi_processor_get_power_info(_pr); 1209 acpi_processor_get_power_info(_pr);
1207 if (_pr->flags.power) { 1210 if (_pr->flags.power) {
1208 acpi_processor_setup_cpuidle_cx(_pr); 1211 acpi_processor_setup_cpuidle_cx(_pr);
1209 cpuidle_enable_device(&_pr->power.dev); 1212 dev = per_cpu(acpi_cpuidle_device, cpu);
1213 cpuidle_enable_device(dev);
1210 } 1214 }
1211 } 1215 }
1212 put_online_cpus(); 1216 put_online_cpus();
@@ -1218,11 +1222,11 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1218 1222
1219static int acpi_processor_registered; 1223static int acpi_processor_registered;
1220 1224
1221int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, 1225int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
1222 struct acpi_device *device)
1223{ 1226{
1224 acpi_status status = 0; 1227 acpi_status status = 0;
1225 int retval; 1228 int retval;
1229 struct cpuidle_device *dev;
1226 static int first_run; 1230 static int first_run;
1227 1231
1228 if (disabled_by_idle_boot_param()) 1232 if (disabled_by_idle_boot_param())
@@ -1268,11 +1272,18 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1268 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", 1272 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
1269 acpi_idle_driver.name); 1273 acpi_idle_driver.name);
1270 } 1274 }
1275
1276 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1277 if (!dev)
1278 return -ENOMEM;
1279 per_cpu(acpi_cpuidle_device, pr->id) = dev;
1280
1281 acpi_processor_setup_cpuidle_cx(pr);
1282
1271 /* Register per-cpu cpuidle_device. Cpuidle driver 1283 /* Register per-cpu cpuidle_device. Cpuidle driver
1272 * must already be registered before registering device 1284 * must already be registered before registering device
1273 */ 1285 */
1274 acpi_processor_setup_cpuidle_cx(pr); 1286 retval = cpuidle_register_device(dev);
1275 retval = cpuidle_register_device(&pr->power.dev);
1276 if (retval) { 1287 if (retval) {
1277 if (acpi_processor_registered == 0) 1288 if (acpi_processor_registered == 0)
1278 cpuidle_unregister_driver(&acpi_idle_driver); 1289 cpuidle_unregister_driver(&acpi_idle_driver);
@@ -1283,14 +1294,15 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1283 return 0; 1294 return 0;
1284} 1295}
1285 1296
1286int acpi_processor_power_exit(struct acpi_processor *pr, 1297int acpi_processor_power_exit(struct acpi_processor *pr)
1287 struct acpi_device *device)
1288{ 1298{
1299 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
1300
1289 if (disabled_by_idle_boot_param()) 1301 if (disabled_by_idle_boot_param())
1290 return 0; 1302 return 0;
1291 1303
1292 if (pr->flags.power) { 1304 if (pr->flags.power) {
1293 cpuidle_unregister_device(&pr->power.dev); 1305 cpuidle_unregister_device(dev);
1294 acpi_processor_registered--; 1306 acpi_processor_registered--;
1295 if (acpi_processor_registered == 0) 1307 if (acpi_processor_registered == 0)
1296 cpuidle_unregister_driver(&acpi_idle_driver); 1308 cpuidle_unregister_driver(&acpi_idle_driver);
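
[Annotation] Net effect of this hunk set: struct acpi_processor no longer embeds its cpuidle_device. One is allocated per CPU and reached through the acpi_cpuidle_device per-CPU pointer, which also keeps cpuidle types out of the ACPI processor structure. The registration pattern, reduced to a self-contained sketch (helper name hypothetical, error handling trimmed):

	#include <linux/cpuidle.h>
	#include <linux/percpu.h>
	#include <linux/slab.h>

	static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

	static int register_cpu_idle_device(unsigned int cpu)
	{
		struct cpuidle_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

		if (!dev)
			return -ENOMEM;

		dev->cpu = cpu;
		per_cpu(acpi_cpuidle_device, cpu) = dev;

		/* The cpuidle driver must already be registered at this point. */
		return cpuidle_register_device(dev);
	}
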
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a093dc163a42..836bfe069042 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -324,6 +324,34 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
324 return result; 324 return result;
325} 325}
326 326
327#ifdef CONFIG_X86
328/*
329 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
330 * in their ACPI data. Calculate the real values and fix up the _PSS data.
331 */
332static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
333{
334 u32 hi, lo, fid, did;
335 int index = px->control & 0x00000007;
336
337 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
338 return;
339
340 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
341 || boot_cpu_data.x86 == 0x11) {
342 rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
343 fid = lo & 0x3f;
344 did = (lo >> 6) & 7;
345 if (boot_cpu_data.x86 == 0x10)
346 px->core_frequency = (100 * (fid + 0x10)) >> did;
347 else
348 px->core_frequency = (100 * (fid + 8)) >> did;
349 }
350}
351#else
352static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
353#endif
354
327static int acpi_processor_get_performance_states(struct acpi_processor *pr) 355static int acpi_processor_get_performance_states(struct acpi_processor *pr)
328{ 356{
329 int result = 0; 357 int result = 0;
@@ -379,6 +407,8 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
379 goto end; 407 goto end;
380 } 408 }
381 409
410 amd_fixup_frequency(px, i);
411
382 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 412 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
383 "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n", 413 "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
384 i, 414 i,
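
[Annotation] Worked example of the fixup arithmetic: on family 0x10 with fid = 0x0b and did = 1, core_frequency = (100 * (0x0b + 0x10)) >> 1 = (100 * 27) / 2 = 1350 MHz, a 50MHz-granular value that the 100MHz-rounded _PSS entry could not express. The decode, isolated as a sketch mirroring the hunk above:

	/* The boolean picks the family-dependent fid bias (0x10 vs. 8). */
	static u32 amd_decode_pstate_freq(u32 lo, bool family_0x10)
	{
		u32 fid = lo & 0x3f;		/* frequency id, bits 5:0 */
		u32 did = (lo >> 6) & 0x7;	/* divisor id, bits 8:6 */

		return (100 * (fid + (family_0x10 ? 0x10 : 8))) >> did;
	}
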
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ddeca142293c..8727e9c5eea4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -23,6 +23,7 @@
23#include <linux/idr.h> 23#include <linux/idr.h>
24 24
25#include "base.h" 25#include "base.h"
26#include "power/power.h"
26 27
27/* For automatically allocated device IDs */ 28/* For automatically allocated device IDs */
28static DEFINE_IDA(platform_devid_ida); 29static DEFINE_IDA(platform_devid_ida);
@@ -983,6 +984,7 @@ void __init early_platform_add_devices(struct platform_device **devs, int num)
983 dev = &devs[i]->dev; 984 dev = &devs[i]->dev;
984 985
985 if (!dev->devres_head.next) { 986 if (!dev->devres_head.next) {
987 pm_runtime_early_init(dev);
986 INIT_LIST_HEAD(&dev->devres_head); 988 INIT_LIST_HEAD(&dev->devres_head);
987 list_add_tail(&dev->devres_head, 989 list_add_tail(&dev->devres_head,
988 &early_platform_device_list); 990 &early_platform_device_list);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ba3487c9835b..c22b869245d9 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -53,6 +53,24 @@
53static LIST_HEAD(gpd_list); 53static LIST_HEAD(gpd_list);
54static DEFINE_MUTEX(gpd_list_lock); 54static DEFINE_MUTEX(gpd_list_lock);
55 55
56static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
57{
58 struct generic_pm_domain *genpd = NULL, *gpd;
59
60 if (IS_ERR_OR_NULL(domain_name))
61 return NULL;
62
63 mutex_lock(&gpd_list_lock);
64 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
65 if (!strcmp(gpd->name, domain_name)) {
66 genpd = gpd;
67 break;
68 }
69 }
70 mutex_unlock(&gpd_list_lock);
71 return genpd;
72}
73
56#ifdef CONFIG_PM 74#ifdef CONFIG_PM
57 75
58struct generic_pm_domain *dev_to_genpd(struct device *dev) 76struct generic_pm_domain *dev_to_genpd(struct device *dev)
@@ -256,10 +274,28 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
256 return ret; 274 return ret;
257} 275}
258 276
277/**
278 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
279 * @domain_name: Name of the PM domain to power up.
280 */
281int pm_genpd_name_poweron(const char *domain_name)
282{
283 struct generic_pm_domain *genpd;
284
285 genpd = pm_genpd_lookup_name(domain_name);
286 return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
287}
288
259#endif /* CONFIG_PM */ 289#endif /* CONFIG_PM */
260 290
261#ifdef CONFIG_PM_RUNTIME 291#ifdef CONFIG_PM_RUNTIME
262 292
293static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
294 struct device *dev)
295{
296 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
297}
298
263static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) 299static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
264{ 300{
265 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, 301 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
@@ -436,7 +472,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
436 not_suspended = 0; 472 not_suspended = 0;
437 list_for_each_entry(pdd, &genpd->dev_list, list_node) 473 list_for_each_entry(pdd, &genpd->dev_list, list_node)
438 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) 474 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
439 || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on)) 475 || pdd->dev->power.irq_safe))
440 not_suspended++; 476 not_suspended++;
441 477
442 if (not_suspended > genpd->in_progress) 478 if (not_suspended > genpd->in_progress)
@@ -578,9 +614,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
578 614
579 might_sleep_if(!genpd->dev_irq_safe); 615 might_sleep_if(!genpd->dev_irq_safe);
580 616
581 if (dev_gpd_data(dev)->always_on)
582 return -EBUSY;
583
584 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; 617 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
585 if (stop_ok && !stop_ok(dev)) 618 if (stop_ok && !stop_ok(dev))
586 return -EBUSY; 619 return -EBUSY;
@@ -629,7 +662,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
629 662
630 /* If power.irq_safe, the PM domain is never powered off. */ 663 /* If power.irq_safe, the PM domain is never powered off. */
631 if (dev->power.irq_safe) 664 if (dev->power.irq_safe)
632 return genpd_start_dev(genpd, dev); 665 return genpd_start_dev_no_timing(genpd, dev);
633 666
634 mutex_lock(&genpd->lock); 667 mutex_lock(&genpd->lock);
635 ret = __pm_genpd_poweron(genpd); 668 ret = __pm_genpd_poweron(genpd);
@@ -697,6 +730,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
697 730
698#ifdef CONFIG_PM_SLEEP 731#ifdef CONFIG_PM_SLEEP
699 732
733/**
734 * pm_genpd_present - Check if the given PM domain has been initialized.
735 * @genpd: PM domain to check.
736 */
737static bool pm_genpd_present(struct generic_pm_domain *genpd)
738{
739 struct generic_pm_domain *gpd;
740
741 if (IS_ERR_OR_NULL(genpd))
742 return false;
743
744 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
745 if (gpd == genpd)
746 return true;
747
748 return false;
749}
750
700static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, 751static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
701 struct device *dev) 752 struct device *dev)
702{ 753{
@@ -750,9 +801,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
750 * Check if the given PM domain can be powered off (during system suspend or 801 * Check if the given PM domain can be powered off (during system suspend or
751 * hibernation) and do that if so. Also, in that case propagate to its masters. 802 * hibernation) and do that if so. Also, in that case propagate to its masters.
752 * 803 *
753 * This function is only called in "noirq" stages of system power transitions, 804 * This function is only called in "noirq" and "syscore" stages of system power
754 * so it need not acquire locks (all of the "noirq" callbacks are executed 805 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
755 * sequentially, so it is guaranteed that it will never run twice in parallel). 806 * executed sequentially, so it is guaranteed that it will never run twice in
807 * parallel).
756 */ 808 */
757static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) 809static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
758{ 810{
@@ -777,6 +829,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
777} 829}
778 830
779/** 831/**
832 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
833 * @genpd: PM domain to power on.
834 *
835 * This function is only called in "noirq" and "syscore" stages of system power
836 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
837 * executed sequentially, so it is guaranteed that it will never run twice in
838 * parallel).
839 */
840static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
841{
842 struct gpd_link *link;
843
844 if (genpd->status != GPD_STATE_POWER_OFF)
845 return;
846
847 list_for_each_entry(link, &genpd->slave_links, slave_node) {
848 pm_genpd_sync_poweron(link->master);
849 genpd_sd_counter_inc(link->master);
850 }
851
852 if (genpd->power_on)
853 genpd->power_on(genpd);
854
855 genpd->status = GPD_STATE_ACTIVE;
856}
857
858/**
780 * resume_needed - Check whether to resume a device before system suspend. 859 * resume_needed - Check whether to resume a device before system suspend.
781 * @dev: Device to check. 860 * @dev: Device to check.
782 * @genpd: PM domain the device belongs to. 861 * @genpd: PM domain the device belongs to.
@@ -937,7 +1016,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
937 if (IS_ERR(genpd)) 1016 if (IS_ERR(genpd))
938 return -EINVAL; 1017 return -EINVAL;
939 1018
940 if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on 1019 if (genpd->suspend_power_off
941 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) 1020 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
942 return 0; 1021 return 0;
943 1022
@@ -970,7 +1049,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
970 if (IS_ERR(genpd)) 1049 if (IS_ERR(genpd))
971 return -EINVAL; 1050 return -EINVAL;
972 1051
973 if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on 1052 if (genpd->suspend_power_off
974 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) 1053 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
975 return 0; 1054 return 0;
976 1055
@@ -979,7 +1058,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
979 * guaranteed that this function will never run twice in parallel for 1058 * guaranteed that this function will never run twice in parallel for
980 * the same PM domain, so it is not necessary to use locking here. 1059 * the same PM domain, so it is not necessary to use locking here.
981 */ 1060 */
982 pm_genpd_poweron(genpd); 1061 pm_genpd_sync_poweron(genpd);
983 genpd->suspended_count--; 1062 genpd->suspended_count--;
984 1063
985 return genpd_start_dev(genpd, dev); 1064 return genpd_start_dev(genpd, dev);
@@ -1090,8 +1169,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
1090 if (IS_ERR(genpd)) 1169 if (IS_ERR(genpd))
1091 return -EINVAL; 1170 return -EINVAL;
1092 1171
1093 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? 1172 return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
1094 0 : genpd_stop_dev(genpd, dev);
1095} 1173}
1096 1174
1097/** 1175/**
@@ -1111,8 +1189,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
1111 if (IS_ERR(genpd)) 1189 if (IS_ERR(genpd))
1112 return -EINVAL; 1190 return -EINVAL;
1113 1191
1114 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? 1192 return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
1115 0 : genpd_start_dev(genpd, dev);
1116} 1193}
1117 1194
1118/** 1195/**
@@ -1186,8 +1263,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
1186 if (genpd->suspended_count++ == 0) { 1263 if (genpd->suspended_count++ == 0) {
1187 /* 1264 /*
1188 * The boot kernel might put the domain into arbitrary state, 1265 * The boot kernel might put the domain into arbitrary state,
1189 * so make it appear as powered off to pm_genpd_poweron(), so 1266 * so make it appear as powered off to pm_genpd_sync_poweron(),
1190 * that it tries to power it on in case it was really off. 1267 * so that it tries to power it on in case it was really off.
1191 */ 1268 */
1192 genpd->status = GPD_STATE_POWER_OFF; 1269 genpd->status = GPD_STATE_POWER_OFF;
1193 if (genpd->suspend_power_off) { 1270 if (genpd->suspend_power_off) {
@@ -1205,9 +1282,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
1205 if (genpd->suspend_power_off) 1282 if (genpd->suspend_power_off)
1206 return 0; 1283 return 0;
1207 1284
1208 pm_genpd_poweron(genpd); 1285 pm_genpd_sync_poweron(genpd);
1209 1286
1210 return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev); 1287 return genpd_start_dev(genpd, dev);
1211} 1288}
1212 1289
1213/** 1290/**
@@ -1246,6 +1323,31 @@ static void pm_genpd_complete(struct device *dev)
1246 } 1323 }
1247} 1324}
1248 1325
1326/**
1327 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
1328 * @dev: Device that normally is marked as "always on" to switch power for.
1329 *
1330 * This routine may only be called during the system core (syscore) suspend or
1331 * resume phase for devices whose "always on" flags are set.
1332 */
1333void pm_genpd_syscore_switch(struct device *dev, bool suspend)
1334{
1335 struct generic_pm_domain *genpd;
1336
1337 genpd = dev_to_genpd(dev);
1338 if (!pm_genpd_present(genpd))
1339 return;
1340
1341 if (suspend) {
1342 genpd->suspended_count++;
1343 pm_genpd_sync_poweroff(genpd);
1344 } else {
1345 pm_genpd_sync_poweron(genpd);
1346 genpd->suspended_count--;
1347 }
1348}
1349EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
1350
1249#else 1351#else
1250 1352
1251#define pm_genpd_prepare NULL 1353#define pm_genpd_prepare NULL
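
[Annotation] pm_genpd_syscore_switch() is the hook that lets devices which must keep running into the syscore stage (the system timers later in this diff) power their domain down and back up outside the regular callback phases, where the noirq ordering guarantees already make locking unnecessary. The clocksource hunks below call pm_genpd_syscore_poweroff()/poweron(), which are assumed to be thin inline wrappers along these lines:

	static inline void pm_genpd_syscore_poweroff(struct device *dev)
	{
		pm_genpd_syscore_switch(dev, true);
	}

	static inline void pm_genpd_syscore_poweron(struct device *dev)
	{
		pm_genpd_syscore_switch(dev, false);
	}
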
@@ -1393,6 +1495,19 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1393 return __pm_genpd_add_device(genpd, dev, td); 1495 return __pm_genpd_add_device(genpd, dev, td);
1394} 1496}
1395 1497
1498
1499/**
1500 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
1501 * @domain_name: Name of the PM domain to add the device to.
1502 * @dev: Device to be added.
1503 * @td: Set of PM QoS timing parameters to attach to the device.
1504 */
1505int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
1506 struct gpd_timing_data *td)
1507{
1508 return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
1509}
1510
1396/** 1511/**
1397 * pm_genpd_remove_device - Remove a device from an I/O PM domain. 1512 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1398 * @genpd: PM domain to remove the device from. 1513 * @genpd: PM domain to remove the device from.
@@ -1455,26 +1570,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1455} 1570}
1456 1571
1457/** 1572/**
1458 * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
1459 * @dev: Device to set/unset the flag for.
1460 * @val: The new value of the device's "always on" flag.
1461 */
1462void pm_genpd_dev_always_on(struct device *dev, bool val)
1463{
1464 struct pm_subsys_data *psd;
1465 unsigned long flags;
1466
1467 spin_lock_irqsave(&dev->power.lock, flags);
1468
1469 psd = dev_to_psd(dev);
1470 if (psd && psd->domain_data)
1471 to_gpd_data(psd->domain_data)->always_on = val;
1472
1473 spin_unlock_irqrestore(&dev->power.lock, flags);
1474}
1475EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
1476
1477/**
1478 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag. 1573 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1479 * @dev: Device to set/unset the flag for. 1574 * @dev: Device to set/unset the flag for.
1480 * @val: The new value of the device's "need restore" flag. 1575 * @val: The new value of the device's "need restore" flag.
@@ -1505,7 +1600,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1505 struct gpd_link *link; 1600 struct gpd_link *link;
1506 int ret = 0; 1601 int ret = 0;
1507 1602
1508 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1603 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1604 || genpd == subdomain)
1509 return -EINVAL; 1605 return -EINVAL;
1510 1606
1511 start: 1607 start:
@@ -1552,6 +1648,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1552} 1648}
1553 1649
1554/** 1650/**
1651 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
1652 * @master_name: Name of the master PM domain to add the subdomain to.
1653 * @subdomain_name: Name of the subdomain to be added.
1654 */
1655int pm_genpd_add_subdomain_names(const char *master_name,
1656 const char *subdomain_name)
1657{
1658 struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1659
1660 if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1661 return -EINVAL;
1662
1663 mutex_lock(&gpd_list_lock);
1664 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1665 if (!master && !strcmp(gpd->name, master_name))
1666 master = gpd;
1667
1668 if (!subdomain && !strcmp(gpd->name, subdomain_name))
1669 subdomain = gpd;
1670
1671 if (master && subdomain)
1672 break;
1673 }
1674 mutex_unlock(&gpd_list_lock);
1675
1676 return pm_genpd_add_subdomain(master, subdomain);
1677}
1678
1679/**
1555 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 1680 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1556 * @genpd: Master PM domain to remove the subdomain from. 1681 * @genpd: Master PM domain to remove the subdomain from.
1557 * @subdomain: Subdomain to be removed. 1682 * @subdomain: Subdomain to be removed.
@@ -1704,7 +1829,16 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1704} 1829}
1705EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); 1830EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1706 1831
1707int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) 1832/**
1833 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1834 * @genpd: PM domain to be connected with cpuidle.
1835 * @state: cpuidle state this domain can disable/enable.
1836 *
1837 * Make a PM domain behave as though it contained a CPU core, that is, instead
1838 * of calling its power down routine it will enable the given cpuidle state so
1839 * that the cpuidle subsystem can power it down (if possible and desirable).
1840 */
1841int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1708{ 1842{
1709 struct cpuidle_driver *cpuidle_drv; 1843 struct cpuidle_driver *cpuidle_drv;
1710 struct gpd_cpu_data *cpu_data; 1844 struct gpd_cpu_data *cpu_data;
@@ -1753,7 +1887,24 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1753 goto out; 1887 goto out;
1754} 1888}
1755 1889
1756int genpd_detach_cpuidle(struct generic_pm_domain *genpd) 1890/**
1891 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1892 * @name: Name of the domain to connect to cpuidle.
1893 * @state: cpuidle state this domain can manipulate.
1894 */
1895int pm_genpd_name_attach_cpuidle(const char *name, int state)
1896{
1897 return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1898}
1899
1900/**
1901 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1902 * @genpd: PM domain to remove the cpuidle connection from.
1903 *
1904 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1905 * given PM domain.
1906 */
1907int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1757{ 1908{
1758 struct gpd_cpu_data *cpu_data; 1909 struct gpd_cpu_data *cpu_data;
1759 struct cpuidle_state *idle_state; 1910 struct cpuidle_state *idle_state;
@@ -1784,6 +1935,15 @@ int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1784 return ret; 1935 return ret;
1785} 1936}
1786 1937
1938/**
1939 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
1940 * @name: Name of the domain to disconnect cpuidle from.
1941 */
1942int pm_genpd_name_detach_cpuidle(const char *name)
1943{
1944 return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1945}
1946
1787/* Default device callbacks for generic PM domains. */ 1947/* Default device callbacks for generic PM domains. */
1788 1948
1789/** 1949/**
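
[Annotation] Taken together, the new name-based entry points (pm_genpd_name_poweron(), __pm_genpd_name_add_device(), pm_genpd_add_subdomain_names(), pm_genpd_name_attach_cpuidle()) let platform code wire up power domains purely by string, which is what the shmobile changes above rely on. A hedged usage sketch with illustrative domain names and a hypothetical setup function:

	#include <linux/platform_device.h>
	#include <linux/pm_domain.h>

	static int __init board_pm_setup(struct platform_device *pdev)
	{
		int ret;

		/* The domains must already be registered under these names. */
		ret = pm_genpd_add_subdomain_names("A4S", "A3SP");
		if (ret)
			return ret;

		ret = __pm_genpd_name_add_device("A3SP", &pdev->dev, NULL);
		if (ret)
			return ret;

		return pm_genpd_name_poweron("A3SP");
	}
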
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b0b072a88f5f..a3c1404c7933 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,20 +57,17 @@ static pm_message_t pm_transition;
57static int async_error; 57static int async_error;
58 58
59/** 59/**
60 * device_pm_init - Initialize the PM-related part of a device object. 60 * device_pm_sleep_init - Initialize system suspend-related device fields.
61 * @dev: Device object being initialized. 61 * @dev: Device object being initialized.
62 */ 62 */
63void device_pm_init(struct device *dev) 63void device_pm_sleep_init(struct device *dev)
64{ 64{
65 dev->power.is_prepared = false; 65 dev->power.is_prepared = false;
66 dev->power.is_suspended = false; 66 dev->power.is_suspended = false;
67 init_completion(&dev->power.completion); 67 init_completion(&dev->power.completion);
68 complete_all(&dev->power.completion); 68 complete_all(&dev->power.completion);
69 dev->power.wakeup = NULL; 69 dev->power.wakeup = NULL;
70 spin_lock_init(&dev->power.lock);
71 pm_runtime_init(dev);
72 INIT_LIST_HEAD(&dev->power.entry); 70 INIT_LIST_HEAD(&dev->power.entry);
73 dev->power.power_state = PMSG_INVALID;
74} 71}
75 72
76/** 73/**
@@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
408 TRACE_DEVICE(dev); 405 TRACE_DEVICE(dev);
409 TRACE_RESUME(0); 406 TRACE_RESUME(0);
410 407
408 if (dev->power.syscore)
409 goto Out;
410
411 if (dev->pm_domain) { 411 if (dev->pm_domain) {
412 info = "noirq power domain "; 412 info = "noirq power domain ";
413 callback = pm_noirq_op(&dev->pm_domain->ops, state); 413 callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
429 429
430 error = dpm_run_callback(callback, dev, state, info); 430 error = dpm_run_callback(callback, dev, state, info);
431 431
432 Out:
432 TRACE_RESUME(error); 433 TRACE_RESUME(error);
433 return error; 434 return error;
434} 435}
@@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state)
486 TRACE_DEVICE(dev); 487 TRACE_DEVICE(dev);
487 TRACE_RESUME(0); 488 TRACE_RESUME(0);
488 489
490 if (dev->power.syscore)
491 goto Out;
492
489 if (dev->pm_domain) { 493 if (dev->pm_domain) {
490 info = "early power domain "; 494 info = "early power domain ";
491 callback = pm_late_early_op(&dev->pm_domain->ops, state); 495 callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state)
507 511
508 error = dpm_run_callback(callback, dev, state, info); 512 error = dpm_run_callback(callback, dev, state, info);
509 513
514 Out:
510 TRACE_RESUME(error); 515 TRACE_RESUME(error);
511 return error; 516 return error;
512} 517}
@@ -565,11 +570,13 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
565 pm_callback_t callback = NULL; 570 pm_callback_t callback = NULL;
566 char *info = NULL; 571 char *info = NULL;
567 int error = 0; 572 int error = 0;
568 bool put = false;
569 573
570 TRACE_DEVICE(dev); 574 TRACE_DEVICE(dev);
571 TRACE_RESUME(0); 575 TRACE_RESUME(0);
572 576
577 if (dev->power.syscore)
578 goto Complete;
579
573 dpm_wait(dev->parent, async); 580 dpm_wait(dev->parent, async);
574 device_lock(dev); 581 device_lock(dev);
575 582
@@ -583,7 +590,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
583 goto Unlock; 590 goto Unlock;
584 591
585 pm_runtime_enable(dev); 592 pm_runtime_enable(dev);
586 put = true;
587 593
588 if (dev->pm_domain) { 594 if (dev->pm_domain) {
589 info = "power domain "; 595 info = "power domain ";
@@ -632,13 +638,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
632 638
633 Unlock: 639 Unlock:
634 device_unlock(dev); 640 device_unlock(dev);
641
642 Complete:
635 complete_all(&dev->power.completion); 643 complete_all(&dev->power.completion);
636 644
637 TRACE_RESUME(error); 645 TRACE_RESUME(error);
638 646
639 if (put)
640 pm_runtime_put_sync(dev);
641
642 return error; 647 return error;
643} 648}
644 649
@@ -722,6 +727,9 @@ static void device_complete(struct device *dev, pm_message_t state)
722 void (*callback)(struct device *) = NULL; 727 void (*callback)(struct device *) = NULL;
723 char *info = NULL; 728 char *info = NULL;
724 729
730 if (dev->power.syscore)
731 return;
732
725 device_lock(dev); 733 device_lock(dev);
726 734
727 if (dev->pm_domain) { 735 if (dev->pm_domain) {
@@ -749,6 +757,8 @@ static void device_complete(struct device *dev, pm_message_t state)
749 } 757 }
750 758
751 device_unlock(dev); 759 device_unlock(dev);
760
761 pm_runtime_put_sync(dev);
752} 762}
753 763
754/** 764/**
@@ -834,6 +844,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
834 pm_callback_t callback = NULL; 844 pm_callback_t callback = NULL;
835 char *info = NULL; 845 char *info = NULL;
836 846
847 if (dev->power.syscore)
848 return 0;
849
837 if (dev->pm_domain) { 850 if (dev->pm_domain) {
838 info = "noirq power domain "; 851 info = "noirq power domain ";
839 callback = pm_noirq_op(&dev->pm_domain->ops, state); 852 callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -917,6 +930,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
917 pm_callback_t callback = NULL; 930 pm_callback_t callback = NULL;
918 char *info = NULL; 931 char *info = NULL;
919 932
933 if (dev->power.syscore)
934 return 0;
935
920 if (dev->pm_domain) { 936 if (dev->pm_domain) {
921 info = "late power domain "; 937 info = "late power domain ";
922 callback = pm_late_early_op(&dev->pm_domain->ops, state); 938 callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -996,7 +1012,7 @@ int dpm_suspend_end(pm_message_t state)
996 1012
997 error = dpm_suspend_noirq(state); 1013 error = dpm_suspend_noirq(state);
998 if (error) { 1014 if (error) {
999 dpm_resume_early(state); 1015 dpm_resume_early(resume_event(state));
1000 return error; 1016 return error;
1001 } 1017 }
1002 1018
@@ -1043,16 +1059,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1043 if (async_error) 1059 if (async_error)
1044 goto Complete; 1060 goto Complete;
1045 1061
1046 pm_runtime_get_noresume(dev); 1062 /*
1063 * If a device configured to wake up the system from sleep states
1064 * has been suspended at run time and there's a resume request pending
1065 * for it, this is equivalent to the device signaling wakeup, so the
1066 * system suspend operation should be aborted.
1067 */
1047 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) 1068 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1048 pm_wakeup_event(dev, 0); 1069 pm_wakeup_event(dev, 0);
1049 1070
1050 if (pm_wakeup_pending()) { 1071 if (pm_wakeup_pending()) {
1051 pm_runtime_put_sync(dev);
1052 async_error = -EBUSY; 1072 async_error = -EBUSY;
1053 goto Complete; 1073 goto Complete;
1054 } 1074 }
1055 1075
1076 if (dev->power.syscore)
1077 goto Complete;
1078
1056 device_lock(dev); 1079 device_lock(dev);
1057 1080
1058 if (dev->pm_domain) { 1081 if (dev->pm_domain) {
@@ -1111,12 +1134,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1111 Complete: 1134 Complete:
1112 complete_all(&dev->power.completion); 1135 complete_all(&dev->power.completion);
1113 1136
1114 if (error) { 1137 if (error)
1115 pm_runtime_put_sync(dev);
1116 async_error = error; 1138 async_error = error;
1117 } else if (dev->power.is_suspended) { 1139 else if (dev->power.is_suspended)
1118 __pm_runtime_disable(dev, false); 1140 __pm_runtime_disable(dev, false);
1119 }
1120 1141
1121 return error; 1142 return error;
1122} 1143}
@@ -1209,6 +1230,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
1209 char *info = NULL; 1230 char *info = NULL;
1210 int error = 0; 1231 int error = 0;
1211 1232
1233 if (dev->power.syscore)
1234 return 0;
1235
1236 /*
1237 * If a device's parent goes into runtime suspend at the wrong time,
1238 * it won't be possible to resume the device. To prevent this we
1239 * block runtime suspend here, during the prepare phase, and allow
1240 * it again during the complete phase.
1241 */
1242 pm_runtime_get_noresume(dev);
1243
1212 device_lock(dev); 1244 device_lock(dev);
1213 1245
1214 dev->power.wakeup_path = device_may_wakeup(dev); 1246 dev->power.wakeup_path = device_may_wakeup(dev);
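
[Annotation] The runtime-PM reference is now taken once in device_prepare() and dropped once in device_complete(), so the usage count stays balanced across the whole sleep transition regardless of suspend-phase errors; the old per-phase get/put pairs in __device_suspend() and device_resume() are gone. The resulting pairing, reduced to a sketch (function names hypothetical):

	static int device_prepare_sketch(struct device *dev)
	{
		/* Block runtime suspend for the whole system transition. */
		pm_runtime_get_noresume(dev);
		/* ... run the ->prepare() callback ... */
		return 0;
	}

	static void device_complete_sketch(struct device *dev)
	{
		/* ... run the ->complete() callback ... */
		/* Drop the reference taken in prepare; may suspend now. */
		pm_runtime_put_sync(dev);
	}
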
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ac993eafec82..d9468642fc41 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -22,6 +22,7 @@
22#include <linux/rculist.h> 22#include <linux/rculist.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/opp.h> 24#include <linux/opp.h>
25#include <linux/of.h>
25 26
26/* 27/*
27 * Internal data structure organization with the OPP layer library is as 28 * Internal data structure organization with the OPP layer library is as
@@ -674,3 +675,49 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
674 675
675 return &dev_opp->head; 676 return &dev_opp->head;
676} 677}
678
679#ifdef CONFIG_OF
680/**
681 * of_init_opp_table() - Initialize opp table from device tree
682 * @dev: device pointer used to lookup device OPPs.
683 *
684 * Register the initial OPP table with the OPP library for given device.
685 */
686int of_init_opp_table(struct device *dev)
687{
688 const struct property *prop;
689 const __be32 *val;
690 int nr;
691
692 prop = of_find_property(dev->of_node, "operating-points", NULL);
693 if (!prop)
694 return -ENODEV;
695 if (!prop->value)
696 return -ENODATA;
697
698 /*
699 * Each OPP is a set of tuples consisting of frequency and
700 * voltage like <freq-kHz vol-uV>.
701 */
702 nr = prop->length / sizeof(u32);
703 if (nr % 2) {
704 dev_err(dev, "%s: Invalid OPP list\n", __func__);
705 return -EINVAL;
706 }
707
708 val = prop->value;
709 while (nr) {
710 unsigned long freq = be32_to_cpup(val++) * 1000;
711 unsigned long volt = be32_to_cpup(val++);
712
713 if (opp_add(dev, freq, volt)) {
714 dev_warn(dev, "%s: Failed to add OPP %ld\n",
715 __func__, freq);
716 continue;
717 }
718 nr -= 2;
719 }
720
721 return 0;
722}
723#endif
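
[Annotation] A cpufreq driver consumes this table by initializing it from the device node and then querying OPPs through the existing library; a usage sketch with the era's opp_* API (the probe helper is illustrative):

	#include <linux/err.h>
	#include <linux/opp.h>

	static int probe_opps(struct device *dev)
	{
		unsigned long freq = 0;
		struct opp *opp;

		if (of_init_opp_table(dev))
			return -ENODEV;

		rcu_read_lock();
		/* Walk the table upward from 0 Hz, printing each OPP. */
		while (!IS_ERR(opp = opp_find_freq_ceil(dev, &freq))) {
			dev_info(dev, "OPP: %lu Hz\n", freq);
			freq++;
		}
		rcu_read_unlock();
		return 0;
	}

One caveat visible in the hunk above: a failing opp_add() takes the continue path without decrementing nr, so the parsing loop only terminates if the operating-points tuples are well formed.
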
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index eeb4bff9505c..0dbfdf4419af 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,12 +1,32 @@
1#include <linux/pm_qos.h> 1#include <linux/pm_qos.h>
2 2
3static inline void device_pm_init_common(struct device *dev)
4{
5 if (!dev->power.early_init) {
6 spin_lock_init(&dev->power.lock);
7 dev->power.power_state = PMSG_INVALID;
8 dev->power.early_init = true;
9 }
10}
11
3#ifdef CONFIG_PM_RUNTIME 12#ifdef CONFIG_PM_RUNTIME
4 13
14static inline void pm_runtime_early_init(struct device *dev)
15{
16 dev->power.disable_depth = 1;
17 device_pm_init_common(dev);
18}
19
5extern void pm_runtime_init(struct device *dev); 20extern void pm_runtime_init(struct device *dev);
6extern void pm_runtime_remove(struct device *dev); 21extern void pm_runtime_remove(struct device *dev);
7 22
8#else /* !CONFIG_PM_RUNTIME */ 23#else /* !CONFIG_PM_RUNTIME */
9 24
25static inline void pm_runtime_early_init(struct device *dev)
26{
27 device_pm_init_common(dev);
28}
29
10static inline void pm_runtime_init(struct device *dev) {} 30static inline void pm_runtime_init(struct device *dev) {}
11static inline void pm_runtime_remove(struct device *dev) {} 31static inline void pm_runtime_remove(struct device *dev) {}
12 32
@@ -25,7 +45,7 @@ static inline struct device *to_device(struct list_head *entry)
25 return container_of(entry, struct device, power.entry); 45 return container_of(entry, struct device, power.entry);
26} 46}
27 47
28extern void device_pm_init(struct device *dev); 48extern void device_pm_sleep_init(struct device *dev);
29extern void device_pm_add(struct device *); 49extern void device_pm_add(struct device *);
30extern void device_pm_remove(struct device *); 50extern void device_pm_remove(struct device *);
31extern void device_pm_move_before(struct device *, struct device *); 51extern void device_pm_move_before(struct device *, struct device *);
@@ -34,12 +54,7 @@ extern void device_pm_move_last(struct device *);
34 54
35#else /* !CONFIG_PM_SLEEP */ 55#else /* !CONFIG_PM_SLEEP */
36 56
37static inline void device_pm_init(struct device *dev) 57static inline void device_pm_sleep_init(struct device *dev) {}
38{
39 spin_lock_init(&dev->power.lock);
40 dev->power.power_state = PMSG_INVALID;
41 pm_runtime_init(dev);
42}
43 58
44static inline void device_pm_add(struct device *dev) 59static inline void device_pm_add(struct device *dev)
45{ 60{
@@ -60,6 +75,13 @@ static inline void device_pm_move_last(struct device *dev) {}
60 75
61#endif /* !CONFIG_PM_SLEEP */ 76#endif /* !CONFIG_PM_SLEEP */
62 77
78static inline void device_pm_init(struct device *dev)
79{
80 device_pm_init_common(dev);
81 device_pm_sleep_init(dev);
82 pm_runtime_init(dev);
83}
84
63#ifdef CONFIG_PM 85#ifdef CONFIG_PM
64 86
65/* 87/*
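
[Annotation] The early_init flag makes the common initialization idempotent: whichever of the two entry paths runs first does the real work, and the later one becomes a no-op, so a spinlock that may already be in use is never re-initialized. The double-init scenario the guard protects against, as a sketch (wrapper name hypothetical):

	static void init_twice_is_safe(struct device *dev)
	{
		/* Early platform path: runs before the driver core is up. */
		pm_runtime_early_init(dev);	/* common init + disable_depth = 1 */

		/* Normal path, later, from device_initialize(). */
		device_pm_init(dev);		/* common part is now a no-op */
	}
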
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7d9c1cb1c39a..3148b10dc2e5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -509,6 +509,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
509 repeat: 509 repeat:
510 if (dev->power.runtime_error) 510 if (dev->power.runtime_error)
511 retval = -EINVAL; 511 retval = -EINVAL;
512 else if (dev->power.disable_depth == 1 && dev->power.is_suspended
513 && dev->power.runtime_status == RPM_ACTIVE)
514 retval = 1;
512 else if (dev->power.disable_depth > 0) 515 else if (dev->power.disable_depth > 0)
513 retval = -EACCES; 516 retval = -EACCES;
514 if (retval) 517 if (retval)
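
[Annotation] The new branch lets rpm_resume() report success (1) during system suspend, when runtime PM has been disabled exactly once by the PM core but the device is still RPM_ACTIVE; callers that only need "the device is powered" no longer get -EACCES in that window. A hedged caller sketch:

	static int ensure_active(struct device *dev)
	{
		int ret = pm_runtime_resume(dev);

		/* 1 means "already active", even while runtime PM is
		 * disabled for system suspend; 0 and 1 are both success. */
		return ret < 0 ? ret : 0;
	}
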
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index cbb463b3a750..e6ee5e80e546 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(wakeup_source_destroy);
127 */ 127 */
128void wakeup_source_add(struct wakeup_source *ws) 128void wakeup_source_add(struct wakeup_source *ws)
129{ 129{
130 unsigned long flags;
131
130 if (WARN_ON(!ws)) 132 if (WARN_ON(!ws))
131 return; 133 return;
132 134
@@ -135,9 +137,9 @@ void wakeup_source_add(struct wakeup_source *ws)
135 ws->active = false; 137 ws->active = false;
136 ws->last_time = ktime_get(); 138 ws->last_time = ktime_get();
137 139
138 spin_lock_irq(&events_lock); 140 spin_lock_irqsave(&events_lock, flags);
139 list_add_rcu(&ws->entry, &wakeup_sources); 141 list_add_rcu(&ws->entry, &wakeup_sources);
140 spin_unlock_irq(&events_lock); 142 spin_unlock_irqrestore(&events_lock, flags);
141} 143}
142EXPORT_SYMBOL_GPL(wakeup_source_add); 144EXPORT_SYMBOL_GPL(wakeup_source_add);
143 145
@@ -147,12 +149,14 @@ EXPORT_SYMBOL_GPL(wakeup_source_add);
147 */ 149 */
148void wakeup_source_remove(struct wakeup_source *ws) 150void wakeup_source_remove(struct wakeup_source *ws)
149{ 151{
152 unsigned long flags;
153
150 if (WARN_ON(!ws)) 154 if (WARN_ON(!ws))
151 return; 155 return;
152 156
153 spin_lock_irq(&events_lock); 157 spin_lock_irqsave(&events_lock, flags);
154 list_del_rcu(&ws->entry); 158 list_del_rcu(&ws->entry);
155 spin_unlock_irq(&events_lock); 159 spin_unlock_irqrestore(&events_lock, flags);
156 synchronize_rcu(); 160 synchronize_rcu();
157} 161}
158EXPORT_SYMBOL_GPL(wakeup_source_remove); 162EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -649,6 +653,31 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
649} 653}
650EXPORT_SYMBOL_GPL(pm_wakeup_event); 654EXPORT_SYMBOL_GPL(pm_wakeup_event);
651 655
656static void print_active_wakeup_sources(void)
657{
658 struct wakeup_source *ws;
659 int active = 0;
660 struct wakeup_source *last_activity_ws = NULL;
661
662 rcu_read_lock();
663 list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
664 if (ws->active) {
665 pr_info("active wakeup source: %s\n", ws->name);
666 active = 1;
667 } else if (!active &&
668 (!last_activity_ws ||
669 ktime_to_ns(ws->last_time) >
670 ktime_to_ns(last_activity_ws->last_time))) {
671 last_activity_ws = ws;
672 }
673 }
674
675 if (!active && last_activity_ws)
676 pr_info("last active wakeup source: %s\n",
677 last_activity_ws->name);
678 rcu_read_unlock();
679}
680
652/** 681/**
653 * pm_wakeup_pending - Check if power transition in progress should be aborted. 682 * pm_wakeup_pending - Check if power transition in progress should be aborted.
654 * 683 *
@@ -671,6 +700,10 @@ bool pm_wakeup_pending(void)
671 events_check_enabled = !ret; 700 events_check_enabled = !ret;
672 } 701 }
673 spin_unlock_irqrestore(&events_lock, flags); 702 spin_unlock_irqrestore(&events_lock, flags);
703
704 if (ret)
705 print_active_wakeup_sources();
706
674 return ret; 707 return ret;
675} 708}
676 709
@@ -723,15 +756,16 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
723bool pm_save_wakeup_count(unsigned int count) 756bool pm_save_wakeup_count(unsigned int count)
724{ 757{
725 unsigned int cnt, inpr; 758 unsigned int cnt, inpr;
759 unsigned long flags;
726 760
727 events_check_enabled = false; 761 events_check_enabled = false;
728 spin_lock_irq(&events_lock); 762 spin_lock_irqsave(&events_lock, flags);
729 split_counters(&cnt, &inpr); 763 split_counters(&cnt, &inpr);
730 if (cnt == count && inpr == 0) { 764 if (cnt == count && inpr == 0) {
731 saved_count = count; 765 saved_count = count;
732 events_check_enabled = true; 766 events_check_enabled = true;
733 } 767 }
734 spin_unlock_irq(&events_lock); 768 spin_unlock_irqrestore(&events_lock, flags);
735 return events_check_enabled; 769 return events_check_enabled;
736} 770}
737 771
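
[Annotation] Switching the events_lock users from spin_lock_irq() to spin_lock_irqsave() makes wakeup_source_add/remove and pm_save_wakeup_count safe to call with interrupts already disabled (for example from syscore-stage code), since the restore variant reinstates the caller's interrupt state instead of unconditionally re-enabling IRQs. print_active_wakeup_sources() then names the culprit in the log whenever a transition is aborted; a sketch of the abort check as a suspend path would use it (function name hypothetical):

	static int try_enter_sleep(void)
	{
		if (pm_wakeup_pending()) {
			/* The active (or last active) source was just logged. */
			return -EBUSY;
		}
		/* ... proceed with the low-power transition ... */
		return 0;
	}
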
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 98b06baafcc6..a5f7829f2799 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -33,6 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/pm_domain.h> 35#include <linux/pm_domain.h>
36#include <linux/pm_runtime.h>
36 37
37struct sh_cmt_priv { 38struct sh_cmt_priv {
38 void __iomem *mapbase; 39 void __iomem *mapbase;
@@ -52,6 +53,7 @@ struct sh_cmt_priv {
52 struct clock_event_device ced; 53 struct clock_event_device ced;
53 struct clocksource cs; 54 struct clocksource cs;
54 unsigned long total_cycles; 55 unsigned long total_cycles;
56 bool cs_enabled;
55}; 57};
56 58
57static DEFINE_RAW_SPINLOCK(sh_cmt_lock); 59static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
@@ -155,6 +157,9 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
155{ 157{
156 int k, ret; 158 int k, ret;
157 159
160 pm_runtime_get_sync(&p->pdev->dev);
161 dev_pm_syscore_device(&p->pdev->dev, true);
162
158 /* enable clock */ 163 /* enable clock */
159 ret = clk_enable(p->clk); 164 ret = clk_enable(p->clk);
160 if (ret) { 165 if (ret) {
@@ -221,6 +226,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
221 226
222 /* stop clock */ 227 /* stop clock */
223 clk_disable(p->clk); 228 clk_disable(p->clk);
229
230 dev_pm_syscore_device(&p->pdev->dev, false);
231 pm_runtime_put(&p->pdev->dev);
224} 232}
225 233
226/* private flags */ 234/* private flags */
@@ -451,22 +459,42 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
451 int ret; 459 int ret;
452 struct sh_cmt_priv *p = cs_to_sh_cmt(cs); 460 struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
453 461
462 WARN_ON(p->cs_enabled);
463
454 p->total_cycles = 0; 464 p->total_cycles = 0;
455 465
456 ret = sh_cmt_start(p, FLAG_CLOCKSOURCE); 466 ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
457 if (!ret) 467 if (!ret) {
458 __clocksource_updatefreq_hz(cs, p->rate); 468 __clocksource_updatefreq_hz(cs, p->rate);
469 p->cs_enabled = true;
470 }
459 return ret; 471 return ret;
460} 472}
461 473
462static void sh_cmt_clocksource_disable(struct clocksource *cs) 474static void sh_cmt_clocksource_disable(struct clocksource *cs)
463{ 475{
464 sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); 476 struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
477
478 WARN_ON(!p->cs_enabled);
479
480 sh_cmt_stop(p, FLAG_CLOCKSOURCE);
481 p->cs_enabled = false;
482}
483
484static void sh_cmt_clocksource_suspend(struct clocksource *cs)
485{
486 struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
487
488 sh_cmt_stop(p, FLAG_CLOCKSOURCE);
489 pm_genpd_syscore_poweroff(&p->pdev->dev);
465} 490}
466 491
467static void sh_cmt_clocksource_resume(struct clocksource *cs) 492static void sh_cmt_clocksource_resume(struct clocksource *cs)
468{ 493{
469 sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); 494 struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
495
496 pm_genpd_syscore_poweron(&p->pdev->dev);
497 sh_cmt_start(p, FLAG_CLOCKSOURCE);
470} 498}
471 499
472static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, 500static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
@@ -479,7 +507,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
479 cs->read = sh_cmt_clocksource_read; 507 cs->read = sh_cmt_clocksource_read;
480 cs->enable = sh_cmt_clocksource_enable; 508 cs->enable = sh_cmt_clocksource_enable;
481 cs->disable = sh_cmt_clocksource_disable; 509 cs->disable = sh_cmt_clocksource_disable;
482 cs->suspend = sh_cmt_clocksource_disable; 510 cs->suspend = sh_cmt_clocksource_suspend;
483 cs->resume = sh_cmt_clocksource_resume; 511 cs->resume = sh_cmt_clocksource_resume;
484 cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); 512 cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
485 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; 513 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
@@ -562,6 +590,16 @@ static int sh_cmt_clock_event_next(unsigned long delta,
562 return 0; 590 return 0;
563} 591}
564 592
593static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
594{
595 pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
596}
597
598static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
599{
600 pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
601}
602
565static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, 603static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
566 char *name, unsigned long rating) 604 char *name, unsigned long rating)
567{ 605{
@@ -576,6 +614,8 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
576 ced->cpumask = cpumask_of(0); 614 ced->cpumask = cpumask_of(0);
577 ced->set_next_event = sh_cmt_clock_event_next; 615 ced->set_next_event = sh_cmt_clock_event_next;
578 ced->set_mode = sh_cmt_clock_event_mode; 616 ced->set_mode = sh_cmt_clock_event_mode;
617 ced->suspend = sh_cmt_clock_event_suspend;
618 ced->resume = sh_cmt_clock_event_resume;
579 619
580 dev_info(&p->pdev->dev, "used for clock events\n"); 620 dev_info(&p->pdev->dev, "used for clock events\n");
581 clockevents_register_device(ced); 621 clockevents_register_device(ced);
@@ -670,6 +710,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
670 dev_err(&p->pdev->dev, "registration failed\n"); 710 dev_err(&p->pdev->dev, "registration failed\n");
671 goto err1; 711 goto err1;
672 } 712 }
713 p->cs_enabled = false;
673 714
674 ret = setup_irq(irq, &p->irqaction); 715 ret = setup_irq(irq, &p->irqaction);
675 if (ret) { 716 if (ret) {
@@ -688,14 +729,17 @@ err0:
688static int __devinit sh_cmt_probe(struct platform_device *pdev) 729static int __devinit sh_cmt_probe(struct platform_device *pdev)
689{ 730{
690 struct sh_cmt_priv *p = platform_get_drvdata(pdev); 731 struct sh_cmt_priv *p = platform_get_drvdata(pdev);
732 struct sh_timer_config *cfg = pdev->dev.platform_data;
691 int ret; 733 int ret;
692 734
693 if (!is_early_platform_device(pdev)) 735 if (!is_early_platform_device(pdev)) {
694 pm_genpd_dev_always_on(&pdev->dev, true); 736 pm_runtime_set_active(&pdev->dev);
737 pm_runtime_enable(&pdev->dev);
738 }
695 739
696 if (p) { 740 if (p) {
697 dev_info(&pdev->dev, "kept as earlytimer\n"); 741 dev_info(&pdev->dev, "kept as earlytimer\n");
698 return 0; 742 goto out;
699 } 743 }
700 744
701 p = kmalloc(sizeof(*p), GFP_KERNEL); 745 p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -708,8 +752,19 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
708 if (ret) { 752 if (ret) {
709 kfree(p); 753 kfree(p);
710 platform_set_drvdata(pdev, NULL); 754 platform_set_drvdata(pdev, NULL);
755 pm_runtime_idle(&pdev->dev);
756 return ret;
711 } 757 }
712 return ret; 758 if (is_early_platform_device(pdev))
759 return 0;
760
761 out:
762 if (cfg->clockevent_rating || cfg->clocksource_rating)
763 pm_runtime_irq_safe(&pdev->dev);
764 else
765 pm_runtime_idle(&pdev->dev);
766
767 return 0;
713} 768}
714 769
715static int __devexit sh_cmt_remove(struct platform_device *pdev) 770static int __devexit sh_cmt_remove(struct platform_device *pdev)
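
The sh_cmt probe path above replaces the old pm_genpd_dev_always_on() call with runtime PM plus syscore handling. A condensed, hedged sketch of that probe flow follows, with explanatory comments added; everything except the pm_runtime_* and is_early_platform_device() calls is illustrative, not code from the patch:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>

static int my_timer_probe(struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;

	if (!is_early_platform_device(pdev)) {
		/* The hardware is powered at probe time; record that,
		 * then let runtime PM manage the device from here on. */
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* ... channel setup as in sh_cmt_setup() ... */

	if (cfg->clockevent_rating || cfg->clocksource_rating)
		/* Used for timekeeping: the PM callbacks may be invoked
		 * with interrupts disabled. */
		pm_runtime_irq_safe(&pdev->dev);
	else
		/* Unused channel: allow an immediate runtime suspend. */
		pm_runtime_idle(&pdev->dev);

	return 0;
}
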
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index d9b76ca64a61..c5eea858054a 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/pm_domain.h> 34#include <linux/pm_domain.h>
35#include <linux/pm_runtime.h>
35 36
36struct sh_mtu2_priv { 37struct sh_mtu2_priv {
37 void __iomem *mapbase; 38 void __iomem *mapbase;
@@ -123,6 +124,9 @@ static int sh_mtu2_enable(struct sh_mtu2_priv *p)
123{ 124{
124 int ret; 125 int ret;
125 126
127 pm_runtime_get_sync(&p->pdev->dev);
128 dev_pm_syscore_device(&p->pdev->dev, true);
129
126 /* enable clock */ 130 /* enable clock */
127 ret = clk_enable(p->clk); 131 ret = clk_enable(p->clk);
128 if (ret) { 132 if (ret) {
@@ -157,6 +161,9 @@ static void sh_mtu2_disable(struct sh_mtu2_priv *p)
157 161
158 /* stop clock */ 162 /* stop clock */
159 clk_disable(p->clk); 163 clk_disable(p->clk);
164
165 dev_pm_syscore_device(&p->pdev->dev, false);
166 pm_runtime_put(&p->pdev->dev);
160} 167}
161 168
162static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id) 169static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
@@ -208,6 +215,16 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
208 } 215 }
209} 216}
210 217
218static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
219{
220 pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev);
221}
222
223static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
224{
225 pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev);
226}
227
211static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, 228static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
212 char *name, unsigned long rating) 229 char *name, unsigned long rating)
213{ 230{
@@ -221,6 +238,8 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
221 ced->rating = rating; 238 ced->rating = rating;
222 ced->cpumask = cpumask_of(0); 239 ced->cpumask = cpumask_of(0);
223 ced->set_mode = sh_mtu2_clock_event_mode; 240 ced->set_mode = sh_mtu2_clock_event_mode;
241 ced->suspend = sh_mtu2_clock_event_suspend;
242 ced->resume = sh_mtu2_clock_event_resume;
224 243
225 dev_info(&p->pdev->dev, "used for clock events\n"); 244 dev_info(&p->pdev->dev, "used for clock events\n");
226 clockevents_register_device(ced); 245 clockevents_register_device(ced);
@@ -305,14 +324,17 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
305static int __devinit sh_mtu2_probe(struct platform_device *pdev) 324static int __devinit sh_mtu2_probe(struct platform_device *pdev)
306{ 325{
307 struct sh_mtu2_priv *p = platform_get_drvdata(pdev); 326 struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
327 struct sh_timer_config *cfg = pdev->dev.platform_data;
308 int ret; 328 int ret;
309 329
310 if (!is_early_platform_device(pdev)) 330 if (!is_early_platform_device(pdev)) {
311 pm_genpd_dev_always_on(&pdev->dev, true); 331 pm_runtime_set_active(&pdev->dev);
332 pm_runtime_enable(&pdev->dev);
333 }
312 334
313 if (p) { 335 if (p) {
314 dev_info(&pdev->dev, "kept as earlytimer\n"); 336 dev_info(&pdev->dev, "kept as earlytimer\n");
315 return 0; 337 goto out;
316 } 338 }
317 339
318 p = kmalloc(sizeof(*p), GFP_KERNEL); 340 p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -325,8 +347,19 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev)
325 if (ret) { 347 if (ret) {
326 kfree(p); 348 kfree(p);
327 platform_set_drvdata(pdev, NULL); 349 platform_set_drvdata(pdev, NULL);
350 pm_runtime_idle(&pdev->dev);
351 return ret;
328 } 352 }
329 return ret; 353 if (is_early_platform_device(pdev))
354 return 0;
355
356 out:
357 if (cfg->clockevent_rating)
358 pm_runtime_irq_safe(&pdev->dev);
359 else
360 pm_runtime_idle(&pdev->dev);
361
362 return 0;
330} 363}
331 364
332static int __devexit sh_mtu2_remove(struct platform_device *pdev) 365static int __devexit sh_mtu2_remove(struct platform_device *pdev)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index c1b51d49d106..0cc4add88279 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -33,6 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/pm_domain.h> 35#include <linux/pm_domain.h>
36#include <linux/pm_runtime.h>
36 37
37struct sh_tmu_priv { 38struct sh_tmu_priv {
38 void __iomem *mapbase; 39 void __iomem *mapbase;
@@ -43,6 +44,8 @@ struct sh_tmu_priv {
43 unsigned long periodic; 44 unsigned long periodic;
44 struct clock_event_device ced; 45 struct clock_event_device ced;
45 struct clocksource cs; 46 struct clocksource cs;
47 bool cs_enabled;
48 unsigned int enable_count;
46}; 49};
47 50
48static DEFINE_RAW_SPINLOCK(sh_tmu_lock); 51static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
@@ -107,7 +110,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
107 raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); 110 raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
108} 111}
109 112
110static int sh_tmu_enable(struct sh_tmu_priv *p) 113static int __sh_tmu_enable(struct sh_tmu_priv *p)
111{ 114{
112 int ret; 115 int ret;
113 116
@@ -135,7 +138,18 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
135 return 0; 138 return 0;
136} 139}
137 140
138static void sh_tmu_disable(struct sh_tmu_priv *p) 141static int sh_tmu_enable(struct sh_tmu_priv *p)
142{
143 if (p->enable_count++ > 0)
144 return 0;
145
146 pm_runtime_get_sync(&p->pdev->dev);
147 dev_pm_syscore_device(&p->pdev->dev, true);
148
149 return __sh_tmu_enable(p);
150}
151
152static void __sh_tmu_disable(struct sh_tmu_priv *p)
139{ 153{
140 /* disable channel */ 154 /* disable channel */
141 sh_tmu_start_stop_ch(p, 0); 155 sh_tmu_start_stop_ch(p, 0);
@@ -147,6 +161,20 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
147 clk_disable(p->clk); 161 clk_disable(p->clk);
148} 162}
149 163
164static void sh_tmu_disable(struct sh_tmu_priv *p)
165{
166 if (WARN_ON(p->enable_count == 0))
167 return;
168
169 if (--p->enable_count > 0)
170 return;
171
172 __sh_tmu_disable(p);
173
174 dev_pm_syscore_device(&p->pdev->dev, false);
175 pm_runtime_put(&p->pdev->dev);
176}
177
150static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, 178static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
151 int periodic) 179 int periodic)
152{ 180{
@@ -203,15 +231,53 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs)
203 struct sh_tmu_priv *p = cs_to_sh_tmu(cs); 231 struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
204 int ret; 232 int ret;
205 233
234 if (WARN_ON(p->cs_enabled))
235 return 0;
236
206 ret = sh_tmu_enable(p); 237 ret = sh_tmu_enable(p);
207 if (!ret) 238 if (!ret) {
208 __clocksource_updatefreq_hz(cs, p->rate); 239 __clocksource_updatefreq_hz(cs, p->rate);
240 p->cs_enabled = true;
241 }
242
209 return ret; 243 return ret;
210} 244}
211 245
212static void sh_tmu_clocksource_disable(struct clocksource *cs) 246static void sh_tmu_clocksource_disable(struct clocksource *cs)
213{ 247{
214 sh_tmu_disable(cs_to_sh_tmu(cs)); 248 struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
249
250 if (WARN_ON(!p->cs_enabled))
251 return;
252
253 sh_tmu_disable(p);
254 p->cs_enabled = false;
255}
256
257static void sh_tmu_clocksource_suspend(struct clocksource *cs)
258{
259 struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
260
261 if (!p->cs_enabled)
262 return;
263
264 if (--p->enable_count == 0) {
265 __sh_tmu_disable(p);
266 pm_genpd_syscore_poweroff(&p->pdev->dev);
267 }
268}
269
270static void sh_tmu_clocksource_resume(struct clocksource *cs)
271{
272 struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
273
274 if (!p->cs_enabled)
275 return;
276
277 if (p->enable_count++ == 0) {
278 pm_genpd_syscore_poweron(&p->pdev->dev);
279 __sh_tmu_enable(p);
280 }
215} 281}
216 282
217static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, 283static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
@@ -224,6 +290,8 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
224 cs->read = sh_tmu_clocksource_read; 290 cs->read = sh_tmu_clocksource_read;
225 cs->enable = sh_tmu_clocksource_enable; 291 cs->enable = sh_tmu_clocksource_enable;
226 cs->disable = sh_tmu_clocksource_disable; 292 cs->disable = sh_tmu_clocksource_disable;
293 cs->suspend = sh_tmu_clocksource_suspend;
294 cs->resume = sh_tmu_clocksource_resume;
227 cs->mask = CLOCKSOURCE_MASK(32); 295 cs->mask = CLOCKSOURCE_MASK(32);
228 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; 296 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
229 297
@@ -301,6 +369,16 @@ static int sh_tmu_clock_event_next(unsigned long delta,
301 return 0; 369 return 0;
302} 370}
303 371
372static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
373{
374 pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev);
375}
376
377static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
378{
379 pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev);
380}
381
304static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, 382static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
305 char *name, unsigned long rating) 383 char *name, unsigned long rating)
306{ 384{
@@ -316,6 +394,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
316 ced->cpumask = cpumask_of(0); 394 ced->cpumask = cpumask_of(0);
317 ced->set_next_event = sh_tmu_clock_event_next; 395 ced->set_next_event = sh_tmu_clock_event_next;
318 ced->set_mode = sh_tmu_clock_event_mode; 396 ced->set_mode = sh_tmu_clock_event_mode;
397 ced->suspend = sh_tmu_clock_event_suspend;
398 ced->resume = sh_tmu_clock_event_resume;
319 399
320 dev_info(&p->pdev->dev, "used for clock events\n"); 400 dev_info(&p->pdev->dev, "used for clock events\n");
321 401
@@ -392,6 +472,8 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
392 ret = PTR_ERR(p->clk); 472 ret = PTR_ERR(p->clk);
393 goto err1; 473 goto err1;
394 } 474 }
475 p->cs_enabled = false;
476 p->enable_count = 0;
395 477
396 return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), 478 return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
397 cfg->clockevent_rating, 479 cfg->clockevent_rating,
@@ -405,14 +487,17 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
405static int __devinit sh_tmu_probe(struct platform_device *pdev) 487static int __devinit sh_tmu_probe(struct platform_device *pdev)
406{ 488{
407 struct sh_tmu_priv *p = platform_get_drvdata(pdev); 489 struct sh_tmu_priv *p = platform_get_drvdata(pdev);
490 struct sh_timer_config *cfg = pdev->dev.platform_data;
408 int ret; 491 int ret;
409 492
410 if (!is_early_platform_device(pdev)) 493 if (!is_early_platform_device(pdev)) {
411 pm_genpd_dev_always_on(&pdev->dev, true); 494 pm_runtime_set_active(&pdev->dev);
495 pm_runtime_enable(&pdev->dev);
496 }
412 497
413 if (p) { 498 if (p) {
414 dev_info(&pdev->dev, "kept as earlytimer\n"); 499 dev_info(&pdev->dev, "kept as earlytimer\n");
415 return 0; 500 goto out;
416 } 501 }
417 502
418 p = kmalloc(sizeof(*p), GFP_KERNEL); 503 p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -425,8 +510,19 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
425 if (ret) { 510 if (ret) {
426 kfree(p); 511 kfree(p);
427 platform_set_drvdata(pdev, NULL); 512 platform_set_drvdata(pdev, NULL);
513 pm_runtime_idle(&pdev->dev);
514 return ret;
428 } 515 }
429 return ret; 516 if (is_early_platform_device(pdev))
517 return 0;
518
519 out:
520 if (cfg->clockevent_rating || cfg->clocksource_rating)
521 pm_runtime_irq_safe(&pdev->dev);
522 else
523 pm_runtime_idle(&pdev->dev);
524
525 return 0;
430} 526}
431 527
432static int __devexit sh_tmu_remove(struct platform_device *pdev) 528static int __devexit sh_tmu_remove(struct platform_device *pdev)
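
The enable_count field added to sh_tmu above is a plain reference count: the first user powers the channel up, the last user powers it down, and the clocksource suspend/resume hooks drop and retake that reference around system sleep so the power domain can actually be turned off. A minimal sketch of the discipline, assuming a hypothetical my_priv wrapper (only the pm_* calls are the real kernel APIs):

#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

struct my_priv {
	struct platform_device *pdev;
	unsigned int enable_count;
};

static int my_enable(struct my_priv *p)
{
	if (p->enable_count++ > 0)
		return 0;			/* already running */

	/* First user: power up, then mark the device as syscore so the
	 * ordinary suspend path leaves it alone while it keeps time. */
	pm_runtime_get_sync(&p->pdev->dev);
	dev_pm_syscore_device(&p->pdev->dev, true);
	/* ... start the hardware here ... */
	return 0;
}

static void my_disable(struct my_priv *p)
{
	if (WARN_ON(p->enable_count == 0))
		return;				/* unbalanced call */
	if (--p->enable_count > 0)
		return;				/* other users remain */

	/* Last user: stop the hardware, then hand the device back to
	 * the ordinary PM paths. */
	dev_pm_syscore_device(&p->pdev->dev, false);
	pm_runtime_put(&p->pdev->dev);
}
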
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e24a2a1b6666..ea512f47b789 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -179,6 +179,17 @@ config CPU_FREQ_GOV_CONSERVATIVE
179 179
180 If in doubt, say N. 180 If in doubt, say N.
181 181
182config GENERIC_CPUFREQ_CPU0
183 bool "Generic CPU0 cpufreq driver"
184 depends on HAVE_CLK && REGULATOR && PM_OPP && OF
185 select CPU_FREQ_TABLE
186 help
187 This adds a generic cpufreq driver for CPU0 frequency management.
188 It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
189 systems which share clock and voltage across all CPUs.
190
191 If in doubt, say N.
192
182menu "x86 CPU frequency scaling drivers" 193menu "x86 CPU frequency scaling drivers"
183depends on X86 194depends on X86
184source "drivers/cpufreq/Kconfig.x86" 195source "drivers/cpufreq/Kconfig.x86"
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 78ff7ee48951..934854ae5eb4 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -23,7 +23,8 @@ config X86_ACPI_CPUFREQ
23 help 23 help
24 This driver adds a CPUFreq driver which utilizes the ACPI 24 This driver adds a CPUFreq driver which utilizes the ACPI
25 Processor Performance States. 25 Processor Performance States.
26 This driver also supports Intel Enhanced Speedstep. 26 This driver also supports Intel Enhanced Speedstep and newer
27 AMD CPUs.
27 28
28 To compile this driver as a module, choose M here: the 29 To compile this driver as a module, choose M here: the
29 module will be called acpi-cpufreq. 30 module will be called acpi-cpufreq.
@@ -32,6 +33,18 @@ config X86_ACPI_CPUFREQ
32 33
33 If in doubt, say N. 34 If in doubt, say N.
34 35
36config X86_ACPI_CPUFREQ_CPB
37 default y
38 bool "Legacy cpb sysfs knob support for AMD CPUs"
39 depends on X86_ACPI_CPUFREQ && CPU_SUP_AMD
40 help
41 The powernow-k8 driver used to provide a sysfs knob called "cpb"
42 to disable the Core Performance Boosting feature of AMD CPUs. This
43 file has now been superseded by the more generic "boost" entry.
44
45 By enabling this option the acpi_cpufreq driver provides the old
46 entry in addition to the new boost entry, for compatibility reasons.
47
35config ELAN_CPUFREQ 48config ELAN_CPUFREQ
36 tristate "AMD Elan SC400 and SC410" 49 tristate "AMD Elan SC400 and SC410"
37 select CPU_FREQ_TABLE 50 select CPU_FREQ_TABLE
@@ -95,7 +108,8 @@ config X86_POWERNOW_K8
95 select CPU_FREQ_TABLE 108 select CPU_FREQ_TABLE
96 depends on ACPI && ACPI_PROCESSOR 109 depends on ACPI && ACPI_PROCESSOR
97 help 110 help
98 This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors. 111 This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
112 Support for K10 and newer processors is now in acpi-cpufreq.
99 113
100 To compile this driver as a module, choose M here: the 114 To compile this driver as a module, choose M here: the
101 module will be called powernow-k8. 115 module will be called powernow-k8.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9531fc2eda22..1bc90e1306d8 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,13 +13,15 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
13# CPUfreq cross-arch helpers 13# CPUfreq cross-arch helpers
14obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o 14obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
15 15
16obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
17
16################################################################################## 18##################################################################################
17# x86 drivers. 19# x86 drivers.
18# Link order matters. K8 is preferred to ACPI because of firmware bugs in early 20# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
19# K8 systems. ACPI is preferred to all other hardware-specific drivers. 21# K8 systems. ACPI is preferred to all other hardware-specific drivers.
20# speedstep-* is preferred over p4-clockmod. 22# speedstep-* is preferred over p4-clockmod.
21 23
22obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o 24obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
23obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o 25obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
24obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o 26obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
25obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o 27obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 56c6c6b4eb4d..0d048f6a2b23 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -51,13 +51,19 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
51MODULE_DESCRIPTION("ACPI Processor P-States Driver"); 51MODULE_DESCRIPTION("ACPI Processor P-States Driver");
52MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
53 53
54#define PFX "acpi-cpufreq: "
55
54enum { 56enum {
55 UNDEFINED_CAPABLE = 0, 57 UNDEFINED_CAPABLE = 0,
56 SYSTEM_INTEL_MSR_CAPABLE, 58 SYSTEM_INTEL_MSR_CAPABLE,
59 SYSTEM_AMD_MSR_CAPABLE,
57 SYSTEM_IO_CAPABLE, 60 SYSTEM_IO_CAPABLE,
58}; 61};
59 62
60#define INTEL_MSR_RANGE (0xffff) 63#define INTEL_MSR_RANGE (0xffff)
64#define AMD_MSR_RANGE (0x7)
65
66#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
61 67
62struct acpi_cpufreq_data { 68struct acpi_cpufreq_data {
63 struct acpi_processor_performance *acpi_data; 69 struct acpi_processor_performance *acpi_data;
@@ -74,6 +80,116 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
74static struct cpufreq_driver acpi_cpufreq_driver; 80static struct cpufreq_driver acpi_cpufreq_driver;
75 81
76static unsigned int acpi_pstate_strict; 82static unsigned int acpi_pstate_strict;
83static bool boost_enabled, boost_supported;
84static struct msr __percpu *msrs;
85
86static bool boost_state(unsigned int cpu)
87{
88 u32 lo, hi;
89 u64 msr;
90
91 switch (boot_cpu_data.x86_vendor) {
92 case X86_VENDOR_INTEL:
93 rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
94 msr = lo | ((u64)hi << 32);
95 return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
96 case X86_VENDOR_AMD:
97 rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
98 msr = lo | ((u64)hi << 32);
99 return !(msr & MSR_K7_HWCR_CPB_DIS);
100 }
101 return false;
102}
103
104static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
105{
106 u32 cpu;
107 u32 msr_addr;
108 u64 msr_mask;
109
110 switch (boot_cpu_data.x86_vendor) {
111 case X86_VENDOR_INTEL:
112 msr_addr = MSR_IA32_MISC_ENABLE;
113 msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
114 break;
115 case X86_VENDOR_AMD:
116 msr_addr = MSR_K7_HWCR;
117 msr_mask = MSR_K7_HWCR_CPB_DIS;
118 break;
119 default:
120 return;
121 }
122
123 rdmsr_on_cpus(cpumask, msr_addr, msrs);
124
125 for_each_cpu(cpu, cpumask) {
126 struct msr *reg = per_cpu_ptr(msrs, cpu);
127 if (enable)
128 reg->q &= ~msr_mask;
129 else
130 reg->q |= msr_mask;
131 }
132
133 wrmsr_on_cpus(cpumask, msr_addr, msrs);
134}
135
136static ssize_t _store_boost(const char *buf, size_t count)
137{
138 int ret;
139 unsigned long val = 0;
140
141 if (!boost_supported)
142 return -EINVAL;
143
144 ret = kstrtoul(buf, 10, &val);
145 if (ret || (val > 1))
146 return -EINVAL;
147
148 if ((val && boost_enabled) || (!val && !boost_enabled))
149 return count;
150
151 get_online_cpus();
152
153 boost_set_msrs(val, cpu_online_mask);
154
155 put_online_cpus();
156
157 boost_enabled = val;
158 pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
159
160 return count;
161}
162
163static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
164 const char *buf, size_t count)
165{
166 return _store_boost(buf, count);
167}
168
169static ssize_t show_global_boost(struct kobject *kobj,
170 struct attribute *attr, char *buf)
171{
172 return sprintf(buf, "%u\n", boost_enabled);
173}
174
175static struct global_attr global_boost = __ATTR(boost, 0644,
176 show_global_boost,
177 store_global_boost);
178
179#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
180static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
181 size_t count)
182{
183 return _store_boost(buf, count);
184}
185
186static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
187{
188 return sprintf(buf, "%u\n", boost_enabled);
189}
190
191static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
192#endif
77 193
78static int check_est_cpu(unsigned int cpuid) 194static int check_est_cpu(unsigned int cpuid)
79{ 195{
@@ -82,6 +198,13 @@ static int check_est_cpu(unsigned int cpuid)
82 return cpu_has(cpu, X86_FEATURE_EST); 198 return cpu_has(cpu, X86_FEATURE_EST);
83} 199}
84 200
201static int check_amd_hwpstate_cpu(unsigned int cpuid)
202{
203 struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
204
205 return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
206}
207
85static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) 208static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
86{ 209{
87 struct acpi_processor_performance *perf; 210 struct acpi_processor_performance *perf;
@@ -101,7 +224,11 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
101 int i; 224 int i;
102 struct acpi_processor_performance *perf; 225 struct acpi_processor_performance *perf;
103 226
104 msr &= INTEL_MSR_RANGE; 227 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
228 msr &= AMD_MSR_RANGE;
229 else
230 msr &= INTEL_MSR_RANGE;
231
105 perf = data->acpi_data; 232 perf = data->acpi_data;
106 233
107 for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { 234 for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
@@ -115,6 +242,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
115{ 242{
116 switch (data->cpu_feature) { 243 switch (data->cpu_feature) {
117 case SYSTEM_INTEL_MSR_CAPABLE: 244 case SYSTEM_INTEL_MSR_CAPABLE:
245 case SYSTEM_AMD_MSR_CAPABLE:
118 return extract_msr(val, data); 246 return extract_msr(val, data);
119 case SYSTEM_IO_CAPABLE: 247 case SYSTEM_IO_CAPABLE:
120 return extract_io(val, data); 248 return extract_io(val, data);
@@ -150,6 +278,7 @@ static void do_drv_read(void *_cmd)
150 278
151 switch (cmd->type) { 279 switch (cmd->type) {
152 case SYSTEM_INTEL_MSR_CAPABLE: 280 case SYSTEM_INTEL_MSR_CAPABLE:
281 case SYSTEM_AMD_MSR_CAPABLE:
153 rdmsr(cmd->addr.msr.reg, cmd->val, h); 282 rdmsr(cmd->addr.msr.reg, cmd->val, h);
154 break; 283 break;
155 case SYSTEM_IO_CAPABLE: 284 case SYSTEM_IO_CAPABLE:
@@ -174,6 +303,9 @@ static void do_drv_write(void *_cmd)
174 lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); 303 lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
175 wrmsr(cmd->addr.msr.reg, lo, hi); 304 wrmsr(cmd->addr.msr.reg, lo, hi);
176 break; 305 break;
306 case SYSTEM_AMD_MSR_CAPABLE:
307 wrmsr(cmd->addr.msr.reg, cmd->val, 0);
308 break;
177 case SYSTEM_IO_CAPABLE: 309 case SYSTEM_IO_CAPABLE:
178 acpi_os_write_port((acpi_io_address)cmd->addr.io.port, 310 acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
179 cmd->val, 311 cmd->val,
@@ -217,6 +349,10 @@ static u32 get_cur_val(const struct cpumask *mask)
217 cmd.type = SYSTEM_INTEL_MSR_CAPABLE; 349 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
218 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; 350 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
219 break; 351 break;
352 case SYSTEM_AMD_MSR_CAPABLE:
353 cmd.type = SYSTEM_AMD_MSR_CAPABLE;
354 cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
355 break;
220 case SYSTEM_IO_CAPABLE: 356 case SYSTEM_IO_CAPABLE:
221 cmd.type = SYSTEM_IO_CAPABLE; 357 cmd.type = SYSTEM_IO_CAPABLE;
222 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; 358 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
@@ -326,6 +462,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
326 cmd.addr.msr.reg = MSR_IA32_PERF_CTL; 462 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
327 cmd.val = (u32) perf->states[next_perf_state].control; 463 cmd.val = (u32) perf->states[next_perf_state].control;
328 break; 464 break;
465 case SYSTEM_AMD_MSR_CAPABLE:
466 cmd.type = SYSTEM_AMD_MSR_CAPABLE;
467 cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
468 cmd.val = (u32) perf->states[next_perf_state].control;
469 break;
329 case SYSTEM_IO_CAPABLE: 470 case SYSTEM_IO_CAPABLE:
330 cmd.type = SYSTEM_IO_CAPABLE; 471 cmd.type = SYSTEM_IO_CAPABLE;
331 cmd.addr.io.port = perf->control_register.address; 472 cmd.addr.io.port = perf->control_register.address;
@@ -419,6 +560,44 @@ static void free_acpi_perf_data(void)
419 free_percpu(acpi_perf_data); 560 free_percpu(acpi_perf_data);
420} 561}
421 562
563static int boost_notify(struct notifier_block *nb, unsigned long action,
564 void *hcpu)
565{
566 unsigned cpu = (long)hcpu;
567 const struct cpumask *cpumask;
568
569 cpumask = get_cpu_mask(cpu);
570
571 /*
572 * Clear the boost-disable bit on the CPU_DOWN path so that
573 * this cpu cannot block the remaining ones from boosting. On
574 * the CPU_UP path we simply keep the boost-disable flag in
575 * sync with the current global state.
576 */
577
578 switch (action) {
579 case CPU_UP_PREPARE:
580 case CPU_UP_PREPARE_FROZEN:
581 boost_set_msrs(boost_enabled, cpumask);
582 break;
583
584 case CPU_DOWN_PREPARE:
585 case CPU_DOWN_PREPARE_FROZEN:
586 boost_set_msrs(1, cpumask);
587 break;
588
589 default:
590 break;
591 }
592
593 return NOTIFY_OK;
594}
595
596
597static struct notifier_block boost_nb = {
598 .notifier_call = boost_notify,
599};
600
422/* 601/*
423 * acpi_cpufreq_early_init - initialize ACPI P-States library 602 * acpi_cpufreq_early_init - initialize ACPI P-States library
424 * 603 *
@@ -559,6 +738,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
559 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; 738 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
560 cpumask_copy(policy->cpus, cpu_core_mask(cpu)); 739 cpumask_copy(policy->cpus, cpu_core_mask(cpu));
561 } 740 }
741
742 if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
743 cpumask_clear(policy->cpus);
744 cpumask_set_cpu(cpu, policy->cpus);
745 cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
746 policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
747 pr_info_once(PFX "overriding BIOS provided _PSD data\n");
748 }
562#endif 749#endif
563 750
564 /* capability check */ 751 /* capability check */
@@ -580,12 +767,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
580 break; 767 break;
581 case ACPI_ADR_SPACE_FIXED_HARDWARE: 768 case ACPI_ADR_SPACE_FIXED_HARDWARE:
582 pr_debug("HARDWARE addr space\n"); 769 pr_debug("HARDWARE addr space\n");
583 if (!check_est_cpu(cpu)) { 770 if (check_est_cpu(cpu)) {
584 result = -ENODEV; 771 data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
585 goto err_unreg; 772 break;
586 } 773 }
587 data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; 774 if (check_amd_hwpstate_cpu(cpu)) {
588 break; 775 data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
776 break;
777 }
778 result = -ENODEV;
779 goto err_unreg;
589 default: 780 default:
590 pr_debug("Unknown addr space %d\n", 781 pr_debug("Unknown addr space %d\n",
591 (u32) (perf->control_register.space_id)); 782 (u32) (perf->control_register.space_id));
@@ -718,6 +909,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
718 909
719static struct freq_attr *acpi_cpufreq_attr[] = { 910static struct freq_attr *acpi_cpufreq_attr[] = {
720 &cpufreq_freq_attr_scaling_available_freqs, 911 &cpufreq_freq_attr_scaling_available_freqs,
912 NULL, /* this is a placeholder for cpb, do not remove */
721 NULL, 913 NULL,
722}; 914};
723 915
@@ -733,6 +925,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
733 .attr = acpi_cpufreq_attr, 925 .attr = acpi_cpufreq_attr,
734}; 926};
735 927
928static void __init acpi_cpufreq_boost_init(void)
929{
930 if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
931 msrs = msrs_alloc();
932
933 if (!msrs)
934 return;
935
936 boost_supported = true;
937 boost_enabled = boost_state(0);
938
939 get_online_cpus();
940
941 /* Force all MSRs to the same value */
942 boost_set_msrs(boost_enabled, cpu_online_mask);
943
944 register_cpu_notifier(&boost_nb);
945
946 put_online_cpus();
947 } else
948 global_boost.attr.mode = 0444;
949
950 /* We create the boost file in any case, though for systems without
951 * hardware support it will be read-only and hardwired to return 0.
952 */
953 if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
954 pr_warn(PFX "could not register global boost sysfs file\n");
955 else
956 pr_debug("registered global boost sysfs file\n");
957}
958
959static void __exit acpi_cpufreq_boost_exit(void)
960{
961 sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
962
963 if (msrs) {
964 unregister_cpu_notifier(&boost_nb);
965
966 msrs_free(msrs);
967 msrs = NULL;
968 }
969}
970
736static int __init acpi_cpufreq_init(void) 971static int __init acpi_cpufreq_init(void)
737{ 972{
738 int ret; 973 int ret;
@@ -746,9 +981,32 @@ static int __init acpi_cpufreq_init(void)
746 if (ret) 981 if (ret)
747 return ret; 982 return ret;
748 983
984#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
985 /* this is a sysfs file with a strange name and an even stranger
986 * semantics - per-CPU instantiation, but system-global effect.
987 * Let's enable it only on AMD CPUs for compatibility reasons and
988 * only if configured. This is considered legacy code, which
989 * will probably be removed at some point in the future.
990 */
991 if (check_amd_hwpstate_cpu(0)) {
992 struct freq_attr **iter;
993
994 pr_debug("adding sysfs entry for cpb\n");
995
996 for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
997 ;
998
999 /* make sure there is a terminator behind it */
1000 if (iter[1] == NULL)
1001 *iter = &cpb;
1002 }
1003#endif
1004
749 ret = cpufreq_register_driver(&acpi_cpufreq_driver); 1005 ret = cpufreq_register_driver(&acpi_cpufreq_driver);
750 if (ret) 1006 if (ret)
751 free_acpi_perf_data(); 1007 free_acpi_perf_data();
1008 else
1009 acpi_cpufreq_boost_init();
752 1010
753 return ret; 1011 return ret;
754} 1012}
@@ -757,6 +1015,8 @@ static void __exit acpi_cpufreq_exit(void)
757{ 1015{
758 pr_debug("acpi_cpufreq_exit\n"); 1016 pr_debug("acpi_cpufreq_exit\n");
759 1017
1018 acpi_cpufreq_boost_exit();
1019
760 cpufreq_unregister_driver(&acpi_cpufreq_driver); 1020 cpufreq_unregister_driver(&acpi_cpufreq_driver);
761 1021
762 free_acpi_perf_data(); 1022 free_acpi_perf_data();
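
Both vendor MSRs handled by the new boost code carry a disable bit (MSR_IA32_MISC_ENABLE_TURBO_DISABLE on Intel, MSR_K7_HWCR_CPB_DIS on AMD), so enabling boost means clearing that bit on every online CPU, and the hotplug notifier keeps newly onlined CPUs in sync with the global state. A hedged one-function illustration of the polarity (the helper name is an assumption; reg->q is the cached 64-bit MSR value used with rdmsr_on_cpus()/wrmsr_on_cpus()):

#include <linux/types.h>
#include <asm/msr.h>

static void set_boost_bit(struct msr *reg, u64 disable_mask, bool enable)
{
	if (enable)
		reg->q &= ~disable_mask;	/* clear "boost disabled" */
	else
		reg->q |= disable_mask;		/* set "boost disabled" */
}
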
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
new file mode 100644
index 000000000000..e9158278c71d
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -0,0 +1,269 @@
1/*
2 * Copyright (C) 2012 Freescale Semiconductor, Inc.
3 *
4 * The OPP code in function cpu0_set_target() is reused from
5 * drivers/cpufreq/omap-cpufreq.c
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/clk.h>
15#include <linux/cpu.h>
16#include <linux/cpufreq.h>
17#include <linux/err.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/opp.h>
21#include <linux/regulator/consumer.h>
22#include <linux/slab.h>
23
24static unsigned int transition_latency;
25static unsigned int voltage_tolerance; /* in percentage */
26
27static struct device *cpu_dev;
28static struct clk *cpu_clk;
29static struct regulator *cpu_reg;
30static struct cpufreq_frequency_table *freq_table;
31
32static int cpu0_verify_speed(struct cpufreq_policy *policy)
33{
34 return cpufreq_frequency_table_verify(policy, freq_table);
35}
36
37static unsigned int cpu0_get_speed(unsigned int cpu)
38{
39 return clk_get_rate(cpu_clk) / 1000;
40}
41
42static int cpu0_set_target(struct cpufreq_policy *policy,
43 unsigned int target_freq, unsigned int relation)
44{
45 struct cpufreq_freqs freqs;
46 struct opp *opp;
47 unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0;
48 unsigned int index, cpu;
49 int ret;
50
51 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
52 relation, &index);
53 if (ret) {
54 pr_err("failed to match target freqency %d: %d\n",
55 target_freq, ret);
56 return ret;
57 }
58
59 freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
60 if ((long)freq_Hz < 0)
61 freq_Hz = freq_table[index].frequency * 1000;
62 freqs.new = freq_Hz / 1000;
63 freqs.old = clk_get_rate(cpu_clk) / 1000;
64
65 if (freqs.old == freqs.new)
66 return 0;
67
68 for_each_online_cpu(cpu) {
69 freqs.cpu = cpu;
70 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
71 }
72
73 if (cpu_reg) {
74 opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
75 if (IS_ERR(opp)) {
76 pr_err("failed to find OPP for %ld\n", freq_Hz);
77 return PTR_ERR(opp);
78 }
79 volt = opp_get_voltage(opp);
80 tol = volt * voltage_tolerance / 100;
81 volt_old = regulator_get_voltage(cpu_reg);
82 }
83
84 pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
85 freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
86 freqs.new / 1000, volt ? volt / 1000 : -1);
87
88 /* scaling up? scale voltage before frequency */
89 if (cpu_reg && freqs.new > freqs.old) {
90 ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
91 if (ret) {
92 pr_err("failed to scale voltage up: %d\n", ret);
93 freqs.new = freqs.old;
94 return ret;
95 }
96 }
97
98 ret = clk_set_rate(cpu_clk, freqs.new * 1000);
99 if (ret) {
100 pr_err("failed to set clock rate: %d\n", ret);
101 if (cpu_reg)
102 regulator_set_voltage_tol(cpu_reg, volt_old, tol);
103 return ret;
104 }
105
106 /* scaling down? scale voltage after frequency */
107 if (cpu_reg && freqs.new < freqs.old) {
108 ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
109 if (ret) {
110 pr_err("failed to scale voltage down: %d\n", ret);
111 clk_set_rate(cpu_clk, freqs.old * 1000);
112 freqs.new = freqs.old;
113 return ret;
114 }
115 }
116
117 for_each_online_cpu(cpu) {
118 freqs.cpu = cpu;
119 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
120 }
121
122 return 0;
123}
124
125static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
126{
127 int ret;
128
129 if (policy->cpu != 0)
130 return -EINVAL;
131
132 ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
133 if (ret) {
134 pr_err("invalid frequency table: %d\n", ret);
135 return ret;
136 }
137
138 policy->cpuinfo.transition_latency = transition_latency;
139 policy->cur = clk_get_rate(cpu_clk) / 1000;
140
141 /*
142 * The driver only supports the SMP configuration where all processors
143 * share the clock and voltage. Use the cpufreq affected_cpus
144 * interface to have all CPUs scaled together.
145 */
146 policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
147 cpumask_setall(policy->cpus);
148
149 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
150
151 return 0;
152}
153
154static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
155{
156 cpufreq_frequency_table_put_attr(policy->cpu);
157
158 return 0;
159}
160
161static struct freq_attr *cpu0_cpufreq_attr[] = {
162 &cpufreq_freq_attr_scaling_available_freqs,
163 NULL,
164};
165
166static struct cpufreq_driver cpu0_cpufreq_driver = {
167 .flags = CPUFREQ_STICKY,
168 .verify = cpu0_verify_speed,
169 .target = cpu0_set_target,
170 .get = cpu0_get_speed,
171 .init = cpu0_cpufreq_init,
172 .exit = cpu0_cpufreq_exit,
173 .name = "generic_cpu0",
174 .attr = cpu0_cpufreq_attr,
175};
176
177static int __devinit cpu0_cpufreq_driver_init(void)
178{
179 struct device_node *np;
180 int ret;
181
182 np = of_find_node_by_path("/cpus/cpu@0");
183 if (!np) {
184 pr_err("failed to find cpu0 node\n");
185 return -ENOENT;
186 }
187
188 cpu_dev = get_cpu_device(0);
189 if (!cpu_dev) {
190 pr_err("failed to get cpu0 device\n");
191 ret = -ENODEV;
192 goto out_put_node;
193 }
194
195 cpu_dev->of_node = np;
196
197 cpu_clk = clk_get(cpu_dev, NULL);
198 if (IS_ERR(cpu_clk)) {
199 ret = PTR_ERR(cpu_clk);
200 pr_err("failed to get cpu0 clock: %d\n", ret);
201 goto out_put_node;
202 }
203
204 cpu_reg = regulator_get(cpu_dev, "cpu0");
205 if (IS_ERR(cpu_reg)) {
206 pr_warn("failed to get cpu0 regulator\n");
207 cpu_reg = NULL;
208 }
209
210 ret = of_init_opp_table(cpu_dev);
211 if (ret) {
212 pr_err("failed to init OPP table: %d\n", ret);
213 goto out_put_node;
214 }
215
216 ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
217 if (ret) {
218 pr_err("failed to init cpufreq table: %d\n", ret);
219 goto out_put_node;
220 }
221
222 of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
223
224 if (of_property_read_u32(np, "clock-latency", &transition_latency))
225 transition_latency = CPUFREQ_ETERNAL;
226
227 if (cpu_reg) {
228 struct opp *opp;
229 unsigned long min_uV, max_uV;
230 int i;
231
232 /*
233 * OPP is maintained in order of increasing frequency, and
234 * freq_table initialised from OPP is therefore sorted in the
235 * same order.
236 */
237 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
238 ;
239 opp = opp_find_freq_exact(cpu_dev,
240 freq_table[0].frequency * 1000, true);
241 min_uV = opp_get_voltage(opp);
242 opp = opp_find_freq_exact(cpu_dev,
243 freq_table[i-1].frequency * 1000, true);
244 max_uV = opp_get_voltage(opp);
245 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
246 if (ret > 0)
247 transition_latency += ret * 1000;
248 }
249
250 ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
251 if (ret) {
252 pr_err("failed register driver: %d\n", ret);
253 goto out_free_table;
254 }
255
256 of_node_put(np);
257 return 0;
258
259out_free_table:
260 opp_free_cpufreq_table(cpu_dev, &freq_table);
261out_put_node:
262 of_node_put(np);
263 return ret;
264}
265late_initcall(cpu0_cpufreq_driver_init);
266
267MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
268MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
269MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 55f0354864e2..a152af7e1991 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -504,6 +504,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
504 j_dbs_info->prev_cpu_nice = 504 j_dbs_info->prev_cpu_nice =
505 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 505 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
506 } 506 }
507 this_dbs_info->cpu = cpu;
507 this_dbs_info->down_skip = 0; 508 this_dbs_info->down_skip = 0;
508 this_dbs_info->requested_freq = policy->cur; 509 this_dbs_info->requested_freq = policy->cur;
509 510
@@ -583,6 +584,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
583 __cpufreq_driver_target( 584 __cpufreq_driver_target(
584 this_dbs_info->cur_policy, 585 this_dbs_info->cur_policy,
585 policy->min, CPUFREQ_RELATION_L); 586 policy->min, CPUFREQ_RELATION_L);
587 dbs_check_cpu(this_dbs_info);
586 mutex_unlock(&this_dbs_info->timer_mutex); 588 mutex_unlock(&this_dbs_info->timer_mutex);
587 589
588 break; 590 break;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 14c1af5a264f..396322f2a83f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -761,6 +761,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
761 else if (policy->min > this_dbs_info->cur_policy->cur) 761 else if (policy->min > this_dbs_info->cur_policy->cur)
762 __cpufreq_driver_target(this_dbs_info->cur_policy, 762 __cpufreq_driver_target(this_dbs_info->cur_policy,
763 policy->min, CPUFREQ_RELATION_L); 763 policy->min, CPUFREQ_RELATION_L);
764 dbs_check_cpu(this_dbs_info);
764 mutex_unlock(&this_dbs_info->timer_mutex); 765 mutex_unlock(&this_dbs_info->timer_mutex);
765 break; 766 break;
766 } 767 }
diff --git a/drivers/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h
index cbf48fbca881..e2dc436099d1 100644
--- a/drivers/cpufreq/longhaul.h
+++ b/drivers/cpufreq/longhaul.h
@@ -56,7 +56,7 @@ union msr_longhaul {
56/* 56/*
57 * VIA C3 Samuel 1 & Samuel 2 (stepping 0) 57 * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
58 */ 58 */
59static const int __cpuinitdata samuel1_mults[16] = { 59static const int __cpuinitconst samuel1_mults[16] = {
60 -1, /* 0000 -> RESERVED */ 60 -1, /* 0000 -> RESERVED */
61 30, /* 0001 -> 3.0x */ 61 30, /* 0001 -> 3.0x */
62 40, /* 0010 -> 4.0x */ 62 40, /* 0010 -> 4.0x */
@@ -75,7 +75,7 @@ static const int __cpuinitdata samuel1_mults[16] = {
75 -1, /* 1111 -> RESERVED */ 75 -1, /* 1111 -> RESERVED */
76}; 76};
77 77
78static const int __cpuinitdata samuel1_eblcr[16] = { 78static const int __cpuinitconst samuel1_eblcr[16] = {
79 50, /* 0000 -> RESERVED */ 79 50, /* 0000 -> RESERVED */
80 30, /* 0001 -> 3.0x */ 80 30, /* 0001 -> 3.0x */
81 40, /* 0010 -> 4.0x */ 81 40, /* 0010 -> 4.0x */
@@ -97,7 +97,7 @@ static const int __cpuinitdata samuel1_eblcr[16] = {
97/* 97/*
98 * VIA C3 Samuel2 Stepping 1->15 98 * VIA C3 Samuel2 Stepping 1->15
99 */ 99 */
100static const int __cpuinitdata samuel2_eblcr[16] = { 100static const int __cpuinitconst samuel2_eblcr[16] = {
101 50, /* 0000 -> 5.0x */ 101 50, /* 0000 -> 5.0x */
102 30, /* 0001 -> 3.0x */ 102 30, /* 0001 -> 3.0x */
103 40, /* 0010 -> 4.0x */ 103 40, /* 0010 -> 4.0x */
@@ -119,7 +119,7 @@ static const int __cpuinitdata samuel2_eblcr[16] = {
119/* 119/*
120 * VIA C3 Ezra 120 * VIA C3 Ezra
121 */ 121 */
122static const int __cpuinitdata ezra_mults[16] = { 122static const int __cpuinitconst ezra_mults[16] = {
123 100, /* 0000 -> 10.0x */ 123 100, /* 0000 -> 10.0x */
124 30, /* 0001 -> 3.0x */ 124 30, /* 0001 -> 3.0x */
125 40, /* 0010 -> 4.0x */ 125 40, /* 0010 -> 4.0x */
@@ -138,7 +138,7 @@ static const int __cpuinitdata ezra_mults[16] = {
138 120, /* 1111 -> 12.0x */ 138 120, /* 1111 -> 12.0x */
139}; 139};
140 140
141static const int __cpuinitdata ezra_eblcr[16] = { 141static const int __cpuinitconst ezra_eblcr[16] = {
142 50, /* 0000 -> 5.0x */ 142 50, /* 0000 -> 5.0x */
143 30, /* 0001 -> 3.0x */ 143 30, /* 0001 -> 3.0x */
144 40, /* 0010 -> 4.0x */ 144 40, /* 0010 -> 4.0x */
@@ -160,7 +160,7 @@ static const int __cpuinitdata ezra_eblcr[16] = {
160/* 160/*
161 * VIA C3 (Ezra-T) [C5M]. 161 * VIA C3 (Ezra-T) [C5M].
162 */ 162 */
163static const int __cpuinitdata ezrat_mults[32] = { 163static const int __cpuinitconst ezrat_mults[32] = {
164 100, /* 0000 -> 10.0x */ 164 100, /* 0000 -> 10.0x */
165 30, /* 0001 -> 3.0x */ 165 30, /* 0001 -> 3.0x */
166 40, /* 0010 -> 4.0x */ 166 40, /* 0010 -> 4.0x */
@@ -196,7 +196,7 @@ static const int __cpuinitdata ezrat_mults[32] = {
196 -1, /* 1111 -> RESERVED (12.0x) */ 196 -1, /* 1111 -> RESERVED (12.0x) */
197}; 197};
198 198
199static const int __cpuinitdata ezrat_eblcr[32] = { 199static const int __cpuinitconst ezrat_eblcr[32] = {
200 50, /* 0000 -> 5.0x */ 200 50, /* 0000 -> 5.0x */
201 30, /* 0001 -> 3.0x */ 201 30, /* 0001 -> 3.0x */
202 40, /* 0010 -> 4.0x */ 202 40, /* 0010 -> 4.0x */
@@ -235,7 +235,7 @@ static const int __cpuinitdata ezrat_eblcr[32] = {
235/* 235/*
236 * VIA C3 Nehemiah */ 236 * VIA C3 Nehemiah */
237 237
238static const int __cpuinitdata nehemiah_mults[32] = { 238static const int __cpuinitconst nehemiah_mults[32] = {
239 100, /* 0000 -> 10.0x */ 239 100, /* 0000 -> 10.0x */
240 -1, /* 0001 -> 16.0x */ 240 -1, /* 0001 -> 16.0x */
241 40, /* 0010 -> 4.0x */ 241 40, /* 0010 -> 4.0x */
@@ -270,7 +270,7 @@ static const int __cpuinitdata nehemiah_mults[32] = {
270 -1, /* 1111 -> 12.0x */ 270 -1, /* 1111 -> 12.0x */
271}; 271};
272 272
273static const int __cpuinitdata nehemiah_eblcr[32] = { 273static const int __cpuinitconst nehemiah_eblcr[32] = {
274 50, /* 0000 -> 5.0x */ 274 50, /* 0000 -> 5.0x */
275 160, /* 0001 -> 16.0x */ 275 160, /* 0001 -> 16.0x */
276 40, /* 0010 -> 4.0x */ 276 40, /* 0010 -> 4.0x */
@@ -315,7 +315,7 @@ struct mV_pos {
315 unsigned short pos; 315 unsigned short pos;
316}; 316};
317 317
318static const struct mV_pos __cpuinitdata vrm85_mV[32] = { 318static const struct mV_pos __cpuinitconst vrm85_mV[32] = {
319 {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2}, 319 {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
320 {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26}, 320 {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
321 {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18}, 321 {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
@@ -326,14 +326,14 @@ static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
326 {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11} 326 {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
327}; 327};
328 328
329static const unsigned char __cpuinitdata mV_vrm85[32] = { 329static const unsigned char __cpuinitconst mV_vrm85[32] = {
330 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11, 330 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
331 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d, 331 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
332 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19, 332 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
333 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15 333 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
334}; 334};
335 335
336static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = { 336static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = {
337 {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28}, 337 {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
338 {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24}, 338 {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
339 {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20}, 339 {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
@@ -344,7 +344,7 @@ static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
344 {675, 3}, {650, 2}, {625, 1}, {600, 0} 344 {675, 3}, {650, 2}, {625, 1}, {600, 0}
345}; 345};
346 346
347static const unsigned char __cpuinitdata mV_mobilevrm[32] = { 347static const unsigned char __cpuinitconst mV_mobilevrm[32] = {
348 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 348 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
349 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 349 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
350 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 350 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
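
The longhaul.h churn above is mechanical: these tables are declared const, so they belong in the read-only __cpuinitconst init section rather than the writable __cpuinitdata one; placing const objects in a writable section trips section attribute conflicts with newer toolchains. In the file's own style (example_mults is a made-up name):

/* wrong: const object placed in a writable init section */
/* static const int __cpuinitdata example_mults[4] = { 30, 40, 50, 60 }; */

/* right: const object placed in the read-only init section */
static const int __cpuinitconst example_mults[4] = { 30, 40, 50, 60 };
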
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index b47034e650a5..65f8e9a54975 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -40,16 +40,6 @@
40/* OPP tolerance in percentage */ 40/* OPP tolerance in percentage */
41#define OPP_TOLERANCE 4 41#define OPP_TOLERANCE 4
42 42
43#ifdef CONFIG_SMP
44struct lpj_info {
45 unsigned long ref;
46 unsigned int freq;
47};
48
49static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
50static struct lpj_info global_lpj_ref;
51#endif
52
53static struct cpufreq_frequency_table *freq_table; 43static struct cpufreq_frequency_table *freq_table;
54static atomic_t freq_table_users = ATOMIC_INIT(0); 44static atomic_t freq_table_users = ATOMIC_INIT(0);
55static struct clk *mpu_clk; 45static struct clk *mpu_clk;
@@ -161,31 +151,6 @@ static int omap_target(struct cpufreq_policy *policy,
161 } 151 }
162 152
163 freqs.new = omap_getspeed(policy->cpu); 153 freqs.new = omap_getspeed(policy->cpu);
164#ifdef CONFIG_SMP
165 /*
166 * Note that loops_per_jiffy is not updated on SMP systems in
167 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
168 * on frequency transition. We need to update all dependent CPUs.
169 */
170 for_each_cpu(i, policy->cpus) {
171 struct lpj_info *lpj = &per_cpu(lpj_ref, i);
172 if (!lpj->freq) {
173 lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
174 lpj->freq = freqs.old;
175 }
176
177 per_cpu(cpu_data, i).loops_per_jiffy =
178 cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
179 }
180
181 /* And don't forget to adjust the global one */
182 if (!global_lpj_ref.freq) {
183 global_lpj_ref.ref = loops_per_jiffy;
184 global_lpj_ref.freq = freqs.old;
185 }
186 loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
187 freqs.new);
188#endif
189 154
190done: 155done:
191 /* notifiers */ 156 /* notifiers */
@@ -301,9 +266,9 @@ static int __init omap_cpufreq_init(void)
301 } 266 }
302 267
303 mpu_dev = omap_device_get_by_hwmod_name("mpu"); 268 mpu_dev = omap_device_get_by_hwmod_name("mpu");
304 if (!mpu_dev) { 269 if (IS_ERR(mpu_dev)) {
305 pr_warning("%s: unable to get the mpu device\n", __func__); 270 pr_warning("%s: unable to get the mpu device\n", __func__);
306 return -EINVAL; 271 return PTR_ERR(mpu_dev);
307 } 272 }
308 273
309 mpu_reg = regulator_get(mpu_dev, "vcc"); 274 mpu_reg = regulator_get(mpu_dev, "vcc");
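
The omap-cpufreq fix above corrects an error-pointer check: omap_device_get_by_hwmod_name() reports failure with ERR_PTR(), never NULL, so the old !mpu_dev test could not fire. A hedged sketch of the convention (the wrapper function is illustrative):

#include <linux/device.h>
#include <linux/err.h>

static int get_mpu_dev(struct device **out)
{
	struct device *dev = omap_device_get_by_hwmod_name("mpu");

	if (IS_ERR(dev))		/* failure is encoded in the pointer */
		return PTR_ERR(dev);	/* propagate the errno it carries */

	*out = dev;
	return 0;
}
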
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 1a40935c85fd..129e80bfff22 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -48,22 +48,12 @@
48#define PFX "powernow-k8: " 48#define PFX "powernow-k8: "
49#define VERSION "version 2.20.00" 49#define VERSION "version 2.20.00"
50#include "powernow-k8.h" 50#include "powernow-k8.h"
51#include "mperf.h"
52 51
53/* serialize freq changes */ 52/* serialize freq changes */
54static DEFINE_MUTEX(fidvid_mutex); 53static DEFINE_MUTEX(fidvid_mutex);
55 54
56static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); 55static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
57 56
58static int cpu_family = CPU_OPTERON;
59
60/* array to map SW pstate number to acpi state */
61static u32 ps_to_as[8];
62
63/* core performance boost */
64static bool cpb_capable, cpb_enabled;
65static struct msr __percpu *msrs;
66
67static struct cpufreq_driver cpufreq_amd64_driver; 57static struct cpufreq_driver cpufreq_amd64_driver;
68 58
69#ifndef CONFIG_SMP 59#ifndef CONFIG_SMP
@@ -85,12 +75,6 @@ static u32 find_khz_freq_from_fid(u32 fid)
85 return 1000 * find_freq_from_fid(fid); 75 return 1000 * find_freq_from_fid(fid);
86} 76}
87 77
88static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
89 u32 pstate)
90{
91 return data[ps_to_as[pstate]].frequency;
92}
93
94/* Return the vco fid for an input fid 78/* Return the vco fid for an input fid
95 * 79 *
96 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids 80 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
@@ -113,9 +97,6 @@ static int pending_bit_stuck(void)
113{ 97{
114 u32 lo, hi; 98 u32 lo, hi;
115 99
116 if (cpu_family == CPU_HW_PSTATE)
117 return 0;
118
119 rdmsr(MSR_FIDVID_STATUS, lo, hi); 100 rdmsr(MSR_FIDVID_STATUS, lo, hi);
120 return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0; 101 return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
121} 102}
@@ -129,20 +110,6 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
129 u32 lo, hi; 110 u32 lo, hi;
130 u32 i = 0; 111 u32 i = 0;
131 112
132 if (cpu_family == CPU_HW_PSTATE) {
133 rdmsr(MSR_PSTATE_STATUS, lo, hi);
134 i = lo & HW_PSTATE_MASK;
135 data->currpstate = i;
136
137 /*
138 * a workaround for family 11h erratum 311 might cause
139 * an "out-of-range Pstate if the core is in Pstate-0
140 */
141 if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
142 data->currpstate = HW_PSTATE_0;
143
144 return 0;
145 }
146 do { 113 do {
147 if (i++ > 10000) { 114 if (i++ > 10000) {
148 pr_debug("detected change pending stuck\n"); 115 pr_debug("detected change pending stuck\n");
@@ -299,14 +266,6 @@ static int decrease_vid_code_by_step(struct powernow_k8_data *data,
299 return 0; 266 return 0;
300} 267}
301 268
302/* Change hardware pstate by single MSR write */
303static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
304{
305 wrmsr(MSR_PSTATE_CTRL, pstate, 0);
306 data->currpstate = pstate;
307 return 0;
308}
309
310/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */ 269/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
311static int transition_fid_vid(struct powernow_k8_data *data, 270static int transition_fid_vid(struct powernow_k8_data *data,
312 u32 reqfid, u32 reqvid) 271 u32 reqfid, u32 reqvid)
@@ -523,8 +482,6 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
523static const struct x86_cpu_id powernow_k8_ids[] = { 482static const struct x86_cpu_id powernow_k8_ids[] = {
524 /* IO based frequency switching */ 483 /* IO based frequency switching */
525 { X86_VENDOR_AMD, 0xf }, 484 { X86_VENDOR_AMD, 0xf },
526 /* MSR based frequency switching supported */
527 X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
528 {} 485 {}
529}; 486};
530MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids); 487MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
@@ -560,15 +517,8 @@ static void check_supported_cpu(void *_rc)
560 "Power state transitions not supported\n"); 517 "Power state transitions not supported\n");
561 return; 518 return;
562 } 519 }
563 } else { /* must be a HW Pstate capable processor */ 520 *rc = 0;
564 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
565 if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
-			cpu_family = CPU_HW_PSTATE;
-		else
-			return;
 	}
-
-	*rc = 0;
 }
 
 static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@ -632,18 +582,11 @@ static void print_basics(struct powernow_k8_data *data)
 	for (j = 0; j < data->numps; j++) {
 		if (data->powernow_table[j].frequency !=
 				CPUFREQ_ENTRY_INVALID) {
-			if (cpu_family == CPU_HW_PSTATE) {
-				printk(KERN_INFO PFX
-					"   %d : pstate %d (%d MHz)\n", j,
-					data->powernow_table[j].index,
-					data->powernow_table[j].frequency/1000);
-			} else {
 				printk(KERN_INFO PFX
 					"fid 0x%x (%d MHz), vid 0x%x\n",
 					data->powernow_table[j].index & 0xff,
 					data->powernow_table[j].frequency/1000,
 					data->powernow_table[j].index >> 8);
-			}
 		}
 	}
 	if (data->batps)
@@ -651,20 +594,6 @@ static void print_basics(struct powernow_k8_data *data)
 				data->batps);
 }
 
-static u32 freq_from_fid_did(u32 fid, u32 did)
-{
-	u32 mhz = 0;
-
-	if (boot_cpu_data.x86 == 0x10)
-		mhz = (100 * (fid + 0x10)) >> did;
-	else if (boot_cpu_data.x86 == 0x11)
-		mhz = (100 * (fid + 8)) >> did;
-	else
-		BUG();
-
-	return mhz * 1000;
-}
-
 static int fill_powernow_table(struct powernow_k8_data *data,
 		struct pst_s *pst, u8 maxvid)
 {
@@ -824,7 +753,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
 {
 	u64 control;
 
-	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
+	if (!data->acpi_data.state_count)
 		return;
 
 	control = data->acpi_data.states[index].control;
@@ -875,10 +804,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	data->numps = data->acpi_data.state_count;
 	powernow_k8_acpi_pst_values(data, 0);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		ret_val = fill_powernow_table_pstate(data, powernow_table);
-	else
-		ret_val = fill_powernow_table_fidvid(data, powernow_table);
+	ret_val = fill_powernow_table_fidvid(data, powernow_table);
 	if (ret_val)
 		goto err_out_mem;
 
@@ -915,51 +841,6 @@ err_out:
 	return ret_val;
 }
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data,
-		struct cpufreq_frequency_table *powernow_table)
-{
-	int i;
-	u32 hi = 0, lo = 0;
-	rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
-	data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
-
-	for (i = 0; i < data->acpi_data.state_count; i++) {
-		u32 index;
-
-		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
-		if (index > data->max_hw_pstate) {
-			printk(KERN_ERR PFX "invalid pstate %d - "
-					"bad value %d.\n", i, index);
-			printk(KERN_ERR PFX "Please report to BIOS "
-					"manufacturer\n");
-			invalidate_entry(powernow_table, i);
-			continue;
-		}
-
-		ps_to_as[index] = i;
-
-		/* Frequency may be rounded for these */
-		if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
-			|| boot_cpu_data.x86 == 0x11) {
-
-			rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
-			if (!(hi & HW_PSTATE_VALID_MASK)) {
-				pr_debug("invalid pstate %d, ignoring\n", index);
-				invalidate_entry(powernow_table, i);
-				continue;
-			}
-
-			powernow_table[i].frequency =
-				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
-		} else
-			powernow_table[i].frequency =
-				data->acpi_data.states[i].core_frequency * 1000;
-
-		powernow_table[i].index = index;
-	}
-	return 0;
-}
-
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		struct cpufreq_frequency_table *powernow_table)
 {
@@ -1036,15 +917,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
 			max_latency = cur_latency;
 	}
 	if (max_latency == 0) {
-		/*
-		 * Fam 11h and later may return 0 as transition latency. This
-		 * is intended and means "very fast". While cpufreq core and
-		 * governors currently can handle that gracefully, better set it
-		 * to 1 to avoid problems in the future.
-		 */
-		if (boot_cpu_data.x86 < 0x11)
-			printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
-				"latency\n");
+		pr_err(FW_WARN PFX "Invalid zero transition latency\n");
 		max_latency = 1;
 	}
 	/* value in usecs, needs to be in nanoseconds */
@@ -1104,40 +977,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
 	return res;
 }
 
-/* Take a frequency, and issue the hardware pstate transition command */
-static int transition_frequency_pstate(struct powernow_k8_data *data,
-		unsigned int index)
-{
-	u32 pstate = 0;
-	int res, i;
-	struct cpufreq_freqs freqs;
-
-	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
-
-	/* get MSR index for hardware pstate transition */
-	pstate = index & HW_PSTATE_MASK;
-	if (pstate > data->max_hw_pstate)
-		return -EINVAL;
-
-	freqs.old = find_khz_freq_from_pstate(data->powernow_table,
-			data->currpstate);
-	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-	for_each_cpu(i, data->available_cores) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
-
-	res = transition_pstate(data, pstate);
-	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-	for_each_cpu(i, data->available_cores) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
-	return res;
-}
-
 struct powernowk8_target_arg {
 	struct cpufreq_policy *pol;
 	unsigned targfreq;
@@ -1173,18 +1012,15 @@ static long powernowk8_target_fn(void *arg)
 	if (query_current_values_with_pending_wait(data))
 		return -EIO;
 
-	if (cpu_family != CPU_HW_PSTATE) {
-		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
-		data->currfid, data->currvid);
+	pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+		data->currfid, data->currvid);
 
-		if ((checkvid != data->currvid) ||
-		    (checkfid != data->currfid)) {
-			printk(KERN_INFO PFX
-				"error - out of sync, fix 0x%x 0x%x, "
-				"vid 0x%x 0x%x\n",
-				checkfid, data->currfid,
-				checkvid, data->currvid);
-		}
+	if ((checkvid != data->currvid) ||
+	    (checkfid != data->currfid)) {
+		pr_info(PFX
+			"error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
+			checkfid, data->currfid,
+			checkvid, data->currvid);
 	}
 
 	if (cpufreq_frequency_table_target(pol, data->powernow_table,
@@ -1195,11 +1031,8 @@ static long powernowk8_target_fn(void *arg)
 
 	powernow_k8_acpi_pst_values(data, newstate);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		ret = transition_frequency_pstate(data,
-				data->powernow_table[newstate].index);
-	else
-		ret = transition_frequency_fidvid(data, newstate);
+	ret = transition_frequency_fidvid(data, newstate);
+
 	if (ret) {
 		printk(KERN_ERR PFX "transition frequency failed\n");
 		mutex_unlock(&fidvid_mutex);
@@ -1207,11 +1040,7 @@ static long powernowk8_target_fn(void *arg)
 	}
 	mutex_unlock(&fidvid_mutex);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-				data->powernow_table[newstate].index);
-	else
-		pol->cur = find_khz_freq_from_fid(data->currfid);
+	pol->cur = find_khz_freq_from_fid(data->currfid);
 
 	return 0;
 }
@@ -1264,22 +1093,23 @@ static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
 		return;
 	}
 
-	if (cpu_family == CPU_OPTERON)
-		fidvid_msr_init();
+	fidvid_msr_init();
 
 	init_on_cpu->rc = 0;
 }
 
+static const char missing_pss_msg[] =
+	KERN_ERR
+	FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+	FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n"
+	FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n";
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
-	static const char ACPI_PSS_BIOS_BUG_MSG[] =
-		KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-		FW_BUG PFX "Try again with latest BIOS.\n";
 	struct powernow_k8_data *data;
 	struct init_on_cpu init_on_cpu;
 	int rc;
-	struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
@@ -1295,7 +1125,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	}
 
 	data->cpu = pol->cpu;
-	data->currpstate = HW_PSTATE_INVALID;
 
 	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
@@ -1303,7 +1132,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		 * an UP version, and is deprecated by AMD.
 		 */
 		if (num_online_cpus() != 1) {
-			printk_once(ACPI_PSS_BIOS_BUG_MSG);
+			printk_once(missing_pss_msg);
 			goto err_out;
 		}
 		if (pol->cpu != 0) {
@@ -1332,17 +1161,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	if (rc != 0)
 		goto err_out_exit_acpi;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
-	else
-		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
+	cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
 	data->available_cores = pol->cpus;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-				data->currpstate);
-	else
-		pol->cur = find_khz_freq_from_fid(data->currfid);
+	pol->cur = find_khz_freq_from_fid(data->currfid);
 	pr_debug("policy current frequency %d kHz\n", pol->cur);
 
 	/* min/max the cpu is capable of */
@@ -1354,18 +1176,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		return -EINVAL;
 	}
 
-	/* Check for APERF/MPERF support in hardware */
-	if (cpu_has(c, X86_FEATURE_APERFMPERF))
-		cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
-
 	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pr_debug("cpu_init done, current pstate 0x%x\n",
-				data->currpstate);
-	else
-		pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
-			data->currfid, data->currvid);
+	pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+		data->currfid, data->currvid);
 
 	per_cpu(powernow_data, pol->cpu) = data;
 
@@ -1418,88 +1232,15 @@ static unsigned int powernowk8_get(unsigned int cpu)
 	if (err)
 		goto out;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		khz = find_khz_freq_from_pstate(data->powernow_table,
-				data->currpstate);
-	else
-		khz = find_khz_freq_from_fid(data->currfid);
+	khz = find_khz_freq_from_fid(data->currfid);
 
 
 out:
 	return khz;
 }
 
-static void _cpb_toggle_msrs(bool t)
-{
-	int cpu;
-
-	get_online_cpus();
-
-	rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-	for_each_cpu(cpu, cpu_online_mask) {
-		struct msr *reg = per_cpu_ptr(msrs, cpu);
-		if (t)
-			reg->l &= ~BIT(25);
-		else
-			reg->l |= BIT(25);
-	}
-	wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-	put_online_cpus();
-}
-
-/*
- * Switch on/off core performance boosting.
- *
- * 0=disable
- * 1=enable.
- */
-static void cpb_toggle(bool t)
-{
-	if (!cpb_capable)
-		return;
-
-	if (t && !cpb_enabled) {
-		cpb_enabled = true;
-		_cpb_toggle_msrs(t);
-		printk(KERN_INFO PFX "Core Boosting enabled.\n");
-	} else if (!t && cpb_enabled) {
-		cpb_enabled = false;
-		_cpb_toggle_msrs(t);
-		printk(KERN_INFO PFX "Core Boosting disabled.\n");
-	}
-}
-
-static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
-				 size_t count)
-{
-	int ret = -EINVAL;
-	unsigned long val = 0;
-
-	ret = strict_strtoul(buf, 10, &val);
-	if (!ret && (val == 0 || val == 1) && cpb_capable)
-		cpb_toggle(val);
-	else
-		return -EINVAL;
-
-	return count;
-}
-
-static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
-{
-	return sprintf(buf, "%u\n", cpb_enabled);
-}
-
-#define define_one_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(cpb);
-
 static struct freq_attr *powernow_k8_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
-	&cpb,
 	NULL,
 };
 
@@ -1515,53 +1256,18 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
 	.attr		= powernow_k8_attr,
 };
 
-/*
- * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
- * cannot block the remaining ones from boosting. On the CPU_UP path we
- * simply keep the boost-disable flag in sync with the current global
- * state.
- */
-static int cpb_notify(struct notifier_block *nb, unsigned long action,
-		      void *hcpu)
-{
-	unsigned cpu = (long)hcpu;
-	u32 lo, hi;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-
-		if (!cpb_enabled) {
-			rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-			lo |= BIT(25);
-			wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-		}
-		break;
-
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-		lo &= ~BIT(25);
-		wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpb_nb = {
-	.notifier_call		= cpb_notify,
-};
-
 /* driver entry point for init */
 static int __cpuinit powernowk8_init(void)
 {
-	unsigned int i, supported_cpus = 0, cpu;
+	unsigned int i, supported_cpus = 0;
 	int rv;
 
+	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+		pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
+		request_module("acpi-cpufreq");
+		return -ENODEV;
+	}
+
 	if (!x86_match_cpu(powernow_k8_ids))
 		return -ENODEV;
 
@@ -1575,38 +1281,13 @@ static int __cpuinit powernowk8_init(void)
 	if (supported_cpus != num_online_cpus())
 		return -ENODEV;
 
-	printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
-		num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
-
-	if (boot_cpu_has(X86_FEATURE_CPB)) {
-
-		cpb_capable = true;
-
-		msrs = msrs_alloc();
-		if (!msrs) {
-			printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
-			return -ENOMEM;
-		}
-
-		register_cpu_notifier(&cpb_nb);
-
-		rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
 
-		for_each_cpu(cpu, cpu_online_mask) {
-			struct msr *reg = per_cpu_ptr(msrs, cpu);
-			cpb_enabled |= !(!!(reg->l & BIT(25)));
-		}
+	if (!rv)
+		pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+			num_online_nodes(), boot_cpu_data.x86_model_id,
+			supported_cpus);
 
-		printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
-			(cpb_enabled ? "on" : "off"));
-	}
-
-	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
-	if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
-		unregister_cpu_notifier(&cpb_nb);
-		msrs_free(msrs);
-		msrs = NULL;
-	}
 	return rv;
 }
 
@@ -1615,13 +1296,6 @@ static void __exit powernowk8_exit(void)
 {
 	pr_debug("exit\n");
 
-	if (boot_cpu_has(X86_FEATURE_CPB)) {
-		msrs_free(msrs);
-		msrs = NULL;
-
-		unregister_cpu_notifier(&cpb_nb);
-	}
-
 	cpufreq_unregister_driver(&cpufreq_amd64_driver);
 }
 
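The rewritten powernowk8_init() above also illustrates the handover pattern for a deprecated driver: detect the newer hardware, pull in the replacement module, and refuse to bind. A minimal stand-alone sketch of the same pattern (handover_init is a hypothetical module init, not part of this patch):

#include <linux/module.h>
#include <linux/kmod.h>
#include <asm/cpufeature.h>

static int __init handover_init(void)
{
	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
		/* Hardware P-state CPUs are owned by acpi-cpufreq now. */
		request_module("acpi-cpufreq");
		return -ENODEV;	/* keep this driver out of the way */
	}
	return 0;
}
module_init(handover_init);
MODULE_LICENSE("GPL");
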
diff --git a/drivers/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h
index 3744d26cdc2b..79329d4d5abe 100644
--- a/drivers/cpufreq/powernow-k8.h
+++ b/drivers/cpufreq/powernow-k8.h
@@ -5,24 +5,11 @@
  * http://www.gnu.org/licenses/gpl.html
  */
 
-enum pstate {
-	HW_PSTATE_INVALID = 0xff,
-	HW_PSTATE_0 = 0,
-	HW_PSTATE_1 = 1,
-	HW_PSTATE_2 = 2,
-	HW_PSTATE_3 = 3,
-	HW_PSTATE_4 = 4,
-	HW_PSTATE_5 = 5,
-	HW_PSTATE_6 = 6,
-	HW_PSTATE_7 = 7,
-};
-
 struct powernow_k8_data {
 	unsigned int cpu;
 
 	u32 numps;	/* number of p-states */
 	u32 batps;	/* number of p-states supported on battery */
-	u32 max_hw_pstate; /* maximum legal hardware pstate */
 
 	/* these values are constant when the PSB is used to determine
 	 * vid/fid pairings, but are modified during the ->target() call
@@ -37,7 +24,6 @@ struct powernow_k8_data {
 	/* keep track of the current fid / vid or pstate */
 	u32 currvid;
 	u32 currfid;
-	enum pstate currpstate;
 
 	/* the powernow_table includes all frequency and vid/fid pairings:
 	 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
@@ -97,23 +83,6 @@ struct powernow_k8_data {
 #define MSR_S_HI_CURRENT_VID	0x0000003f
 #define MSR_C_HI_STP_GNT_BENIGN	0x00000001
 
-
-/* Hardware Pstate _PSS and MSR definitions */
-#define USE_HW_PSTATE		0x00000080
-#define HW_PSTATE_MASK		0x00000007
-#define HW_PSTATE_VALID_MASK	0x80000000
-#define HW_PSTATE_MAX_MASK	0x000000f0
-#define HW_PSTATE_MAX_SHIFT	4
-#define MSR_PSTATE_DEF_BASE	0xc0010064 /* base of Pstate MSRs */
-#define MSR_PSTATE_STATUS	0xc0010063 /* Pstate Status MSR */
-#define MSR_PSTATE_CTRL		0xc0010062 /* Pstate control MSR */
-#define MSR_PSTATE_CUR_LIMIT	0xc0010061 /* pstate current limit MSR */
-
-/* define the two driver architectures */
-#define CPU_OPTERON 0
-#define CPU_HW_PSTATE 1
-
-
 /*
  * There are restrictions frequencies have to follow:
  * - only 1 entry in the low fid table ( <=1.4GHz )
@@ -218,5 +187,4 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
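The powernow_table comment above fixes the index packing that print_basics() and the fid/vid transition paths rely on: fid in the low byte, vid in the high byte. A small decoding sketch (the helper names are illustrative, not from the driver):

#include <linux/types.h>

static inline u8 index_to_fid(u32 index)
{
	return index & 0xff;		/* fid: lower 8 bits */
}

static inline u8 index_to_vid(u32 index)
{
	return (index >> 8) & 0xff;	/* vid: upper 8 bits */
}
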
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 58bf3b1ac9c4..87db3877fead 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -18,9 +18,10 @@ static struct cpuidle_driver *cpuidle_curr_driver;
 DEFINE_SPINLOCK(cpuidle_driver_lock);
 int cpuidle_driver_refcount;
 
-static void __cpuidle_register_driver(struct cpuidle_driver *drv)
+static void set_power_states(struct cpuidle_driver *drv)
 {
 	int i;
+
 	/*
 	 * cpuidle driver should set the drv->power_specified bit
 	 * before registering if the driver provides
@@ -35,13 +36,10 @@ static void __cpuidle_register_driver(struct cpuidle_driver *drv)
 	 * an power value of -1. So we use -2, -3, etc, for other
 	 * c-states.
 	 */
-	if (!drv->power_specified) {
-		for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
-			drv->states[i].power_usage = -1 - i;
-	}
+	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
+		drv->states[i].power_usage = -1 - i;
 }
 
-
 /**
  * cpuidle_register_driver - registers a driver
  * @drv: the driver
@@ -59,13 +57,16 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
 		spin_unlock(&cpuidle_driver_lock);
 		return -EBUSY;
 	}
-	__cpuidle_register_driver(drv);
+
+	if (!drv->power_specified)
+		set_power_states(drv);
+
 	cpuidle_curr_driver = drv;
+
 	spin_unlock(&cpuidle_driver_lock);
 
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(cpuidle_register_driver);
 
 /**
@@ -96,7 +97,6 @@ void cpuidle_unregister_driver(struct cpuidle_driver *drv)
 
 	spin_unlock(&cpuidle_driver_lock);
 }
-
 EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
 
 struct cpuidle_driver *cpuidle_driver_ref(void)
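set_power_states() works because the governors only compare power_usage values: assigning -1 - i makes each deeper C-state strictly "cheaper" than the previous one, preserving ordering without real measurements. A userspace sketch of the values it produces (assuming CPUIDLE_DRIVER_STATE_START is 1, as on kernels with ARCH_HAS_CPU_RELAX):

#include <stdio.h>

int main(void)
{
	int state_count = 4;
	int i;

	/* Mirrors drv->states[i].power_usage = -1 - i from the hunk above. */
	for (i = 1; i < state_count; i++)
		printf("state %d -> power_usage %d\n", i, -1 - i);
	return 0;
}
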
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index b6a09ea859b1..9b784051ec12 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -88,6 +88,8 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
 	/* consider promotion */
 	if (last_idx < drv->state_count - 1 &&
+	    !drv->states[last_idx + 1].disabled &&
+	    !dev->states_usage[last_idx + 1].disable &&
 	    last_residency > last_state->threshold.promotion_time &&
 	    drv->states[last_idx + 1].exit_latency <= latency_req) {
 		last_state->stats.promotion_count++;
@@ -100,7 +102,9 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
 	/* consider demotion */
 	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
-	    drv->states[last_idx].exit_latency > latency_req) {
+	    (drv->states[last_idx].disabled ||
+	     dev->states_usage[last_idx].disable ||
+	     drv->states[last_idx].exit_latency > latency_req)) {
 		int i;
 
 		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
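With both hunks applied, a state disabled by the driver (states[].disabled) or through sysfs (states_usage[].disable) can no longer be promoted into, and forces a demotion when it is the current state. A condensed sketch of the two predicates, with simplified stand-in types for the cpuidle structures:

#include <stdbool.h>

struct state { bool disabled; int exit_latency; };

/* Promotion: the next state must be enabled and still fit the QoS budget. */
static bool may_promote(const struct state *next, int latency_req,
			int last_residency, int promotion_time)
{
	return !next->disabled &&
	       last_residency > promotion_time &&
	       next->exit_latency <= latency_req;
}

/* Demotion: forced when the current state is disabled or too slow. */
static bool must_demote(const struct state *cur, int latency_req)
{
	return cur->disabled || cur->exit_latency > latency_req;
}
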
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 9e1d2959e226..94c6e2aa03d6 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -607,21 +607,6 @@ static int pci_pm_prepare(struct device *dev)
 	int error = 0;
 
 	/*
-	 * If a PCI device configured to wake up the system from sleep states
-	 * has been suspended at run time and there's a resume request pending
-	 * for it, this is equivalent to the device signaling wakeup, so the
-	 * system suspend operation should be aborted.
-	 */
-	pm_runtime_get_noresume(dev);
-	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
-		pm_wakeup_event(dev, 0);
-
-	if (pm_wakeup_pending()) {
-		pm_runtime_put_sync(dev);
-		return -EBUSY;
-	}
-
-	/*
 	 * PCI devices suspended at run time need to be resumed at this
 	 * point, because in general it is necessary to reconfigure them for
 	 * system suspend. Namely, if the device is supposed to wake up the
@@ -644,8 +629,6 @@ static void pci_pm_complete(struct device *dev)
 
 	if (drv && drv->pm && drv->pm->complete)
 		drv->pm->complete(dev);
-
-	pm_runtime_put_sync(dev);
 }
 
 #else /* !CONFIG_PM_SLEEP */
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index b590ee067fcd..316df65163cf 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -98,7 +98,6 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
 
 		dst_cx->type = cx->type;
 		dst_cx->latency = cx->latency;
-		dst_cx->power = cx->power;
 
 		dst_cx->dpcnt = 0;
 		set_xen_guest_handle(dst_cx->dp, NULL);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 64ec644808bc..555d0337ad95 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -3,7 +3,6 @@
 
 #include <linux/kernel.h>
 #include <linux/cpu.h>
-#include <linux/cpuidle.h>
 #include <linux/thermal.h>
 #include <asm/acpi.h>
 
@@ -59,13 +58,11 @@ struct acpi_processor_cx {
 	u8 entry_method;
 	u8 index;
 	u32 latency;
-	u32 power;
 	u8 bm_sts_skip;
 	char desc[ACPI_CX_DESC_LEN];
 };
 
 struct acpi_processor_power {
-	struct cpuidle_device dev;
 	struct acpi_processor_cx *state;
 	unsigned long bm_check_timestamp;
 	u32 default_state;
@@ -325,12 +322,10 @@ extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */
-int acpi_processor_power_init(struct acpi_processor *pr,
-		struct acpi_device *device);
+int acpi_processor_power_init(struct acpi_processor *pr);
+int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-int acpi_processor_power_exit(struct acpi_processor *pr,
-		struct acpi_device *device);
 int acpi_processor_suspend(struct device *dev);
 int acpi_processor_resume(struct device *dev);
 extern struct cpuidle_driver acpi_idle_driver;
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index acba894374a1..8a7096fcb01e 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -97,6 +97,8 @@ struct clock_event_device {
 	void		(*broadcast)(const struct cpumask *mask);
 	void		(*set_mode)(enum clock_event_mode mode,
 				    struct clock_event_device *);
+	void		(*suspend)(struct clock_event_device *);
+	void		(*resume)(struct clock_event_device *);
 	unsigned long		min_delta_ticks;
 	unsigned long		max_delta_ticks;
 
@@ -156,6 +158,9 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
 			      freq, minsec);
 }
 
+extern void clockevents_suspend(void);
+extern void clockevents_resume(void);
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void clockevents_notify(unsigned long reason, void *arg);
 #else
@@ -164,6 +169,9 @@ extern void clockevents_notify(unsigned long reason, void *arg);
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
 
+static inline void clockevents_suspend(void) {}
+static inline void clockevents_resume(void) {}
+
 #define clockevents_notify(reason, arg) do { } while (0)
 
 #endif
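A clock event driver opts into the new hooks by filling them in before registration; clockevents_suspend()/clockevents_resume(), added later in this patch in kernel/time/clockevents.c, then walk the device list and call them. A minimal sketch with a hypothetical timer (the my_timer_* names are illustrative):

#include <linux/clockchips.h>

static void my_timer_suspend(struct clock_event_device *ced)
{
	/* Save timer state; the hardware may lose power across suspend. */
}

static void my_timer_resume(struct clock_event_device *ced)
{
	/* Reprogram the timer before timekeeping is restarted. */
}

static struct clock_event_device my_timer_ced = {
	.name		= "my-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.suspend	= my_timer_suspend,
	.resume		= my_timer_resume,
};
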
diff --git a/include/linux/device.h b/include/linux/device.h
index af92883bb4a6..86ef6ab553b1 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -776,6 +776,13 @@ static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
 	dev->power.ignore_children = enable;
 }
 
+static inline void dev_pm_syscore_device(struct device *dev, bool val)
+{
+#ifdef CONFIG_PM_SLEEP
+	dev->power.syscore = val;
+#endif
+}
+
 static inline void device_lock(struct device *dev)
 {
 	mutex_lock(&dev->mutex);
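A driver whose device must stay functional while the rest of the system sleeps (a timekeeping timer, for instance) can set the new flag from probe; under CONFIG_PM_SLEEP this sets the power.syscore bit added to struct dev_pm_info below. A hedged sketch (my_probe is a hypothetical platform driver callback):

#include <linux/device.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	/*
	 * Tell the PM core to leave this device alone during the
	 * ordinary device suspend/resume phases.
	 */
	dev_pm_syscore_device(&pdev->dev, true);
	return 0;
}
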
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 2a4e5faee904..214e0ebcb84d 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -48,6 +48,14 @@ int opp_disable(struct device *dev, unsigned long freq);
 
 struct srcu_notifier_head *opp_get_notifier(struct device *dev);
 
+#ifdef CONFIG_OF
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_OF */
 #else
 static inline unsigned long opp_get_voltage(struct opp *opp)
 {
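Under CONFIG_OF the new of_init_opp_table() populates a device's OPP table from its device tree node (the "operating-points" property documented elsewhere in this series); the !CONFIG_OF stub returns -EINVAL so callers can fail probe cleanly. A sketch of the expected call site (my_probe is hypothetical):

#include <linux/opp.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	int ret;

	ret = of_init_opp_table(&pdev->dev);
	if (ret)
		return ret;	/* -EINVAL when built without CONFIG_OF */

	/* opp_find_freq_ceil()/opp_get_voltage() may be used from here. */
	return 0;
}
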
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 88f034a23f2c..007e687c4f69 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -510,12 +510,14 @@ struct dev_pm_info {
 	bool			is_prepared:1;	/* Owned by the PM core */
 	bool			is_suspended:1;	/* Ditto */
 	bool			ignore_children:1;
+	bool			early_init:1;	/* Owned by the PM core */
 	spinlock_t		lock;
 #ifdef CONFIG_PM_SLEEP
 	struct list_head	entry;
 	struct completion	completion;
 	struct wakeup_source	*wakeup;
 	bool			wakeup_path:1;
+	bool			syscore:1;
 #else
 	unsigned int		should_wakeup:1;
 #endif
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index a7d6172922d4..7c1d252b20c0 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -114,7 +114,6 @@ struct generic_pm_domain_data {
 	struct mutex lock;
 	unsigned int refcount;
 	bool need_restore;
-	bool always_on;
 };
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -139,36 +138,32 @@ extern int __pm_genpd_of_add_device(struct device_node *genpd_node,
 				    struct device *dev,
 				    struct gpd_timing_data *td);
 
-static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
-				      struct device *dev)
-{
-	return __pm_genpd_add_device(genpd, dev, NULL);
-}
-
-static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
-					 struct device *dev)
-{
-	return __pm_genpd_of_add_device(genpd_node, dev, NULL);
-}
+extern int __pm_genpd_name_add_device(const char *domain_name,
+				      struct device *dev,
+				      struct gpd_timing_data *td);
 
 extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 				  struct device *dev);
-extern void pm_genpd_dev_always_on(struct device *dev, bool val);
 extern void pm_genpd_dev_need_restore(struct device *dev, bool val);
 extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 				  struct generic_pm_domain *new_subdomain);
+extern int pm_genpd_add_subdomain_names(const char *master_name,
+					const char *subdomain_name);
 extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 				     struct generic_pm_domain *target);
 extern int pm_genpd_add_callbacks(struct device *dev,
 				  struct gpd_dev_ops *ops,
 				  struct gpd_timing_data *td);
 extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
-extern int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
-extern int genpd_detach_cpuidle(struct generic_pm_domain *genpd);
+extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
+extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
+extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
+extern int pm_genpd_name_detach_cpuidle(const char *name);
 extern void pm_genpd_init(struct generic_pm_domain *genpd,
 			  struct dev_power_governor *gov, bool is_off);
 
 extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+extern int pm_genpd_name_poweron(const char *domain_name);
 
 extern bool default_stop_ok(struct device *dev);
 
@@ -189,8 +184,15 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
 {
 	return -ENOSYS;
 }
-static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
-				      struct device *dev)
+static inline int __pm_genpd_of_add_device(struct device_node *genpd_node,
+					   struct device *dev,
+					   struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
+static inline int __pm_genpd_name_add_device(const char *domain_name,
+					     struct device *dev,
+					     struct gpd_timing_data *td)
 {
 	return -ENOSYS;
 }
@@ -199,13 +201,17 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 {
 	return -ENOSYS;
 }
-static inline void pm_genpd_dev_always_on(struct device *dev, bool val) {}
 static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {}
 static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 					 struct generic_pm_domain *new_sd)
 {
 	return -ENOSYS;
 }
+static inline int pm_genpd_add_subdomain_names(const char *master_name,
+					       const char *subdomain_name)
+{
+	return -ENOSYS;
+}
 static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 					    struct generic_pm_domain *target)
 {
@@ -221,11 +227,19 @@ static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 {
 	return -ENOSYS;
 }
-static inline int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
+static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
 {
 	return -ENOSYS;
 }
-static inline int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+static inline int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+	return -ENOSYS;
+}
+static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+	return -ENOSYS;
+}
+static inline int pm_genpd_name_detach_cpuidle(const char *name)
 {
 	return -ENOSYS;
 }
@@ -237,6 +251,10 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
 {
 	return -ENOSYS;
 }
+static inline int pm_genpd_name_poweron(const char *domain_name)
+{
+	return -ENOSYS;
+}
 static inline bool default_stop_ok(struct device *dev)
 {
 	return false;
@@ -245,6 +263,24 @@ static inline bool default_stop_ok(struct device *dev)
 #define pm_domain_always_on_gov NULL
 #endif
 
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+				      struct device *dev)
+{
+	return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
+static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
+					 struct device *dev)
+{
+	return __pm_genpd_of_add_device(genpd_node, dev, NULL);
+}
+
+static inline int pm_genpd_name_add_device(const char *domain_name,
+					   struct device *dev)
+{
+	return __pm_genpd_name_add_device(domain_name, dev, NULL);
+}
+
 static inline int pm_genpd_remove_callbacks(struct device *dev)
 {
 	return __pm_genpd_remove_callbacks(dev, true);
@@ -258,4 +294,20 @@ static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
 static inline void pm_genpd_poweroff_unused(void) {}
 #endif
 
+#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
+extern void pm_genpd_syscore_switch(struct device *dev, bool suspend);
+#else
+static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {}
+#endif
+
+static inline void pm_genpd_syscore_poweroff(struct device *dev)
+{
+	pm_genpd_syscore_switch(dev, true);
+}
+
+static inline void pm_genpd_syscore_poweron(struct device *dev)
+{
+	pm_genpd_syscore_switch(dev, false);
+}
+
 #endif /* _LINUX_PM_DOMAIN_H */
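The syscore wrappers at the end of the header give always-running devices a way to tell genpd when their domain really powers off and back on, from syscore (interrupts-off) context. A sketch of the expected pairing (the my_timer_* names are illustrative, not from this patch):

#include <linux/pm_domain.h>

static struct device *my_timer_dev;	/* saved at probe time */

static void my_timer_syscore_suspend(void)
{
	/* Quiesce the hardware, then let genpd power the domain down. */
	pm_genpd_syscore_poweroff(my_timer_dev);
}

static void my_timer_syscore_resume(void)
{
	/* Power the domain up before touching the hardware again. */
	pm_genpd_syscore_poweron(my_timer_dev);
}
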
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index a70518c9d82f..5dfdc9ea180b 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -263,6 +263,10 @@ config PM_GENERIC_DOMAINS
 	bool
 	depends on PM
 
+config PM_GENERIC_DOMAINS_SLEEP
+	def_bool y
+	depends on PM_SLEEP && PM_GENERIC_DOMAINS
+
 config PM_GENERIC_DOMAINS_RUNTIME
 	def_bool y
 	depends on PM_RUNTIME && PM_GENERIC_DOMAINS
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index d52359374e85..68197a4e8fc9 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
 	.enable_mask	= SYSRQ_ENABLE_BOOT,
 };
 
-static int pm_sysrq_init(void)
+static int __init pm_sysrq_init(void)
 {
 	register_sysrq_key('o', &sysrq_poweroff_op);
 	return 0;
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 19db29f67558..87da817f9e13 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -79,7 +79,7 @@ static int try_to_freeze_tasks(bool user_only)
 
 		/*
 		 * We need to retry, but first give the freezing tasks some
-		 * time to enter the regrigerator.
+		 * time to enter the refrigerator.
 		 */
 		msleep(10);
 	}
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 6a031e684026..846bd42c7ed1 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -139,6 +139,7 @@ static inline int pm_qos_get_value(struct pm_qos_constraints *c)
 	default:
 		/* runtime check for not using enum */
 		BUG();
+		return PM_QOS_DEFAULT_VALUE;
 	}
 }
 
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 7e1ce012a851..30b6de0d977c 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -397,6 +397,30 @@ void clockevents_exchange_device(struct clock_event_device *old,
 	local_irq_restore(flags);
 }
 
+/**
+ * clockevents_suspend - suspend clock devices
+ */
+void clockevents_suspend(void)
+{
+	struct clock_event_device *dev;
+
+	list_for_each_entry_reverse(dev, &clockevent_devices, list)
+		if (dev->suspend)
+			dev->suspend(dev);
+}
+
+/**
+ * clockevents_resume - resume clock devices
+ */
+void clockevents_resume(void)
+{
+	struct clock_event_device *dev;
+
+	list_for_each_entry(dev, &clockevent_devices, list)
+		if (dev->resume)
+			dev->resume(dev);
+}
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 /**
  * clockevents_notify - notification about relevant events
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d3b91e75cecd..5ce06a3fa91e 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -776,6 +776,7 @@ static void timekeeping_resume(void)
 
 	read_persistent_clock(&ts);
 
+	clockevents_resume();
 	clocksource_resume();
 
 	write_seqlock_irqsave(&tk->lock, flags);
@@ -835,6 +836,7 @@ static int timekeeping_suspend(void)
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
 	clocksource_suspend();
+	clockevents_suspend();
 
 	return 0;
 }