aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/arm/exynos/power_domain.txt13
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt (renamed from Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt)8
-rw-r--r--Documentation/devicetree/bindings/power/power_domain.txt49
-rw-r--r--Documentation/devicetree/bindings/power/rockchip-io-domain.txt83
-rw-r--r--Documentation/kernel-parameters.txt10
-rw-r--r--Documentation/power/suspend-and-interrupts.txt123
-rw-r--r--MAINTAINERS4
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts23
-rw-r--r--arch/arm/mach-exynos/exynos.c1
-rw-r--r--arch/arm/mach-exynos/pm_domains.c78
-rw-r--r--arch/arm/mach-imx/imx27-dt.c2
-rw-r--r--arch/arm/mach-imx/mach-imx51.c2
-rw-r--r--arch/arm/mach-mvebu/pmsu.c2
-rw-r--r--arch/arm/mach-omap2/pm.c2
-rw-r--r--arch/arm/mach-s3c64xx/common.c5
-rw-r--r--arch/arm/mach-s3c64xx/common.h7
-rw-r--r--arch/arm/mach-s3c64xx/mach-anw6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-hmt.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-mini6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-ncp.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq5.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq7.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6400.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/pm.c7
-rw-r--r--arch/arm/mach-shmobile/cpufreq.c2
-rw-r--r--arch/arm/mach-shmobile/pm-r8a7779.c1
-rw-r--r--arch/arm/mach-shmobile/pm-rmobile.c1
-rw-r--r--arch/arm/mach-zynq/common.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c5
-rw-r--r--drivers/acpi/acpi_lpss.c167
-rw-r--r--drivers/acpi/acpi_pnp.c4
-rw-r--r--drivers/acpi/acpica/evxfgpe.c32
-rw-r--r--drivers/acpi/acpica/hwgpe.c8
-rw-r--r--drivers/acpi/acpica/utresrc.c4
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/blacklist.c36
-rw-r--r--drivers/acpi/device_pm.c71
-rw-r--r--drivers/acpi/fan.c18
-rw-r--r--drivers/acpi/osl.c12
-rw-r--r--drivers/acpi/pci_root.c14
-rw-r--r--drivers/acpi/processor_core.c6
-rw-r--r--drivers/acpi/sbs.c80
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/acpi/utils.c1
-rw-r--r--drivers/acpi/video.c291
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/amba/bus.c11
-rw-r--r--drivers/base/platform.c16
-rw-r--r--drivers/base/power/clock_ops.c19
-rw-r--r--drivers/base/power/common.c52
-rw-r--r--drivers/base/power/domain.c865
-rw-r--r--drivers/base/power/domain_governor.c7
-rw-r--r--drivers/base/power/main.c8
-rw-r--r--drivers/base/power/sysfs.c24
-rw-r--r--drivers/base/power/wakeup.c16
-rw-r--r--drivers/base/syscore.c7
-rw-r--r--drivers/cpufreq/Kconfig8
-rw-r--r--drivers/cpufreq/Kconfig.arm2
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c248
-rw-r--r--drivers/cpufreq/cpufreq-dt.c364
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c2
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c2
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c2
-rw-r--r--drivers/cpufreq/highbank-cpufreq.c6
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c44
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c1
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c2
-rw-r--r--drivers/cpuidle/Kconfig8
-rw-r--r--drivers/cpuidle/Kconfig.arm1
-rw-r--r--drivers/cpuidle/Kconfig.arm6414
-rw-r--r--drivers/cpuidle/Makefile5
-rw-r--r--drivers/cpuidle/cpuidle-arm64.c133
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c20
-rw-r--r--drivers/cpuidle/dt_idle_states.c213
-rw-r--r--drivers/cpuidle/dt_idle_states.h7
-rw-r--r--drivers/cpuidle/governor.c2
-rw-r--r--drivers/devfreq/Kconfig3
-rw-r--r--drivers/devfreq/devfreq.c3
-rw-r--r--drivers/devfreq/exynos/exynos_ppmu.c3
-rw-r--r--drivers/i2c/i2c-core.c14
-rw-r--r--drivers/mmc/core/sdio_bus.c5
-rw-r--r--drivers/pci/pcie/pme.c61
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c16
-rw-r--r--drivers/power/avs/Kconfig8
-rw-r--r--drivers/power/avs/Makefile1
-rw-r--r--drivers/power/avs/rockchip-io-domain.c351
-rw-r--r--drivers/sh/pm_runtime.c11
-rw-r--r--drivers/spi/spi.c13
-rw-r--r--include/acpi/acnames.h4
-rw-r--r--include/acpi/acpixf.h3
-rw-r--r--include/acpi/actbl1.h19
-rw-r--r--include/acpi/actbl3.h9
-rw-r--r--include/linux/acpi.h2
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/interrupt.h5
-rw-r--r--include/linux/irq.h8
-rw-r--r--include/linux/irqdesc.h10
-rw-r--r--include/linux/pm.h5
-rw-r--r--include/linux/pm_domain.h130
-rw-r--r--include/linux/suspend.h6
-rw-r--r--kernel/irq/chip.c85
-rw-r--r--kernel/irq/internals.h16
-rw-r--r--kernel/irq/manage.c32
-rw-r--r--kernel/irq/pm.c159
-rw-r--r--kernel/power/Kconfig4
-rw-r--r--kernel/power/process.c1
-rw-r--r--kernel/power/snapshot.c54
-rw-r--r--kernel/power/suspend.c51
-rw-r--r--kernel/power/suspend_test.c32
114 files changed, 2972 insertions, 1484 deletions
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index 8b4f7b7fe88b..abde1ea8a119 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -8,6 +8,8 @@ Required Properties:
8 * samsung,exynos4210-pd - for exynos4210 type power domain. 8 * samsung,exynos4210-pd - for exynos4210 type power domain.
9- reg: physical base address of the controller and length of memory mapped 9- reg: physical base address of the controller and length of memory mapped
10 region. 10 region.
11- #power-domain-cells: number of cells in power domain specifier;
12 must be 0.
11 13
12Optional Properties: 14Optional Properties:
13- clocks: List of clock handles. The parent clocks of the input clocks to the 15- clocks: List of clock handles. The parent clocks of the input clocks to the
@@ -29,6 +31,7 @@ Example:
29 lcd0: power-domain-lcd0 { 31 lcd0: power-domain-lcd0 {
30 compatible = "samsung,exynos4210-pd"; 32 compatible = "samsung,exynos4210-pd";
31 reg = <0x10023C00 0x10>; 33 reg = <0x10023C00 0x10>;
34 #power-domain-cells = <0>;
32 }; 35 };
33 36
34 mfc_pd: power-domain@10044060 { 37 mfc_pd: power-domain@10044060 {
@@ -37,12 +40,8 @@ Example:
37 clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>, 40 clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
38 <&clock CLK_MOUT_USER_ACLK333>; 41 <&clock CLK_MOUT_USER_ACLK333>;
39 clock-names = "oscclk", "pclk0", "clk0"; 42 clock-names = "oscclk", "pclk0", "clk0";
43 #power-domain-cells = <0>;
40 }; 44 };
41 45
42Example of the node using power domain: 46See Documentation/devicetree/bindings/power/power_domain.txt for description
43 47of consumer-side bindings.
44 node {
45 /* ... */
46 samsung,power-domain = <&lcd0>;
47 /* ... */
48 };
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt
index 366690cb86a3..e41c98ffbccb 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt
@@ -1,8 +1,8 @@
1Generic CPU0 cpufreq driver 1Generic cpufreq driver
2 2
3It is a generic cpufreq driver for CPU0 frequency management. It 3It is a generic DT based cpufreq driver for frequency management. It supports
4supports both uniprocessor (UP) and symmetric multiprocessor (SMP) 4both uniprocessor (UP) and symmetric multiprocessor (SMP) systems which share
5systems which share clock and voltage across all CPUs. 5clock and voltage across all CPUs.
6 6
7Both required and optional properties listed below must be defined 7Both required and optional properties listed below must be defined
8under node /cpus/cpu@0. 8under node /cpus/cpu@0.
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
new file mode 100644
index 000000000000..98c16672ab5f
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -0,0 +1,49 @@
1* Generic PM domains
2
3System on chip designs are often divided into multiple PM domains that can be
4used for power gating of selected IP blocks for power saving by reduced leakage
5current.
6
7This device tree binding can be used to bind PM domain consumer devices with
8their PM domains provided by PM domain providers. A PM domain provider can be
9represented by any node in the device tree and can provide one or more PM
10domains. A consumer node can refer to the provider by a phandle and a set of
11phandle arguments (so called PM domain specifiers) of length specified by the
12#power-domain-cells property in the PM domain provider node.
13
14==PM domain providers==
15
16Required properties:
17 - #power-domain-cells : Number of cells in a PM domain specifier;
18 Typically 0 for nodes representing a single PM domain and 1 for nodes
19 providing multiple PM domains (e.g. power controllers), but can be any value
20 as specified by device tree binding documentation of particular provider.
21
22Example:
23
24 power: power-controller@12340000 {
25 compatible = "foo,power-controller";
26 reg = <0x12340000 0x1000>;
27 #power-domain-cells = <1>;
28 };
29
30The node above defines a power controller that is a PM domain provider and
31expects one cell as its phandle argument.
32
33==PM domain consumers==
34
35Required properties:
36 - power-domains : A phandle and PM domain specifier as defined by bindings of
37 the power controller specified by phandle.
38
39Example:
40
41 leaky-device@12350000 {
42 compatible = "foo,i-leak-current";
43 reg = <0x12350000 0x1000>;
44 power-domains = <&power 0>;
45 };
46
47The node above defines a typical PM domain consumer device, which is located
48inside a PM domain with index 0 of a power controller represented by a node
49with the label "power".
diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
new file mode 100644
index 000000000000..6fbf6e7ecde6
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
@@ -0,0 +1,83 @@
1Rockchip SRAM for IO Voltage Domains:
2-------------------------------------
3
4IO domain voltages on some Rockchip SoCs are variable but need to be
5kept in sync between the regulators and the SoC using a special
6register.
7
8A specific example using rk3288:
9- If the regulator hooked up to a pin like SDMMC0_VDD is 3.3V then
10 bit 7 of GRF_IO_VSEL needs to be 0. If the regulator hooked up to
11 that same pin is 1.8V then bit 7 of GRF_IO_VSEL needs to be 1.
12
13Said another way, this driver simply handles keeping bits in the SoC's
14general register file (GRF) in sync with the actual value of a voltage
15hooked up to the pins.
16
17Note that this driver specifically doesn't include:
18- any logic for deciding what voltage we should set regulators to
19- any logic for deciding whether regulators (or internal SoC blocks)
20 should have power or not have power
21
22If there were some other software that had the smarts of making
23decisions about regulators, it would work in conjunction with this
24driver. When that other software adjusted a regulator's voltage then
25this driver would handle telling the SoC about it. A good example is
26vqmmc for SD. In that case the dw_mmc driver simply is told about a
27regulator. It changes the regulator between 3.3V and 1.8V at the
28right time. This driver notices the change and makes sure that the
29SoC is on the same page.
30
31
32Required properties:
33- compatible: should be one of:
34 - "rockchip,rk3188-io-voltage-domain" for rk3188
35 - "rockchip,rk3288-io-voltage-domain" for rk3288
36- rockchip,grf: phandle to the syscon managing the "general register files"
37
38
39You specify supplies using the standard regulator bindings by including
 40a phandle to the relevant regulator. All specified supplies must be able
41to report their voltage. The IO Voltage Domain for any non-specified
 42supplies will not be touched.
43
44Possible supplies for rk3188:
45- ap0-supply: The supply connected to AP0_VCC.
46- ap1-supply: The supply connected to AP1_VCC.
47- cif-supply: The supply connected to CIF_VCC.
48- flash-supply: The supply connected to FLASH_VCC.
49- lcdc0-supply: The supply connected to LCD0_VCC.
50- lcdc1-supply: The supply connected to LCD1_VCC.
51- vccio0-supply: The supply connected to VCCIO0.
52- vccio1-supply: The supply connected to VCCIO1.
53 Sometimes also labeled VCCIO1 and VCCIO2.
54
55Possible supplies for rk3288:
56- audio-supply: The supply connected to APIO4_VDD.
57- bb-supply: The supply connected to APIO5_VDD.
58- dvp-supply: The supply connected to DVPIO_VDD.
59- flash0-supply: The supply connected to FLASH0_VDD. Typically for eMMC
60- flash1-supply: The supply connected to FLASH1_VDD. Also known as SDIO1.
61- gpio30-supply: The supply connected to APIO1_VDD.
 62- gpio1830-supply: The supply connected to APIO2_VDD.
63- lcdc-supply: The supply connected to LCDC_VDD.
64- sdcard-supply: The supply connected to SDMMC0_VDD.
65- wifi-supply: The supply connected to APIO3_VDD. Also known as SDIO0.
66
67
68Example:
69
70 io-domains {
71 compatible = "rockchip,rk3288-io-voltage-domain";
72 rockchip,grf = <&grf>;
73
74 audio-supply = <&vcc18_codec>;
75 bb-supply = <&vcc33_io>;
76 dvp-supply = <&vcc_18>;
77 flash0-supply = <&vcc18_flashio>;
78 gpio1830-supply = <&vcc33_io>;
79 gpio30-supply = <&vcc33_pmuio>;
80 lcdc-supply = <&vcc33_lcd>;
81 sdcard-supply = <&vccio_sd>;
82 wifi-supply = <&vcc18_wl>;
83 };
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d9a452e8fb9b..cc4ab2517abc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3321,11 +3321,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3321 3321
3322 tdfx= [HW,DRM] 3322 tdfx= [HW,DRM]
3323 3323
3324 test_suspend= [SUSPEND] 3324 test_suspend= [SUSPEND][,N]
3325 Specify "mem" (for Suspend-to-RAM) or "standby" (for 3325 Specify "mem" (for Suspend-to-RAM) or "standby" (for
3326 standby suspend) as the system sleep state to briefly 3326 standby suspend) or "freeze" (for suspend type freeze)
3327 enter during system startup. The system is woken from 3327 as the system sleep state during system startup with
3328 this state using a wakeup-capable RTC alarm. 3328 the optional capability to repeat N number of times.
3329 The system is woken from this state using a
3330 wakeup-capable RTC alarm.
3329 3331
3330 thash_entries= [KNL,NET] 3332 thash_entries= [KNL,NET]
3331 Set number of hash buckets for TCP connection 3333 Set number of hash buckets for TCP connection
diff --git a/Documentation/power/suspend-and-interrupts.txt b/Documentation/power/suspend-and-interrupts.txt
new file mode 100644
index 000000000000..69663640dea5
--- /dev/null
+++ b/Documentation/power/suspend-and-interrupts.txt
@@ -0,0 +1,123 @@
1System Suspend and Device Interrupts
2
3Copyright (C) 2014 Intel Corp.
4Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
5
6
7Suspending and Resuming Device IRQs
8-----------------------------------
9
10Device interrupt request lines (IRQs) are generally disabled during system
11suspend after the "late" phase of suspending devices (that is, after all of the
12->prepare, ->suspend and ->suspend_late callbacks have been executed for all
13devices). That is done by suspend_device_irqs().
14
15The rationale for doing so is that after the "late" phase of device suspend
16there is no legitimate reason why any interrupts from suspended devices should
17trigger and if any devices have not been suspended properly yet, it is better to
18block interrupts from them anyway. Also, in the past we had problems with
19interrupt handlers for shared IRQs that device drivers implementing them were
20not prepared for interrupts triggering after their devices had been suspended.
21In some cases they would attempt to access, for example, memory address spaces
22of suspended devices and cause unpredictable behavior to ensue as a result.
23Unfortunately, such problems are very difficult to debug and the introduction
24of suspend_device_irqs(), along with the "noirq" phase of device suspend and
25resume, was the only practical way to mitigate them.
26
27Device IRQs are re-enabled during system resume, right before the "early" phase
28of resuming devices (that is, before starting to execute ->resume_early
29callbacks for devices). The function doing that is resume_device_irqs().
30
31
32The IRQF_NO_SUSPEND Flag
33------------------------
34
35There are interrupts that can legitimately trigger during the entire system
36suspend-resume cycle, including the "noirq" phases of suspending and resuming
37devices as well as during the time when nonboot CPUs are taken offline and
38brought back online. That applies to timer interrupts in the first place,
39but also to IPIs and to some other special-purpose interrupts.
40
41The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when
42requesting a special-purpose interrupt. It causes suspend_device_irqs() to
43leave the corresponding IRQ enabled so as to allow the interrupt to work all
44the time as expected.
45
46Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one
47user of it. Thus, if the IRQ is shared, all of the interrupt handlers installed
48for it will be executed as usual after suspend_device_irqs(), even if the
49IRQF_NO_SUSPEND flag was not passed to request_irq() (or equivalent) by some of
50the IRQ's users. For this reason, using IRQF_NO_SUSPEND and IRQF_SHARED at the
51same time should be avoided.
52
53
54System Wakeup Interrupts, enable_irq_wake() and disable_irq_wake()
55------------------------------------------------------------------
56
57System wakeup interrupts generally need to be configured to wake up the system
58from sleep states, especially if they are used for different purposes (e.g. as
59I/O interrupts) in the working state.
60
61That may involve turning on a special signal handling logic within the platform
62(such as an SoC) so that signals from a given line are routed in a different way
63during system sleep so as to trigger a system wakeup when needed. For example,
64the platform may include a dedicated interrupt controller used specifically for
65handling system wakeup events. Then, if a given interrupt line is supposed to
 66wake up the system from sleep states, the corresponding input of that interrupt
67controller needs to be enabled to receive signals from the line in question.
68After wakeup, it generally is better to disable that input to prevent the
69dedicated controller from triggering interrupts unnecessarily.
70
71The IRQ subsystem provides two helper functions to be used by device drivers for
72those purposes. Namely, enable_irq_wake() turns on the platform's logic for
73handling the given IRQ as a system wakeup interrupt line and disable_irq_wake()
74turns that logic off.
75
76Calling enable_irq_wake() causes suspend_device_irqs() to treat the given IRQ
 77in a special way. Namely, the IRQ remains enabled, but on the first interrupt
78it will be disabled, marked as pending and "suspended" so that it will be
79re-enabled by resume_device_irqs() during the subsequent system resume. Also
 80the PM core is notified about the event which causes the system suspend in
81progress to be aborted (that doesn't have to happen immediately, but at one
82of the points where the suspend thread looks for pending wakeup events).
83
84This way every interrupt from a wakeup interrupt source will either cause the
85system suspend currently in progress to be aborted or wake up the system if
86already suspended. However, after suspend_device_irqs() interrupt handlers are
87not executed for system wakeup IRQs. They are only executed for IRQF_NO_SUSPEND
88IRQs at that time, but those IRQs should not be configured for system wakeup
89using enable_irq_wake().
90
91
92Interrupts and Suspend-to-Idle
93------------------------------
94
95Suspend-to-idle (also known as the "freeze" sleep state) is a relatively new
96system sleep state that works by idling all of the processors and waiting for
97interrupts right after the "noirq" phase of suspending devices.
98
99Of course, this means that all of the interrupts with the IRQF_NO_SUSPEND flag
100set will bring CPUs out of idle while in that state, but they will not cause the
101IRQ subsystem to trigger a system wakeup.
102
103System wakeup interrupts, in turn, will trigger wakeup from suspend-to-idle in
104analogy with what they do in the full system suspend case. The only difference
105is that the wakeup from suspend-to-idle is signaled using the usual working
106state interrupt delivery mechanisms and doesn't require the platform to use
107any special interrupt handling logic for it to work.
108
109
110IRQF_NO_SUSPEND and enable_irq_wake()
111-------------------------------------
112
113There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND
114flag on the same IRQ.
115
116First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND
117interrupts (interrupt handlers are invoked after suspend_device_irqs()) are
118directly at odds with the rules for handling system wakeup interrupts (interrupt
119handlers are not invoked after suspend_device_irqs()).
120
121Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not
122to individual interrupt handlers, so sharing an IRQ between a system wakeup
123interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense.
diff --git a/MAINTAINERS b/MAINTAINERS
index 75b98b4958c8..40d4796886c9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8490,11 +8490,11 @@ S: Maintained
8490F: Documentation/security/Smack.txt 8490F: Documentation/security/Smack.txt
8491F: security/smack/ 8491F: security/smack/
8492 8492
8493SMARTREFLEX DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS) 8493DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
8494M: Kevin Hilman <khilman@kernel.org> 8494M: Kevin Hilman <khilman@kernel.org>
8495M: Nishanth Menon <nm@ti.com> 8495M: Nishanth Menon <nm@ti.com>
8496S: Maintained 8496S: Maintained
8497F: drivers/power/avs/smartreflex.c 8497F: drivers/power/avs/
8498F: include/linux/power/smartreflex.h 8498F: include/linux/power/smartreflex.h
8499L: linux-pm@vger.kernel.org 8499L: linux-pm@vger.kernel.org
8500 8500
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index a25c262326dc..322fd1519b09 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -38,6 +38,7 @@
38 compatible = "arm,cortex-a15"; 38 compatible = "arm,cortex-a15";
39 reg = <0>; 39 reg = <0>;
40 cci-control-port = <&cci_control1>; 40 cci-control-port = <&cci_control1>;
41 cpu-idle-states = <&CLUSTER_SLEEP_BIG>;
41 }; 42 };
42 43
43 cpu1: cpu@1 { 44 cpu1: cpu@1 {
@@ -45,6 +46,7 @@
45 compatible = "arm,cortex-a15"; 46 compatible = "arm,cortex-a15";
46 reg = <1>; 47 reg = <1>;
47 cci-control-port = <&cci_control1>; 48 cci-control-port = <&cci_control1>;
49 cpu-idle-states = <&CLUSTER_SLEEP_BIG>;
48 }; 50 };
49 51
50 cpu2: cpu@2 { 52 cpu2: cpu@2 {
@@ -52,6 +54,7 @@
52 compatible = "arm,cortex-a7"; 54 compatible = "arm,cortex-a7";
53 reg = <0x100>; 55 reg = <0x100>;
54 cci-control-port = <&cci_control2>; 56 cci-control-port = <&cci_control2>;
57 cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>;
55 }; 58 };
56 59
57 cpu3: cpu@3 { 60 cpu3: cpu@3 {
@@ -59,6 +62,7 @@
59 compatible = "arm,cortex-a7"; 62 compatible = "arm,cortex-a7";
60 reg = <0x101>; 63 reg = <0x101>;
61 cci-control-port = <&cci_control2>; 64 cci-control-port = <&cci_control2>;
65 cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>;
62 }; 66 };
63 67
64 cpu4: cpu@4 { 68 cpu4: cpu@4 {
@@ -66,6 +70,25 @@
66 compatible = "arm,cortex-a7"; 70 compatible = "arm,cortex-a7";
67 reg = <0x102>; 71 reg = <0x102>;
68 cci-control-port = <&cci_control2>; 72 cci-control-port = <&cci_control2>;
73 cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>;
74 };
75
76 idle-states {
77 CLUSTER_SLEEP_BIG: cluster-sleep-big {
78 compatible = "arm,idle-state";
79 local-timer-stop;
80 entry-latency-us = <1000>;
81 exit-latency-us = <700>;
82 min-residency-us = <2000>;
83 };
84
85 CLUSTER_SLEEP_LITTLE: cluster-sleep-little {
86 compatible = "arm,idle-state";
87 local-timer-stop;
88 entry-latency-us = <1000>;
89 exit-latency-us = <500>;
90 min-residency-us = <2500>;
91 };
69 }; 92 };
70 }; 93 };
71 94
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 6a24e111d6e1..b89e5f35db84 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -193,7 +193,6 @@ static void __init exynos_init_late(void)
193 /* to be supported later */ 193 /* to be supported later */
194 return; 194 return;
195 195
196 pm_genpd_poweroff_unused();
197 exynos_pm_init(); 196 exynos_pm_init();
198} 197}
199 198
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index fd76e1b5a471..20f267121b3e 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -105,78 +105,6 @@ static int exynos_pd_power_off(struct generic_pm_domain *domain)
105 return exynos_pd_power(domain, false); 105 return exynos_pd_power(domain, false);
106} 106}
107 107
108static void exynos_add_device_to_domain(struct exynos_pm_domain *pd,
109 struct device *dev)
110{
111 int ret;
112
113 dev_dbg(dev, "adding to power domain %s\n", pd->pd.name);
114
115 while (1) {
116 ret = pm_genpd_add_device(&pd->pd, dev);
117 if (ret != -EAGAIN)
118 break;
119 cond_resched();
120 }
121
122 pm_genpd_dev_need_restore(dev, true);
123}
124
125static void exynos_remove_device_from_domain(struct device *dev)
126{
127 struct generic_pm_domain *genpd = dev_to_genpd(dev);
128 int ret;
129
130 dev_dbg(dev, "removing from power domain %s\n", genpd->name);
131
132 while (1) {
133 ret = pm_genpd_remove_device(genpd, dev);
134 if (ret != -EAGAIN)
135 break;
136 cond_resched();
137 }
138}
139
140static void exynos_read_domain_from_dt(struct device *dev)
141{
142 struct platform_device *pd_pdev;
143 struct exynos_pm_domain *pd;
144 struct device_node *node;
145
146 node = of_parse_phandle(dev->of_node, "samsung,power-domain", 0);
147 if (!node)
148 return;
149 pd_pdev = of_find_device_by_node(node);
150 if (!pd_pdev)
151 return;
152 pd = platform_get_drvdata(pd_pdev);
153 exynos_add_device_to_domain(pd, dev);
154}
155
156static int exynos_pm_notifier_call(struct notifier_block *nb,
157 unsigned long event, void *data)
158{
159 struct device *dev = data;
160
161 switch (event) {
162 case BUS_NOTIFY_BIND_DRIVER:
163 if (dev->of_node)
164 exynos_read_domain_from_dt(dev);
165
166 break;
167
168 case BUS_NOTIFY_UNBOUND_DRIVER:
169 exynos_remove_device_from_domain(dev);
170
171 break;
172 }
173 return NOTIFY_DONE;
174}
175
176static struct notifier_block platform_nb = {
177 .notifier_call = exynos_pm_notifier_call,
178};
179
180static __init int exynos4_pm_init_power_domain(void) 108static __init int exynos4_pm_init_power_domain(void)
181{ 109{
182 struct platform_device *pdev; 110 struct platform_device *pdev;
@@ -202,7 +130,6 @@ static __init int exynos4_pm_init_power_domain(void)
202 pd->base = of_iomap(np, 0); 130 pd->base = of_iomap(np, 0);
203 pd->pd.power_off = exynos_pd_power_off; 131 pd->pd.power_off = exynos_pd_power_off;
204 pd->pd.power_on = exynos_pd_power_on; 132 pd->pd.power_on = exynos_pd_power_on;
205 pd->pd.of_node = np;
206 133
207 pd->oscclk = clk_get(dev, "oscclk"); 134 pd->oscclk = clk_get(dev, "oscclk");
208 if (IS_ERR(pd->oscclk)) 135 if (IS_ERR(pd->oscclk))
@@ -228,15 +155,12 @@ static __init int exynos4_pm_init_power_domain(void)
228 clk_put(pd->oscclk); 155 clk_put(pd->oscclk);
229 156
230no_clk: 157no_clk:
231 platform_set_drvdata(pdev, pd);
232
233 on = __raw_readl(pd->base + 0x4) & INT_LOCAL_PWR_EN; 158 on = __raw_readl(pd->base + 0x4) & INT_LOCAL_PWR_EN;
234 159
235 pm_genpd_init(&pd->pd, NULL, !on); 160 pm_genpd_init(&pd->pd, NULL, !on);
161 of_genpd_add_provider_simple(np, &pd->pd);
236 } 162 }
237 163
238 bus_register_notifier(&platform_bus_type, &platform_nb);
239
240 return 0; 164 return 0;
241} 165}
242arch_initcall(exynos4_pm_init_power_domain); 166arch_initcall(exynos4_pm_init_power_domain);
diff --git a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c
index 080e66c6a1d0..dc8f1a6f45f2 100644
--- a/arch/arm/mach-imx/imx27-dt.c
+++ b/arch/arm/mach-imx/imx27-dt.c
@@ -20,7 +20,7 @@
20 20
21static void __init imx27_dt_init(void) 21static void __init imx27_dt_init(void)
22{ 22{
23 struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; 23 struct platform_device_info devinfo = { .name = "cpufreq-dt", };
24 24
25 mxc_arch_reset_init_dt(); 25 mxc_arch_reset_init_dt();
26 26
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c
index c77deb3f0893..2c5fcaf8675b 100644
--- a/arch/arm/mach-imx/mach-imx51.c
+++ b/arch/arm/mach-imx/mach-imx51.c
@@ -51,7 +51,7 @@ static void __init imx51_ipu_mipi_setup(void)
51 51
52static void __init imx51_dt_init(void) 52static void __init imx51_dt_init(void)
53{ 53{
54 struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; 54 struct platform_device_info devinfo = { .name = "cpufreq-dt", };
55 55
56 mxc_arch_reset_init_dt(); 56 mxc_arch_reset_init_dt();
57 imx51_ipu_mipi_setup(); 57 imx51_ipu_mipi_setup();
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 8a70a51533fd..bbd8664d1bac 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -644,7 +644,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
644 } 644 }
645 } 645 }
646 646
647 platform_device_register_simple("cpufreq-generic", -1, NULL, 0); 647 platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
648 return 0; 648 return 0;
649} 649}
650 650
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 828aee9ea6a8..58920bc8807b 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -282,7 +282,7 @@ static inline void omap_init_cpufreq(void)
282 if (!of_have_populated_dt()) 282 if (!of_have_populated_dt())
283 devinfo.name = "omap-cpufreq"; 283 devinfo.name = "omap-cpufreq";
284 else 284 else
285 devinfo.name = "cpufreq-cpu0"; 285 devinfo.name = "cpufreq-dt";
286 platform_device_register_full(&devinfo); 286 platform_device_register_full(&devinfo);
287} 287}
288 288
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
index 5c45aae675b6..16547f2641a3 100644
--- a/arch/arm/mach-s3c64xx/common.c
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -440,8 +440,3 @@ void s3c64xx_restart(enum reboot_mode mode, const char *cmd)
440 /* if all else fails, or mode was for soft, jump to 0 */ 440 /* if all else fails, or mode was for soft, jump to 0 */
441 soft_restart(0); 441 soft_restart(0);
442} 442}
443
444void __init s3c64xx_init_late(void)
445{
446 s3c64xx_pm_late_initcall();
447}
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h
index 7043e7a3a67e..9eb864412911 100644
--- a/arch/arm/mach-s3c64xx/common.h
+++ b/arch/arm/mach-s3c64xx/common.h
@@ -23,7 +23,6 @@ void s3c64xx_init_irq(u32 vic0, u32 vic1);
23void s3c64xx_init_io(struct map_desc *mach_desc, int size); 23void s3c64xx_init_io(struct map_desc *mach_desc, int size);
24 24
25void s3c64xx_restart(enum reboot_mode mode, const char *cmd); 25void s3c64xx_restart(enum reboot_mode mode, const char *cmd);
26void s3c64xx_init_late(void);
27 26
28void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, 27void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
29 unsigned long xusbxti_f, bool is_s3c6400, void __iomem *reg_base); 28 unsigned long xusbxti_f, bool is_s3c6400, void __iomem *reg_base);
@@ -52,12 +51,6 @@ extern void s3c6410_map_io(void);
52#define s3c6410_init NULL 51#define s3c6410_init NULL
53#endif 52#endif
54 53
55#ifdef CONFIG_PM
56int __init s3c64xx_pm_late_initcall(void);
57#else
58static inline int s3c64xx_pm_late_initcall(void) { return 0; }
59#endif
60
61#ifdef CONFIG_S3C64XX_PL080 54#ifdef CONFIG_S3C64XX_PL080
62extern struct pl08x_platform_data s3c64xx_dma0_plat_data; 55extern struct pl08x_platform_data s3c64xx_dma0_plat_data;
63extern struct pl08x_platform_data s3c64xx_dma1_plat_data; 56extern struct pl08x_platform_data s3c64xx_dma1_plat_data;
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
index 60576dfbea8d..6224c67f5061 100644
--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
+++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
@@ -233,7 +233,6 @@ MACHINE_START(ANW6410, "A&W6410")
233 .init_irq = s3c6410_init_irq, 233 .init_irq = s3c6410_init_irq,
234 .map_io = anw6410_map_io, 234 .map_io = anw6410_map_io,
235 .init_machine = anw6410_machine_init, 235 .init_machine = anw6410_machine_init,
236 .init_late = s3c64xx_init_late,
237 .init_time = samsung_timer_init, 236 .init_time = samsung_timer_init,
238 .restart = s3c64xx_restart, 237 .restart = s3c64xx_restart,
239MACHINE_END 238MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index fe116334afda..10b913baab28 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -857,7 +857,6 @@ MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
857 .init_irq = s3c6410_init_irq, 857 .init_irq = s3c6410_init_irq,
858 .map_io = crag6410_map_io, 858 .map_io = crag6410_map_io,
859 .init_machine = crag6410_machine_init, 859 .init_machine = crag6410_machine_init,
860 .init_late = s3c64xx_init_late,
861 .init_time = samsung_timer_init, 860 .init_time = samsung_timer_init,
862 .restart = s3c64xx_restart, 861 .restart = s3c64xx_restart,
863MACHINE_END 862MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index 19e8feb908fd..e4b087c58ee6 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -277,7 +277,6 @@ MACHINE_START(HMT, "Airgoo-HMT")
277 .init_irq = s3c6410_init_irq, 277 .init_irq = s3c6410_init_irq,
278 .map_io = hmt_map_io, 278 .map_io = hmt_map_io,
279 .init_machine = hmt_machine_init, 279 .init_machine = hmt_machine_init,
280 .init_late = s3c64xx_init_late,
281 .init_time = samsung_timer_init, 280 .init_time = samsung_timer_init,
282 .restart = s3c64xx_restart, 281 .restart = s3c64xx_restart,
283MACHINE_END 282MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index 9cbc07602ef3..ab61af50bfb9 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -366,7 +366,6 @@ MACHINE_START(MINI6410, "MINI6410")
366 .init_irq = s3c6410_init_irq, 366 .init_irq = s3c6410_init_irq,
367 .map_io = mini6410_map_io, 367 .map_io = mini6410_map_io,
368 .init_machine = mini6410_machine_init, 368 .init_machine = mini6410_machine_init,
369 .init_late = s3c64xx_init_late,
370 .init_time = samsung_timer_init, 369 .init_time = samsung_timer_init,
371 .restart = s3c64xx_restart, 370 .restart = s3c64xx_restart,
372MACHINE_END 371MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c
index 4bae7dc49eea..80cb1446f69f 100644
--- a/arch/arm/mach-s3c64xx/mach-ncp.c
+++ b/arch/arm/mach-s3c64xx/mach-ncp.c
@@ -103,7 +103,6 @@ MACHINE_START(NCP, "NCP")
103 .init_irq = s3c6410_init_irq, 103 .init_irq = s3c6410_init_irq,
104 .map_io = ncp_map_io, 104 .map_io = ncp_map_io,
105 .init_machine = ncp_machine_init, 105 .init_machine = ncp_machine_init,
106 .init_late = s3c64xx_init_late,
107 .init_time = samsung_timer_init, 106 .init_time = samsung_timer_init,
108 .restart = s3c64xx_restart, 107 .restart = s3c64xx_restart,
109MACHINE_END 108MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index fbad2af1ef16..85fa9598b980 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -335,7 +335,6 @@ MACHINE_START(REAL6410, "REAL6410")
335 .init_irq = s3c6410_init_irq, 335 .init_irq = s3c6410_init_irq,
336 .map_io = real6410_map_io, 336 .map_io = real6410_map_io,
337 .init_machine = real6410_machine_init, 337 .init_machine = real6410_machine_init,
338 .init_late = s3c64xx_init_late,
339 .init_time = samsung_timer_init, 338 .init_time = samsung_timer_init,
340 .restart = s3c64xx_restart, 339 .restart = s3c64xx_restart,
341MACHINE_END 340MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index dec4c08e834f..33224ab36fac 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -156,7 +156,6 @@ MACHINE_START(SMARTQ5, "SmartQ 5")
156 .init_irq = s3c6410_init_irq, 156 .init_irq = s3c6410_init_irq,
157 .map_io = smartq_map_io, 157 .map_io = smartq_map_io,
158 .init_machine = smartq5_machine_init, 158 .init_machine = smartq5_machine_init,
159 .init_late = s3c64xx_init_late,
160 .init_time = samsung_timer_init, 159 .init_time = samsung_timer_init,
161 .restart = s3c64xx_restart, 160 .restart = s3c64xx_restart,
162MACHINE_END 161MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index 27b322069c7d..fc7fece22fb0 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -172,7 +172,6 @@ MACHINE_START(SMARTQ7, "SmartQ 7")
172 .init_irq = s3c6410_init_irq, 172 .init_irq = s3c6410_init_irq,
173 .map_io = smartq_map_io, 173 .map_io = smartq_map_io,
174 .init_machine = smartq7_machine_init, 174 .init_machine = smartq7_machine_init,
175 .init_late = s3c64xx_init_late,
176 .init_time = samsung_timer_init, 175 .init_time = samsung_timer_init,
177 .restart = s3c64xx_restart, 176 .restart = s3c64xx_restart,
178MACHINE_END 177MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c
index 910749768340..6f425126a735 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6400.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c
@@ -92,7 +92,6 @@ MACHINE_START(SMDK6400, "SMDK6400")
92 .init_irq = s3c6400_init_irq, 92 .init_irq = s3c6400_init_irq,
93 .map_io = smdk6400_map_io, 93 .map_io = smdk6400_map_io,
94 .init_machine = smdk6400_machine_init, 94 .init_machine = smdk6400_machine_init,
95 .init_late = s3c64xx_init_late,
96 .init_time = samsung_timer_init, 95 .init_time = samsung_timer_init,
97 .restart = s3c64xx_restart, 96 .restart = s3c64xx_restart,
98MACHINE_END 97MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 1dc86d76b530..661eb662d051 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -705,7 +705,6 @@ MACHINE_START(SMDK6410, "SMDK6410")
705 .init_irq = s3c6410_init_irq, 705 .init_irq = s3c6410_init_irq,
706 .map_io = smdk6410_map_io, 706 .map_io = smdk6410_map_io,
707 .init_machine = smdk6410_machine_init, 707 .init_machine = smdk6410_machine_init,
708 .init_late = s3c64xx_init_late,
709 .init_time = samsung_timer_init, 708 .init_time = samsung_timer_init,
710 .restart = s3c64xx_restart, 709 .restart = s3c64xx_restart,
711MACHINE_END 710MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 6b37694fa335..aaf7bea4032f 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -347,10 +347,3 @@ static __init int s3c64xx_pm_initcall(void)
347 return 0; 347 return 0;
348} 348}
349arch_initcall(s3c64xx_pm_initcall); 349arch_initcall(s3c64xx_pm_initcall);
350
351int __init s3c64xx_pm_late_initcall(void)
352{
353 pm_genpd_poweroff_unused();
354
355 return 0;
356}
diff --git a/arch/arm/mach-shmobile/cpufreq.c b/arch/arm/mach-shmobile/cpufreq.c
index 8a24b2be46ae..57fbff024dcd 100644
--- a/arch/arm/mach-shmobile/cpufreq.c
+++ b/arch/arm/mach-shmobile/cpufreq.c
@@ -12,6 +12,6 @@
12 12
13int __init shmobile_cpufreq_init(void) 13int __init shmobile_cpufreq_init(void)
14{ 14{
15 platform_device_register_simple("cpufreq-cpu0", -1, NULL, 0); 15 platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
16 return 0; 16 return 0;
17} 17}
diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c
index 69f70b7f7fb2..82fe3d7f9662 100644
--- a/arch/arm/mach-shmobile/pm-r8a7779.c
+++ b/arch/arm/mach-shmobile/pm-r8a7779.c
@@ -87,7 +87,6 @@ static void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
87 genpd->dev_ops.stop = pm_clk_suspend; 87 genpd->dev_ops.stop = pm_clk_suspend;
88 genpd->dev_ops.start = pm_clk_resume; 88 genpd->dev_ops.start = pm_clk_resume;
89 genpd->dev_ops.active_wakeup = pd_active_wakeup; 89 genpd->dev_ops.active_wakeup = pd_active_wakeup;
90 genpd->dev_irq_safe = true;
91 genpd->power_off = pd_power_down; 90 genpd->power_off = pd_power_down;
92 genpd->power_on = pd_power_up; 91 genpd->power_on = pd_power_up;
93 92
diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c
index a88079af7914..717e6413d29c 100644
--- a/arch/arm/mach-shmobile/pm-rmobile.c
+++ b/arch/arm/mach-shmobile/pm-rmobile.c
@@ -110,7 +110,6 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
110 genpd->dev_ops.stop = pm_clk_suspend; 110 genpd->dev_ops.stop = pm_clk_suspend;
111 genpd->dev_ops.start = pm_clk_resume; 111 genpd->dev_ops.start = pm_clk_resume;
112 genpd->dev_ops.active_wakeup = rmobile_pd_active_wakeup; 112 genpd->dev_ops.active_wakeup = rmobile_pd_active_wakeup;
113 genpd->dev_irq_safe = true;
114 genpd->power_off = rmobile_pd_power_down; 113 genpd->power_off = rmobile_pd_power_down;
115 genpd->power_on = rmobile_pd_power_up; 114 genpd->power_on = rmobile_pd_power_up;
116 __rmobile_pd_power_up(rmobile_pd, false); 115 __rmobile_pd_power_up(rmobile_pd, false);
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 613c476872eb..26f92c28d22b 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -110,7 +110,7 @@ static void __init zynq_init_late(void)
110 */ 110 */
111static void __init zynq_init_machine(void) 111static void __init zynq_init_machine(void)
112{ 112{
113 struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; 113 struct platform_device_info devinfo = { .name = "cpufreq-dt", };
114 struct soc_device_attribute *soc_dev_attr; 114 struct soc_device_attribute *soc_dev_attr;
115 struct soc_device *soc_dev; 115 struct soc_device *soc_dev;
116 struct device *parent = NULL; 116 struct device *parent = NULL;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 337ce5a9b15c..1183d545da1e 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2623,6 +2623,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
2623 .irq_eoi = ack_apic_level, 2623 .irq_eoi = ack_apic_level,
2624 .irq_set_affinity = native_ioapic_set_affinity, 2624 .irq_set_affinity = native_ioapic_set_affinity,
2625 .irq_retrigger = ioapic_retrigger_irq, 2625 .irq_retrigger = ioapic_retrigger_irq,
2626 .flags = IRQCHIP_SKIP_SET_WAKE,
2626}; 2627};
2627 2628
2628static inline void init_IO_APIC_traps(void) 2629static inline void init_IO_APIC_traps(void)
@@ -3173,6 +3174,7 @@ static struct irq_chip msi_chip = {
3173 .irq_ack = ack_apic_edge, 3174 .irq_ack = ack_apic_edge,
3174 .irq_set_affinity = msi_set_affinity, 3175 .irq_set_affinity = msi_set_affinity,
3175 .irq_retrigger = ioapic_retrigger_irq, 3176 .irq_retrigger = ioapic_retrigger_irq,
3177 .flags = IRQCHIP_SKIP_SET_WAKE,
3176}; 3178};
3177 3179
3178int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, 3180int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
@@ -3271,6 +3273,7 @@ static struct irq_chip dmar_msi_type = {
3271 .irq_ack = ack_apic_edge, 3273 .irq_ack = ack_apic_edge,
3272 .irq_set_affinity = dmar_msi_set_affinity, 3274 .irq_set_affinity = dmar_msi_set_affinity,
3273 .irq_retrigger = ioapic_retrigger_irq, 3275 .irq_retrigger = ioapic_retrigger_irq,
3276 .flags = IRQCHIP_SKIP_SET_WAKE,
3274}; 3277};
3275 3278
3276int arch_setup_dmar_msi(unsigned int irq) 3279int arch_setup_dmar_msi(unsigned int irq)
@@ -3321,6 +3324,7 @@ static struct irq_chip hpet_msi_type = {
3321 .irq_ack = ack_apic_edge, 3324 .irq_ack = ack_apic_edge,
3322 .irq_set_affinity = hpet_msi_set_affinity, 3325 .irq_set_affinity = hpet_msi_set_affinity,
3323 .irq_retrigger = ioapic_retrigger_irq, 3326 .irq_retrigger = ioapic_retrigger_irq,
3327 .flags = IRQCHIP_SKIP_SET_WAKE,
3324}; 3328};
3325 3329
3326int default_setup_hpet_msi(unsigned int irq, unsigned int id) 3330int default_setup_hpet_msi(unsigned int irq, unsigned int id)
@@ -3384,6 +3388,7 @@ static struct irq_chip ht_irq_chip = {
3384 .irq_ack = ack_apic_edge, 3388 .irq_ack = ack_apic_edge,
3385 .irq_set_affinity = ht_set_affinity, 3389 .irq_set_affinity = ht_set_affinity,
3386 .irq_retrigger = ioapic_retrigger_irq, 3390 .irq_retrigger = ioapic_retrigger_irq,
3391 .flags = IRQCHIP_SKIP_SET_WAKE,
3387}; 3392};
3388 3393
3389int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3394int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index b0ea767c8696..93d160661f4c 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -54,55 +54,58 @@ ACPI_MODULE_NAME("acpi_lpss");
54 54
55#define LPSS_PRV_REG_COUNT 9 55#define LPSS_PRV_REG_COUNT 9
56 56
57struct lpss_shared_clock { 57/* LPSS Flags */
58 const char *name; 58#define LPSS_CLK BIT(0)
59 unsigned long rate; 59#define LPSS_CLK_GATE BIT(1)
60 struct clk *clk; 60#define LPSS_CLK_DIVIDER BIT(2)
61}; 61#define LPSS_LTR BIT(3)
62#define LPSS_SAVE_CTX BIT(4)
62 63
63struct lpss_private_data; 64struct lpss_private_data;
64 65
65struct lpss_device_desc { 66struct lpss_device_desc {
66 bool clk_required; 67 unsigned int flags;
67 const char *clkdev_name;
68 bool ltr_required;
69 unsigned int prv_offset; 68 unsigned int prv_offset;
70 size_t prv_size_override; 69 size_t prv_size_override;
71 bool clk_divider;
72 bool clk_gate;
73 bool save_ctx;
74 struct lpss_shared_clock *shared_clock;
75 void (*setup)(struct lpss_private_data *pdata); 70 void (*setup)(struct lpss_private_data *pdata);
76}; 71};
77 72
78static struct lpss_device_desc lpss_dma_desc = { 73static struct lpss_device_desc lpss_dma_desc = {
79 .clk_required = true, 74 .flags = LPSS_CLK,
80 .clkdev_name = "hclk",
81}; 75};
82 76
83struct lpss_private_data { 77struct lpss_private_data {
84 void __iomem *mmio_base; 78 void __iomem *mmio_base;
85 resource_size_t mmio_size; 79 resource_size_t mmio_size;
80 unsigned int fixed_clk_rate;
86 struct clk *clk; 81 struct clk *clk;
87 const struct lpss_device_desc *dev_desc; 82 const struct lpss_device_desc *dev_desc;
88 u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; 83 u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
89}; 84};
90 85
86/* UART Component Parameter Register */
87#define LPSS_UART_CPR 0xF4
88#define LPSS_UART_CPR_AFCE BIT(4)
89
91static void lpss_uart_setup(struct lpss_private_data *pdata) 90static void lpss_uart_setup(struct lpss_private_data *pdata)
92{ 91{
93 unsigned int offset; 92 unsigned int offset;
94 u32 reg; 93 u32 val;
95 94
96 offset = pdata->dev_desc->prv_offset + LPSS_TX_INT; 95 offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
97 reg = readl(pdata->mmio_base + offset); 96 val = readl(pdata->mmio_base + offset);
98 writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset); 97 writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
99 98
100 offset = pdata->dev_desc->prv_offset + LPSS_GENERAL; 99 val = readl(pdata->mmio_base + LPSS_UART_CPR);
101 reg = readl(pdata->mmio_base + offset); 100 if (!(val & LPSS_UART_CPR_AFCE)) {
102 writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset); 101 offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
102 val = readl(pdata->mmio_base + offset);
103 val |= LPSS_GENERAL_UART_RTS_OVRD;
104 writel(val, pdata->mmio_base + offset);
105 }
103} 106}
104 107
105static void lpss_i2c_setup(struct lpss_private_data *pdata) 108static void byt_i2c_setup(struct lpss_private_data *pdata)
106{ 109{
107 unsigned int offset; 110 unsigned int offset;
108 u32 val; 111 u32 val;
@@ -111,100 +114,56 @@ static void lpss_i2c_setup(struct lpss_private_data *pdata)
111 val = readl(pdata->mmio_base + offset); 114 val = readl(pdata->mmio_base + offset);
112 val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC; 115 val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
113 writel(val, pdata->mmio_base + offset); 116 writel(val, pdata->mmio_base + offset);
114}
115 117
116static struct lpss_device_desc wpt_dev_desc = { 118 if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
117 .clk_required = true, 119 pdata->fixed_clk_rate = 133000000;
118 .prv_offset = 0x800, 120}
119 .ltr_required = true,
120 .clk_divider = true,
121 .clk_gate = true,
122};
123 121
124static struct lpss_device_desc lpt_dev_desc = { 122static struct lpss_device_desc lpt_dev_desc = {
125 .clk_required = true, 123 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
126 .prv_offset = 0x800, 124 .prv_offset = 0x800,
127 .ltr_required = true,
128 .clk_divider = true,
129 .clk_gate = true,
130}; 125};
131 126
132static struct lpss_device_desc lpt_i2c_dev_desc = { 127static struct lpss_device_desc lpt_i2c_dev_desc = {
133 .clk_required = true, 128 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
134 .prv_offset = 0x800, 129 .prv_offset = 0x800,
135 .ltr_required = true,
136 .clk_gate = true,
137}; 130};
138 131
139static struct lpss_device_desc lpt_uart_dev_desc = { 132static struct lpss_device_desc lpt_uart_dev_desc = {
140 .clk_required = true, 133 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
141 .prv_offset = 0x800, 134 .prv_offset = 0x800,
142 .ltr_required = true,
143 .clk_divider = true,
144 .clk_gate = true,
145 .setup = lpss_uart_setup, 135 .setup = lpss_uart_setup,
146}; 136};
147 137
148static struct lpss_device_desc lpt_sdio_dev_desc = { 138static struct lpss_device_desc lpt_sdio_dev_desc = {
139 .flags = LPSS_LTR,
149 .prv_offset = 0x1000, 140 .prv_offset = 0x1000,
150 .prv_size_override = 0x1018, 141 .prv_size_override = 0x1018,
151 .ltr_required = true,
152};
153
154static struct lpss_shared_clock pwm_clock = {
155 .name = "pwm_clk",
156 .rate = 25000000,
157}; 142};
158 143
159static struct lpss_device_desc byt_pwm_dev_desc = { 144static struct lpss_device_desc byt_pwm_dev_desc = {
160 .clk_required = true, 145 .flags = LPSS_SAVE_CTX,
161 .save_ctx = true,
162 .shared_clock = &pwm_clock,
163}; 146};
164 147
165static struct lpss_device_desc byt_uart_dev_desc = { 148static struct lpss_device_desc byt_uart_dev_desc = {
166 .clk_required = true, 149 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
167 .prv_offset = 0x800, 150 .prv_offset = 0x800,
168 .clk_divider = true,
169 .clk_gate = true,
170 .save_ctx = true,
171 .setup = lpss_uart_setup, 151 .setup = lpss_uart_setup,
172}; 152};
173 153
174static struct lpss_device_desc byt_spi_dev_desc = { 154static struct lpss_device_desc byt_spi_dev_desc = {
175 .clk_required = true, 155 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
176 .prv_offset = 0x400, 156 .prv_offset = 0x400,
177 .clk_divider = true,
178 .clk_gate = true,
179 .save_ctx = true,
180}; 157};
181 158
182static struct lpss_device_desc byt_sdio_dev_desc = { 159static struct lpss_device_desc byt_sdio_dev_desc = {
183 .clk_required = true, 160 .flags = LPSS_CLK,
184};
185
186static struct lpss_shared_clock i2c_clock = {
187 .name = "i2c_clk",
188 .rate = 100000000,
189}; 161};
190 162
191static struct lpss_device_desc byt_i2c_dev_desc = { 163static struct lpss_device_desc byt_i2c_dev_desc = {
192 .clk_required = true, 164 .flags = LPSS_CLK | LPSS_SAVE_CTX,
193 .prv_offset = 0x800, 165 .prv_offset = 0x800,
194 .save_ctx = true, 166 .setup = byt_i2c_setup,
195 .shared_clock = &i2c_clock,
196 .setup = lpss_i2c_setup,
197};
198
199static struct lpss_shared_clock bsw_pwm_clock = {
200 .name = "pwm_clk",
201 .rate = 19200000,
202};
203
204static struct lpss_device_desc bsw_pwm_dev_desc = {
205 .clk_required = true,
206 .save_ctx = true,
207 .shared_clock = &bsw_pwm_clock,
208}; 167};
209 168
210#else 169#else
@@ -237,7 +196,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
237 { "INT33FC", }, 196 { "INT33FC", },
238 197
239 /* Braswell LPSS devices */ 198 /* Braswell LPSS devices */
240 { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, 199 { "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
241 { "8086228A", LPSS_ADDR(byt_uart_dev_desc) }, 200 { "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
242 { "8086228E", LPSS_ADDR(byt_spi_dev_desc) }, 201 { "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
243 { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) }, 202 { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
@@ -251,7 +210,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
251 { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) }, 210 { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
252 { "INT3437", }, 211 { "INT3437", },
253 212
254 { "INT3438", LPSS_ADDR(wpt_dev_desc) }, 213 /* Wildcat Point LPSS devices */
214 { "INT3438", LPSS_ADDR(lpt_dev_desc) },
255 215
256 { } 216 { }
257}; 217};
@@ -276,7 +236,6 @@ static int register_device_clock(struct acpi_device *adev,
276 struct lpss_private_data *pdata) 236 struct lpss_private_data *pdata)
277{ 237{
278 const struct lpss_device_desc *dev_desc = pdata->dev_desc; 238 const struct lpss_device_desc *dev_desc = pdata->dev_desc;
279 struct lpss_shared_clock *shared_clock = dev_desc->shared_clock;
280 const char *devname = dev_name(&adev->dev); 239 const char *devname = dev_name(&adev->dev);
281 struct clk *clk = ERR_PTR(-ENODEV); 240 struct clk *clk = ERR_PTR(-ENODEV);
282 struct lpss_clk_data *clk_data; 241 struct lpss_clk_data *clk_data;
@@ -289,12 +248,7 @@ static int register_device_clock(struct acpi_device *adev,
289 clk_data = platform_get_drvdata(lpss_clk_dev); 248 clk_data = platform_get_drvdata(lpss_clk_dev);
290 if (!clk_data) 249 if (!clk_data)
291 return -ENODEV; 250 return -ENODEV;
292 251 clk = clk_data->clk;
293 if (dev_desc->clkdev_name) {
294 clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name,
295 devname);
296 return 0;
297 }
298 252
299 if (!pdata->mmio_base 253 if (!pdata->mmio_base
300 || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE) 254 || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
@@ -303,24 +257,19 @@ static int register_device_clock(struct acpi_device *adev,
303 parent = clk_data->name; 257 parent = clk_data->name;
304 prv_base = pdata->mmio_base + dev_desc->prv_offset; 258 prv_base = pdata->mmio_base + dev_desc->prv_offset;
305 259
306 if (shared_clock) { 260 if (pdata->fixed_clk_rate) {
307 clk = shared_clock->clk; 261 clk = clk_register_fixed_rate(NULL, devname, parent, 0,
308 if (!clk) { 262 pdata->fixed_clk_rate);
309 clk = clk_register_fixed_rate(NULL, shared_clock->name, 263 goto out;
310 "lpss_clk", 0,
311 shared_clock->rate);
312 shared_clock->clk = clk;
313 }
314 parent = shared_clock->name;
315 } 264 }
316 265
317 if (dev_desc->clk_gate) { 266 if (dev_desc->flags & LPSS_CLK_GATE) {
318 clk = clk_register_gate(NULL, devname, parent, 0, 267 clk = clk_register_gate(NULL, devname, parent, 0,
319 prv_base, 0, 0, NULL); 268 prv_base, 0, 0, NULL);
320 parent = devname; 269 parent = devname;
321 } 270 }
322 271
323 if (dev_desc->clk_divider) { 272 if (dev_desc->flags & LPSS_CLK_DIVIDER) {
324 /* Prevent division by zero */ 273 /* Prevent division by zero */
325 if (!readl(prv_base)) 274 if (!readl(prv_base))
326 writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base); 275 writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);
@@ -344,7 +293,7 @@ static int register_device_clock(struct acpi_device *adev,
344 kfree(parent); 293 kfree(parent);
345 kfree(clk_name); 294 kfree(clk_name);
346 } 295 }
347 296out:
348 if (IS_ERR(clk)) 297 if (IS_ERR(clk))
349 return PTR_ERR(clk); 298 return PTR_ERR(clk);
350 299
@@ -392,7 +341,10 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
392 341
393 pdata->dev_desc = dev_desc; 342 pdata->dev_desc = dev_desc;
394 343
395 if (dev_desc->clk_required) { 344 if (dev_desc->setup)
345 dev_desc->setup(pdata);
346
347 if (dev_desc->flags & LPSS_CLK) {
396 ret = register_device_clock(adev, pdata); 348 ret = register_device_clock(adev, pdata);
397 if (ret) { 349 if (ret) {
398 /* Skip the device, but continue the namespace scan. */ 350 /* Skip the device, but continue the namespace scan. */
@@ -413,9 +365,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
413 goto err_out; 365 goto err_out;
414 } 366 }
415 367
416 if (dev_desc->setup)
417 dev_desc->setup(pdata);
418
419 adev->driver_data = pdata; 368 adev->driver_data = pdata;
420 pdev = acpi_create_platform_device(adev); 369 pdev = acpi_create_platform_device(adev);
421 if (!IS_ERR_OR_NULL(pdev)) { 370 if (!IS_ERR_OR_NULL(pdev)) {
@@ -692,19 +641,19 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
692 641
693 switch (action) { 642 switch (action) {
694 case BUS_NOTIFY_BOUND_DRIVER: 643 case BUS_NOTIFY_BOUND_DRIVER:
695 if (pdata->dev_desc->save_ctx) 644 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
696 pdev->dev.pm_domain = &acpi_lpss_pm_domain; 645 pdev->dev.pm_domain = &acpi_lpss_pm_domain;
697 break; 646 break;
698 case BUS_NOTIFY_UNBOUND_DRIVER: 647 case BUS_NOTIFY_UNBOUND_DRIVER:
699 if (pdata->dev_desc->save_ctx) 648 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
700 pdev->dev.pm_domain = NULL; 649 pdev->dev.pm_domain = NULL;
701 break; 650 break;
702 case BUS_NOTIFY_ADD_DEVICE: 651 case BUS_NOTIFY_ADD_DEVICE:
703 if (pdata->dev_desc->ltr_required) 652 if (pdata->dev_desc->flags & LPSS_LTR)
704 return sysfs_create_group(&pdev->dev.kobj, 653 return sysfs_create_group(&pdev->dev.kobj,
705 &lpss_attr_group); 654 &lpss_attr_group);
706 case BUS_NOTIFY_DEL_DEVICE: 655 case BUS_NOTIFY_DEL_DEVICE:
707 if (pdata->dev_desc->ltr_required) 656 if (pdata->dev_desc->flags & LPSS_LTR)
708 sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); 657 sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
709 default: 658 default:
710 break; 659 break;
@@ -721,7 +670,7 @@ static void acpi_lpss_bind(struct device *dev)
721{ 670{
722 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 671 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
723 672
724 if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required) 673 if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
725 return; 674 return;
726 675
727 if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) 676 if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 1f8b20496f32..b193f8425999 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -130,10 +130,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
130 {"PNP0401"}, /* ECP Printer Port */ 130 {"PNP0401"}, /* ECP Printer Port */
131 /* apple-gmux */ 131 /* apple-gmux */
132 {"APP000B"}, 132 {"APP000B"},
133 /* fujitsu-laptop.c */
134 {"FUJ02bf"},
135 {"FUJ02B1"},
136 {"FUJ02E3"},
137 /* system */ 133 /* system */
138 {"PNP0c02"}, /* General ID for reserving resources */ 134 {"PNP0c02"}, /* General ID for reserving resources */
139 {"PNP0c01"}, /* memory controller */ 135 {"PNP0c01"}, /* memory controller */
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 0cf159cc6e6d..56710a03c9b0 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -596,6 +596,38 @@ acpi_status acpi_enable_all_runtime_gpes(void)
596 596
597ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes) 597ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
598 598
599/******************************************************************************
600 *
601 * FUNCTION: acpi_enable_all_wakeup_gpes
602 *
603 * PARAMETERS: None
604 *
605 * RETURN: Status
606 *
607 * DESCRIPTION: Enable all "wakeup" GPEs and disable all of the other GPEs, in
608 * all GPE blocks.
609 *
610 ******************************************************************************/
611
612acpi_status acpi_enable_all_wakeup_gpes(void)
613{
614 acpi_status status;
615
616 ACPI_FUNCTION_TRACE(acpi_enable_all_wakeup_gpes);
617
618 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
619 if (ACPI_FAILURE(status)) {
620 return_ACPI_STATUS(status);
621 }
622
623 status = acpi_hw_enable_all_wakeup_gpes();
624 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
625
626 return_ACPI_STATUS(status);
627}
628
629ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
630
599/******************************************************************************* 631/*******************************************************************************
600 * 632 *
601 * FUNCTION: acpi_install_gpe_block 633 * FUNCTION: acpi_install_gpe_block
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 2e6caabba07a..ea62d40fd161 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -396,11 +396,11 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
396 /* Examine each GPE Register within the block */ 396 /* Examine each GPE Register within the block */
397 397
398 for (i = 0; i < gpe_block->register_count; i++) { 398 for (i = 0; i < gpe_block->register_count; i++) {
399 if (!gpe_block->register_info[i].enable_for_wake) {
400 continue;
401 }
402 399
403 /* Enable all "wake" GPEs in this register */ 400 /*
401 * Enable all "wake" GPEs in this register and disable the
402 * remaining ones.
403 */
404 404
405 status = 405 status =
406 acpi_hw_write(gpe_block->register_info[i].enable_for_wake, 406 acpi_hw_write(gpe_block->register_info[i].enable_for_wake,
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 14cb6c0c8be2..5cd017c7ac0e 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -87,7 +87,9 @@ const char *acpi_gbl_io_decode[] = {
87 87
88const char *acpi_gbl_ll_decode[] = { 88const char *acpi_gbl_ll_decode[] = {
89 "ActiveHigh", 89 "ActiveHigh",
90 "ActiveLow" 90 "ActiveLow",
91 "ActiveBoth",
92 "Reserved"
91}; 93};
92 94
93const char *acpi_gbl_max_decode[] = { 95const char *acpi_gbl_max_decode[] = {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 5fdfe65fe165..8ec8a89a20ab 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -695,7 +695,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
695 if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { 695 if (battery->power_unit && dmi_name_in_vendors("LENOVO")) {
696 const char *s; 696 const char *s;
697 s = dmi_get_system_info(DMI_PRODUCT_VERSION); 697 s = dmi_get_system_info(DMI_PRODUCT_VERSION);
698 if (s && !strnicmp(s, "ThinkPad", 8)) { 698 if (s && !strncasecmp(s, "ThinkPad", 8)) {
699 dmi_walk(find_battery, battery); 699 dmi_walk(find_battery, battery);
700 if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, 700 if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
701 &battery->flags) && 701 &battery->flags) &&
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 36eb42e3b0bb..ed122e17636e 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -247,8 +247,8 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
247 }, 247 },
248 248
249 /* 249 /*
250 * These machines will power on immediately after shutdown when 250 * The wireless hotkey does not work on those machines when
251 * reporting the Windows 2012 OSI. 251 * returning true for _OSI("Windows 2012")
252 */ 252 */
253 { 253 {
254 .callback = dmi_disable_osi_win8, 254 .callback = dmi_disable_osi_win8,
@@ -258,6 +258,38 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
258 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"), 258 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
259 }, 259 },
260 }, 260 },
261 {
262 .callback = dmi_disable_osi_win8,
263 .ident = "Dell Inspiron 7537",
264 .matches = {
265 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
266 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
267 },
268 },
269 {
270 .callback = dmi_disable_osi_win8,
271 .ident = "Dell Inspiron 5437",
272 .matches = {
273 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
274 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"),
275 },
276 },
277 {
278 .callback = dmi_disable_osi_win8,
279 .ident = "Dell Inspiron 3437",
280 .matches = {
281 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
282 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"),
283 },
284 },
285 {
286 .callback = dmi_disable_osi_win8,
287 .ident = "Dell Vostro 3446",
288 .matches = {
289 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
290 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
291 },
292 },
261 293
262 /* 294 /*
263 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. 295 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 67075f800e34..bea6896be122 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1041,6 +1041,40 @@ static struct dev_pm_domain acpi_general_pm_domain = {
1041}; 1041};
1042 1042
1043/** 1043/**
1044 * acpi_dev_pm_detach - Remove ACPI power management from the device.
1045 * @dev: Device to take care of.
1046 * @power_off: Whether or not to try to remove power from the device.
1047 *
1048 * Remove the device from the general ACPI PM domain and remove its wakeup
1049 * notifier. If @power_off is set, additionally remove power from the device if
1050 * possible.
1051 *
1052 * Callers must ensure proper synchronization of this function with power
1053 * management callbacks.
1054 */
1055static void acpi_dev_pm_detach(struct device *dev, bool power_off)
1056{
1057 struct acpi_device *adev = ACPI_COMPANION(dev);
1058
1059 if (adev && dev->pm_domain == &acpi_general_pm_domain) {
1060 dev->pm_domain = NULL;
1061 acpi_remove_pm_notifier(adev);
1062 if (power_off) {
1063 /*
1064 * If the device's PM QoS resume latency limit or flags
1065 * have been exposed to user space, they have to be
1066 * hidden at this point, so that they don't affect the
1067 * choice of the low-power state to put the device into.
1068 */
1069 dev_pm_qos_hide_latency_limit(dev);
1070 dev_pm_qos_hide_flags(dev);
1071 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
1072 acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
1073 }
1074 }
1075}
1076
1077/**
1044 * acpi_dev_pm_attach - Prepare device for ACPI power management. 1078 * acpi_dev_pm_attach - Prepare device for ACPI power management.
1045 * @dev: Device to prepare. 1079 * @dev: Device to prepare.
1046 * @power_on: Whether or not to power on the device. 1080 * @power_on: Whether or not to power on the device.
@@ -1072,42 +1106,9 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
1072 acpi_dev_pm_full_power(adev); 1106 acpi_dev_pm_full_power(adev);
1073 acpi_device_wakeup(adev, ACPI_STATE_S0, false); 1107 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
1074 } 1108 }
1109
1110 dev->pm_domain->detach = acpi_dev_pm_detach;
1075 return 0; 1111 return 0;
1076} 1112}
1077EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); 1113EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
1078
1079/**
1080 * acpi_dev_pm_detach - Remove ACPI power management from the device.
1081 * @dev: Device to take care of.
1082 * @power_off: Whether or not to try to remove power from the device.
1083 *
1084 * Remove the device from the general ACPI PM domain and remove its wakeup
1085 * notifier. If @power_off is set, additionally remove power from the device if
1086 * possible.
1087 *
1088 * Callers must ensure proper synchronization of this function with power
1089 * management callbacks.
1090 */
1091void acpi_dev_pm_detach(struct device *dev, bool power_off)
1092{
1093 struct acpi_device *adev = ACPI_COMPANION(dev);
1094
1095 if (adev && dev->pm_domain == &acpi_general_pm_domain) {
1096 dev->pm_domain = NULL;
1097 acpi_remove_pm_notifier(adev);
1098 if (power_off) {
1099 /*
1100 * If the device's PM QoS resume latency limit or flags
1101 * have been exposed to user space, they have to be
1102 * hidden at this point, so that they don't affect the
1103 * choice of the low-power state to put the device into.
1104 */
1105 dev_pm_qos_hide_latency_limit(dev);
1106 dev_pm_qos_hide_flags(dev);
1107 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
1108 acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
1109 }
1110 }
1111}
1112EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
1113#endif /* CONFIG_PM */ 1114#endif /* CONFIG_PM */
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 8acf53e62966..5328b1090e08 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -27,12 +27,10 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <asm/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/thermal.h> 31#include <linux/thermal.h>
32#include <linux/acpi.h> 32#include <linux/acpi.h>
33 33
34#define PREFIX "ACPI: "
35
36#define ACPI_FAN_CLASS "fan" 34#define ACPI_FAN_CLASS "fan"
37#define ACPI_FAN_FILE_STATE "state" 35#define ACPI_FAN_FILE_STATE "state"
38 36
@@ -127,8 +125,9 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
127}; 125};
128 126
129/* -------------------------------------------------------------------------- 127/* --------------------------------------------------------------------------
130 Driver Interface 128 * Driver Interface
131 -------------------------------------------------------------------------- */ 129 * --------------------------------------------------------------------------
130*/
132 131
133static int acpi_fan_add(struct acpi_device *device) 132static int acpi_fan_add(struct acpi_device *device)
134{ 133{
@@ -143,7 +142,7 @@ static int acpi_fan_add(struct acpi_device *device)
143 142
144 result = acpi_bus_update_power(device->handle, NULL); 143 result = acpi_bus_update_power(device->handle, NULL);
145 if (result) { 144 if (result) {
146 printk(KERN_ERR PREFIX "Setting initial power state\n"); 145 dev_err(&device->dev, "Setting initial power state\n");
147 goto end; 146 goto end;
148 } 147 }
149 148
@@ -168,10 +167,9 @@ static int acpi_fan_add(struct acpi_device *device)
168 &device->dev.kobj, 167 &device->dev.kobj,
169 "device"); 168 "device");
170 if (result) 169 if (result)
171 dev_err(&device->dev, "Failed to create sysfs link " 170 dev_err(&device->dev, "Failed to create sysfs link 'device'\n");
172 "'device'\n");
173 171
174 printk(KERN_INFO PREFIX "%s [%s] (%s)\n", 172 dev_info(&device->dev, "ACPI: %s [%s] (%s)\n",
175 acpi_device_name(device), acpi_device_bid(device), 173 acpi_device_name(device), acpi_device_bid(device),
176 !device->power.state ? "on" : "off"); 174 !device->power.state ? "on" : "off");
177 175
@@ -217,7 +215,7 @@ static int acpi_fan_resume(struct device *dev)
217 215
218 result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL); 216 result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL);
219 if (result) 217 if (result)
220 printk(KERN_ERR PREFIX "Error updating fan power state\n"); 218 dev_err(dev, "Error updating fan power state\n");
221 219
222 return result; 220 return result;
223} 221}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 3abe9b223ba7..9964f70be98d 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -152,6 +152,16 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
152 osi_linux.dmi ? " via DMI" : ""); 152 osi_linux.dmi ? " via DMI" : "");
153 } 153 }
154 154
155 if (!strcmp("Darwin", interface)) {
156 /*
157 * Apple firmware will behave poorly if it receives positive
158 * answers to "Darwin" and any other OS. Respond positively
159 * to Darwin and then disable all other vendor strings.
160 */
161 acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
162 supported = ACPI_UINT32_MAX;
163 }
164
155 return supported; 165 return supported;
156} 166}
157 167
@@ -825,7 +835,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
825 835
826 acpi_irq_handler = handler; 836 acpi_irq_handler = handler;
827 acpi_irq_context = context; 837 acpi_irq_context = context;
828 if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) { 838 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
829 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 839 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
830 acpi_irq_handler = NULL; 840 acpi_irq_handler = NULL;
831 return AE_NOT_ACQUIRED; 841 return AE_NOT_ACQUIRED;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index e6ae603ed1a1..cd4de7e038ea 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -35,6 +35,7 @@
35#include <linux/pci-aspm.h> 35#include <linux/pci-aspm.h>
36#include <linux/acpi.h> 36#include <linux/acpi.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/dmi.h>
38#include <acpi/apei.h> /* for acpi_hest_init() */ 39#include <acpi/apei.h> /* for acpi_hest_init() */
39 40
40#include "internal.h" 41#include "internal.h"
@@ -430,6 +431,19 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
430 acpi_handle handle = device->handle; 431 acpi_handle handle = device->handle;
431 432
432 /* 433 /*
434 * Apple always return failure on _OSC calls when _OSI("Darwin") has
435 * been called successfully. We know the feature set supported by the
436 * platform, so avoid calling _OSC at all
437 */
438
439 if (dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) {
440 root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
441 decode_osc_control(root, "OS assumes control of",
442 root->osc_control_set);
443 return;
444 }
445
446 /*
433 * All supported architectures that use ACPI have support for 447 * All supported architectures that use ACPI have support for
434 * PCI domains, so we indicate this in _OSC support capabilities. 448 * PCI domains, so we indicate this in _OSC support capabilities.
435 */ 449 */
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e32321ce9a5c..ef58f46c8442 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -16,7 +16,7 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
16 u32 acpi_id, int *apic_id) 16 u32 acpi_id, int *apic_id)
17{ 17{
18 struct acpi_madt_local_apic *lapic = 18 struct acpi_madt_local_apic *lapic =
19 (struct acpi_madt_local_apic *)entry; 19 container_of(entry, struct acpi_madt_local_apic, header);
20 20
21 if (!(lapic->lapic_flags & ACPI_MADT_ENABLED)) 21 if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
22 return -ENODEV; 22 return -ENODEV;
@@ -32,7 +32,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
32 int device_declaration, u32 acpi_id, int *apic_id) 32 int device_declaration, u32 acpi_id, int *apic_id)
33{ 33{
34 struct acpi_madt_local_x2apic *apic = 34 struct acpi_madt_local_x2apic *apic =
35 (struct acpi_madt_local_x2apic *)entry; 35 container_of(entry, struct acpi_madt_local_x2apic, header);
36 36
37 if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) 37 if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
38 return -ENODEV; 38 return -ENODEV;
@@ -49,7 +49,7 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
49 int device_declaration, u32 acpi_id, int *apic_id) 49 int device_declaration, u32 acpi_id, int *apic_id)
50{ 50{
51 struct acpi_madt_local_sapic *lsapic = 51 struct acpi_madt_local_sapic *lsapic =
52 (struct acpi_madt_local_sapic *)entry; 52 container_of(entry, struct acpi_madt_local_sapic, header);
53 53
54 if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED)) 54 if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
55 return -ENODEV; 55 return -ENODEV;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 366ca40a6f70..a7a3edd28beb 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -35,6 +35,7 @@
35#include <linux/jiffies.h> 35#include <linux/jiffies.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/power_supply.h> 37#include <linux/power_supply.h>
38#include <linux/dmi.h>
38 39
39#include "sbshc.h" 40#include "sbshc.h"
40#include "battery.h" 41#include "battery.h"
@@ -61,6 +62,8 @@ static unsigned int cache_time = 1000;
61module_param(cache_time, uint, 0644); 62module_param(cache_time, uint, 0644);
62MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); 63MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
63 64
65static bool sbs_manager_broken;
66
64#define MAX_SBS_BAT 4 67#define MAX_SBS_BAT 4
65#define ACPI_SBS_BLOCK_MAX 32 68#define ACPI_SBS_BLOCK_MAX 32
66 69
@@ -109,6 +112,7 @@ struct acpi_sbs {
109 u8 batteries_supported:4; 112 u8 batteries_supported:4;
110 u8 manager_present:1; 113 u8 manager_present:1;
111 u8 charger_present:1; 114 u8 charger_present:1;
115 u8 charger_exists:1;
112}; 116};
113 117
114#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) 118#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
@@ -429,9 +433,19 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
429 433
430 result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER, 434 result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER,
431 0x13, (u8 *) & status); 435 0x13, (u8 *) & status);
432 if (!result) 436
433 sbs->charger_present = (status >> 15) & 0x1; 437 if (result)
434 return result; 438 return result;
439
440 /*
441 * The spec requires that bit 4 always be 1. If it's not set, assume
442 * that the implementation doesn't support an SBS charger
443 */
444 if (!((status >> 4) & 0x1))
445 return -ENODEV;
446
447 sbs->charger_present = (status >> 15) & 0x1;
448 return 0;
435} 449}
436 450
437static ssize_t acpi_battery_alarm_show(struct device *dev, 451static ssize_t acpi_battery_alarm_show(struct device *dev,
@@ -483,16 +497,21 @@ static int acpi_battery_read(struct acpi_battery *battery)
483 ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2); 497 ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2);
484 } else if (battery->id == 0) 498 } else if (battery->id == 0)
485 battery->present = 1; 499 battery->present = 1;
500
486 if (result || !battery->present) 501 if (result || !battery->present)
487 return result; 502 return result;
488 503
489 if (saved_present != battery->present) { 504 if (saved_present != battery->present) {
490 battery->update_time = 0; 505 battery->update_time = 0;
491 result = acpi_battery_get_info(battery); 506 result = acpi_battery_get_info(battery);
492 if (result) 507 if (result) {
508 battery->present = 0;
493 return result; 509 return result;
510 }
494 } 511 }
495 result = acpi_battery_get_state(battery); 512 result = acpi_battery_get_state(battery);
513 if (result)
514 battery->present = 0;
496 return result; 515 return result;
497} 516}
498 517
@@ -524,6 +543,7 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
524 result = power_supply_register(&sbs->device->dev, &battery->bat); 543 result = power_supply_register(&sbs->device->dev, &battery->bat);
525 if (result) 544 if (result)
526 goto end; 545 goto end;
546
527 result = device_create_file(battery->bat.dev, &alarm_attr); 547 result = device_create_file(battery->bat.dev, &alarm_attr);
528 if (result) 548 if (result)
529 goto end; 549 goto end;
@@ -554,6 +574,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
554 if (result) 574 if (result)
555 goto end; 575 goto end;
556 576
577 sbs->charger_exists = 1;
557 sbs->charger.name = "sbs-charger"; 578 sbs->charger.name = "sbs-charger";
558 sbs->charger.type = POWER_SUPPLY_TYPE_MAINS; 579 sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
559 sbs->charger.properties = sbs_ac_props; 580 sbs->charger.properties = sbs_ac_props;
@@ -580,9 +601,12 @@ static void acpi_sbs_callback(void *context)
580 struct acpi_battery *bat; 601 struct acpi_battery *bat;
581 u8 saved_charger_state = sbs->charger_present; 602 u8 saved_charger_state = sbs->charger_present;
582 u8 saved_battery_state; 603 u8 saved_battery_state;
583 acpi_ac_get_present(sbs); 604
584 if (sbs->charger_present != saved_charger_state) 605 if (sbs->charger_exists) {
585 kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE); 606 acpi_ac_get_present(sbs);
607 if (sbs->charger_present != saved_charger_state)
608 kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE);
609 }
586 610
587 if (sbs->manager_present) { 611 if (sbs->manager_present) {
588 for (id = 0; id < MAX_SBS_BAT; ++id) { 612 for (id = 0; id < MAX_SBS_BAT; ++id) {
@@ -598,12 +622,31 @@ static void acpi_sbs_callback(void *context)
598 } 622 }
599} 623}
600 624
625static int disable_sbs_manager(const struct dmi_system_id *d)
626{
627 sbs_manager_broken = true;
628 return 0;
629}
630
631static struct dmi_system_id acpi_sbs_dmi_table[] = {
632 {
633 .callback = disable_sbs_manager,
634 .ident = "Apple",
635 .matches = {
636 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc.")
637 },
638 },
639 { },
640};
641
601static int acpi_sbs_add(struct acpi_device *device) 642static int acpi_sbs_add(struct acpi_device *device)
602{ 643{
603 struct acpi_sbs *sbs; 644 struct acpi_sbs *sbs;
604 int result = 0; 645 int result = 0;
605 int id; 646 int id;
606 647
648 dmi_check_system(acpi_sbs_dmi_table);
649
607 sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL); 650 sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL);
608 if (!sbs) { 651 if (!sbs) {
609 result = -ENOMEM; 652 result = -ENOMEM;
@@ -619,17 +662,24 @@ static int acpi_sbs_add(struct acpi_device *device)
619 device->driver_data = sbs; 662 device->driver_data = sbs;
620 663
621 result = acpi_charger_add(sbs); 664 result = acpi_charger_add(sbs);
622 if (result) 665 if (result && result != -ENODEV)
623 goto end; 666 goto end;
624 667
625 result = acpi_manager_get_info(sbs); 668 result = 0;
626 if (!result) { 669
627 sbs->manager_present = 1; 670 if (!sbs_manager_broken) {
628 for (id = 0; id < MAX_SBS_BAT; ++id) 671 result = acpi_manager_get_info(sbs);
629 if ((sbs->batteries_supported & (1 << id))) 672 if (!result) {
630 acpi_battery_add(sbs, id); 673 sbs->manager_present = 0;
631 } else 674 for (id = 0; id < MAX_SBS_BAT; ++id)
675 if ((sbs->batteries_supported & (1 << id)))
676 acpi_battery_add(sbs, id);
677 }
678 }
679
680 if (!sbs->manager_present)
632 acpi_battery_add(sbs, 0); 681 acpi_battery_add(sbs, 0);
682
633 acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs); 683 acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs);
634 end: 684 end:
635 if (result) 685 if (result)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 54da4a3fe65e..05a31b573fc3 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -14,6 +14,7 @@
14#include <linux/irq.h> 14#include <linux/irq.h>
15#include <linux/dmi.h> 15#include <linux/dmi.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/interrupt.h>
17#include <linux/suspend.h> 18#include <linux/suspend.h>
18#include <linux/reboot.h> 19#include <linux/reboot.h>
19#include <linux/acpi.h> 20#include <linux/acpi.h>
@@ -626,6 +627,19 @@ static int acpi_freeze_begin(void)
626 return 0; 627 return 0;
627} 628}
628 629
630static int acpi_freeze_prepare(void)
631{
632 acpi_enable_all_wakeup_gpes();
633 enable_irq_wake(acpi_gbl_FADT.sci_interrupt);
634 return 0;
635}
636
637static void acpi_freeze_restore(void)
638{
639 disable_irq_wake(acpi_gbl_FADT.sci_interrupt);
640 acpi_enable_all_runtime_gpes();
641}
642
629static void acpi_freeze_end(void) 643static void acpi_freeze_end(void)
630{ 644{
631 acpi_scan_lock_release(); 645 acpi_scan_lock_release();
@@ -633,6 +647,8 @@ static void acpi_freeze_end(void)
633 647
634static const struct platform_freeze_ops acpi_freeze_ops = { 648static const struct platform_freeze_ops acpi_freeze_ops = {
635 .begin = acpi_freeze_begin, 649 .begin = acpi_freeze_begin,
650 .prepare = acpi_freeze_prepare,
651 .restore = acpi_freeze_restore,
636 .end = acpi_freeze_end, 652 .end = acpi_freeze_end,
637}; 653};
638 654
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 07c8c5a5ee95..834f35c4bf8d 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -661,7 +661,6 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
661 * @uuid: UUID of requested functions, should be 16 bytes at least 661 * @uuid: UUID of requested functions, should be 16 bytes at least
662 * @rev: revision number of requested functions 662 * @rev: revision number of requested functions
663 * @funcs: bitmap of requested functions 663 * @funcs: bitmap of requested functions
664 * @exclude: excluding special value, used to support i915 and nouveau
665 * 664 *
666 * Evaluate device's _DSM method to check whether it supports requested 665 * Evaluate device's _DSM method to check whether it supports requested
667 * functions. Currently only support 64 functions at maximum, should be 666 * functions. Currently only support 64 functions at maximum, should be
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8e7e18567ae6..807a88a0f394 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -411,12 +411,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
411 return 0; 411 return 0;
412} 412}
413 413
414static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
415{
416 use_native_backlight_dmi = true;
417 return 0;
418}
419
420static int __init video_disable_native_backlight(const struct dmi_system_id *d) 414static int __init video_disable_native_backlight(const struct dmi_system_id *d)
421{ 415{
422 use_native_backlight_dmi = false; 416 use_native_backlight_dmi = false;
@@ -467,265 +461,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
467 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), 461 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
468 }, 462 },
469 }, 463 },
470 {
471 .callback = video_set_use_native_backlight,
472 .ident = "ThinkPad X230",
473 .matches = {
474 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
475 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
476 },
477 },
478 {
479 .callback = video_set_use_native_backlight,
480 .ident = "ThinkPad T430 and T430s",
481 .matches = {
482 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
483 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
484 },
485 },
486 {
487 .callback = video_set_use_native_backlight,
488 .ident = "ThinkPad T430",
489 .matches = {
490 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
491 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
492 },
493 },
494 {
495 .callback = video_set_use_native_backlight,
496 .ident = "ThinkPad T431s",
497 .matches = {
498 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
499 DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
500 },
501 },
502 {
503 .callback = video_set_use_native_backlight,
504 .ident = "ThinkPad Edge E530",
505 .matches = {
506 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
507 DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
508 },
509 },
510 {
511 .callback = video_set_use_native_backlight,
512 .ident = "ThinkPad Edge E530",
513 .matches = {
514 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
515 DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
516 },
517 },
518 {
519 .callback = video_set_use_native_backlight,
520 .ident = "ThinkPad Edge E530",
521 .matches = {
522 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
523 DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
524 },
525 },
526 {
527 .callback = video_set_use_native_backlight,
528 .ident = "ThinkPad W530",
529 .matches = {
530 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
531 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
532 },
533 },
534 {
535 .callback = video_set_use_native_backlight,
536 .ident = "ThinkPad X1 Carbon",
537 .matches = {
538 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
539 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
540 },
541 },
542 {
543 .callback = video_set_use_native_backlight,
544 .ident = "Lenovo Yoga 13",
545 .matches = {
546 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
547 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
548 },
549 },
550 {
551 .callback = video_set_use_native_backlight,
552 .ident = "Lenovo Yoga 2 11",
553 .matches = {
554 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
555 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2 11"),
556 },
557 },
558 {
559 .callback = video_set_use_native_backlight,
560 .ident = "Thinkpad Helix",
561 .matches = {
562 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
563 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
564 },
565 },
566 {
567 .callback = video_set_use_native_backlight,
568 .ident = "Dell Inspiron 7520",
569 .matches = {
570 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
571 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
572 },
573 },
574 {
575 .callback = video_set_use_native_backlight,
576 .ident = "Acer Aspire 5733Z",
577 .matches = {
578 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
579 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
580 },
581 },
582 {
583 .callback = video_set_use_native_backlight,
584 .ident = "Acer Aspire 5742G",
585 .matches = {
586 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
587 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
588 },
589 },
590 {
591 .callback = video_set_use_native_backlight,
592 .ident = "Acer Aspire V5-171",
593 .matches = {
594 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
595 DMI_MATCH(DMI_PRODUCT_NAME, "V5-171"),
596 },
597 },
598 {
599 .callback = video_set_use_native_backlight,
600 .ident = "Acer Aspire V5-431",
601 .matches = {
602 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
603 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"),
604 },
605 },
606 {
607 .callback = video_set_use_native_backlight,
608 .ident = "Acer Aspire V5-471G",
609 .matches = {
610 DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
611 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-471G"),
612 },
613 },
614 {
615 .callback = video_set_use_native_backlight,
616 .ident = "Acer TravelMate B113",
617 .matches = {
618 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
619 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"),
620 },
621 },
622 {
623 .callback = video_set_use_native_backlight,
624 .ident = "Acer Aspire V5-572G",
625 .matches = {
626 DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
627 DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
628 },
629 },
630 {
631 .callback = video_set_use_native_backlight,
632 .ident = "Acer Aspire V5-573G",
633 .matches = {
634 DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
635 DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
636 },
637 },
638 {
639 .callback = video_set_use_native_backlight,
640 .ident = "ASUS Zenbook Prime UX31A",
641 .matches = {
642 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
643 DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
644 },
645 },
646 {
647 .callback = video_set_use_native_backlight,
648 .ident = "HP ProBook 4340s",
649 .matches = {
650 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
651 DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"),
652 },
653 },
654 {
655 .callback = video_set_use_native_backlight,
656 .ident = "HP ProBook 4540s",
657 .matches = {
658 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
659 DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
660 },
661 },
662 {
663 .callback = video_set_use_native_backlight,
664 .ident = "HP ProBook 2013 models",
665 .matches = {
666 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
667 DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
668 DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
669 },
670 },
671 {
672 .callback = video_set_use_native_backlight,
673 .ident = "HP EliteBook 2013 models",
674 .matches = {
675 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
676 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
677 DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
678 },
679 },
680 {
681 .callback = video_set_use_native_backlight,
682 .ident = "HP EliteBook 2014 models",
683 .matches = {
684 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
685 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
686 DMI_MATCH(DMI_PRODUCT_NAME, " G2"),
687 },
688 },
689 {
690 .callback = video_set_use_native_backlight,
691 .ident = "HP ZBook 14",
692 .matches = {
693 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
694 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
695 },
696 },
697 {
698 .callback = video_set_use_native_backlight,
699 .ident = "HP ZBook 15",
700 .matches = {
701 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
702 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
703 },
704 },
705 {
706 .callback = video_set_use_native_backlight,
707 .ident = "HP ZBook 17",
708 .matches = {
709 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
710 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
711 },
712 },
713 {
714 .callback = video_set_use_native_backlight,
715 .ident = "HP EliteBook 8470p",
716 .matches = {
717 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
718 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8470p"),
719 },
720 },
721 {
722 .callback = video_set_use_native_backlight,
723 .ident = "HP EliteBook 8780w",
724 .matches = {
725 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
726 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
727 },
728 },
729 464
730 /* 465 /*
731 * These models have a working acpi_video backlight control, and using 466 * These models have a working acpi_video backlight control, and using
@@ -1419,6 +1154,23 @@ acpi_video_device_bind(struct acpi_video_bus *video,
1419 } 1154 }
1420} 1155}
1421 1156
1157static bool acpi_video_device_in_dod(struct acpi_video_device *device)
1158{
1159 struct acpi_video_bus *video = device->video;
1160 int i;
1161
1162 /* If we have a broken _DOD, no need to test */
1163 if (!video->attached_count)
1164 return true;
1165
1166 for (i = 0; i < video->attached_count; i++) {
1167 if (video->attached_array[i].bind_info == device)
1168 return true;
1169 }
1170
1171 return false;
1172}
1173
1422/* 1174/*
1423 * Arg: 1175 * Arg:
1424 * video : video bus device 1176 * video : video bus device
@@ -1858,6 +1610,15 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
1858 static int count; 1610 static int count;
1859 char *name; 1611 char *name;
1860 1612
1613 /*
1614 * Do not create backlight device for video output
1615 * device that is not in the enumerated list.
1616 */
1617 if (!acpi_video_device_in_dod(device)) {
1618 dev_dbg(&device->dev->dev, "not in _DOD list, ignore\n");
1619 return;
1620 }
1621
1861 result = acpi_video_init_brightness(device); 1622 result = acpi_video_init_brightness(device);
1862 if (result) 1623 if (result)
1863 return; 1624 return;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index c42feb2bacd0..27c43499977a 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -174,6 +174,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
174 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), 174 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
175 }, 175 },
176 }, 176 },
177 {
178 .callback = video_detect_force_vendor,
179 .ident = "Lenovo IdeaPad Z570",
180 .matches = {
181 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
182 DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
183 },
184 },
177 { }, 185 { },
178}; 186};
179 187
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 3cf61a127ee5..47bbdc1b5be3 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -15,6 +15,7 @@
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/pm.h> 16#include <linux/pm.h>
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18#include <linux/pm_domain.h>
18#include <linux/amba/bus.h> 19#include <linux/amba/bus.h>
19#include <linux/sizes.h> 20#include <linux/sizes.h>
20 21
@@ -182,9 +183,15 @@ static int amba_probe(struct device *dev)
182 int ret; 183 int ret;
183 184
184 do { 185 do {
186 ret = dev_pm_domain_attach(dev, true);
187 if (ret == -EPROBE_DEFER)
188 break;
189
185 ret = amba_get_enable_pclk(pcdev); 190 ret = amba_get_enable_pclk(pcdev);
186 if (ret) 191 if (ret) {
192 dev_pm_domain_detach(dev, true);
187 break; 193 break;
194 }
188 195
189 pm_runtime_get_noresume(dev); 196 pm_runtime_get_noresume(dev);
190 pm_runtime_set_active(dev); 197 pm_runtime_set_active(dev);
@@ -199,6 +206,7 @@ static int amba_probe(struct device *dev)
199 pm_runtime_put_noidle(dev); 206 pm_runtime_put_noidle(dev);
200 207
201 amba_put_disable_pclk(pcdev); 208 amba_put_disable_pclk(pcdev);
209 dev_pm_domain_detach(dev, true);
202 } while (0); 210 } while (0);
203 211
204 return ret; 212 return ret;
@@ -220,6 +228,7 @@ static int amba_remove(struct device *dev)
220 pm_runtime_put_noidle(dev); 228 pm_runtime_put_noidle(dev);
221 229
222 amba_put_disable_pclk(pcdev); 230 amba_put_disable_pclk(pcdev);
231 dev_pm_domain_detach(dev, true);
223 232
224 return ret; 233 return ret;
225} 234}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ab4f4ce02722..b2afc29403f9 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -21,6 +21,7 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/pm_runtime.h> 23#include <linux/pm_runtime.h>
24#include <linux/pm_domain.h>
24#include <linux/idr.h> 25#include <linux/idr.h>
25#include <linux/acpi.h> 26#include <linux/acpi.h>
26#include <linux/clk/clk-conf.h> 27#include <linux/clk/clk-conf.h>
@@ -506,11 +507,12 @@ static int platform_drv_probe(struct device *_dev)
506 if (ret < 0) 507 if (ret < 0)
507 return ret; 508 return ret;
508 509
509 acpi_dev_pm_attach(_dev, true); 510 ret = dev_pm_domain_attach(_dev, true);
510 511 if (ret != -EPROBE_DEFER) {
511 ret = drv->probe(dev); 512 ret = drv->probe(dev);
512 if (ret) 513 if (ret)
513 acpi_dev_pm_detach(_dev, true); 514 dev_pm_domain_detach(_dev, true);
515 }
514 516
515 if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { 517 if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
516 dev_warn(_dev, "probe deferral not supported\n"); 518 dev_warn(_dev, "probe deferral not supported\n");
@@ -532,7 +534,7 @@ static int platform_drv_remove(struct device *_dev)
532 int ret; 534 int ret;
533 535
534 ret = drv->remove(dev); 536 ret = drv->remove(dev);
535 acpi_dev_pm_detach(_dev, true); 537 dev_pm_domain_detach(_dev, true);
536 538
537 return ret; 539 return ret;
538} 540}
@@ -543,7 +545,7 @@ static void platform_drv_shutdown(struct device *_dev)
543 struct platform_device *dev = to_platform_device(_dev); 545 struct platform_device *dev = to_platform_device(_dev);
544 546
545 drv->shutdown(dev); 547 drv->shutdown(dev);
546 acpi_dev_pm_detach(_dev, true); 548 dev_pm_domain_detach(_dev, true);
547} 549}
548 550
549/** 551/**
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index b99e6c06ee67..78369305e069 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -368,8 +368,13 @@ int pm_clk_suspend(struct device *dev)
368 368
369 spin_lock_irqsave(&psd->lock, flags); 369 spin_lock_irqsave(&psd->lock, flags);
370 370
371 list_for_each_entry_reverse(ce, &psd->clock_list, node) 371 list_for_each_entry_reverse(ce, &psd->clock_list, node) {
372 clk_disable(ce->clk); 372 if (ce->status < PCE_STATUS_ERROR) {
373 if (ce->status == PCE_STATUS_ENABLED)
374 clk_disable(ce->clk);
375 ce->status = PCE_STATUS_ACQUIRED;
376 }
377 }
373 378
374 spin_unlock_irqrestore(&psd->lock, flags); 379 spin_unlock_irqrestore(&psd->lock, flags);
375 380
@@ -385,6 +390,7 @@ int pm_clk_resume(struct device *dev)
385 struct pm_subsys_data *psd = dev_to_psd(dev); 390 struct pm_subsys_data *psd = dev_to_psd(dev);
386 struct pm_clock_entry *ce; 391 struct pm_clock_entry *ce;
387 unsigned long flags; 392 unsigned long flags;
393 int ret;
388 394
389 dev_dbg(dev, "%s()\n", __func__); 395 dev_dbg(dev, "%s()\n", __func__);
390 396
@@ -394,8 +400,13 @@ int pm_clk_resume(struct device *dev)
394 400
395 spin_lock_irqsave(&psd->lock, flags); 401 spin_lock_irqsave(&psd->lock, flags);
396 402
397 list_for_each_entry(ce, &psd->clock_list, node) 403 list_for_each_entry(ce, &psd->clock_list, node) {
398 __pm_clk_enable(dev, ce->clk); 404 if (ce->status < PCE_STATUS_ERROR) {
405 ret = __pm_clk_enable(dev, ce->clk);
406 if (!ret)
407 ce->status = PCE_STATUS_ENABLED;
408 }
409 }
399 410
400 spin_unlock_irqrestore(&psd->lock, flags); 411 spin_unlock_irqrestore(&psd->lock, flags);
401 412
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index df2e5eeaeb05..b0f138806bbc 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -11,6 +11,8 @@
11#include <linux/export.h> 11#include <linux/export.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/pm_clock.h> 13#include <linux/pm_clock.h>
14#include <linux/acpi.h>
15#include <linux/pm_domain.h>
14 16
15/** 17/**
16 * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. 18 * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
@@ -82,3 +84,53 @@ int dev_pm_put_subsys_data(struct device *dev)
82 return ret; 84 return ret;
83} 85}
84EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); 86EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
87
88/**
89 * dev_pm_domain_attach - Attach a device to its PM domain.
90 * @dev: Device to attach.
91 * @power_on: Used to indicate whether we should power on the device.
92 *
93 * The @dev may only be attached to a single PM domain. By iterating through
94 * the available alternatives we try to find a valid PM domain for the device.
95 * As attachment succeeds, the ->detach() callback in the struct dev_pm_domain
96 * should be assigned by the corresponding attach function.
97 *
98 * This function should typically be invoked from subsystem level code during
99 * the probe phase. Especially for those that holds devices which requires
100 * power management through PM domains.
101 *
102 * Callers must ensure proper synchronization of this function with power
103 * management callbacks.
104 *
105 * Returns 0 on successfully attached PM domain or negative error code.
106 */
107int dev_pm_domain_attach(struct device *dev, bool power_on)
108{
109 int ret;
110
111 ret = acpi_dev_pm_attach(dev, power_on);
112 if (ret)
113 ret = genpd_dev_pm_attach(dev);
114
115 return ret;
116}
117EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
118
119/**
120 * dev_pm_domain_detach - Detach a device from its PM domain.
121 * @dev: Device to attach.
122 * @power_off: Used to indicate whether we should power off the device.
123 *
124 * This functions will reverse the actions from dev_pm_domain_attach() and thus
125 * try to detach the @dev from its PM domain. Typically it should be invoked
126 * from subsystem level code during the remove phase.
127 *
128 * Callers must ensure proper synchronization of this function with power
129 * management callbacks.
130 */
131void dev_pm_domain_detach(struct device *dev, bool power_off)
132{
133 if (dev->pm_domain && dev->pm_domain->detach)
134 dev->pm_domain->detach(dev, power_off);
135}
136EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index eee55c1e5fde..40bc2f4072cc 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/io.h> 10#include <linux/io.h>
11#include <linux/platform_device.h>
11#include <linux/pm_runtime.h> 12#include <linux/pm_runtime.h>
12#include <linux/pm_domain.h> 13#include <linux/pm_domain.h>
13#include <linux/pm_qos.h> 14#include <linux/pm_qos.h>
@@ -25,10 +26,6 @@
25 __routine = genpd->dev_ops.callback; \ 26 __routine = genpd->dev_ops.callback; \
26 if (__routine) { \ 27 if (__routine) { \
27 __ret = __routine(dev); \ 28 __ret = __routine(dev); \
28 } else { \
29 __routine = dev_gpd_data(dev)->ops.callback; \
30 if (__routine) \
31 __ret = __routine(dev); \
32 } \ 29 } \
33 __ret; \ 30 __ret; \
34}) 31})
@@ -70,8 +67,6 @@ static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
70 return genpd; 67 return genpd;
71} 68}
72 69
73#ifdef CONFIG_PM
74
75struct generic_pm_domain *dev_to_genpd(struct device *dev) 70struct generic_pm_domain *dev_to_genpd(struct device *dev)
76{ 71{
77 if (IS_ERR_OR_NULL(dev->pm_domain)) 72 if (IS_ERR_OR_NULL(dev->pm_domain))
@@ -147,13 +142,13 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
147{ 142{
148 s64 usecs64; 143 s64 usecs64;
149 144
150 if (!genpd->cpu_data) 145 if (!genpd->cpuidle_data)
151 return; 146 return;
152 147
153 usecs64 = genpd->power_on_latency_ns; 148 usecs64 = genpd->power_on_latency_ns;
154 do_div(usecs64, NSEC_PER_USEC); 149 do_div(usecs64, NSEC_PER_USEC);
155 usecs64 += genpd->cpu_data->saved_exit_latency; 150 usecs64 += genpd->cpuidle_data->saved_exit_latency;
156 genpd->cpu_data->idle_state->exit_latency = usecs64; 151 genpd->cpuidle_data->idle_state->exit_latency = usecs64;
157} 152}
158 153
159/** 154/**
@@ -193,9 +188,9 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
193 return 0; 188 return 0;
194 } 189 }
195 190
196 if (genpd->cpu_data) { 191 if (genpd->cpuidle_data) {
197 cpuidle_pause_and_lock(); 192 cpuidle_pause_and_lock();
198 genpd->cpu_data->idle_state->disabled = true; 193 genpd->cpuidle_data->idle_state->disabled = true;
199 cpuidle_resume_and_unlock(); 194 cpuidle_resume_and_unlock();
200 goto out; 195 goto out;
201 } 196 }
@@ -285,8 +280,6 @@ int pm_genpd_name_poweron(const char *domain_name)
285 return genpd ? pm_genpd_poweron(genpd) : -EINVAL; 280 return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
286} 281}
287 282
288#endif /* CONFIG_PM */
289
290#ifdef CONFIG_PM_RUNTIME 283#ifdef CONFIG_PM_RUNTIME
291 284
292static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, 285static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
@@ -430,7 +423,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
430 * Queue up the execution of pm_genpd_poweroff() unless it's already been done 423 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
431 * before. 424 * before.
432 */ 425 */
433void genpd_queue_power_off_work(struct generic_pm_domain *genpd) 426static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
434{ 427{
435 queue_work(pm_wq, &genpd->power_off_work); 428 queue_work(pm_wq, &genpd->power_off_work);
436} 429}
@@ -520,17 +513,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
520 } 513 }
521 } 514 }
522 515
523 if (genpd->cpu_data) { 516 if (genpd->cpuidle_data) {
524 /* 517 /*
525 * If cpu_data is set, cpuidle should turn the domain off when 518 * If cpuidle_data is set, cpuidle should turn the domain off
526 * the CPU in it is idle. In that case we don't decrement the 519 * when the CPU in it is idle. In that case we don't decrement
527 * subdomain counts of the master domains, so that power is not 520 * the subdomain counts of the master domains, so that power is
528 * removed from the current domain prematurely as a result of 521 * not removed from the current domain prematurely as a result
529 * cutting off the masters' power. 522 * of cutting off the masters' power.
530 */ 523 */
531 genpd->status = GPD_STATE_POWER_OFF; 524 genpd->status = GPD_STATE_POWER_OFF;
532 cpuidle_pause_and_lock(); 525 cpuidle_pause_and_lock();
533 genpd->cpu_data->idle_state->disabled = false; 526 genpd->cpuidle_data->idle_state->disabled = false;
534 cpuidle_resume_and_unlock(); 527 cpuidle_resume_and_unlock();
535 goto out; 528 goto out;
536 } 529 }
@@ -619,8 +612,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
619 if (IS_ERR(genpd)) 612 if (IS_ERR(genpd))
620 return -EINVAL; 613 return -EINVAL;
621 614
622 might_sleep_if(!genpd->dev_irq_safe);
623
624 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; 615 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
625 if (stop_ok && !stop_ok(dev)) 616 if (stop_ok && !stop_ok(dev))
626 return -EBUSY; 617 return -EBUSY;
@@ -665,8 +656,6 @@ static int pm_genpd_runtime_resume(struct device *dev)
665 if (IS_ERR(genpd)) 656 if (IS_ERR(genpd))
666 return -EINVAL; 657 return -EINVAL;
667 658
668 might_sleep_if(!genpd->dev_irq_safe);
669
670 /* If power.irq_safe, the PM domain is never powered off. */ 659 /* If power.irq_safe, the PM domain is never powered off. */
671 if (dev->power.irq_safe) 660 if (dev->power.irq_safe)
672 return genpd_start_dev_no_timing(genpd, dev); 661 return genpd_start_dev_no_timing(genpd, dev);
@@ -733,6 +722,13 @@ void pm_genpd_poweroff_unused(void)
733 mutex_unlock(&gpd_list_lock); 722 mutex_unlock(&gpd_list_lock);
734} 723}
735 724
725static int __init genpd_poweroff_unused(void)
726{
727 pm_genpd_poweroff_unused();
728 return 0;
729}
730late_initcall(genpd_poweroff_unused);
731
736#else 732#else
737 733
738static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb, 734static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
@@ -741,6 +737,9 @@ static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
741 return NOTIFY_DONE; 737 return NOTIFY_DONE;
742} 738}
743 739
740static inline void
741genpd_queue_power_off_work(struct generic_pm_domain *genpd) {}
742
744static inline void genpd_power_off_work_fn(struct work_struct *work) {} 743static inline void genpd_power_off_work_fn(struct work_struct *work) {}
745 744
746#define pm_genpd_runtime_suspend NULL 745#define pm_genpd_runtime_suspend NULL
@@ -774,46 +773,6 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
774 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); 773 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
775} 774}
776 775
777static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
778{
779 return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
780}
781
782static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
783{
784 return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
785}
786
787static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
788{
789 return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
790}
791
792static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
793{
794 return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
795}
796
797static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
798{
799 return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
800}
801
802static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
803{
804 return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
805}
806
807static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
808{
809 return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
810}
811
812static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
813{
814 return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
815}
816
817/** 776/**
818 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. 777 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
819 * @genpd: PM domain to power off, if possible. 778 * @genpd: PM domain to power off, if possible.
@@ -995,7 +954,7 @@ static int pm_genpd_suspend(struct device *dev)
995 if (IS_ERR(genpd)) 954 if (IS_ERR(genpd))
996 return -EINVAL; 955 return -EINVAL;
997 956
998 return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev); 957 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
999} 958}
1000 959
1001/** 960/**
@@ -1016,7 +975,7 @@ static int pm_genpd_suspend_late(struct device *dev)
1016 if (IS_ERR(genpd)) 975 if (IS_ERR(genpd))
1017 return -EINVAL; 976 return -EINVAL;
1018 977
1019 return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev); 978 return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
1020} 979}
1021 980
1022/** 981/**
@@ -1103,7 +1062,7 @@ static int pm_genpd_resume_early(struct device *dev)
1103 if (IS_ERR(genpd)) 1062 if (IS_ERR(genpd))
1104 return -EINVAL; 1063 return -EINVAL;
1105 1064
1106 return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev); 1065 return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
1107} 1066}
1108 1067
1109/** 1068/**
@@ -1124,7 +1083,7 @@ static int pm_genpd_resume(struct device *dev)
1124 if (IS_ERR(genpd)) 1083 if (IS_ERR(genpd))
1125 return -EINVAL; 1084 return -EINVAL;
1126 1085
1127 return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); 1086 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
1128} 1087}
1129 1088
1130/** 1089/**
@@ -1145,7 +1104,7 @@ static int pm_genpd_freeze(struct device *dev)
1145 if (IS_ERR(genpd)) 1104 if (IS_ERR(genpd))
1146 return -EINVAL; 1105 return -EINVAL;
1147 1106
1148 return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); 1107 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
1149} 1108}
1150 1109
1151/** 1110/**
@@ -1167,7 +1126,7 @@ static int pm_genpd_freeze_late(struct device *dev)
1167 if (IS_ERR(genpd)) 1126 if (IS_ERR(genpd))
1168 return -EINVAL; 1127 return -EINVAL;
1169 1128
1170 return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev); 1129 return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
1171} 1130}
1172 1131
1173/** 1132/**
@@ -1231,7 +1190,7 @@ static int pm_genpd_thaw_early(struct device *dev)
1231 if (IS_ERR(genpd)) 1190 if (IS_ERR(genpd))
1232 return -EINVAL; 1191 return -EINVAL;
1233 1192
1234 return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev); 1193 return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
1235} 1194}
1236 1195
1237/** 1196/**
@@ -1252,7 +1211,7 @@ static int pm_genpd_thaw(struct device *dev)
1252 if (IS_ERR(genpd)) 1211 if (IS_ERR(genpd))
1253 return -EINVAL; 1212 return -EINVAL;
1254 1213
1255 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); 1214 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
1256} 1215}
1257 1216
1258/** 1217/**
@@ -1344,13 +1303,13 @@ static void pm_genpd_complete(struct device *dev)
1344} 1303}
1345 1304
1346/** 1305/**
1347 * pm_genpd_syscore_switch - Switch power during system core suspend or resume. 1306 * genpd_syscore_switch - Switch power during system core suspend or resume.
1348 * @dev: Device that normally is marked as "always on" to switch power for. 1307 * @dev: Device that normally is marked as "always on" to switch power for.
1349 * 1308 *
1350 * This routine may only be called during the system core (syscore) suspend or 1309 * This routine may only be called during the system core (syscore) suspend or
1351 * resume phase for devices whose "always on" flags are set. 1310 * resume phase for devices whose "always on" flags are set.
1352 */ 1311 */
1353void pm_genpd_syscore_switch(struct device *dev, bool suspend) 1312static void genpd_syscore_switch(struct device *dev, bool suspend)
1354{ 1313{
1355 struct generic_pm_domain *genpd; 1314 struct generic_pm_domain *genpd;
1356 1315
@@ -1366,7 +1325,18 @@ void pm_genpd_syscore_switch(struct device *dev, bool suspend)
1366 genpd->suspended_count--; 1325 genpd->suspended_count--;
1367 } 1326 }
1368} 1327}
1369EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch); 1328
1329void pm_genpd_syscore_poweroff(struct device *dev)
1330{
1331 genpd_syscore_switch(dev, true);
1332}
1333EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1334
1335void pm_genpd_syscore_poweron(struct device *dev)
1336{
1337 genpd_syscore_switch(dev, false);
1338}
1339EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1370 1340
1371#else 1341#else
1372 1342
@@ -1466,6 +1436,9 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1466 1436
1467 spin_unlock_irq(&dev->power.lock); 1437 spin_unlock_irq(&dev->power.lock);
1468 1438
1439 if (genpd->attach_dev)
1440 genpd->attach_dev(dev);
1441
1469 mutex_lock(&gpd_data->lock); 1442 mutex_lock(&gpd_data->lock);
1470 gpd_data->base.dev = dev; 1443 gpd_data->base.dev = dev;
1471 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1444 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
@@ -1484,39 +1457,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1484} 1457}
1485 1458
1486/** 1459/**
1487 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
1488 * @genpd_node: Device tree node pointer representing a PM domain to which the
1489 * the device is added to.
1490 * @dev: Device to be added.
1491 * @td: Set of PM QoS timing parameters to attach to the device.
1492 */
1493int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1494 struct gpd_timing_data *td)
1495{
1496 struct generic_pm_domain *genpd = NULL, *gpd;
1497
1498 dev_dbg(dev, "%s()\n", __func__);
1499
1500 if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1501 return -EINVAL;
1502
1503 mutex_lock(&gpd_list_lock);
1504 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1505 if (gpd->of_node == genpd_node) {
1506 genpd = gpd;
1507 break;
1508 }
1509 }
1510 mutex_unlock(&gpd_list_lock);
1511
1512 if (!genpd)
1513 return -EINVAL;
1514
1515 return __pm_genpd_add_device(genpd, dev, td);
1516}
1517
1518
1519/**
1520 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. 1460 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
1521 * @domain_name: Name of the PM domain to add the device to. 1461 * @domain_name: Name of the PM domain to add the device to.
1522 * @dev: Device to be added. 1462 * @dev: Device to be added.
@@ -1558,6 +1498,9 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1558 genpd->device_count--; 1498 genpd->device_count--;
1559 genpd->max_off_time_changed = true; 1499 genpd->max_off_time_changed = true;
1560 1500
1501 if (genpd->detach_dev)
1502 genpd->detach_dev(dev);
1503
1561 spin_lock_irq(&dev->power.lock); 1504 spin_lock_irq(&dev->power.lock);
1562 1505
1563 dev->pm_domain = NULL; 1506 dev->pm_domain = NULL;
@@ -1744,112 +1687,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1744} 1687}
1745 1688
1746/** 1689/**
1747 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1748 * @dev: Device to add the callbacks to.
1749 * @ops: Set of callbacks to add.
1750 * @td: Timing data to add to the device along with the callbacks (optional).
1751 *
1752 * Every call to this routine should be balanced with a call to
1753 * __pm_genpd_remove_callbacks() and they must not be nested.
1754 */
1755int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1756 struct gpd_timing_data *td)
1757{
1758 struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
1759 int ret = 0;
1760
1761 if (!(dev && ops))
1762 return -EINVAL;
1763
1764 gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1765 if (!gpd_data_new)
1766 return -ENOMEM;
1767
1768 pm_runtime_disable(dev);
1769 device_pm_lock();
1770
1771 ret = dev_pm_get_subsys_data(dev);
1772 if (ret)
1773 goto out;
1774
1775 spin_lock_irq(&dev->power.lock);
1776
1777 if (dev->power.subsys_data->domain_data) {
1778 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1779 } else {
1780 gpd_data = gpd_data_new;
1781 dev->power.subsys_data->domain_data = &gpd_data->base;
1782 }
1783 gpd_data->refcount++;
1784 gpd_data->ops = *ops;
1785 if (td)
1786 gpd_data->td = *td;
1787
1788 spin_unlock_irq(&dev->power.lock);
1789
1790 out:
1791 device_pm_unlock();
1792 pm_runtime_enable(dev);
1793
1794 if (gpd_data != gpd_data_new)
1795 __pm_genpd_free_dev_data(dev, gpd_data_new);
1796
1797 return ret;
1798}
1799EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1800
1801/**
1802 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1803 * @dev: Device to remove the callbacks from.
1804 * @clear_td: If set, clear the device's timing data too.
1805 *
1806 * This routine can only be called after pm_genpd_add_callbacks().
1807 */
1808int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1809{
1810 struct generic_pm_domain_data *gpd_data = NULL;
1811 bool remove = false;
1812 int ret = 0;
1813
1814 if (!(dev && dev->power.subsys_data))
1815 return -EINVAL;
1816
1817 pm_runtime_disable(dev);
1818 device_pm_lock();
1819
1820 spin_lock_irq(&dev->power.lock);
1821
1822 if (dev->power.subsys_data->domain_data) {
1823 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1824 gpd_data->ops = (struct gpd_dev_ops){ NULL };
1825 if (clear_td)
1826 gpd_data->td = (struct gpd_timing_data){ 0 };
1827
1828 if (--gpd_data->refcount == 0) {
1829 dev->power.subsys_data->domain_data = NULL;
1830 remove = true;
1831 }
1832 } else {
1833 ret = -EINVAL;
1834 }
1835
1836 spin_unlock_irq(&dev->power.lock);
1837
1838 device_pm_unlock();
1839 pm_runtime_enable(dev);
1840
1841 if (ret)
1842 return ret;
1843
1844 dev_pm_put_subsys_data(dev);
1845 if (remove)
1846 __pm_genpd_free_dev_data(dev, gpd_data);
1847
1848 return 0;
1849}
1850EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1851
1852/**
1853 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. 1690 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1854 * @genpd: PM domain to be connected with cpuidle. 1691 * @genpd: PM domain to be connected with cpuidle.
1855 * @state: cpuidle state this domain can disable/enable. 1692 * @state: cpuidle state this domain can disable/enable.
@@ -1861,7 +1698,7 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1861int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) 1698int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1862{ 1699{
1863 struct cpuidle_driver *cpuidle_drv; 1700 struct cpuidle_driver *cpuidle_drv;
1864 struct gpd_cpu_data *cpu_data; 1701 struct gpd_cpuidle_data *cpuidle_data;
1865 struct cpuidle_state *idle_state; 1702 struct cpuidle_state *idle_state;
1866 int ret = 0; 1703 int ret = 0;
1867 1704
@@ -1870,12 +1707,12 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1870 1707
1871 genpd_acquire_lock(genpd); 1708 genpd_acquire_lock(genpd);
1872 1709
1873 if (genpd->cpu_data) { 1710 if (genpd->cpuidle_data) {
1874 ret = -EEXIST; 1711 ret = -EEXIST;
1875 goto out; 1712 goto out;
1876 } 1713 }
1877 cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL); 1714 cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
1878 if (!cpu_data) { 1715 if (!cpuidle_data) {
1879 ret = -ENOMEM; 1716 ret = -ENOMEM;
1880 goto out; 1717 goto out;
1881 } 1718 }
@@ -1893,9 +1730,9 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1893 ret = -EAGAIN; 1730 ret = -EAGAIN;
1894 goto err; 1731 goto err;
1895 } 1732 }
1896 cpu_data->idle_state = idle_state; 1733 cpuidle_data->idle_state = idle_state;
1897 cpu_data->saved_exit_latency = idle_state->exit_latency; 1734 cpuidle_data->saved_exit_latency = idle_state->exit_latency;
1898 genpd->cpu_data = cpu_data; 1735 genpd->cpuidle_data = cpuidle_data;
1899 genpd_recalc_cpu_exit_latency(genpd); 1736 genpd_recalc_cpu_exit_latency(genpd);
1900 1737
1901 out: 1738 out:
@@ -1906,7 +1743,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1906 cpuidle_driver_unref(); 1743 cpuidle_driver_unref();
1907 1744
1908 err_drv: 1745 err_drv:
1909 kfree(cpu_data); 1746 kfree(cpuidle_data);
1910 goto out; 1747 goto out;
1911} 1748}
1912 1749
@@ -1929,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
1929 */ 1766 */
1930int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) 1767int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1931{ 1768{
1932 struct gpd_cpu_data *cpu_data; 1769 struct gpd_cpuidle_data *cpuidle_data;
1933 struct cpuidle_state *idle_state; 1770 struct cpuidle_state *idle_state;
1934 int ret = 0; 1771 int ret = 0;
1935 1772
@@ -1938,20 +1775,20 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1938 1775
1939 genpd_acquire_lock(genpd); 1776 genpd_acquire_lock(genpd);
1940 1777
1941 cpu_data = genpd->cpu_data; 1778 cpuidle_data = genpd->cpuidle_data;
1942 if (!cpu_data) { 1779 if (!cpuidle_data) {
1943 ret = -ENODEV; 1780 ret = -ENODEV;
1944 goto out; 1781 goto out;
1945 } 1782 }
1946 idle_state = cpu_data->idle_state; 1783 idle_state = cpuidle_data->idle_state;
1947 if (!idle_state->disabled) { 1784 if (!idle_state->disabled) {
1948 ret = -EAGAIN; 1785 ret = -EAGAIN;
1949 goto out; 1786 goto out;
1950 } 1787 }
1951 idle_state->exit_latency = cpu_data->saved_exit_latency; 1788 idle_state->exit_latency = cpuidle_data->saved_exit_latency;
1952 cpuidle_driver_unref(); 1789 cpuidle_driver_unref();
1953 genpd->cpu_data = NULL; 1790 genpd->cpuidle_data = NULL;
1954 kfree(cpu_data); 1791 kfree(cpuidle_data);
1955 1792
1956 out: 1793 out:
1957 genpd_release_lock(genpd); 1794 genpd_release_lock(genpd);
@@ -1970,17 +1807,13 @@ int pm_genpd_name_detach_cpuidle(const char *name)
1970/* Default device callbacks for generic PM domains. */ 1807/* Default device callbacks for generic PM domains. */
1971 1808
1972/** 1809/**
1973 * pm_genpd_default_save_state - Default "save device state" for PM domians. 1810 * pm_genpd_default_save_state - Default "save device state" for PM domains.
1974 * @dev: Device to handle. 1811 * @dev: Device to handle.
1975 */ 1812 */
1976static int pm_genpd_default_save_state(struct device *dev) 1813static int pm_genpd_default_save_state(struct device *dev)
1977{ 1814{
1978 int (*cb)(struct device *__dev); 1815 int (*cb)(struct device *__dev);
1979 1816
1980 cb = dev_gpd_data(dev)->ops.save_state;
1981 if (cb)
1982 return cb(dev);
1983
1984 if (dev->type && dev->type->pm) 1817 if (dev->type && dev->type->pm)
1985 cb = dev->type->pm->runtime_suspend; 1818 cb = dev->type->pm->runtime_suspend;
1986 else if (dev->class && dev->class->pm) 1819 else if (dev->class && dev->class->pm)
@@ -1997,17 +1830,13 @@ static int pm_genpd_default_save_state(struct device *dev)
1997} 1830}
1998 1831
1999/** 1832/**
2000 * pm_genpd_default_restore_state - Default PM domians "restore device state". 1833 * pm_genpd_default_restore_state - Default PM domains "restore device state".
2001 * @dev: Device to handle. 1834 * @dev: Device to handle.
2002 */ 1835 */
2003static int pm_genpd_default_restore_state(struct device *dev) 1836static int pm_genpd_default_restore_state(struct device *dev)
2004{ 1837{
2005 int (*cb)(struct device *__dev); 1838 int (*cb)(struct device *__dev);
2006 1839
2007 cb = dev_gpd_data(dev)->ops.restore_state;
2008 if (cb)
2009 return cb(dev);
2010
2011 if (dev->type && dev->type->pm) 1840 if (dev->type && dev->type->pm)
2012 cb = dev->type->pm->runtime_resume; 1841 cb = dev->type->pm->runtime_resume;
2013 else if (dev->class && dev->class->pm) 1842 else if (dev->class && dev->class->pm)
@@ -2023,109 +1852,6 @@ static int pm_genpd_default_restore_state(struct device *dev)
2023 return cb ? cb(dev) : 0; 1852 return cb ? cb(dev) : 0;
2024} 1853}
2025 1854
2026#ifdef CONFIG_PM_SLEEP
2027
2028/**
2029 * pm_genpd_default_suspend - Default "device suspend" for PM domians.
2030 * @dev: Device to handle.
2031 */
2032static int pm_genpd_default_suspend(struct device *dev)
2033{
2034 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
2035
2036 return cb ? cb(dev) : pm_generic_suspend(dev);
2037}
2038
2039/**
2040 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
2041 * @dev: Device to handle.
2042 */
2043static int pm_genpd_default_suspend_late(struct device *dev)
2044{
2045 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
2046
2047 return cb ? cb(dev) : pm_generic_suspend_late(dev);
2048}
2049
2050/**
2051 * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
2052 * @dev: Device to handle.
2053 */
2054static int pm_genpd_default_resume_early(struct device *dev)
2055{
2056 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
2057
2058 return cb ? cb(dev) : pm_generic_resume_early(dev);
2059}
2060
2061/**
2062 * pm_genpd_default_resume - Default "device resume" for PM domians.
2063 * @dev: Device to handle.
2064 */
2065static int pm_genpd_default_resume(struct device *dev)
2066{
2067 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
2068
2069 return cb ? cb(dev) : pm_generic_resume(dev);
2070}
2071
2072/**
2073 * pm_genpd_default_freeze - Default "device freeze" for PM domians.
2074 * @dev: Device to handle.
2075 */
2076static int pm_genpd_default_freeze(struct device *dev)
2077{
2078 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
2079
2080 return cb ? cb(dev) : pm_generic_freeze(dev);
2081}
2082
2083/**
2084 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
2085 * @dev: Device to handle.
2086 */
2087static int pm_genpd_default_freeze_late(struct device *dev)
2088{
2089 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
2090
2091 return cb ? cb(dev) : pm_generic_freeze_late(dev);
2092}
2093
2094/**
2095 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
2096 * @dev: Device to handle.
2097 */
2098static int pm_genpd_default_thaw_early(struct device *dev)
2099{
2100 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
2101
2102 return cb ? cb(dev) : pm_generic_thaw_early(dev);
2103}
2104
2105/**
2106 * pm_genpd_default_thaw - Default "device thaw" for PM domians.
2107 * @dev: Device to handle.
2108 */
2109static int pm_genpd_default_thaw(struct device *dev)
2110{
2111 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
2112
2113 return cb ? cb(dev) : pm_generic_thaw(dev);
2114}
2115
2116#else /* !CONFIG_PM_SLEEP */
2117
2118#define pm_genpd_default_suspend NULL
2119#define pm_genpd_default_suspend_late NULL
2120#define pm_genpd_default_resume_early NULL
2121#define pm_genpd_default_resume NULL
2122#define pm_genpd_default_freeze NULL
2123#define pm_genpd_default_freeze_late NULL
2124#define pm_genpd_default_thaw_early NULL
2125#define pm_genpd_default_thaw NULL
2126
2127#endif /* !CONFIG_PM_SLEEP */
2128
2129/** 1855/**
2130 * pm_genpd_init - Initialize a generic I/O PM domain object. 1856 * pm_genpd_init - Initialize a generic I/O PM domain object.
2131 * @genpd: PM domain object to initialize. 1857 * @genpd: PM domain object to initialize.
@@ -2177,15 +1903,452 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
2177 genpd->domain.ops.complete = pm_genpd_complete; 1903 genpd->domain.ops.complete = pm_genpd_complete;
2178 genpd->dev_ops.save_state = pm_genpd_default_save_state; 1904 genpd->dev_ops.save_state = pm_genpd_default_save_state;
2179 genpd->dev_ops.restore_state = pm_genpd_default_restore_state; 1905 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
2180 genpd->dev_ops.suspend = pm_genpd_default_suspend;
2181 genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
2182 genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
2183 genpd->dev_ops.resume = pm_genpd_default_resume;
2184 genpd->dev_ops.freeze = pm_genpd_default_freeze;
2185 genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
2186 genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
2187 genpd->dev_ops.thaw = pm_genpd_default_thaw;
2188 mutex_lock(&gpd_list_lock); 1906 mutex_lock(&gpd_list_lock);
2189 list_add(&genpd->gpd_list_node, &gpd_list); 1907 list_add(&genpd->gpd_list_node, &gpd_list);
2190 mutex_unlock(&gpd_list_lock); 1908 mutex_unlock(&gpd_list_lock);
2191} 1909}
1910
1911#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1912/*
1913 * Device Tree based PM domain providers.
1914 *
1915 * The code below implements generic device tree based PM domain providers that
1916 * bind device tree nodes with generic PM domains registered in the system.
1917 *
1918 * Any driver that registers generic PM domains and needs to support binding of
1919 * devices to these domains is supposed to register a PM domain provider, which
1920 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1921 *
1922 * Two simple mapping functions have been provided for convenience:
1923 * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1924 * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1925 * index.
1926 */
1927
1928/**
1929 * struct of_genpd_provider - PM domain provider registration structure
1930 * @link: Entry in global list of PM domain providers
1931 * @node: Pointer to device tree node of PM domain provider
1932 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1933 * into a PM domain.
1934 * @data: context pointer to be passed into @xlate callback
1935 */
1936struct of_genpd_provider {
1937 struct list_head link;
1938 struct device_node *node;
1939 genpd_xlate_t xlate;
1940 void *data;
1941};
1942
1943/* List of registered PM domain providers. */
1944static LIST_HEAD(of_genpd_providers);
1945/* Mutex to protect the list above. */
1946static DEFINE_MUTEX(of_genpd_mutex);
1947
1948/**
1949 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
1950 * @genpdspec: OF phandle args to map into a PM domain
1951 * @data: xlate function private data - pointer to struct generic_pm_domain
1952 *
1953 * This is a generic xlate function that can be used to model PM domains that
1954 * have their own device tree nodes. The private data of xlate function needs
1955 * to be a valid pointer to struct generic_pm_domain.
1956 */
1957struct generic_pm_domain *__of_genpd_xlate_simple(
1958 struct of_phandle_args *genpdspec,
1959 void *data)
1960{
1961 if (genpdspec->args_count != 0)
1962 return ERR_PTR(-EINVAL);
1963 return data;
1964}
1965EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
1966
1967/**
1968 * __of_genpd_xlate_onecell() - Xlate function using a single index.
1969 * @genpdspec: OF phandle args to map into a PM domain
1970 * @data: xlate function private data - pointer to struct genpd_onecell_data
1971 *
1972 * This is a generic xlate function that can be used to model simple PM domain
1973 * controllers that have one device tree node and provide multiple PM domains.
1974 * A single cell is used as an index into an array of PM domains specified in
1975 * the genpd_onecell_data struct when registering the provider.
1976 */
1977struct generic_pm_domain *__of_genpd_xlate_onecell(
1978 struct of_phandle_args *genpdspec,
1979 void *data)
1980{
1981 struct genpd_onecell_data *genpd_data = data;
1982 unsigned int idx = genpdspec->args[0];
1983
1984 if (genpdspec->args_count != 1)
1985 return ERR_PTR(-EINVAL);
1986
1987 if (idx >= genpd_data->num_domains) {
1988 pr_err("%s: invalid domain index %u\n", __func__, idx);
1989 return ERR_PTR(-EINVAL);
1990 }
1991
1992 if (!genpd_data->domains[idx])
1993 return ERR_PTR(-ENOENT);
1994
1995 return genpd_data->domains[idx];
1996}
1997EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
1998
1999/**
2000 * __of_genpd_add_provider() - Register a PM domain provider for a node
2001 * @np: Device node pointer associated with the PM domain provider.
2002 * @xlate: Callback for decoding PM domain from phandle arguments.
2003 * @data: Context pointer for @xlate callback.
2004 */
2005int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2006 void *data)
2007{
2008 struct of_genpd_provider *cp;
2009
2010 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2011 if (!cp)
2012 return -ENOMEM;
2013
2014 cp->node = of_node_get(np);
2015 cp->data = data;
2016 cp->xlate = xlate;
2017
2018 mutex_lock(&of_genpd_mutex);
2019 list_add(&cp->link, &of_genpd_providers);
2020 mutex_unlock(&of_genpd_mutex);
2021 pr_debug("Added domain provider from %s\n", np->full_name);
2022
2023 return 0;
2024}
2025EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
2026
2027/**
2028 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2029 * @np: Device node pointer associated with the PM domain provider
2030 */
2031void of_genpd_del_provider(struct device_node *np)
2032{
2033 struct of_genpd_provider *cp;
2034
2035 mutex_lock(&of_genpd_mutex);
2036 list_for_each_entry(cp, &of_genpd_providers, link) {
2037 if (cp->node == np) {
2038 list_del(&cp->link);
2039 of_node_put(cp->node);
2040 kfree(cp);
2041 break;
2042 }
2043 }
2044 mutex_unlock(&of_genpd_mutex);
2045}
2046EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2047
2048/**
2049 * of_genpd_get_from_provider() - Look-up PM domain
2050 * @genpdspec: OF phandle args to use for look-up
2051 *
2052 * Looks for a PM domain provider under the node specified by @genpdspec and if
2053 * found, uses xlate function of the provider to map phandle args to a PM
2054 * domain.
2055 *
2056 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2057 * on failure.
2058 */
2059static struct generic_pm_domain *of_genpd_get_from_provider(
2060 struct of_phandle_args *genpdspec)
2061{
2062 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2063 struct of_genpd_provider *provider;
2064
2065 mutex_lock(&of_genpd_mutex);
2066
2067 /* Check if we have such a provider in our array */
2068 list_for_each_entry(provider, &of_genpd_providers, link) {
2069 if (provider->node == genpdspec->np)
2070 genpd = provider->xlate(genpdspec, provider->data);
2071 if (!IS_ERR(genpd))
2072 break;
2073 }
2074
2075 mutex_unlock(&of_genpd_mutex);
2076
2077 return genpd;
2078}
2079
2080/**
2081 * genpd_dev_pm_detach - Detach a device from its PM domain.
2082 * @dev: Device to attach.
2083 * @power_off: Currently not used
2084 *
2085 * Try to locate a corresponding generic PM domain, which the device was
2086 * attached to previously. If such is found, the device is detached from it.
2087 */
2088static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2089{
2090 struct generic_pm_domain *pd = NULL, *gpd;
2091 int ret = 0;
2092
2093 if (!dev->pm_domain)
2094 return;
2095
2096 mutex_lock(&gpd_list_lock);
2097 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2098 if (&gpd->domain == dev->pm_domain) {
2099 pd = gpd;
2100 break;
2101 }
2102 }
2103 mutex_unlock(&gpd_list_lock);
2104
2105 if (!pd)
2106 return;
2107
2108 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2109
2110 while (1) {
2111 ret = pm_genpd_remove_device(pd, dev);
2112 if (ret != -EAGAIN)
2113 break;
2114 cond_resched();
2115 }
2116
2117 if (ret < 0) {
2118 dev_err(dev, "failed to remove from PM domain %s: %d",
2119 pd->name, ret);
2120 return;
2121 }
2122
2123 /* Check if PM domain can be powered off after removing this device. */
2124 genpd_queue_power_off_work(pd);
2125}
2126
2127/**
2128 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2129 * @dev: Device to attach.
2130 *
2131 * Parse device's OF node to find a PM domain specifier. If such is found,
2132 * attaches the device to retrieved pm_domain ops.
2133 *
2134 * Both generic and legacy Samsung-specific DT bindings are supported to keep
2135 * backwards compatibility with existing DTBs.
2136 *
2137 * Returns 0 on successfully attached PM domain or negative error code.
2138 */
2139int genpd_dev_pm_attach(struct device *dev)
2140{
2141 struct of_phandle_args pd_args;
2142 struct generic_pm_domain *pd;
2143 int ret;
2144
2145 if (!dev->of_node)
2146 return -ENODEV;
2147
2148 if (dev->pm_domain)
2149 return -EEXIST;
2150
2151 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2152 "#power-domain-cells", 0, &pd_args);
2153 if (ret < 0) {
2154 if (ret != -ENOENT)
2155 return ret;
2156
2157 /*
2158 * Try legacy Samsung-specific bindings
2159 * (for backwards compatibility of DT ABI)
2160 */
2161 pd_args.args_count = 0;
2162 pd_args.np = of_parse_phandle(dev->of_node,
2163 "samsung,power-domain", 0);
2164 if (!pd_args.np)
2165 return -ENOENT;
2166 }
2167
2168 pd = of_genpd_get_from_provider(&pd_args);
2169 if (IS_ERR(pd)) {
2170 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2171 __func__, PTR_ERR(pd));
2172 of_node_put(dev->of_node);
2173 return PTR_ERR(pd);
2174 }
2175
2176 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2177
2178 while (1) {
2179 ret = pm_genpd_add_device(pd, dev);
2180 if (ret != -EAGAIN)
2181 break;
2182 cond_resched();
2183 }
2184
2185 if (ret < 0) {
2186 dev_err(dev, "failed to add to PM domain %s: %d",
2187 pd->name, ret);
2188 of_node_put(dev->of_node);
2189 return ret;
2190 }
2191
2192 dev->pm_domain->detach = genpd_dev_pm_detach;
2193
2194 return 0;
2195}
2196EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2197#endif
2198
2199
2200/*** debugfs support ***/
2201
2202#ifdef CONFIG_PM_ADVANCED_DEBUG
2203#include <linux/pm.h>
2204#include <linux/device.h>
2205#include <linux/debugfs.h>
2206#include <linux/seq_file.h>
2207#include <linux/init.h>
2208#include <linux/kobject.h>
2209static struct dentry *pm_genpd_debugfs_dir;
2210
2211/*
2212 * TODO: This function is a slightly modified version of rtpm_status_show
2213 * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME
2214 * are too loose to generalize it.
2215 */
2216#ifdef CONFIG_PM_RUNTIME
2217static void rtpm_status_str(struct seq_file *s, struct device *dev)
2218{
2219 static const char * const status_lookup[] = {
2220 [RPM_ACTIVE] = "active",
2221 [RPM_RESUMING] = "resuming",
2222 [RPM_SUSPENDED] = "suspended",
2223 [RPM_SUSPENDING] = "suspending"
2224 };
2225 const char *p = "";
2226
2227 if (dev->power.runtime_error)
2228 p = "error";
2229 else if (dev->power.disable_depth)
2230 p = "unsupported";
2231 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2232 p = status_lookup[dev->power.runtime_status];
2233 else
2234 WARN_ON(1);
2235
2236 seq_puts(s, p);
2237}
2238#else
2239static void rtpm_status_str(struct seq_file *s, struct device *dev)
2240{
2241 seq_puts(s, "active");
2242}
2243#endif
2244
2245static int pm_genpd_summary_one(struct seq_file *s,
2246 struct generic_pm_domain *gpd)
2247{
2248 static const char * const status_lookup[] = {
2249 [GPD_STATE_ACTIVE] = "on",
2250 [GPD_STATE_WAIT_MASTER] = "wait-master",
2251 [GPD_STATE_BUSY] = "busy",
2252 [GPD_STATE_REPEAT] = "off-in-progress",
2253 [GPD_STATE_POWER_OFF] = "off"
2254 };
2255 struct pm_domain_data *pm_data;
2256 const char *kobj_path;
2257 struct gpd_link *link;
2258 int ret;
2259
2260 ret = mutex_lock_interruptible(&gpd->lock);
2261 if (ret)
2262 return -ERESTARTSYS;
2263
2264 if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
2265 goto exit;
2266 seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);
2267
2268 /*
2269 * Modifications on the list require holding locks on both
2270 * master and slave, so we are safe.
2271 * Also gpd->name is immutable.
2272 */
2273 list_for_each_entry(link, &gpd->master_links, master_node) {
2274 seq_printf(s, "%s", link->slave->name);
2275 if (!list_is_last(&link->master_node, &gpd->master_links))
2276 seq_puts(s, ", ");
2277 }
2278
2279 list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
2280 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
2281 if (kobj_path == NULL)
2282 continue;
2283
2284 seq_printf(s, "\n %-50s ", kobj_path);
2285 rtpm_status_str(s, pm_data->dev);
2286 kfree(kobj_path);
2287 }
2288
2289 seq_puts(s, "\n");
2290exit:
2291 mutex_unlock(&gpd->lock);
2292
2293 return 0;
2294}
2295
2296static int pm_genpd_summary_show(struct seq_file *s, void *data)
2297{
2298 struct generic_pm_domain *gpd;
2299 int ret = 0;
2300
2301 seq_puts(s, " domain status slaves\n");
2302 seq_puts(s, " /device runtime status\n");
2303 seq_puts(s, "----------------------------------------------------------------------\n");
2304
2305 ret = mutex_lock_interruptible(&gpd_list_lock);
2306 if (ret)
2307 return -ERESTARTSYS;
2308
2309 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2310 ret = pm_genpd_summary_one(s, gpd);
2311 if (ret)
2312 break;
2313 }
2314 mutex_unlock(&gpd_list_lock);
2315
2316 return ret;
2317}
2318
2319static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2320{
2321 return single_open(file, pm_genpd_summary_show, NULL);
2322}
2323
2324static const struct file_operations pm_genpd_summary_fops = {
2325 .open = pm_genpd_summary_open,
2326 .read = seq_read,
2327 .llseek = seq_lseek,
2328 .release = single_release,
2329};
2330
2331static int __init pm_genpd_debug_init(void)
2332{
2333 struct dentry *d;
2334
2335 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2336
2337 if (!pm_genpd_debugfs_dir)
2338 return -ENOMEM;
2339
2340 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2341 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2342 if (!d)
2343 return -ENOMEM;
2344
2345 return 0;
2346}
2347late_initcall(pm_genpd_debug_init);
2348
2349static void __exit pm_genpd_debug_exit(void)
2350{
2351 debugfs_remove_recursive(pm_genpd_debugfs_dir);
2352}
2353__exitcall(pm_genpd_debug_exit);
2354#endif /* CONFIG_PM_ADVANCED_DEBUG */
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index a089e3bcdfbc..d88a62e104d4 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -42,7 +42,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
42 * default_stop_ok - Default PM domain governor routine for stopping devices. 42 * default_stop_ok - Default PM domain governor routine for stopping devices.
43 * @dev: Device to check. 43 * @dev: Device to check.
44 */ 44 */
45bool default_stop_ok(struct device *dev) 45static bool default_stop_ok(struct device *dev)
46{ 46{
47 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 47 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
48 unsigned long flags; 48 unsigned long flags;
@@ -229,10 +229,7 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
229 229
230#else /* !CONFIG_PM_RUNTIME */ 230#else /* !CONFIG_PM_RUNTIME */
231 231
232bool default_stop_ok(struct device *dev) 232static inline bool default_stop_ok(struct device *dev) { return false; }
233{
234 return false;
235}
236 233
237#define default_power_down_ok NULL 234#define default_power_down_ok NULL
238#define always_on_power_down_ok NULL 235#define always_on_power_down_ok NULL
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b67d9aef9fe4..44973196d3fd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -540,7 +540,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
540 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and 540 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
541 * enable device drivers to receive interrupts. 541 * enable device drivers to receive interrupts.
542 */ 542 */
543static void dpm_resume_noirq(pm_message_t state) 543void dpm_resume_noirq(pm_message_t state)
544{ 544{
545 struct device *dev; 545 struct device *dev;
546 ktime_t starttime = ktime_get(); 546 ktime_t starttime = ktime_get();
@@ -662,7 +662,7 @@ static void async_resume_early(void *data, async_cookie_t cookie)
662 * dpm_resume_early - Execute "early resume" callbacks for all devices. 662 * dpm_resume_early - Execute "early resume" callbacks for all devices.
663 * @state: PM transition of the system being carried out. 663 * @state: PM transition of the system being carried out.
664 */ 664 */
665static void dpm_resume_early(pm_message_t state) 665void dpm_resume_early(pm_message_t state)
666{ 666{
667 struct device *dev; 667 struct device *dev;
668 ktime_t starttime = ktime_get(); 668 ktime_t starttime = ktime_get();
@@ -1093,7 +1093,7 @@ static int device_suspend_noirq(struct device *dev)
1093 * Prevent device drivers from receiving interrupts and call the "noirq" suspend 1093 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1094 * handlers for all non-sysdev devices. 1094 * handlers for all non-sysdev devices.
1095 */ 1095 */
1096static int dpm_suspend_noirq(pm_message_t state) 1096int dpm_suspend_noirq(pm_message_t state)
1097{ 1097{
1098 ktime_t starttime = ktime_get(); 1098 ktime_t starttime = ktime_get();
1099 int error = 0; 1099 int error = 0;
@@ -1232,7 +1232,7 @@ static int device_suspend_late(struct device *dev)
1232 * dpm_suspend_late - Execute "late suspend" callbacks for all devices. 1232 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1233 * @state: PM transition of the system being carried out. 1233 * @state: PM transition of the system being carried out.
1234 */ 1234 */
1235static int dpm_suspend_late(pm_message_t state) 1235int dpm_suspend_late(pm_message_t state)
1236{ 1236{
1237 ktime_t starttime = ktime_get(); 1237 ktime_t starttime = ktime_get();
1238 int error = 0; 1238 int error = 0;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 95b181d1ca6d..a9d26ed11bf4 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -92,9 +92,6 @@
92 * wakeup_count - Report the number of wakeup events related to the device 92 * wakeup_count - Report the number of wakeup events related to the device
93 */ 93 */
94 94
95static const char enabled[] = "enabled";
96static const char disabled[] = "disabled";
97
98const char power_group_name[] = "power"; 95const char power_group_name[] = "power";
99EXPORT_SYMBOL_GPL(power_group_name); 96EXPORT_SYMBOL_GPL(power_group_name);
100 97
@@ -336,11 +333,14 @@ static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
336#endif /* CONFIG_PM_RUNTIME */ 333#endif /* CONFIG_PM_RUNTIME */
337 334
338#ifdef CONFIG_PM_SLEEP 335#ifdef CONFIG_PM_SLEEP
336static const char _enabled[] = "enabled";
337static const char _disabled[] = "disabled";
338
339static ssize_t 339static ssize_t
340wake_show(struct device * dev, struct device_attribute *attr, char * buf) 340wake_show(struct device * dev, struct device_attribute *attr, char * buf)
341{ 341{
342 return sprintf(buf, "%s\n", device_can_wakeup(dev) 342 return sprintf(buf, "%s\n", device_can_wakeup(dev)
343 ? (device_may_wakeup(dev) ? enabled : disabled) 343 ? (device_may_wakeup(dev) ? _enabled : _disabled)
344 : ""); 344 : "");
345} 345}
346 346
@@ -357,11 +357,11 @@ wake_store(struct device * dev, struct device_attribute *attr,
357 cp = memchr(buf, '\n', n); 357 cp = memchr(buf, '\n', n);
358 if (cp) 358 if (cp)
359 len = cp - buf; 359 len = cp - buf;
360 if (len == sizeof enabled - 1 360 if (len == sizeof _enabled - 1
361 && strncmp(buf, enabled, sizeof enabled - 1) == 0) 361 && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
362 device_set_wakeup_enable(dev, 1); 362 device_set_wakeup_enable(dev, 1);
363 else if (len == sizeof disabled - 1 363 else if (len == sizeof _disabled - 1
364 && strncmp(buf, disabled, sizeof disabled - 1) == 0) 364 && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
365 device_set_wakeup_enable(dev, 0); 365 device_set_wakeup_enable(dev, 0);
366 else 366 else
367 return -EINVAL; 367 return -EINVAL;
@@ -570,7 +570,8 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
570 char *buf) 570 char *buf)
571{ 571{
572 return sprintf(buf, "%s\n", 572 return sprintf(buf, "%s\n",
573 device_async_suspend_enabled(dev) ? enabled : disabled); 573 device_async_suspend_enabled(dev) ?
574 _enabled : _disabled);
574} 575}
575 576
576static ssize_t async_store(struct device *dev, struct device_attribute *attr, 577static ssize_t async_store(struct device *dev, struct device_attribute *attr,
@@ -582,9 +583,10 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
582 cp = memchr(buf, '\n', n); 583 cp = memchr(buf, '\n', n);
583 if (cp) 584 if (cp)
584 len = cp - buf; 585 len = cp - buf;
585 if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0) 586 if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
586 device_enable_async_suspend(dev); 587 device_enable_async_suspend(dev);
587 else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0) 588 else if (len == sizeof _disabled - 1 &&
589 strncmp(buf, _disabled, len) == 0)
588 device_disable_async_suspend(dev); 590 device_disable_async_suspend(dev);
589 else 591 else
590 return -EINVAL; 592 return -EINVAL;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index eb1bd2ecad8b..c2744b30d5d9 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@
24 */ 24 */
25bool events_check_enabled __read_mostly; 25bool events_check_enabled __read_mostly;
26 26
27/* If set and the system is suspending, terminate the suspend. */
28static bool pm_abort_suspend __read_mostly;
29
27/* 30/*
28 * Combined counters of registered wakeup events and wakeup events in progress. 31 * Combined counters of registered wakeup events and wakeup events in progress.
29 * They need to be modified together atomically, so it's better to use one 32 * They need to be modified together atomically, so it's better to use one
@@ -719,7 +722,18 @@ bool pm_wakeup_pending(void)
719 pm_print_active_wakeup_sources(); 722 pm_print_active_wakeup_sources();
720 } 723 }
721 724
722 return ret; 725 return ret || pm_abort_suspend;
726}
727
728void pm_system_wakeup(void)
729{
730 pm_abort_suspend = true;
731 freeze_wake();
732}
733
734void pm_wakeup_clear(void)
735{
736 pm_abort_suspend = false;
723} 737}
724 738
725/** 739/**
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index dbb8350ea8dc..8d98a329f6ea 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,7 +9,7 @@
9#include <linux/syscore_ops.h> 9#include <linux/syscore_ops.h>
10#include <linux/mutex.h> 10#include <linux/mutex.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/interrupt.h> 12#include <linux/suspend.h>
13#include <trace/events/power.h> 13#include <trace/events/power.h>
14 14
15static LIST_HEAD(syscore_ops_list); 15static LIST_HEAD(syscore_ops_list);
@@ -54,9 +54,8 @@ int syscore_suspend(void)
54 pr_debug("Checking wakeup interrupts\n"); 54 pr_debug("Checking wakeup interrupts\n");
55 55
56 /* Return error code if there are any wakeup interrupts pending. */ 56 /* Return error code if there are any wakeup interrupts pending. */
57 ret = check_wakeup_irqs(); 57 if (pm_wakeup_pending())
58 if (ret) 58 return -EBUSY;
59 return ret;
60 59
61 WARN_ONCE(!irqs_disabled(), 60 WARN_ONCE(!irqs_disabled(),
62 "Interrupts enabled before system core suspend.\n"); 61 "Interrupts enabled before system core suspend.\n");
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index ffe350f86bca..3489f8f5fada 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -183,14 +183,14 @@ config CPU_FREQ_GOV_CONSERVATIVE
183 183
184 If in doubt, say N. 184 If in doubt, say N.
185 185
186config GENERIC_CPUFREQ_CPU0 186config CPUFREQ_DT
187 tristate "Generic CPU0 cpufreq driver" 187 tristate "Generic DT based cpufreq driver"
188 depends on HAVE_CLK && OF 188 depends on HAVE_CLK && OF
189 # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y: 189 # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
190 depends on !CPU_THERMAL || THERMAL 190 depends on !CPU_THERMAL || THERMAL
191 select PM_OPP 191 select PM_OPP
192 help 192 help
193 This adds a generic cpufreq driver for CPU0 frequency management. 193 This adds a generic DT based cpufreq driver for frequency management.
194 It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) 194 It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
195 systems which share clock and voltage across all CPUs. 195 systems which share clock and voltage across all CPUs.
196 196
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 28c666c80149..83a75dc84761 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -92,7 +92,7 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
92 92
93config ARM_HIGHBANK_CPUFREQ 93config ARM_HIGHBANK_CPUFREQ
94 tristate "Calxeda Highbank-based" 94 tristate "Calxeda Highbank-based"
95 depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR 95 depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
96 default m 96 default m
97 help 97 help
98 This adds the CPUFreq driver for Calxeda Highbank SoC 98 This adds the CPUFreq driver for Calxeda Highbank SoC
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index db6d9a2fea4d..40c53dc1937e 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
13obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o 13obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
14obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o 14obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
15 15
16obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o 16obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
17 17
18################################################################################## 18##################################################################################
19# x86 drivers. 19# x86 drivers.
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
deleted file mode 100644
index 0d2172b07765..000000000000
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ /dev/null
@@ -1,248 +0,0 @@
1/*
2 * Copyright (C) 2012 Freescale Semiconductor, Inc.
3 *
4 * The OPP code in function cpu0_set_target() is reused from
5 * drivers/cpufreq/omap-cpufreq.c
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/clk.h>
15#include <linux/cpu.h>
16#include <linux/cpu_cooling.h>
17#include <linux/cpufreq.h>
18#include <linux/cpumask.h>
19#include <linux/err.h>
20#include <linux/module.h>
21#include <linux/of.h>
22#include <linux/pm_opp.h>
23#include <linux/platform_device.h>
24#include <linux/regulator/consumer.h>
25#include <linux/slab.h>
26#include <linux/thermal.h>
27
28static unsigned int transition_latency;
29static unsigned int voltage_tolerance; /* in percentage */
30
31static struct device *cpu_dev;
32static struct clk *cpu_clk;
33static struct regulator *cpu_reg;
34static struct cpufreq_frequency_table *freq_table;
35static struct thermal_cooling_device *cdev;
36
37static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
38{
39 struct dev_pm_opp *opp;
40 unsigned long volt = 0, volt_old = 0, tol = 0;
41 unsigned int old_freq, new_freq;
42 long freq_Hz, freq_exact;
43 int ret;
44
45 freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
46 if (freq_Hz <= 0)
47 freq_Hz = freq_table[index].frequency * 1000;
48
49 freq_exact = freq_Hz;
50 new_freq = freq_Hz / 1000;
51 old_freq = clk_get_rate(cpu_clk) / 1000;
52
53 if (!IS_ERR(cpu_reg)) {
54 rcu_read_lock();
55 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
56 if (IS_ERR(opp)) {
57 rcu_read_unlock();
58 pr_err("failed to find OPP for %ld\n", freq_Hz);
59 return PTR_ERR(opp);
60 }
61 volt = dev_pm_opp_get_voltage(opp);
62 rcu_read_unlock();
63 tol = volt * voltage_tolerance / 100;
64 volt_old = regulator_get_voltage(cpu_reg);
65 }
66
67 pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
68 old_freq / 1000, volt_old ? volt_old / 1000 : -1,
69 new_freq / 1000, volt ? volt / 1000 : -1);
70
71 /* scaling up? scale voltage before frequency */
72 if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
73 ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
74 if (ret) {
75 pr_err("failed to scale voltage up: %d\n", ret);
76 return ret;
77 }
78 }
79
80 ret = clk_set_rate(cpu_clk, freq_exact);
81 if (ret) {
82 pr_err("failed to set clock rate: %d\n", ret);
83 if (!IS_ERR(cpu_reg))
84 regulator_set_voltage_tol(cpu_reg, volt_old, tol);
85 return ret;
86 }
87
88 /* scaling down? scale voltage after frequency */
89 if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
90 ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
91 if (ret) {
92 pr_err("failed to scale voltage down: %d\n", ret);
93 clk_set_rate(cpu_clk, old_freq * 1000);
94 }
95 }
96
97 return ret;
98}
99
100static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
101{
102 policy->clk = cpu_clk;
103 return cpufreq_generic_init(policy, freq_table, transition_latency);
104}
105
106static struct cpufreq_driver cpu0_cpufreq_driver = {
107 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
108 .verify = cpufreq_generic_frequency_table_verify,
109 .target_index = cpu0_set_target,
110 .get = cpufreq_generic_get,
111 .init = cpu0_cpufreq_init,
112 .name = "generic_cpu0",
113 .attr = cpufreq_generic_attr,
114};
115
116static int cpu0_cpufreq_probe(struct platform_device *pdev)
117{
118 struct device_node *np;
119 int ret;
120
121 cpu_dev = get_cpu_device(0);
122 if (!cpu_dev) {
123 pr_err("failed to get cpu0 device\n");
124 return -ENODEV;
125 }
126
127 np = of_node_get(cpu_dev->of_node);
128 if (!np) {
129 pr_err("failed to find cpu0 node\n");
130 return -ENOENT;
131 }
132
133 cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
134 if (IS_ERR(cpu_reg)) {
135 /*
136 * If cpu0 regulator supply node is present, but regulator is
137 * not yet registered, we should try defering probe.
138 */
139 if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
140 dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
141 ret = -EPROBE_DEFER;
142 goto out_put_node;
143 }
144 pr_warn("failed to get cpu0 regulator: %ld\n",
145 PTR_ERR(cpu_reg));
146 }
147
148 cpu_clk = clk_get(cpu_dev, NULL);
149 if (IS_ERR(cpu_clk)) {
150 ret = PTR_ERR(cpu_clk);
151 pr_err("failed to get cpu0 clock: %d\n", ret);
152 goto out_put_reg;
153 }
154
155 /* OPPs might be populated at runtime, don't check for error here */
156 of_init_opp_table(cpu_dev);
157
158 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
159 if (ret) {
160 pr_err("failed to init cpufreq table: %d\n", ret);
161 goto out_put_clk;
162 }
163
164 of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
165
166 if (of_property_read_u32(np, "clock-latency", &transition_latency))
167 transition_latency = CPUFREQ_ETERNAL;
168
169 if (!IS_ERR(cpu_reg)) {
170 struct dev_pm_opp *opp;
171 unsigned long min_uV, max_uV;
172 int i;
173
174 /*
175 * OPP is maintained in order of increasing frequency, and
176 * freq_table initialised from OPP is therefore sorted in the
177 * same order.
178 */
179 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
180 ;
181 rcu_read_lock();
182 opp = dev_pm_opp_find_freq_exact(cpu_dev,
183 freq_table[0].frequency * 1000, true);
184 min_uV = dev_pm_opp_get_voltage(opp);
185 opp = dev_pm_opp_find_freq_exact(cpu_dev,
186 freq_table[i-1].frequency * 1000, true);
187 max_uV = dev_pm_opp_get_voltage(opp);
188 rcu_read_unlock();
189 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
190 if (ret > 0)
191 transition_latency += ret * 1000;
192 }
193
194 ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
195 if (ret) {
196 pr_err("failed register driver: %d\n", ret);
197 goto out_free_table;
198 }
199
200 /*
201 * For now, just loading the cooling device;
202 * thermal DT code takes care of matching them.
203 */
204 if (of_find_property(np, "#cooling-cells", NULL)) {
205 cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
206 if (IS_ERR(cdev))
207 pr_err("running cpufreq without cooling device: %ld\n",
208 PTR_ERR(cdev));
209 }
210
211 of_node_put(np);
212 return 0;
213
214out_free_table:
215 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
216out_put_clk:
217 if (!IS_ERR(cpu_clk))
218 clk_put(cpu_clk);
219out_put_reg:
220 if (!IS_ERR(cpu_reg))
221 regulator_put(cpu_reg);
222out_put_node:
223 of_node_put(np);
224 return ret;
225}
226
227static int cpu0_cpufreq_remove(struct platform_device *pdev)
228{
229 cpufreq_cooling_unregister(cdev);
230 cpufreq_unregister_driver(&cpu0_cpufreq_driver);
231 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
232
233 return 0;
234}
235
236static struct platform_driver cpu0_cpufreq_platdrv = {
237 .driver = {
238 .name = "cpufreq-cpu0",
239 .owner = THIS_MODULE,
240 },
241 .probe = cpu0_cpufreq_probe,
242 .remove = cpu0_cpufreq_remove,
243};
244module_platform_driver(cpu0_cpufreq_platdrv);
245
246MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
247MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
248MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
new file mode 100644
index 000000000000..6bbb8b913446
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -0,0 +1,364 @@
1/*
2 * Copyright (C) 2012 Freescale Semiconductor, Inc.
3 *
4 * Copyright (C) 2014 Linaro.
5 * Viresh Kumar <viresh.kumar@linaro.org>
6 *
7 * The OPP code in function set_target() is reused from
8 * drivers/cpufreq/omap-cpufreq.c
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/clk.h>
18#include <linux/cpu.h>
19#include <linux/cpu_cooling.h>
20#include <linux/cpufreq.h>
21#include <linux/cpumask.h>
22#include <linux/err.h>
23#include <linux/module.h>
24#include <linux/of.h>
25#include <linux/pm_opp.h>
26#include <linux/platform_device.h>
27#include <linux/regulator/consumer.h>
28#include <linux/slab.h>
29#include <linux/thermal.h>
30
31struct private_data {
32 struct device *cpu_dev;
33 struct regulator *cpu_reg;
34 struct thermal_cooling_device *cdev;
35 unsigned int voltage_tolerance; /* in percentage */
36};
37
38static int set_target(struct cpufreq_policy *policy, unsigned int index)
39{
40 struct dev_pm_opp *opp;
41 struct cpufreq_frequency_table *freq_table = policy->freq_table;
42 struct clk *cpu_clk = policy->clk;
43 struct private_data *priv = policy->driver_data;
44 struct device *cpu_dev = priv->cpu_dev;
45 struct regulator *cpu_reg = priv->cpu_reg;
46 unsigned long volt = 0, volt_old = 0, tol = 0;
47 unsigned int old_freq, new_freq;
48 long freq_Hz, freq_exact;
49 int ret;
50
51 freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
52 if (freq_Hz <= 0)
53 freq_Hz = freq_table[index].frequency * 1000;
54
55 freq_exact = freq_Hz;
56 new_freq = freq_Hz / 1000;
57 old_freq = clk_get_rate(cpu_clk) / 1000;
58
59 if (!IS_ERR(cpu_reg)) {
60 rcu_read_lock();
61 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
62 if (IS_ERR(opp)) {
63 rcu_read_unlock();
64 dev_err(cpu_dev, "failed to find OPP for %ld\n",
65 freq_Hz);
66 return PTR_ERR(opp);
67 }
68 volt = dev_pm_opp_get_voltage(opp);
69 rcu_read_unlock();
70 tol = volt * priv->voltage_tolerance / 100;
71 volt_old = regulator_get_voltage(cpu_reg);
72 }
73
74 dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
75 old_freq / 1000, volt_old ? volt_old / 1000 : -1,
76 new_freq / 1000, volt ? volt / 1000 : -1);
77
78 /* scaling up? scale voltage before frequency */
79 if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
80 ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
81 if (ret) {
82 dev_err(cpu_dev, "failed to scale voltage up: %d\n",
83 ret);
84 return ret;
85 }
86 }
87
88 ret = clk_set_rate(cpu_clk, freq_exact);
89 if (ret) {
90 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
91 if (!IS_ERR(cpu_reg))
92 regulator_set_voltage_tol(cpu_reg, volt_old, tol);
93 return ret;
94 }
95
96 /* scaling down? scale voltage after frequency */
97 if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
98 ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
99 if (ret) {
100 dev_err(cpu_dev, "failed to scale voltage down: %d\n",
101 ret);
102 clk_set_rate(cpu_clk, old_freq * 1000);
103 }
104 }
105
106 return ret;
107}
108
109static int allocate_resources(int cpu, struct device **cdev,
110 struct regulator **creg, struct clk **cclk)
111{
112 struct device *cpu_dev;
113 struct regulator *cpu_reg;
114 struct clk *cpu_clk;
115 int ret = 0;
116 char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
117
118 cpu_dev = get_cpu_device(cpu);
119 if (!cpu_dev) {
120 pr_err("failed to get cpu%d device\n", cpu);
121 return -ENODEV;
122 }
123
124 /* Try "cpu0" for older DTs */
125 if (!cpu)
126 reg = reg_cpu0;
127 else
128 reg = reg_cpu;
129
130try_again:
131 cpu_reg = regulator_get_optional(cpu_dev, reg);
132 if (IS_ERR(cpu_reg)) {
133 /*
134 * If cpu's regulator supply node is present, but regulator is
135 * not yet registered, we should try defering probe.
136 */
137 if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
138 dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
139 cpu);
140 return -EPROBE_DEFER;
141 }
142
143 /* Try with "cpu-supply" */
144 if (reg == reg_cpu0) {
145 reg = reg_cpu;
146 goto try_again;
147 }
148
149 dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n",
150 cpu, PTR_ERR(cpu_reg));
151 }
152
153 cpu_clk = clk_get(cpu_dev, NULL);
154 if (IS_ERR(cpu_clk)) {
155 /* put regulator */
156 if (!IS_ERR(cpu_reg))
157 regulator_put(cpu_reg);
158
159 ret = PTR_ERR(cpu_clk);
160
161 /*
162 * If cpu's clk node is present, but clock is not yet
163 * registered, we should try defering probe.
164 */
165 if (ret == -EPROBE_DEFER)
166 dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
167 else
168 dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret,
169 cpu);
170 } else {
171 *cdev = cpu_dev;
172 *creg = cpu_reg;
173 *cclk = cpu_clk;
174 }
175
176 return ret;
177}
178
179static int cpufreq_init(struct cpufreq_policy *policy)
180{
181 struct cpufreq_frequency_table *freq_table;
182 struct thermal_cooling_device *cdev;
183 struct device_node *np;
184 struct private_data *priv;
185 struct device *cpu_dev;
186 struct regulator *cpu_reg;
187 struct clk *cpu_clk;
188 unsigned int transition_latency;
189 int ret;
190
191 ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
192 if (ret) {
193 pr_err("%s: Failed to allocate resources\n: %d", __func__, ret);
194 return ret;
195 }
196
197 np = of_node_get(cpu_dev->of_node);
198 if (!np) {
199 dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
200 ret = -ENOENT;
201 goto out_put_reg_clk;
202 }
203
204 /* OPPs might be populated at runtime, don't check for error here */
205 of_init_opp_table(cpu_dev);
206
207 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
208 if (ret) {
209 dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
210 goto out_put_node;
211 }
212
213 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
214 if (!priv) {
215 ret = -ENOMEM;
216 goto out_free_table;
217 }
218
219 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
220
221 if (of_property_read_u32(np, "clock-latency", &transition_latency))
222 transition_latency = CPUFREQ_ETERNAL;
223
224 if (!IS_ERR(cpu_reg)) {
225 struct dev_pm_opp *opp;
226 unsigned long min_uV, max_uV;
227 int i;
228
229 /*
230 * OPP is maintained in order of increasing frequency, and
231 * freq_table initialised from OPP is therefore sorted in the
232 * same order.
233 */
234 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
235 ;
236 rcu_read_lock();
237 opp = dev_pm_opp_find_freq_exact(cpu_dev,
238 freq_table[0].frequency * 1000, true);
239 min_uV = dev_pm_opp_get_voltage(opp);
240 opp = dev_pm_opp_find_freq_exact(cpu_dev,
241 freq_table[i-1].frequency * 1000, true);
242 max_uV = dev_pm_opp_get_voltage(opp);
243 rcu_read_unlock();
244 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
245 if (ret > 0)
246 transition_latency += ret * 1000;
247 }
248
249 /*
250 * For now, just loading the cooling device;
251 * thermal DT code takes care of matching them.
252 */
253 if (of_find_property(np, "#cooling-cells", NULL)) {
254 cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
255 if (IS_ERR(cdev))
256 dev_err(cpu_dev,
257 "running cpufreq without cooling device: %ld\n",
258 PTR_ERR(cdev));
259 else
260 priv->cdev = cdev;
261 }
262
263 priv->cpu_dev = cpu_dev;
264 priv->cpu_reg = cpu_reg;
265 policy->driver_data = priv;
266
267 policy->clk = cpu_clk;
268 ret = cpufreq_generic_init(policy, freq_table, transition_latency);
269 if (ret)
270 goto out_cooling_unregister;
271
272 of_node_put(np);
273
274 return 0;
275
276out_cooling_unregister:
277 cpufreq_cooling_unregister(priv->cdev);
278 kfree(priv);
279out_free_table:
280 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
281out_put_node:
282 of_node_put(np);
283out_put_reg_clk:
284 clk_put(cpu_clk);
285 if (!IS_ERR(cpu_reg))
286 regulator_put(cpu_reg);
287
288 return ret;
289}
290
291static int cpufreq_exit(struct cpufreq_policy *policy)
292{
293 struct private_data *priv = policy->driver_data;
294
295 cpufreq_cooling_unregister(priv->cdev);
296 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
297 clk_put(policy->clk);
298 if (!IS_ERR(priv->cpu_reg))
299 regulator_put(priv->cpu_reg);
300 kfree(priv);
301
302 return 0;
303}
304
305static struct cpufreq_driver dt_cpufreq_driver = {
306 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
307 .verify = cpufreq_generic_frequency_table_verify,
308 .target_index = set_target,
309 .get = cpufreq_generic_get,
310 .init = cpufreq_init,
311 .exit = cpufreq_exit,
312 .name = "cpufreq-dt",
313 .attr = cpufreq_generic_attr,
314};
315
316static int dt_cpufreq_probe(struct platform_device *pdev)
317{
318 struct device *cpu_dev;
319 struct regulator *cpu_reg;
320 struct clk *cpu_clk;
321 int ret;
322
323 /*
324 * All per-cluster (CPUs sharing clock/voltages) initialization is done
325 * from ->init(). In probe(), we just need to make sure that clk and
326 * regulators are available. Else defer probe and retry.
327 *
328 * FIXME: Is checking this only for CPU0 sufficient ?
329 */
330 ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
331 if (ret)
332 return ret;
333
334 clk_put(cpu_clk);
335 if (!IS_ERR(cpu_reg))
336 regulator_put(cpu_reg);
337
338 ret = cpufreq_register_driver(&dt_cpufreq_driver);
339 if (ret)
340 dev_err(cpu_dev, "failed register driver: %d\n", ret);
341
342 return ret;
343}
344
345static int dt_cpufreq_remove(struct platform_device *pdev)
346{
347 cpufreq_unregister_driver(&dt_cpufreq_driver);
348 return 0;
349}
350
351static struct platform_driver dt_cpufreq_platdrv = {
352 .driver = {
353 .name = "cpufreq-dt",
354 .owner = THIS_MODULE,
355 },
356 .probe = dt_cpufreq_probe,
357 .remove = dt_cpufreq_remove,
358};
359module_platform_driver(dt_cpufreq_platdrv);
360
361MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
362MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
363MODULE_DESCRIPTION("Generic cpufreq driver");
364MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 61190f6b4829..24bf76fba141 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -437,7 +437,7 @@ static struct cpufreq_governor *__find_governor(const char *str_governor)
437 struct cpufreq_governor *t; 437 struct cpufreq_governor *t;
438 438
439 list_for_each_entry(t, &cpufreq_governor_list, governor_list) 439 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
440 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN)) 440 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
441 return t; 441 return t;
442 442
443 return NULL; 443 return NULL;
@@ -455,10 +455,10 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
455 goto out; 455 goto out;
456 456
457 if (cpufreq_driver->setpolicy) { 457 if (cpufreq_driver->setpolicy) {
458 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 458 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
459 *policy = CPUFREQ_POLICY_PERFORMANCE; 459 *policy = CPUFREQ_POLICY_PERFORMANCE;
460 err = 0; 460 err = 0;
461 } else if (!strnicmp(str_governor, "powersave", 461 } else if (!strncasecmp(str_governor, "powersave",
462 CPUFREQ_NAME_LEN)) { 462 CPUFREQ_NAME_LEN)) {
463 *policy = CPUFREQ_POLICY_POWERSAVE; 463 *policy = CPUFREQ_POLICY_POWERSAVE;
464 err = 0; 464 err = 0;
@@ -1382,7 +1382,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1382 if (!cpufreq_suspended) 1382 if (!cpufreq_suspended)
1383 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", 1383 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1384 __func__, new_cpu, cpu); 1384 __func__, new_cpu, cpu);
1385 } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) { 1385 } else if (cpufreq_driver->stop_cpu) {
1386 cpufreq_driver->stop_cpu(policy); 1386 cpufreq_driver->stop_cpu(policy);
1387 } 1387 }
1388 1388
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index 61a54310a1b9..843ec824fd91 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -127,7 +127,7 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
127 * dependencies on platform headers. It is necessary to enable 127 * dependencies on platform headers. It is necessary to enable
128 * Exynos multi-platform support and will be removed together with 128 * Exynos multi-platform support and will be removed together with
129 * this whole driver as soon as Exynos gets migrated to use 129 * this whole driver as soon as Exynos gets migrated to use
130 * cpufreq-cpu0 driver. 130 * cpufreq-dt driver.
131 */ 131 */
132 np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock"); 132 np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock");
133 if (!np) { 133 if (!np) {
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 351a2074cfea..9e78a850e29f 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -174,7 +174,7 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
174 * dependencies on platform headers. It is necessary to enable 174 * dependencies on platform headers. It is necessary to enable
175 * Exynos multi-platform support and will be removed together with 175 * Exynos multi-platform support and will be removed together with
176 * this whole driver as soon as Exynos gets migrated to use 176 * this whole driver as soon as Exynos gets migrated to use
177 * cpufreq-cpu0 driver. 177 * cpufreq-dt driver.
178 */ 178 */
179 np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock"); 179 np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
180 if (!np) { 180 if (!np) {
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index c91ce69dc631..3eafdc7ba787 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -153,7 +153,7 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
153 * dependencies on platform headers. It is necessary to enable 153 * dependencies on platform headers. It is necessary to enable
154 * Exynos multi-platform support and will be removed together with 154 * Exynos multi-platform support and will be removed together with
155 * this whole driver as soon as Exynos gets migrated to use 155 * this whole driver as soon as Exynos gets migrated to use
156 * cpufreq-cpu0 driver. 156 * cpufreq-dt driver.
157 */ 157 */
158 np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock"); 158 np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock");
159 if (!np) { 159 if (!np) {
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index bf8902a0866d..ec399ad2f059 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -6,7 +6,7 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * This driver provides the clk notifier callbacks that are used when 8 * This driver provides the clk notifier callbacks that are used when
9 * the cpufreq-cpu0 driver changes to frequency to alert the highbank 9 * the cpufreq-dt driver changes to frequency to alert the highbank
10 * EnergyCore Management Engine (ECME) about the need to change 10 * EnergyCore Management Engine (ECME) about the need to change
11 * voltage. The ECME interfaces with the actual voltage regulators. 11 * voltage. The ECME interfaces with the actual voltage regulators.
12 */ 12 */
@@ -60,7 +60,7 @@ static struct notifier_block hb_cpufreq_clk_nb = {
60 60
61static int hb_cpufreq_driver_init(void) 61static int hb_cpufreq_driver_init(void)
62{ 62{
63 struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; 63 struct platform_device_info devinfo = { .name = "cpufreq-dt", };
64 struct device *cpu_dev; 64 struct device *cpu_dev;
65 struct clk *cpu_clk; 65 struct clk *cpu_clk;
66 struct device_node *np; 66 struct device_node *np;
@@ -95,7 +95,7 @@ static int hb_cpufreq_driver_init(void)
95 goto out_put_node; 95 goto out_put_node;
96 } 96 }
97 97
98 /* Instantiate cpufreq-cpu0 */ 98 /* Instantiate cpufreq-dt */
99 platform_device_register_full(&devinfo); 99 platform_device_register_full(&devinfo);
100 100
101out_put_node: 101out_put_node:
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 379c0837f5a9..2dfd4fdb5a52 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -26,6 +26,7 @@
26#include <linux/cpufreq.h> 26#include <linux/cpufreq.h>
27#include <linux/smp.h> 27#include <linux/smp.h>
28#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/reboot.h>
29 30
30#include <asm/cputhreads.h> 31#include <asm/cputhreads.h>
31#include <asm/firmware.h> 32#include <asm/firmware.h>
@@ -35,6 +36,7 @@
35#define POWERNV_MAX_PSTATES 256 36#define POWERNV_MAX_PSTATES 256
36 37
37static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; 38static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
39static bool rebooting;
38 40
39/* 41/*
40 * Note: The set of pstates consists of contiguous integers, the 42 * Note: The set of pstates consists of contiguous integers, the
@@ -284,6 +286,15 @@ static void set_pstate(void *freq_data)
284} 286}
285 287
286/* 288/*
289 * get_nominal_index: Returns the index corresponding to the nominal
290 * pstate in the cpufreq table
291 */
292static inline unsigned int get_nominal_index(void)
293{
294 return powernv_pstate_info.max - powernv_pstate_info.nominal;
295}
296
297/*
287 * powernv_cpufreq_target_index: Sets the frequency corresponding to 298 * powernv_cpufreq_target_index: Sets the frequency corresponding to
288 * the cpufreq table entry indexed by new_index on the cpus in the 299 * the cpufreq table entry indexed by new_index on the cpus in the
289 * mask policy->cpus 300 * mask policy->cpus
@@ -293,6 +304,9 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
293{ 304{
294 struct powernv_smp_call_data freq_data; 305 struct powernv_smp_call_data freq_data;
295 306
307 if (unlikely(rebooting) && new_index != get_nominal_index())
308 return 0;
309
296 freq_data.pstate_id = powernv_freqs[new_index].driver_data; 310 freq_data.pstate_id = powernv_freqs[new_index].driver_data;
297 311
298 /* 312 /*
@@ -317,6 +331,33 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
317 return cpufreq_table_validate_and_show(policy, powernv_freqs); 331 return cpufreq_table_validate_and_show(policy, powernv_freqs);
318} 332}
319 333
334static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
335 unsigned long action, void *unused)
336{
337 int cpu;
338 struct cpufreq_policy cpu_policy;
339
340 rebooting = true;
341 for_each_online_cpu(cpu) {
342 cpufreq_get_policy(&cpu_policy, cpu);
343 powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
344 }
345
346 return NOTIFY_DONE;
347}
348
349static struct notifier_block powernv_cpufreq_reboot_nb = {
350 .notifier_call = powernv_cpufreq_reboot_notifier,
351};
352
353static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
354{
355 struct powernv_smp_call_data freq_data;
356
357 freq_data.pstate_id = powernv_pstate_info.min;
358 smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
359}
360
320static struct cpufreq_driver powernv_cpufreq_driver = { 361static struct cpufreq_driver powernv_cpufreq_driver = {
321 .name = "powernv-cpufreq", 362 .name = "powernv-cpufreq",
322 .flags = CPUFREQ_CONST_LOOPS, 363 .flags = CPUFREQ_CONST_LOOPS,
@@ -324,6 +365,7 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
324 .verify = cpufreq_generic_frequency_table_verify, 365 .verify = cpufreq_generic_frequency_table_verify,
325 .target_index = powernv_cpufreq_target_index, 366 .target_index = powernv_cpufreq_target_index,
326 .get = powernv_cpufreq_get, 367 .get = powernv_cpufreq_get,
368 .stop_cpu = powernv_cpufreq_stop_cpu,
327 .attr = powernv_cpu_freq_attr, 369 .attr = powernv_cpu_freq_attr,
328}; 370};
329 371
@@ -342,12 +384,14 @@ static int __init powernv_cpufreq_init(void)
342 return rc; 384 return rc;
343 } 385 }
344 386
387 register_reboot_notifier(&powernv_cpufreq_reboot_nb);
345 return cpufreq_register_driver(&powernv_cpufreq_driver); 388 return cpufreq_register_driver(&powernv_cpufreq_driver);
346} 389}
347module_init(powernv_cpufreq_init); 390module_init(powernv_cpufreq_init);
348 391
349static void __exit powernv_cpufreq_exit(void) 392static void __exit powernv_cpufreq_exit(void)
350{ 393{
394 unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
351 cpufreq_unregister_driver(&powernv_cpufreq_driver); 395 cpufreq_unregister_driver(&powernv_cpufreq_driver);
352} 396}
353module_exit(powernv_cpufreq_exit); 397module_exit(powernv_cpufreq_exit);
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 3607070797af..bee5df7794d3 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -199,7 +199,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
199 } 199 }
200 200
201 data->table = table; 201 data->table = table;
202 per_cpu(cpu_data, cpu) = data;
203 202
204 /* update ->cpus if we have cluster, no harm if not */ 203 /* update ->cpus if we have cluster, no harm if not */
205 cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu)); 204 cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu));
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 3f9791f07b8e..567caa6313ff 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -597,7 +597,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
597 * and dependencies on platform headers. It is necessary to enable 597 * and dependencies on platform headers. It is necessary to enable
598 * S5PV210 multi-platform support and will be removed together with 598 * S5PV210 multi-platform support and will be removed together with
599 * this whole driver as soon as S5PV210 gets migrated to use 599 * this whole driver as soon as S5PV210 gets migrated to use
600 * cpufreq-cpu0 driver. 600 * cpufreq-dt driver.
601 */ 601 */
602 np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock"); 602 np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
603 if (!np) { 603 if (!np) {
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 32748c36c477..c5029c1209b4 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -25,11 +25,19 @@ config CPU_IDLE_GOV_MENU
25 bool "Menu governor (for tickless system)" 25 bool "Menu governor (for tickless system)"
26 default y 26 default y
27 27
28config DT_IDLE_STATES
29 bool
30
28menu "ARM CPU Idle Drivers" 31menu "ARM CPU Idle Drivers"
29depends on ARM 32depends on ARM
30source "drivers/cpuidle/Kconfig.arm" 33source "drivers/cpuidle/Kconfig.arm"
31endmenu 34endmenu
32 35
36menu "ARM64 CPU Idle Drivers"
37depends on ARM64
38source "drivers/cpuidle/Kconfig.arm64"
39endmenu
40
33menu "MIPS CPU Idle Drivers" 41menu "MIPS CPU Idle Drivers"
34depends on MIPS 42depends on MIPS
35source "drivers/cpuidle/Kconfig.mips" 43source "drivers/cpuidle/Kconfig.mips"
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 58bcd0d166ec..8c16ab20fb15 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -7,6 +7,7 @@ config ARM_BIG_LITTLE_CPUIDLE
7 depends on MCPM 7 depends on MCPM
8 select ARM_CPU_SUSPEND 8 select ARM_CPU_SUSPEND
9 select CPU_IDLE_MULTIPLE_DRIVERS 9 select CPU_IDLE_MULTIPLE_DRIVERS
10 select DT_IDLE_STATES
10 help 11 help
11 Select this option to enable CPU idle driver for big.LITTLE based 12 Select this option to enable CPU idle driver for big.LITTLE based
12 ARM systems. Driver manages CPUs coordination through MCPM and 13 ARM systems. Driver manages CPUs coordination through MCPM and
diff --git a/drivers/cpuidle/Kconfig.arm64 b/drivers/cpuidle/Kconfig.arm64
new file mode 100644
index 000000000000..d0a08ed1b2ee
--- /dev/null
+++ b/drivers/cpuidle/Kconfig.arm64
@@ -0,0 +1,14 @@
1#
2# ARM64 CPU Idle drivers
3#
4
5config ARM64_CPUIDLE
6 bool "Generic ARM64 CPU idle Driver"
7 select ARM64_CPU_SUSPEND
8 select DT_IDLE_STATES
9 help
10 Select this to enable generic cpuidle driver for ARM64.
11 It provides a generic idle driver whose idle states are configured
12 at run-time through DT nodes. The CPUidle suspend backend is
13 initialized by calling the CPU operations init idle hook
14 provided by architecture code.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 11edb31c55e9..4d177b916f75 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ 5obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
6obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o 6obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
7obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
7 8
8################################################################################## 9##################################################################################
9# ARM SoC drivers 10# ARM SoC drivers
@@ -22,6 +23,10 @@ obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
22obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o 23obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o
23 24
24############################################################################### 25###############################################################################
26# ARM64 drivers
27obj-$(CONFIG_ARM64_CPUIDLE) += cpuidle-arm64.o
28
29###############################################################################
25# POWERPC drivers 30# POWERPC drivers
26obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o 31obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
27obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o 32obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
diff --git a/drivers/cpuidle/cpuidle-arm64.c b/drivers/cpuidle/cpuidle-arm64.c
new file mode 100644
index 000000000000..50997ea942fc
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-arm64.c
@@ -0,0 +1,133 @@
1/*
2 * ARM64 generic CPU idle driver.
3 *
4 * Copyright (C) 2014 ARM Ltd.
5 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#define pr_fmt(fmt) "CPUidle arm64: " fmt
13
14#include <linux/cpuidle.h>
15#include <linux/cpumask.h>
16#include <linux/cpu_pm.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/of.h>
20
21#include <asm/cpuidle.h>
22#include <asm/suspend.h>
23
24#include "dt_idle_states.h"
25
26/*
27 * arm64_enter_idle_state - Programs CPU to enter the specified state
28 *
29 * dev: cpuidle device
30 * drv: cpuidle driver
31 * idx: state index
32 *
33 * Called from the CPUidle framework to program the device to the
34 * specified target state selected by the governor.
35 */
36static int arm64_enter_idle_state(struct cpuidle_device *dev,
37 struct cpuidle_driver *drv, int idx)
38{
39 int ret;
40
41 if (!idx) {
42 cpu_do_idle();
43 return idx;
44 }
45
46 ret = cpu_pm_enter();
47 if (!ret) {
48 /*
49 * Pass idle state index to cpu_suspend which in turn will
50 * call the CPU ops suspend protocol with idle index as a
51 * parameter.
52 */
53 ret = cpu_suspend(idx);
54
55 cpu_pm_exit();
56 }
57
58 return ret ? -1 : idx;
59}
60
61static struct cpuidle_driver arm64_idle_driver = {
62 .name = "arm64_idle",
63 .owner = THIS_MODULE,
64 /*
65 * State at index 0 is standby wfi and considered standard
66 * on all ARM platforms. If in some platforms simple wfi
67 * can't be used as "state 0", DT bindings must be implemented
68 * to work around this issue and allow installing a special
69 * handler for idle state index 0.
70 */
71 .states[0] = {
72 .enter = arm64_enter_idle_state,
73 .exit_latency = 1,
74 .target_residency = 1,
75 .power_usage = UINT_MAX,
76 .flags = CPUIDLE_FLAG_TIME_VALID,
77 .name = "WFI",
78 .desc = "ARM64 WFI",
79 }
80};
81
82static const struct of_device_id arm64_idle_state_match[] __initconst = {
83 { .compatible = "arm,idle-state",
84 .data = arm64_enter_idle_state },
85 { },
86};
87
88/*
89 * arm64_idle_init
90 *
91 * Registers the arm64 specific cpuidle driver with the cpuidle
92 * framework. It relies on core code to parse the idle states
93 * and initialize them using driver data structures accordingly.
94 */
95static int __init arm64_idle_init(void)
96{
97 int cpu, ret;
98 struct cpuidle_driver *drv = &arm64_idle_driver;
99
100 /*
101 * Initialize idle states data, starting at index 1.
102 * This driver is DT only, if no DT idle states are detected (ret == 0)
103 * let the driver initialization fail accordingly since there is no
104 * reason to initialize the idle driver if only wfi is supported.
105 */
106 ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1);
107 if (ret <= 0) {
108 if (ret)
109 pr_err("failed to initialize idle states\n");
110 return ret ? : -ENODEV;
111 }
112
113 /*
114 * Call arch CPU operations in order to initialize
115 * idle states suspend back-end specific data
116 */
117 for_each_possible_cpu(cpu) {
118 ret = cpu_init_idle(cpu);
119 if (ret) {
120 pr_err("CPU %d failed to init idle CPU ops\n", cpu);
121 return ret;
122 }
123 }
124
125 ret = cpuidle_register(drv, NULL);
126 if (ret) {
127 pr_err("failed to register cpuidle driver\n");
128 return ret;
129 }
130
131 return 0;
132}
133device_initcall(arm64_idle_init);
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index ef94c3b81f18..fbc00a1d3c48 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -24,6 +24,8 @@
24#include <asm/smp_plat.h> 24#include <asm/smp_plat.h>
25#include <asm/suspend.h> 25#include <asm/suspend.h>
26 26
27#include "dt_idle_states.h"
28
27static int bl_enter_powerdown(struct cpuidle_device *dev, 29static int bl_enter_powerdown(struct cpuidle_device *dev,
28 struct cpuidle_driver *drv, int idx); 30 struct cpuidle_driver *drv, int idx);
29 31
@@ -73,6 +75,12 @@ static struct cpuidle_driver bl_idle_little_driver = {
73 .state_count = 2, 75 .state_count = 2,
74}; 76};
75 77
78static const struct of_device_id bl_idle_state_match[] __initconst = {
79 { .compatible = "arm,idle-state",
80 .data = bl_enter_powerdown },
81 { },
82};
83
76static struct cpuidle_driver bl_idle_big_driver = { 84static struct cpuidle_driver bl_idle_big_driver = {
77 .name = "big_idle", 85 .name = "big_idle",
78 .owner = THIS_MODULE, 86 .owner = THIS_MODULE,
@@ -159,6 +167,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
159static const struct of_device_id compatible_machine_match[] = { 167static const struct of_device_id compatible_machine_match[] = {
160 { .compatible = "arm,vexpress,v2p-ca15_a7" }, 168 { .compatible = "arm,vexpress,v2p-ca15_a7" },
161 { .compatible = "samsung,exynos5420" }, 169 { .compatible = "samsung,exynos5420" },
170 { .compatible = "samsung,exynos5800" },
162 {}, 171 {},
163}; 172};
164 173
@@ -190,6 +199,17 @@ static int __init bl_idle_init(void)
190 if (ret) 199 if (ret)
191 goto out_uninit_little; 200 goto out_uninit_little;
192 201
202 /* Start at index 1, index 0 standard WFI */
203 ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1);
204 if (ret < 0)
205 goto out_uninit_big;
206
207 /* Start at index 1, index 0 standard WFI */
208 ret = dt_init_idle_driver(&bl_idle_little_driver,
209 bl_idle_state_match, 1);
210 if (ret < 0)
211 goto out_uninit_big;
212
193 ret = cpuidle_register(&bl_idle_little_driver, NULL); 213 ret = cpuidle_register(&bl_idle_little_driver, NULL);
194 if (ret) 214 if (ret)
195 goto out_uninit_big; 215 goto out_uninit_big;
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
new file mode 100644
index 000000000000..52f4d11bbf3f
--- /dev/null
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -0,0 +1,213 @@
1/*
2 * DT idle states parsing code.
3 *
4 * Copyright (C) 2014 ARM Ltd.
5 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#define pr_fmt(fmt) "DT idle-states: " fmt
13
14#include <linux/cpuidle.h>
15#include <linux/cpumask.h>
16#include <linux/errno.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/of_device.h>
21
22#include "dt_idle_states.h"
23
24static int init_state_node(struct cpuidle_state *idle_state,
25 const struct of_device_id *matches,
26 struct device_node *state_node)
27{
28 int err;
29 const struct of_device_id *match_id;
30
31 match_id = of_match_node(matches, state_node);
32 if (!match_id)
33 return -ENODEV;
34 /*
35 * CPUidle drivers are expected to initialize the const void *data
36 * pointer of the passed in struct of_device_id array to the idle
37 * state enter function.
38 */
39 idle_state->enter = match_id->data;
40
41 err = of_property_read_u32(state_node, "wakeup-latency-us",
42 &idle_state->exit_latency);
43 if (err) {
44 u32 entry_latency, exit_latency;
45
46 err = of_property_read_u32(state_node, "entry-latency-us",
47 &entry_latency);
48 if (err) {
49 pr_debug(" * %s missing entry-latency-us property\n",
50 state_node->full_name);
51 return -EINVAL;
52 }
53
54 err = of_property_read_u32(state_node, "exit-latency-us",
55 &exit_latency);
56 if (err) {
57 pr_debug(" * %s missing exit-latency-us property\n",
58 state_node->full_name);
59 return -EINVAL;
60 }
61 /*
62 * If wakeup-latency-us is missing, default to entry+exit
63 * latencies as defined in idle states bindings
64 */
65 idle_state->exit_latency = entry_latency + exit_latency;
66 }
67
68 err = of_property_read_u32(state_node, "min-residency-us",
69 &idle_state->target_residency);
70 if (err) {
71 pr_debug(" * %s missing min-residency-us property\n",
72 state_node->full_name);
73 return -EINVAL;
74 }
75
76 idle_state->flags = CPUIDLE_FLAG_TIME_VALID;
77 if (of_property_read_bool(state_node, "local-timer-stop"))
78 idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
79 /*
80 * TODO:
81 * replace with kstrdup and pointer assignment when name
82 * and desc become string pointers
83 */
84 strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1);
85 strncpy(idle_state->desc, state_node->name, CPUIDLE_DESC_LEN - 1);
86 return 0;
87}
88
89/*
90 * Check that the idle state is uniform across all CPUs in the CPUidle driver
91 * cpumask
92 */
93static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
94 const cpumask_t *cpumask)
95{
96 int cpu;
97 struct device_node *cpu_node, *curr_state_node;
98 bool valid = true;
99
100 /*
101 * Compare idle state phandles for index idx on all CPUs in the
102 * CPUidle driver cpumask. Start from next logical cpu following
103 * cpumask_first(cpumask) since that's the CPU state_node was
104 * retrieved from. If a mismatch is found bail out straight
105 * away since we certainly hit a firmware misconfiguration.
106 */
107 for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
108 cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
109 cpu_node = of_cpu_device_node_get(cpu);
110 curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
111 idx);
112 if (state_node != curr_state_node)
113 valid = false;
114
115 of_node_put(curr_state_node);
116 of_node_put(cpu_node);
117 if (!valid)
118 break;
119 }
120
121 return valid;
122}
123
124/**
125 * dt_init_idle_driver() - Parse the DT idle states and initialize the
126 * idle driver states array
127 * @drv: Pointer to CPU idle driver to be initialized
128 * @matches: Array of of_device_id match structures to search in for
129 * compatible idle state nodes. The data pointer for each valid
130 * struct of_device_id entry in the matches array must point to
131 * a function with the following signature, that corresponds to
132 * the CPUidle state enter function signature:
133 *
134 * int (*)(struct cpuidle_device *dev,
135 * struct cpuidle_driver *drv,
136 * int index);
137 *
138 * @start_idx: First idle state index to be initialized
139 *
140 * If DT idle states are detected and are valid the state count and states
141 * array entries in the cpuidle driver are initialized accordingly starting
142 * from index start_idx.
143 *
144 * Return: number of valid DT idle states parsed, <0 on failure
145 */
146int dt_init_idle_driver(struct cpuidle_driver *drv,
147 const struct of_device_id *matches,
148 unsigned int start_idx)
149{
150 struct cpuidle_state *idle_state;
151 struct device_node *state_node, *cpu_node;
152 int i, err = 0;
153 const cpumask_t *cpumask;
154 unsigned int state_idx = start_idx;
155
156 if (state_idx >= CPUIDLE_STATE_MAX)
157 return -EINVAL;
158 /*
159 * We get the idle states for the first logical cpu in the
160 * driver mask (or cpu_possible_mask if the driver cpumask is not set)
161 * and we check through idle_state_valid() if they are uniform
162 * across CPUs, otherwise we hit a firmware misconfiguration.
163 */
164 cpumask = drv->cpumask ? : cpu_possible_mask;
165 cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));
166
167 for (i = 0; ; i++) {
168 state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
169 if (!state_node)
170 break;
171
172 if (!idle_state_valid(state_node, i, cpumask)) {
173 pr_warn("%s idle state not valid, bailing out\n",
174 state_node->full_name);
175 err = -EINVAL;
176 break;
177 }
178
179 if (state_idx == CPUIDLE_STATE_MAX) {
180 pr_warn("State index reached static CPU idle driver states array size\n");
181 break;
182 }
183
184 idle_state = &drv->states[state_idx++];
185 err = init_state_node(idle_state, matches, state_node);
186 if (err) {
187 pr_err("Parsing idle state node %s failed with err %d\n",
188 state_node->full_name, err);
189 err = -EINVAL;
190 break;
191 }
192 of_node_put(state_node);
193 }
194
195 of_node_put(state_node);
196 of_node_put(cpu_node);
197 if (err)
198 return err;
199 /*
200 * Update the driver state count only if some valid DT idle states
201 * were detected
202 */
203 if (i)
204 drv->state_count = state_idx;
205
206 /*
207 * Return the number of present and valid DT idle states, which can
208 * also be 0 on platforms with missing DT idle states or legacy DT
209 * configuration predating the DT idle states bindings.
210 */
211 return i;
212}
213EXPORT_SYMBOL_GPL(dt_init_idle_driver);
diff --git a/drivers/cpuidle/dt_idle_states.h b/drivers/cpuidle/dt_idle_states.h
new file mode 100644
index 000000000000..4818134bc65b
--- /dev/null
+++ b/drivers/cpuidle/dt_idle_states.h
@@ -0,0 +1,7 @@
1#ifndef __DT_IDLE_STATES
2#define __DT_IDLE_STATES
3
4int dt_init_idle_driver(struct cpuidle_driver *drv,
5 const struct of_device_id *matches,
6 unsigned int start_idx);
7#endif
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index ca89412f5122..fb9f511cca23 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -28,7 +28,7 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
28 struct cpuidle_governor *gov; 28 struct cpuidle_governor *gov;
29 29
30 list_for_each_entry(gov, &cpuidle_governors, governor_list) 30 list_for_each_entry(gov, &cpuidle_governors, governor_list)
31 if (!strnicmp(str, gov->name, CPUIDLE_NAME_LEN)) 31 if (!strncasecmp(str, gov->name, CPUIDLE_NAME_LEN))
32 return gov; 32 return gov;
33 33
34 return NULL; 34 return NULL;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 3dced0a9eae3..faf4e70c42e0 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -78,9 +78,8 @@ config ARM_EXYNOS4_BUS_DEVFREQ
78 This does not yet operate with optimal voltages. 78 This does not yet operate with optimal voltages.
79 79
80config ARM_EXYNOS5_BUS_DEVFREQ 80config ARM_EXYNOS5_BUS_DEVFREQ
81 bool "ARM Exynos5250 Bus DEVFREQ Driver" 81 tristate "ARM Exynos5250 Bus DEVFREQ Driver"
82 depends on SOC_EXYNOS5250 82 depends on SOC_EXYNOS5250
83 select ARCH_HAS_OPP
84 select DEVFREQ_GOV_SIMPLE_ONDEMAND 83 select DEVFREQ_GOV_SIMPLE_ONDEMAND
85 select PM_OPP 84 select PM_OPP
86 help 85 help
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 9f90369dd6bd..30b538d8cc90 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -1119,6 +1119,7 @@ struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
1119 1119
1120 return opp; 1120 return opp;
1121} 1121}
1122EXPORT_SYMBOL(devfreq_recommended_opp);
1122 1123
1123/** 1124/**
1124 * devfreq_register_opp_notifier() - Helper function to get devfreq notified 1125 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
@@ -1142,6 +1143,7 @@ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1142 1143
1143 return ret; 1144 return ret;
1144} 1145}
1146EXPORT_SYMBOL(devfreq_register_opp_notifier);
1145 1147
1146/** 1148/**
1147 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq 1149 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
@@ -1168,6 +1170,7 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1168 1170
1169 return ret; 1171 return ret;
1170} 1172}
1173EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
1171 1174
1172static void devm_devfreq_opp_release(struct device *dev, void *res) 1175static void devm_devfreq_opp_release(struct device *dev, void *res)
1173{ 1176{
diff --git a/drivers/devfreq/exynos/exynos_ppmu.c b/drivers/devfreq/exynos/exynos_ppmu.c
index 75fcc5140ffb..97b75e513d29 100644
--- a/drivers/devfreq/exynos/exynos_ppmu.c
+++ b/drivers/devfreq/exynos/exynos_ppmu.c
@@ -73,6 +73,7 @@ void busfreq_mon_reset(struct busfreq_ppmu_data *ppmu_data)
73 exynos_ppmu_start(ppmu_base); 73 exynos_ppmu_start(ppmu_base);
74 } 74 }
75} 75}
76EXPORT_SYMBOL(busfreq_mon_reset);
76 77
77void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data) 78void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data)
78{ 79{
@@ -97,6 +98,7 @@ void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data)
97 98
98 busfreq_mon_reset(ppmu_data); 99 busfreq_mon_reset(ppmu_data);
99} 100}
101EXPORT_SYMBOL(exynos_read_ppmu);
100 102
101int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data) 103int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data)
102{ 104{
@@ -114,3 +116,4 @@ int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data)
114 116
115 return busy; 117 return busy;
116} 118}
119EXPORT_SYMBOL(exynos_get_busier_ppmu);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index ccfbbab82a15..2f90ac6a7f79 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -50,6 +50,7 @@
50#include <linux/irqflags.h> 50#include <linux/irqflags.h>
51#include <linux/rwsem.h> 51#include <linux/rwsem.h>
52#include <linux/pm_runtime.h> 52#include <linux/pm_runtime.h>
53#include <linux/pm_domain.h>
53#include <linux/acpi.h> 54#include <linux/acpi.h>
54#include <linux/jump_label.h> 55#include <linux/jump_label.h>
55#include <asm/uaccess.h> 56#include <asm/uaccess.h>
@@ -643,10 +644,13 @@ static int i2c_device_probe(struct device *dev)
643 if (status < 0) 644 if (status < 0)
644 return status; 645 return status;
645 646
646 acpi_dev_pm_attach(&client->dev, true); 647 status = dev_pm_domain_attach(&client->dev, true);
647 status = driver->probe(client, i2c_match_id(driver->id_table, client)); 648 if (status != -EPROBE_DEFER) {
648 if (status) 649 status = driver->probe(client, i2c_match_id(driver->id_table,
649 acpi_dev_pm_detach(&client->dev, true); 650 client));
651 if (status)
652 dev_pm_domain_detach(&client->dev, true);
653 }
650 654
651 return status; 655 return status;
652} 656}
@@ -666,7 +670,7 @@ static int i2c_device_remove(struct device *dev)
666 status = driver->remove(client); 670 status = driver->remove(client);
667 } 671 }
668 672
669 acpi_dev_pm_detach(&client->dev, true); 673 dev_pm_domain_detach(&client->dev, true);
670 return status; 674 return status;
671} 675}
672 676
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 4fa8fef9147f..65cf7a7e05ea 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -16,6 +16,7 @@
16#include <linux/export.h> 16#include <linux/export.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/pm_runtime.h> 18#include <linux/pm_runtime.h>
19#include <linux/pm_domain.h>
19#include <linux/acpi.h> 20#include <linux/acpi.h>
20 21
21#include <linux/mmc/card.h> 22#include <linux/mmc/card.h>
@@ -315,7 +316,7 @@ int sdio_add_func(struct sdio_func *func)
315 ret = device_add(&func->dev); 316 ret = device_add(&func->dev);
316 if (ret == 0) { 317 if (ret == 0) {
317 sdio_func_set_present(func); 318 sdio_func_set_present(func);
318 acpi_dev_pm_attach(&func->dev, false); 319 dev_pm_domain_attach(&func->dev, false);
319 } 320 }
320 321
321 return ret; 322 return ret;
@@ -332,7 +333,7 @@ void sdio_remove_func(struct sdio_func *func)
332 if (!sdio_func_present(func)) 333 if (!sdio_func_present(func))
333 return; 334 return;
334 335
335 acpi_dev_pm_detach(&func->dev, false); 336 dev_pm_domain_detach(&func->dev, false);
336 device_del(&func->dev); 337 device_del(&func->dev);
337 put_device(&func->dev); 338 put_device(&func->dev);
338} 339}
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 82e06a86cd77..a9f9c46e5022 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -41,11 +41,17 @@ static int __init pcie_pme_setup(char *str)
41} 41}
42__setup("pcie_pme=", pcie_pme_setup); 42__setup("pcie_pme=", pcie_pme_setup);
43 43
44enum pme_suspend_level {
45 PME_SUSPEND_NONE = 0,
46 PME_SUSPEND_WAKEUP,
47 PME_SUSPEND_NOIRQ,
48};
49
44struct pcie_pme_service_data { 50struct pcie_pme_service_data {
45 spinlock_t lock; 51 spinlock_t lock;
46 struct pcie_device *srv; 52 struct pcie_device *srv;
47 struct work_struct work; 53 struct work_struct work;
48 bool noirq; /* Don't enable the PME interrupt used by this service. */ 54 enum pme_suspend_level suspend_level;
49}; 55};
50 56
51/** 57/**
@@ -223,7 +229,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
223 spin_lock_irq(&data->lock); 229 spin_lock_irq(&data->lock);
224 230
225 for (;;) { 231 for (;;) {
226 if (data->noirq) 232 if (data->suspend_level != PME_SUSPEND_NONE)
227 break; 233 break;
228 234
229 pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); 235 pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
@@ -250,7 +256,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
250 spin_lock_irq(&data->lock); 256 spin_lock_irq(&data->lock);
251 } 257 }
252 258
253 if (!data->noirq) 259 if (data->suspend_level == PME_SUSPEND_NONE)
254 pcie_pme_interrupt_enable(port, true); 260 pcie_pme_interrupt_enable(port, true);
255 261
256 spin_unlock_irq(&data->lock); 262 spin_unlock_irq(&data->lock);
@@ -367,6 +373,21 @@ static int pcie_pme_probe(struct pcie_device *srv)
367 return ret; 373 return ret;
368} 374}
369 375
376static bool pcie_pme_check_wakeup(struct pci_bus *bus)
377{
378 struct pci_dev *dev;
379
380 if (!bus)
381 return false;
382
383 list_for_each_entry(dev, &bus->devices, bus_list)
384 if (device_may_wakeup(&dev->dev)
385 || pcie_pme_check_wakeup(dev->subordinate))
386 return true;
387
388 return false;
389}
390
370/** 391/**
371 * pcie_pme_suspend - Suspend PCIe PME service device. 392 * pcie_pme_suspend - Suspend PCIe PME service device.
372 * @srv: PCIe service device to suspend. 393 * @srv: PCIe service device to suspend.
@@ -375,11 +396,26 @@ static int pcie_pme_suspend(struct pcie_device *srv)
375{ 396{
376 struct pcie_pme_service_data *data = get_service_data(srv); 397 struct pcie_pme_service_data *data = get_service_data(srv);
377 struct pci_dev *port = srv->port; 398 struct pci_dev *port = srv->port;
399 bool wakeup;
378 400
401 if (device_may_wakeup(&port->dev)) {
402 wakeup = true;
403 } else {
404 down_read(&pci_bus_sem);
405 wakeup = pcie_pme_check_wakeup(port->subordinate);
406 up_read(&pci_bus_sem);
407 }
379 spin_lock_irq(&data->lock); 408 spin_lock_irq(&data->lock);
380 pcie_pme_interrupt_enable(port, false); 409 if (wakeup) {
381 pcie_clear_root_pme_status(port); 410 enable_irq_wake(srv->irq);
382 data->noirq = true; 411 data->suspend_level = PME_SUSPEND_WAKEUP;
412 } else {
413 struct pci_dev *port = srv->port;
414
415 pcie_pme_interrupt_enable(port, false);
416 pcie_clear_root_pme_status(port);
417 data->suspend_level = PME_SUSPEND_NOIRQ;
418 }
383 spin_unlock_irq(&data->lock); 419 spin_unlock_irq(&data->lock);
384 420
385 synchronize_irq(srv->irq); 421 synchronize_irq(srv->irq);
@@ -394,12 +430,17 @@ static int pcie_pme_suspend(struct pcie_device *srv)
394static int pcie_pme_resume(struct pcie_device *srv) 430static int pcie_pme_resume(struct pcie_device *srv)
395{ 431{
396 struct pcie_pme_service_data *data = get_service_data(srv); 432 struct pcie_pme_service_data *data = get_service_data(srv);
397 struct pci_dev *port = srv->port;
398 433
399 spin_lock_irq(&data->lock); 434 spin_lock_irq(&data->lock);
400 data->noirq = false; 435 if (data->suspend_level == PME_SUSPEND_NOIRQ) {
401 pcie_clear_root_pme_status(port); 436 struct pci_dev *port = srv->port;
402 pcie_pme_interrupt_enable(port, true); 437
438 pcie_clear_root_pme_status(port);
439 pcie_pme_interrupt_enable(port, true);
440 } else {
441 disable_irq_wake(srv->irq);
442 }
443 data->suspend_level = PME_SUSPEND_NONE;
403 spin_unlock_irq(&data->lock); 444 spin_unlock_irq(&data->lock);
404 445
405 return 0; 446 return 0;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 87aa28c4280f..2655d4a988f3 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1050,6 +1050,13 @@ static struct acpi_driver acpi_fujitsu_hotkey_driver = {
1050 }, 1050 },
1051}; 1051};
1052 1052
1053static const struct acpi_device_id fujitsu_ids[] __used = {
1054 {ACPI_FUJITSU_HID, 0},
1055 {ACPI_FUJITSU_HOTKEY_HID, 0},
1056 {"", 0}
1057};
1058MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
1059
1053static int __init fujitsu_init(void) 1060static int __init fujitsu_init(void)
1054{ 1061{
1055 int ret, result, max_brightness; 1062 int ret, result, max_brightness;
@@ -1208,12 +1215,3 @@ MODULE_LICENSE("GPL");
1208MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); 1215MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
1209MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*"); 1216MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
1210MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); 1217MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
1211
1212static struct pnp_device_id pnp_ids[] __used = {
1213 {.id = "FUJ02bf"},
1214 {.id = "FUJ02B1"},
1215 {.id = "FUJ02E3"},
1216 {.id = ""}
1217};
1218
1219MODULE_DEVICE_TABLE(pnp, pnp_ids);
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index 2a1008b61121..7f3d389bd601 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -10,3 +10,11 @@ menuconfig POWER_AVS
10 AVS is also called SmartReflex on OMAP devices. 10 AVS is also called SmartReflex on OMAP devices.
11 11
12 Say Y here to enable Adaptive Voltage Scaling class support. 12 Say Y here to enable Adaptive Voltage Scaling class support.
13
14config ROCKCHIP_IODOMAIN
15 tristate "Rockchip IO domain support"
16 depends on ARCH_ROCKCHIP && OF
17 help
18 Say y here to enable support io domains on Rockchip SoCs. It is
19 necessary for the io domain setting of the SoC to match the
20 voltage supplied by the regulators.
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
index 0843386a6c19..ba4c7bc69225 100644
--- a/drivers/power/avs/Makefile
+++ b/drivers/power/avs/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o 1obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
2obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
new file mode 100644
index 000000000000..3ae35d0590d2
--- /dev/null
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -0,0 +1,351 @@
1/*
2 * Rockchip IO Voltage Domain driver
3 *
4 * Copyright 2014 MundoReader S.L.
5 * Copyright 2014 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/err.h>
20#include <linux/mfd/syscon.h>
21#include <linux/of.h>
22#include <linux/platform_device.h>
23#include <linux/regmap.h>
24#include <linux/regulator/consumer.h>
25
26#define MAX_SUPPLIES 16
27
28/*
29 * The max voltage for 1.8V and 3.3V come from the Rockchip datasheet under
30 * "Recommended Operating Conditions" for "Digital GPIO". When the typical
31 * is 3.3V the max is 3.6V. When the typical is 1.8V the max is 1.98V.
32 *
33 * They are used like this:
34 * - If the voltage on a rail is above the "1.8" voltage (1.98V) we'll tell the
35 * SoC we're at 3.3.
36 * - If the voltage on a rail is above the "3.3" voltage (3.6V) we'll consider
37 * that to be an error.
38 */
39#define MAX_VOLTAGE_1_8 1980000
40#define MAX_VOLTAGE_3_3 3600000
41
42#define RK3288_SOC_CON2 0x24c
43#define RK3288_SOC_CON2_FLASH0 BIT(7)
44#define RK3288_SOC_FLASH_SUPPLY_NUM 2
45
46struct rockchip_iodomain;
47
48/**
49 * @supplies: voltage settings matching the register bits.
50 */
51struct rockchip_iodomain_soc_data {
52 int grf_offset;
53 const char *supply_names[MAX_SUPPLIES];
54 void (*init)(struct rockchip_iodomain *iod);
55};
56
57struct rockchip_iodomain_supply {
58 struct rockchip_iodomain *iod;
59 struct regulator *reg;
60 struct notifier_block nb;
61 int idx;
62};
63
64struct rockchip_iodomain {
65 struct device *dev;
66 struct regmap *grf;
67 struct rockchip_iodomain_soc_data *soc_data;
68 struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
69};
70
71static int rockchip_iodomain_write(struct rockchip_iodomain_supply *supply,
72 int uV)
73{
74 struct rockchip_iodomain *iod = supply->iod;
75 u32 val;
76 int ret;
77
78 /* set value bit */
79 val = (uV > MAX_VOLTAGE_1_8) ? 0 : 1;
80 val <<= supply->idx;
81
82 /* apply hiword-mask */
83 val |= (BIT(supply->idx) << 16);
84
85 ret = regmap_write(iod->grf, iod->soc_data->grf_offset, val);
86 if (ret)
87 dev_err(iod->dev, "Couldn't write to GRF\n");
88
89 return ret;
90}
91
92static int rockchip_iodomain_notify(struct notifier_block *nb,
93 unsigned long event,
94 void *data)
95{
96 struct rockchip_iodomain_supply *supply =
97 container_of(nb, struct rockchip_iodomain_supply, nb);
98 int uV;
99 int ret;
100
101 /*
102 * According to Rockchip it's important to keep the SoC IO domain
103 * higher than (or equal to) the external voltage. That means we need
104 * to change it before external voltage changes happen in the case
105 * of an increase.
106 *
107 * Note that in the "pre" change we pick the max possible voltage that
108 * the regulator might end up at (the client requests a range and we
109 * don't know for certain the exact voltage). Right now we rely on the
110 * slop in MAX_VOLTAGE_1_8 and MAX_VOLTAGE_3_3 to save us if clients
111 * request something like a max of 3.6V when they really want 3.3V.
112 * We could attempt to come up with better rules if this fails.
113 */
114 if (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) {
115 struct pre_voltage_change_data *pvc_data = data;
116
117 uV = max_t(unsigned long, pvc_data->old_uV, pvc_data->max_uV);
118 } else if (event & (REGULATOR_EVENT_VOLTAGE_CHANGE |
119 REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE)) {
120 uV = (unsigned long)data;
121 } else {
122 return NOTIFY_OK;
123 }
124
125 dev_dbg(supply->iod->dev, "Setting to %d\n", uV);
126
127 if (uV > MAX_VOLTAGE_3_3) {
128 dev_err(supply->iod->dev, "Voltage too high: %d\n", uV);
129
130 if (event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
131 return NOTIFY_BAD;
132 }
133
134 ret = rockchip_iodomain_write(supply, uV);
135 if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
136 return NOTIFY_BAD;
137
138 dev_info(supply->iod->dev, "Setting to %d done\n", uV);
139 return NOTIFY_OK;
140}
141
142static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
143{
144 int ret;
145 u32 val;
146
147 /* if no flash supply we should leave things alone */
148 if (!iod->supplies[RK3288_SOC_FLASH_SUPPLY_NUM].reg)
149 return;
150
151 /*
152 * set flash0 iodomain to also use this framework
153 * instead of a special gpio.
154 */
155 val = RK3288_SOC_CON2_FLASH0 | (RK3288_SOC_CON2_FLASH0 << 16);
156 ret = regmap_write(iod->grf, RK3288_SOC_CON2, val);
157 if (ret < 0)
158 dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
159}
160
161/*
162 * On the rk3188 the io-domains are handled by a shared register with the
163 * lower 8 bits being still being continuing drive-strength settings.
164 */
165static const struct rockchip_iodomain_soc_data soc_data_rk3188 = {
166 .grf_offset = 0x104,
167 .supply_names = {
168 NULL,
169 NULL,
170 NULL,
171 NULL,
172 NULL,
173 NULL,
174 NULL,
175 NULL,
176 "ap0",
177 "ap1",
178 "cif",
179 "flash",
180 "vccio0",
181 "vccio1",
182 "lcdc0",
183 "lcdc1",
184 },
185};
186
187static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
188 .grf_offset = 0x380,
189 .supply_names = {
190 "lcdc", /* LCDC_VDD */
191 "dvp", /* DVPIO_VDD */
192 "flash0", /* FLASH0_VDD (emmc) */
193 "flash1", /* FLASH1_VDD (sdio1) */
194 "wifi", /* APIO3_VDD (sdio0) */
195 "bb", /* APIO5_VDD */
196 "audio", /* APIO4_VDD */
197 "sdcard", /* SDMMC0_VDD (sdmmc) */
198 "gpio30", /* APIO1_VDD */
199 "gpio1830", /* APIO2_VDD */
200 },
201 .init = rk3288_iodomain_init,
202};
203
204static const struct of_device_id rockchip_iodomain_match[] = {
205 {
206 .compatible = "rockchip,rk3188-io-voltage-domain",
207 .data = (void *)&soc_data_rk3188
208 },
209 {
210 .compatible = "rockchip,rk3288-io-voltage-domain",
211 .data = (void *)&soc_data_rk3288
212 },
213 { /* sentinel */ },
214};
215
216static int rockchip_iodomain_probe(struct platform_device *pdev)
217{
218 struct device_node *np = pdev->dev.of_node;
219 const struct of_device_id *match;
220 struct rockchip_iodomain *iod;
221 int i, ret = 0;
222
223 if (!np)
224 return -ENODEV;
225
226 iod = devm_kzalloc(&pdev->dev, sizeof(*iod), GFP_KERNEL);
227 if (!iod)
228 return -ENOMEM;
229
230 iod->dev = &pdev->dev;
231 platform_set_drvdata(pdev, iod);
232
233 match = of_match_node(rockchip_iodomain_match, np);
234 iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
235
236 iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
237 if (IS_ERR(iod->grf)) {
238 dev_err(&pdev->dev, "couldn't find grf regmap\n");
239 return PTR_ERR(iod->grf);
240 }
241
242 for (i = 0; i < MAX_SUPPLIES; i++) {
243 const char *supply_name = iod->soc_data->supply_names[i];
244 struct rockchip_iodomain_supply *supply = &iod->supplies[i];
245 struct regulator *reg;
246 int uV;
247
248 if (!supply_name)
249 continue;
250
251 reg = devm_regulator_get_optional(iod->dev, supply_name);
252 if (IS_ERR(reg)) {
253 ret = PTR_ERR(reg);
254
255 /* If a supply wasn't specified, that's OK */
256 if (ret == -ENODEV)
257 continue;
258 else if (ret != -EPROBE_DEFER)
259 dev_err(iod->dev, "couldn't get regulator %s\n",
260 supply_name);
261 goto unreg_notify;
262 }
263
264 /* set initial correct value */
265 uV = regulator_get_voltage(reg);
266
267 /* must be a regulator we can get the voltage of */
268 if (uV < 0) {
269 dev_err(iod->dev, "Can't determine voltage: %s\n",
270 supply_name);
271 goto unreg_notify;
272 }
273
274 if (uV > MAX_VOLTAGE_3_3) {
275 dev_crit(iod->dev,
276 "%d uV is too high. May damage SoC!\n",
277 uV);
278 ret = -EINVAL;
279 goto unreg_notify;
280 }
281
282 /* setup our supply */
283 supply->idx = i;
284 supply->iod = iod;
285 supply->reg = reg;
286 supply->nb.notifier_call = rockchip_iodomain_notify;
287
288 ret = rockchip_iodomain_write(supply, uV);
289 if (ret) {
290 supply->reg = NULL;
291 goto unreg_notify;
292 }
293
294 /* register regulator notifier */
295 ret = regulator_register_notifier(reg, &supply->nb);
296 if (ret) {
297 dev_err(&pdev->dev,
298 "regulator notifier request failed\n");
299 supply->reg = NULL;
300 goto unreg_notify;
301 }
302 }
303
304 if (iod->soc_data->init)
305 iod->soc_data->init(iod);
306
307 return 0;
308
309unreg_notify:
310 for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
311 struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
312
313 if (io_supply->reg)
314 regulator_unregister_notifier(io_supply->reg,
315 &io_supply->nb);
316 }
317
318 return ret;
319}
320
321static int rockchip_iodomain_remove(struct platform_device *pdev)
322{
323 struct rockchip_iodomain *iod = platform_get_drvdata(pdev);
324 int i;
325
326 for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
327 struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
328
329 if (io_supply->reg)
330 regulator_unregister_notifier(io_supply->reg,
331 &io_supply->nb);
332 }
333
334 return 0;
335}
336
337static struct platform_driver rockchip_iodomain_driver = {
338 .probe = rockchip_iodomain_probe,
339 .remove = rockchip_iodomain_remove,
340 .driver = {
341 .name = "rockchip-iodomain",
342 .of_match_table = rockchip_iodomain_match,
343 },
344};
345
346module_platform_driver(rockchip_iodomain_driver);
347
348MODULE_DESCRIPTION("Rockchip IO-domain driver");
349MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
350MODULE_AUTHOR("Doug Anderson <dianders@chromium.org>");
351MODULE_LICENSE("GPL v2");
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 72f63817a1a0..fe2c2d595f59 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -75,8 +75,6 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
75 .con_ids = { NULL, }, 75 .con_ids = { NULL, },
76}; 76};
77 77
78static bool default_pm_on;
79
80static int __init sh_pm_runtime_init(void) 78static int __init sh_pm_runtime_init(void)
81{ 79{
82 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { 80 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
@@ -96,16 +94,7 @@ static int __init sh_pm_runtime_init(void)
96 return 0; 94 return 0;
97 } 95 }
98 96
99 default_pm_on = true;
100 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); 97 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
101 return 0; 98 return 0;
102} 99}
103core_initcall(sh_pm_runtime_init); 100core_initcall(sh_pm_runtime_init);
104
105static int __init sh_pm_runtime_late_init(void)
106{
107 if (default_pm_on)
108 pm_genpd_poweroff_unused();
109 return 0;
110}
111late_initcall(sh_pm_runtime_late_init);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e19512ffc40e..ebcb33df2eb2 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -35,6 +35,7 @@
35#include <linux/spi/spi.h> 35#include <linux/spi/spi.h>
36#include <linux/of_gpio.h> 36#include <linux/of_gpio.h>
37#include <linux/pm_runtime.h> 37#include <linux/pm_runtime.h>
38#include <linux/pm_domain.h>
38#include <linux/export.h> 39#include <linux/export.h>
39#include <linux/sched/rt.h> 40#include <linux/sched/rt.h>
40#include <linux/delay.h> 41#include <linux/delay.h>
@@ -264,10 +265,12 @@ static int spi_drv_probe(struct device *dev)
264 if (ret) 265 if (ret)
265 return ret; 266 return ret;
266 267
267 acpi_dev_pm_attach(dev, true); 268 ret = dev_pm_domain_attach(dev, true);
268 ret = sdrv->probe(to_spi_device(dev)); 269 if (ret != -EPROBE_DEFER) {
269 if (ret) 270 ret = sdrv->probe(to_spi_device(dev));
270 acpi_dev_pm_detach(dev, true); 271 if (ret)
272 dev_pm_domain_detach(dev, true);
273 }
271 274
272 return ret; 275 return ret;
273} 276}
@@ -278,7 +281,7 @@ static int spi_drv_remove(struct device *dev)
278 int ret; 281 int ret;
279 282
280 ret = sdrv->remove(to_spi_device(dev)); 283 ret = sdrv->remove(to_spi_device(dev));
281 acpi_dev_pm_detach(dev, true); 284 dev_pm_domain_detach(dev, true);
282 285
283 return ret; 286 return ret;
284} 287}
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index c728113374f5..f97804bdf1ff 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -59,6 +59,10 @@
59#define METHOD_NAME__PRS "_PRS" 59#define METHOD_NAME__PRS "_PRS"
60#define METHOD_NAME__PRT "_PRT" 60#define METHOD_NAME__PRT "_PRT"
61#define METHOD_NAME__PRW "_PRW" 61#define METHOD_NAME__PRW "_PRW"
62#define METHOD_NAME__PS0 "_PS0"
63#define METHOD_NAME__PS1 "_PS1"
64#define METHOD_NAME__PS2 "_PS2"
65#define METHOD_NAME__PS3 "_PS3"
62#define METHOD_NAME__REG "_REG" 66#define METHOD_NAME__REG "_REG"
63#define METHOD_NAME__SB_ "_SB_" 67#define METHOD_NAME__SB_ "_SB_"
64#define METHOD_NAME__SEG "_SEG" 68#define METHOD_NAME__SEG "_SEG"
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index b7c89d47efbe..9fc1d71c82bc 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20140724 49#define ACPI_CA_VERSION 0x20140828
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
@@ -692,6 +692,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
692 *event_status)) 692 *event_status))
693ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) 693ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
694ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) 694ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
695ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
695 696
696ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status 697ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
697 acpi_get_gpe_device(u32 gpe_index, 698 acpi_get_gpe_device(u32 gpe_index,
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7626bfeac2cb..29e79370641d 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -952,7 +952,8 @@ enum acpi_srat_type {
952 ACPI_SRAT_TYPE_CPU_AFFINITY = 0, 952 ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
953 ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, 953 ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
954 ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, 954 ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
955 ACPI_SRAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */ 955 ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
956 ACPI_SRAT_TYPE_RESERVED = 4 /* 4 and greater are reserved */
956}; 957};
957 958
958/* 959/*
@@ -968,7 +969,7 @@ struct acpi_srat_cpu_affinity {
968 u32 flags; 969 u32 flags;
969 u8 local_sapic_eid; 970 u8 local_sapic_eid;
970 u8 proximity_domain_hi[3]; 971 u8 proximity_domain_hi[3];
971 u32 reserved; /* Reserved, must be zero */ 972 u32 clock_domain;
972}; 973};
973 974
974/* Flags */ 975/* Flags */
@@ -1010,6 +1011,20 @@ struct acpi_srat_x2apic_cpu_affinity {
1010 1011
1011#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ 1012#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
1012 1013
1014/* 3: GICC Affinity (ACPI 5.1) */
1015
1016struct acpi_srat_gicc_affinity {
1017 struct acpi_subtable_header header;
1018 u32 proximity_domain;
1019 u32 acpi_processor_uid;
1020 u32 flags;
1021 u32 clock_domain;
1022};
1023
1024/* Flags for struct acpi_srat_gicc_affinity */
1025
1026#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
1027
1013/* Reset to default packing */ 1028/* Reset to default packing */
1014 1029
1015#pragma pack() 1030#pragma pack()
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 787bcc814463..5480cb2236bf 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -310,10 +310,15 @@ struct acpi_gtdt_timer_entry {
310 u32 common_flags; 310 u32 common_flags;
311}; 311};
312 312
313/* Flag Definitions: timer_flags and virtual_timer_flags above */
314
315#define ACPI_GTDT_GT_IRQ_MODE (1)
316#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1)
317
313/* Flag Definitions: common_flags above */ 318/* Flag Definitions: common_flags above */
314 319
315#define ACPI_GTDT_GT_IS_SECURE_TIMER (1) 320#define ACPI_GTDT_GT_IS_SECURE_TIMER (1)
316#define ACPI_GTDT_GT_ALWAYS_ON (1<<1) 321#define ACPI_GTDT_GT_ALWAYS_ON (1<<1)
317 322
318/* 1: SBSA Generic Watchdog Structure */ 323/* 1: SBSA Generic Watchdog Structure */
319 324
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 807cbc46d73e..b7926bb9b444 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -587,7 +587,6 @@ static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
587#if defined(CONFIG_ACPI) && defined(CONFIG_PM) 587#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
588struct acpi_device *acpi_dev_pm_get_node(struct device *dev); 588struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
589int acpi_dev_pm_attach(struct device *dev, bool power_on); 589int acpi_dev_pm_attach(struct device *dev, bool power_on);
590void acpi_dev_pm_detach(struct device *dev, bool power_off);
591#else 590#else
592static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) 591static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
593{ 592{
@@ -597,7 +596,6 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
597{ 596{
598 return -ENODEV; 597 return -ENODEV;
599} 598}
600static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {}
601#endif 599#endif
602 600
603#ifdef CONFIG_ACPI 601#ifdef CONFIG_ACPI
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7d1955afa62c..138336b6bb04 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -112,6 +112,9 @@ struct cpufreq_policy {
112 spinlock_t transition_lock; 112 spinlock_t transition_lock;
113 wait_queue_head_t transition_wait; 113 wait_queue_head_t transition_wait;
114 struct task_struct *transition_task; /* Task which is doing the transition */ 114 struct task_struct *transition_task; /* Task which is doing the transition */
115
116 /* For cpufreq driver's internal use */
117 void *driver_data;
115}; 118};
116 119
117/* Only for ACPI */ 120/* Only for ACPI */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 698ad053d064..69517a24bc50 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -193,11 +193,6 @@ extern void irq_wake_thread(unsigned int irq, void *dev_id);
193/* The following three functions are for the core kernel use only. */ 193/* The following three functions are for the core kernel use only. */
194extern void suspend_device_irqs(void); 194extern void suspend_device_irqs(void);
195extern void resume_device_irqs(void); 195extern void resume_device_irqs(void);
196#ifdef CONFIG_PM_SLEEP
197extern int check_wakeup_irqs(void);
198#else
199static inline int check_wakeup_irqs(void) { return 0; }
200#endif
201 196
202/** 197/**
203 * struct irq_affinity_notify - context for notification of IRQ affinity changes 198 * struct irq_affinity_notify - context for notification of IRQ affinity changes
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 62af59242ddc..03f48d936f66 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -173,6 +173,7 @@ struct irq_data {
173 * IRQD_IRQ_DISABLED - Disabled state of the interrupt 173 * IRQD_IRQ_DISABLED - Disabled state of the interrupt
174 * IRQD_IRQ_MASKED - Masked state of the interrupt 174 * IRQD_IRQ_MASKED - Masked state of the interrupt
175 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt 175 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt
176 * IRQD_WAKEUP_ARMED - Wakeup mode armed
176 */ 177 */
177enum { 178enum {
178 IRQD_TRIGGER_MASK = 0xf, 179 IRQD_TRIGGER_MASK = 0xf,
@@ -186,6 +187,7 @@ enum {
186 IRQD_IRQ_DISABLED = (1 << 16), 187 IRQD_IRQ_DISABLED = (1 << 16),
187 IRQD_IRQ_MASKED = (1 << 17), 188 IRQD_IRQ_MASKED = (1 << 17),
188 IRQD_IRQ_INPROGRESS = (1 << 18), 189 IRQD_IRQ_INPROGRESS = (1 << 18),
190 IRQD_WAKEUP_ARMED = (1 << 19),
189}; 191};
190 192
191static inline bool irqd_is_setaffinity_pending(struct irq_data *d) 193static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -257,6 +259,12 @@ static inline bool irqd_irq_inprogress(struct irq_data *d)
257 return d->state_use_accessors & IRQD_IRQ_INPROGRESS; 259 return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
258} 260}
259 261
262static inline bool irqd_is_wakeup_armed(struct irq_data *d)
263{
264 return d->state_use_accessors & IRQD_WAKEUP_ARMED;
265}
266
267
260/* 268/*
261 * Functions for chained handlers which can be enabled/disabled by the 269 * Functions for chained handlers which can be enabled/disabled by the
262 * standard disable_irq/enable_irq calls. Must be called with 270 * standard disable_irq/enable_irq calls. Must be called with
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index ff24667cd86c..faf433af425e 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -38,6 +38,11 @@ struct pt_regs;
38 * @threads_oneshot: bitfield to handle shared oneshot threads 38 * @threads_oneshot: bitfield to handle shared oneshot threads
39 * @threads_active: number of irqaction threads currently running 39 * @threads_active: number of irqaction threads currently running
40 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers 40 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
41 * @nr_actions: number of installed actions on this descriptor
42 * @no_suspend_depth: number of irqactions on a irq descriptor with
43 * IRQF_NO_SUSPEND set
44 * @force_resume_depth: number of irqactions on a irq descriptor with
45 * IRQF_FORCE_RESUME set
41 * @dir: /proc/irq/ procfs entry 46 * @dir: /proc/irq/ procfs entry
42 * @name: flow handler name for /proc/interrupts output 47 * @name: flow handler name for /proc/interrupts output
43 */ 48 */
@@ -70,6 +75,11 @@ struct irq_desc {
70 unsigned long threads_oneshot; 75 unsigned long threads_oneshot;
71 atomic_t threads_active; 76 atomic_t threads_active;
72 wait_queue_head_t wait_for_threads; 77 wait_queue_head_t wait_for_threads;
78#ifdef CONFIG_PM_SLEEP
79 unsigned int nr_actions;
80 unsigned int no_suspend_depth;
81 unsigned int force_resume_depth;
82#endif
73#ifdef CONFIG_PROC_FS 83#ifdef CONFIG_PROC_FS
74 struct proc_dir_entry *dir; 84 struct proc_dir_entry *dir;
75#endif 85#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 72c0fe098a27..383fd68aaee1 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -619,6 +619,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
619 */ 619 */
620struct dev_pm_domain { 620struct dev_pm_domain {
621 struct dev_pm_ops ops; 621 struct dev_pm_ops ops;
622 void (*detach)(struct device *dev, bool power_off);
622}; 623};
623 624
624/* 625/*
@@ -679,12 +680,16 @@ struct dev_pm_domain {
679extern void device_pm_lock(void); 680extern void device_pm_lock(void);
680extern void dpm_resume_start(pm_message_t state); 681extern void dpm_resume_start(pm_message_t state);
681extern void dpm_resume_end(pm_message_t state); 682extern void dpm_resume_end(pm_message_t state);
683extern void dpm_resume_noirq(pm_message_t state);
684extern void dpm_resume_early(pm_message_t state);
682extern void dpm_resume(pm_message_t state); 685extern void dpm_resume(pm_message_t state);
683extern void dpm_complete(pm_message_t state); 686extern void dpm_complete(pm_message_t state);
684 687
685extern void device_pm_unlock(void); 688extern void device_pm_unlock(void);
686extern int dpm_suspend_end(pm_message_t state); 689extern int dpm_suspend_end(pm_message_t state);
687extern int dpm_suspend_start(pm_message_t state); 690extern int dpm_suspend_start(pm_message_t state);
691extern int dpm_suspend_noirq(pm_message_t state);
692extern int dpm_suspend_late(pm_message_t state);
688extern int dpm_suspend(pm_message_t state); 693extern int dpm_suspend(pm_message_t state);
689extern int dpm_prepare(pm_message_t state); 694extern int dpm_prepare(pm_message_t state);
690 695
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index ebc4c76ffb73..73e938b7e937 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -35,18 +35,10 @@ struct gpd_dev_ops {
35 int (*stop)(struct device *dev); 35 int (*stop)(struct device *dev);
36 int (*save_state)(struct device *dev); 36 int (*save_state)(struct device *dev);
37 int (*restore_state)(struct device *dev); 37 int (*restore_state)(struct device *dev);
38 int (*suspend)(struct device *dev);
39 int (*suspend_late)(struct device *dev);
40 int (*resume_early)(struct device *dev);
41 int (*resume)(struct device *dev);
42 int (*freeze)(struct device *dev);
43 int (*freeze_late)(struct device *dev);
44 int (*thaw_early)(struct device *dev);
45 int (*thaw)(struct device *dev);
46 bool (*active_wakeup)(struct device *dev); 38 bool (*active_wakeup)(struct device *dev);
47}; 39};
48 40
49struct gpd_cpu_data { 41struct gpd_cpuidle_data {
50 unsigned int saved_exit_latency; 42 unsigned int saved_exit_latency;
51 struct cpuidle_state *idle_state; 43 struct cpuidle_state *idle_state;
52}; 44};
@@ -71,7 +63,6 @@ struct generic_pm_domain {
71 unsigned int suspended_count; /* System suspend device counter */ 63 unsigned int suspended_count; /* System suspend device counter */
72 unsigned int prepared_count; /* Suspend counter of prepared devices */ 64 unsigned int prepared_count; /* Suspend counter of prepared devices */
73 bool suspend_power_off; /* Power status before system suspend */ 65 bool suspend_power_off; /* Power status before system suspend */
74 bool dev_irq_safe; /* Device callbacks are IRQ-safe */
75 int (*power_off)(struct generic_pm_domain *domain); 66 int (*power_off)(struct generic_pm_domain *domain);
76 s64 power_off_latency_ns; 67 s64 power_off_latency_ns;
77 int (*power_on)(struct generic_pm_domain *domain); 68 int (*power_on)(struct generic_pm_domain *domain);
@@ -80,8 +71,9 @@ struct generic_pm_domain {
80 s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ 71 s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
81 bool max_off_time_changed; 72 bool max_off_time_changed;
82 bool cached_power_down_ok; 73 bool cached_power_down_ok;
83 struct device_node *of_node; /* Node in device tree */ 74 struct gpd_cpuidle_data *cpuidle_data;
84 struct gpd_cpu_data *cpu_data; 75 void (*attach_dev)(struct device *dev);
76 void (*detach_dev)(struct device *dev);
85}; 77};
86 78
87static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) 79static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -108,7 +100,6 @@ struct gpd_timing_data {
108 100
109struct generic_pm_domain_data { 101struct generic_pm_domain_data {
110 struct pm_domain_data base; 102 struct pm_domain_data base;
111 struct gpd_dev_ops ops;
112 struct gpd_timing_data td; 103 struct gpd_timing_data td;
113 struct notifier_block nb; 104 struct notifier_block nb;
114 struct mutex lock; 105 struct mutex lock;
@@ -127,17 +118,11 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
127 return to_gpd_data(dev->power.subsys_data->domain_data); 118 return to_gpd_data(dev->power.subsys_data->domain_data);
128} 119}
129 120
130extern struct dev_power_governor simple_qos_governor;
131
132extern struct generic_pm_domain *dev_to_genpd(struct device *dev); 121extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
133extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, 122extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
134 struct device *dev, 123 struct device *dev,
135 struct gpd_timing_data *td); 124 struct gpd_timing_data *td);
136 125
137extern int __pm_genpd_of_add_device(struct device_node *genpd_node,
138 struct device *dev,
139 struct gpd_timing_data *td);
140
141extern int __pm_genpd_name_add_device(const char *domain_name, 126extern int __pm_genpd_name_add_device(const char *domain_name,
142 struct device *dev, 127 struct device *dev,
143 struct gpd_timing_data *td); 128 struct gpd_timing_data *td);
@@ -151,10 +136,6 @@ extern int pm_genpd_add_subdomain_names(const char *master_name,
151 const char *subdomain_name); 136 const char *subdomain_name);
152extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 137extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
153 struct generic_pm_domain *target); 138 struct generic_pm_domain *target);
154extern int pm_genpd_add_callbacks(struct device *dev,
155 struct gpd_dev_ops *ops,
156 struct gpd_timing_data *td);
157extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
158extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); 139extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
159extern int pm_genpd_name_attach_cpuidle(const char *name, int state); 140extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
160extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); 141extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
@@ -165,8 +146,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd,
165extern int pm_genpd_poweron(struct generic_pm_domain *genpd); 146extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
166extern int pm_genpd_name_poweron(const char *domain_name); 147extern int pm_genpd_name_poweron(const char *domain_name);
167 148
168extern bool default_stop_ok(struct device *dev); 149extern struct dev_power_governor simple_qos_governor;
169
170extern struct dev_power_governor pm_domain_always_on_gov; 150extern struct dev_power_governor pm_domain_always_on_gov;
171#else 151#else
172 152
@@ -184,12 +164,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
184{ 164{
185 return -ENOSYS; 165 return -ENOSYS;
186} 166}
187static inline int __pm_genpd_of_add_device(struct device_node *genpd_node,
188 struct device *dev,
189 struct gpd_timing_data *td)
190{
191 return -ENOSYS;
192}
193static inline int __pm_genpd_name_add_device(const char *domain_name, 167static inline int __pm_genpd_name_add_device(const char *domain_name,
194 struct device *dev, 168 struct device *dev,
195 struct gpd_timing_data *td) 169 struct gpd_timing_data *td)
@@ -217,16 +191,6 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
217{ 191{
218 return -ENOSYS; 192 return -ENOSYS;
219} 193}
220static inline int pm_genpd_add_callbacks(struct device *dev,
221 struct gpd_dev_ops *ops,
222 struct gpd_timing_data *td)
223{
224 return -ENOSYS;
225}
226static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
227{
228 return -ENOSYS;
229}
230static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) 194static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
231{ 195{
232 return -ENOSYS; 196 return -ENOSYS;
@@ -255,10 +219,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
255{ 219{
256 return -ENOSYS; 220 return -ENOSYS;
257} 221}
258static inline bool default_stop_ok(struct device *dev)
259{
260 return false;
261}
262#define simple_qos_governor NULL 222#define simple_qos_governor NULL
263#define pm_domain_always_on_gov NULL 223#define pm_domain_always_on_gov NULL
264#endif 224#endif
@@ -269,45 +229,87 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
269 return __pm_genpd_add_device(genpd, dev, NULL); 229 return __pm_genpd_add_device(genpd, dev, NULL);
270} 230}
271 231
272static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
273 struct device *dev)
274{
275 return __pm_genpd_of_add_device(genpd_node, dev, NULL);
276}
277
278static inline int pm_genpd_name_add_device(const char *domain_name, 232static inline int pm_genpd_name_add_device(const char *domain_name,
279 struct device *dev) 233 struct device *dev)
280{ 234{
281 return __pm_genpd_name_add_device(domain_name, dev, NULL); 235 return __pm_genpd_name_add_device(domain_name, dev, NULL);
282} 236}
283 237
284static inline int pm_genpd_remove_callbacks(struct device *dev)
285{
286 return __pm_genpd_remove_callbacks(dev, true);
287}
288
289#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME 238#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
290extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
291extern void pm_genpd_poweroff_unused(void); 239extern void pm_genpd_poweroff_unused(void);
292#else 240#else
293static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
294static inline void pm_genpd_poweroff_unused(void) {} 241static inline void pm_genpd_poweroff_unused(void) {}
295#endif 242#endif
296 243
297#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP 244#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
298extern void pm_genpd_syscore_switch(struct device *dev, bool suspend); 245extern void pm_genpd_syscore_poweroff(struct device *dev);
246extern void pm_genpd_syscore_poweron(struct device *dev);
299#else 247#else
300static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {} 248static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
249static inline void pm_genpd_syscore_poweron(struct device *dev) {}
301#endif 250#endif
302 251
303static inline void pm_genpd_syscore_poweroff(struct device *dev) 252/* OF PM domain providers */
253struct of_device_id;
254
255struct genpd_onecell_data {
256 struct generic_pm_domain **domains;
257 unsigned int num_domains;
258};
259
260typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
261 void *data);
262
263#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
264int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
265 void *data);
266void of_genpd_del_provider(struct device_node *np);
267
268struct generic_pm_domain *__of_genpd_xlate_simple(
269 struct of_phandle_args *genpdspec,
270 void *data);
271struct generic_pm_domain *__of_genpd_xlate_onecell(
272 struct of_phandle_args *genpdspec,
273 void *data);
274
275int genpd_dev_pm_attach(struct device *dev);
276#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
277static inline int __of_genpd_add_provider(struct device_node *np,
278 genpd_xlate_t xlate, void *data)
279{
280 return 0;
281}
282static inline void of_genpd_del_provider(struct device_node *np) {}
283
284#define __of_genpd_xlate_simple NULL
285#define __of_genpd_xlate_onecell NULL
286
287static inline int genpd_dev_pm_attach(struct device *dev)
288{
289 return -ENODEV;
290}
291#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
292
293static inline int of_genpd_add_provider_simple(struct device_node *np,
294 struct generic_pm_domain *genpd)
295{
296 return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd);
297}
298static inline int of_genpd_add_provider_onecell(struct device_node *np,
299 struct genpd_onecell_data *data)
304{ 300{
305 pm_genpd_syscore_switch(dev, true); 301 return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data);
306} 302}
307 303
308static inline void pm_genpd_syscore_poweron(struct device *dev) 304#ifdef CONFIG_PM
305extern int dev_pm_domain_attach(struct device *dev, bool power_on);
306extern void dev_pm_domain_detach(struct device *dev, bool power_off);
307#else
308static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
309{ 309{
310 pm_genpd_syscore_switch(dev, false); 310 return -ENODEV;
311} 311}
312static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
313#endif
312 314
313#endif /* _LINUX_PM_DOMAIN_H */ 315#endif /* _LINUX_PM_DOMAIN_H */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 519064e0c943..3388c1b6f7d8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,6 +189,8 @@ struct platform_suspend_ops {
189 189
190struct platform_freeze_ops { 190struct platform_freeze_ops {
191 int (*begin)(void); 191 int (*begin)(void);
192 int (*prepare)(void);
193 void (*restore)(void);
192 void (*end)(void); 194 void (*end)(void);
193}; 195};
194 196
@@ -371,6 +373,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
371extern bool events_check_enabled; 373extern bool events_check_enabled;
372 374
373extern bool pm_wakeup_pending(void); 375extern bool pm_wakeup_pending(void);
376extern void pm_system_wakeup(void);
377extern void pm_wakeup_clear(void);
374extern bool pm_get_wakeup_count(unsigned int *count, bool block); 378extern bool pm_get_wakeup_count(unsigned int *count, bool block);
375extern bool pm_save_wakeup_count(unsigned int count); 379extern bool pm_save_wakeup_count(unsigned int count);
376extern void pm_wakep_autosleep_enabled(bool set); 380extern void pm_wakep_autosleep_enabled(bool set);
@@ -418,6 +422,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
418#define pm_notifier(fn, pri) do { (void)(fn); } while (0) 422#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
419 423
420static inline bool pm_wakeup_pending(void) { return false; } 424static inline bool pm_wakeup_pending(void) { return false; }
425static inline void pm_system_wakeup(void) {}
426static inline void pm_wakeup_clear(void) {}
421 427
422static inline void lock_system_sleep(void) {} 428static inline void lock_system_sleep(void) {}
423static inline void unlock_system_sleep(void) {} 429static inline void unlock_system_sleep(void) {}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6223fab9a9d2..8fb52e9bddc1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -342,6 +342,31 @@ static bool irq_check_poll(struct irq_desc *desc)
342 return irq_wait_for_poll(desc); 342 return irq_wait_for_poll(desc);
343} 343}
344 344
345static bool irq_may_run(struct irq_desc *desc)
346{
347 unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
348
349 /*
350 * If the interrupt is not in progress and is not an armed
351 * wakeup interrupt, proceed.
352 */
353 if (!irqd_has_set(&desc->irq_data, mask))
354 return true;
355
356 /*
357 * If the interrupt is an armed wakeup source, mark it pending
358 * and suspended, disable it and notify the pm core about the
359 * event.
360 */
361 if (irq_pm_check_wakeup(desc))
362 return false;
363
364 /*
365 * Handle a potential concurrent poll on a different core.
366 */
367 return irq_check_poll(desc);
368}
369
345/** 370/**
346 * handle_simple_irq - Simple and software-decoded IRQs. 371 * handle_simple_irq - Simple and software-decoded IRQs.
347 * @irq: the interrupt number 372 * @irq: the interrupt number
@@ -359,9 +384,8 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
359{ 384{
360 raw_spin_lock(&desc->lock); 385 raw_spin_lock(&desc->lock);
361 386
362 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) 387 if (!irq_may_run(desc))
363 if (!irq_check_poll(desc)) 388 goto out_unlock;
364 goto out_unlock;
365 389
366 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 390 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
367 kstat_incr_irqs_this_cpu(irq, desc); 391 kstat_incr_irqs_this_cpu(irq, desc);
@@ -412,9 +436,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
412 raw_spin_lock(&desc->lock); 436 raw_spin_lock(&desc->lock);
413 mask_ack_irq(desc); 437 mask_ack_irq(desc);
414 438
415 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) 439 if (!irq_may_run(desc))
416 if (!irq_check_poll(desc)) 440 goto out_unlock;
417 goto out_unlock;
418 441
419 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 442 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
420 kstat_incr_irqs_this_cpu(irq, desc); 443 kstat_incr_irqs_this_cpu(irq, desc);
@@ -485,9 +508,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
485 508
486 raw_spin_lock(&desc->lock); 509 raw_spin_lock(&desc->lock);
487 510
488 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) 511 if (!irq_may_run(desc))
489 if (!irq_check_poll(desc)) 512 goto out;
490 goto out;
491 513
492 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 514 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
493 kstat_incr_irqs_this_cpu(irq, desc); 515 kstat_incr_irqs_this_cpu(irq, desc);
@@ -541,19 +563,23 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
541 raw_spin_lock(&desc->lock); 563 raw_spin_lock(&desc->lock);
542 564
543 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 565 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
566
567 if (!irq_may_run(desc)) {
568 desc->istate |= IRQS_PENDING;
569 mask_ack_irq(desc);
570 goto out_unlock;
571 }
572
544 /* 573 /*
545 * If we're currently running this IRQ, or its disabled, 574 * If its disabled or no action available then mask it and get
546 * we shouldn't process the IRQ. Mark it pending, handle 575 * out of here.
547 * the necessary masking and go out
548 */ 576 */
549 if (unlikely(irqd_irq_disabled(&desc->irq_data) || 577 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
550 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { 578 desc->istate |= IRQS_PENDING;
551 if (!irq_check_poll(desc)) { 579 mask_ack_irq(desc);
552 desc->istate |= IRQS_PENDING; 580 goto out_unlock;
553 mask_ack_irq(desc);
554 goto out_unlock;
555 }
556 } 581 }
582
557 kstat_incr_irqs_this_cpu(irq, desc); 583 kstat_incr_irqs_this_cpu(irq, desc);
558 584
559 /* Start handling the irq */ 585 /* Start handling the irq */
@@ -602,18 +628,21 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
602 raw_spin_lock(&desc->lock); 628 raw_spin_lock(&desc->lock);
603 629
604 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 630 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
631
632 if (!irq_may_run(desc)) {
633 desc->istate |= IRQS_PENDING;
634 goto out_eoi;
635 }
636
605 /* 637 /*
606 * If we're currently running this IRQ, or its disabled, 638 * If its disabled or no action available then mask it and get
607 * we shouldn't process the IRQ. Mark it pending, handle 639 * out of here.
608 * the necessary masking and go out
609 */ 640 */
610 if (unlikely(irqd_irq_disabled(&desc->irq_data) || 641 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
611 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { 642 desc->istate |= IRQS_PENDING;
612 if (!irq_check_poll(desc)) { 643 goto out_eoi;
613 desc->istate |= IRQS_PENDING;
614 goto out_eoi;
615 }
616 } 644 }
645
617 kstat_incr_irqs_this_cpu(irq, desc); 646 kstat_incr_irqs_this_cpu(irq, desc);
618 647
619 do { 648 do {
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 099ea2e0eb88..4332d766619d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -63,8 +63,8 @@ enum {
63 63
64extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 64extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
65 unsigned long flags); 65 unsigned long flags);
66extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); 66extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
67extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); 67extern void __enable_irq(struct irq_desc *desc, unsigned int irq);
68 68
69extern int irq_startup(struct irq_desc *desc, bool resend); 69extern int irq_startup(struct irq_desc *desc, bool resend);
70extern void irq_shutdown(struct irq_desc *desc); 70extern void irq_shutdown(struct irq_desc *desc);
@@ -194,3 +194,15 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *d
194 __this_cpu_inc(*desc->kstat_irqs); 194 __this_cpu_inc(*desc->kstat_irqs);
195 __this_cpu_inc(kstat.irqs_sum); 195 __this_cpu_inc(kstat.irqs_sum);
196} 196}
197
198#ifdef CONFIG_PM_SLEEP
199bool irq_pm_check_wakeup(struct irq_desc *desc);
200void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
201void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
202#else
203static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
204static inline void
205irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
206static inline void
207irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
208#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3dc6a61bf06a..0a9104b4608b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -382,14 +382,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
382} 382}
383#endif 383#endif
384 384
385void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) 385void __disable_irq(struct irq_desc *desc, unsigned int irq)
386{ 386{
387 if (suspend) {
388 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
389 return;
390 desc->istate |= IRQS_SUSPENDED;
391 }
392
393 if (!desc->depth++) 387 if (!desc->depth++)
394 irq_disable(desc); 388 irq_disable(desc);
395} 389}
@@ -401,7 +395,7 @@ static int __disable_irq_nosync(unsigned int irq)
401 395
402 if (!desc) 396 if (!desc)
403 return -EINVAL; 397 return -EINVAL;
404 __disable_irq(desc, irq, false); 398 __disable_irq(desc, irq);
405 irq_put_desc_busunlock(desc, flags); 399 irq_put_desc_busunlock(desc, flags);
406 return 0; 400 return 0;
407} 401}
@@ -442,20 +436,8 @@ void disable_irq(unsigned int irq)
442} 436}
443EXPORT_SYMBOL(disable_irq); 437EXPORT_SYMBOL(disable_irq);
444 438
445void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) 439void __enable_irq(struct irq_desc *desc, unsigned int irq)
446{ 440{
447 if (resume) {
448 if (!(desc->istate & IRQS_SUSPENDED)) {
449 if (!desc->action)
450 return;
451 if (!(desc->action->flags & IRQF_FORCE_RESUME))
452 return;
453 /* Pretend that it got disabled ! */
454 desc->depth++;
455 }
456 desc->istate &= ~IRQS_SUSPENDED;
457 }
458
459 switch (desc->depth) { 441 switch (desc->depth) {
460 case 0: 442 case 0:
461 err_out: 443 err_out:
@@ -497,7 +479,7 @@ void enable_irq(unsigned int irq)
497 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) 479 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
498 goto out; 480 goto out;
499 481
500 __enable_irq(desc, irq, false); 482 __enable_irq(desc, irq);
501out: 483out:
502 irq_put_desc_busunlock(desc, flags); 484 irq_put_desc_busunlock(desc, flags);
503} 485}
@@ -1218,6 +1200,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1218 new->irq = irq; 1200 new->irq = irq;
1219 *old_ptr = new; 1201 *old_ptr = new;
1220 1202
1203 irq_pm_install_action(desc, new);
1204
1221 /* Reset broken irq detection when installing new handler */ 1205 /* Reset broken irq detection when installing new handler */
1222 desc->irq_count = 0; 1206 desc->irq_count = 0;
1223 desc->irqs_unhandled = 0; 1207 desc->irqs_unhandled = 0;
@@ -1228,7 +1212,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1228 */ 1212 */
1229 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { 1213 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1230 desc->istate &= ~IRQS_SPURIOUS_DISABLED; 1214 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1231 __enable_irq(desc, irq, false); 1215 __enable_irq(desc, irq);
1232 } 1216 }
1233 1217
1234 raw_spin_unlock_irqrestore(&desc->lock, flags); 1218 raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -1336,6 +1320,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1336 /* Found it - now remove it from the list of entries: */ 1320 /* Found it - now remove it from the list of entries: */
1337 *action_ptr = action->next; 1321 *action_ptr = action->next;
1338 1322
1323 irq_pm_remove_action(desc, action);
1324
1339 /* If this was the last handler, shut down the IRQ line: */ 1325 /* If this was the last handler, shut down the IRQ line: */
1340 if (!desc->action) { 1326 if (!desc->action) {
1341 irq_shutdown(desc); 1327 irq_shutdown(desc);
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index abcd6ca86cb7..3ca532592704 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -9,17 +9,105 @@
9#include <linux/irq.h> 9#include <linux/irq.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/suspend.h>
12#include <linux/syscore_ops.h> 13#include <linux/syscore_ops.h>
13 14
14#include "internals.h" 15#include "internals.h"
15 16
17bool irq_pm_check_wakeup(struct irq_desc *desc)
18{
19 if (irqd_is_wakeup_armed(&desc->irq_data)) {
20 irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
21 desc->istate |= IRQS_SUSPENDED | IRQS_PENDING;
22 desc->depth++;
23 irq_disable(desc);
24 pm_system_wakeup();
25 return true;
26 }
27 return false;
28}
29
30/*
31 * Called from __setup_irq() with desc->lock held after @action has
32 * been installed in the action chain.
33 */
34void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
35{
36 desc->nr_actions++;
37
38 if (action->flags & IRQF_FORCE_RESUME)
39 desc->force_resume_depth++;
40
41 WARN_ON_ONCE(desc->force_resume_depth &&
42 desc->force_resume_depth != desc->nr_actions);
43
44 if (action->flags & IRQF_NO_SUSPEND)
45 desc->no_suspend_depth++;
46
47 WARN_ON_ONCE(desc->no_suspend_depth &&
48 desc->no_suspend_depth != desc->nr_actions);
49}
50
51/*
52 * Called from __free_irq() with desc->lock held after @action has
53 * been removed from the action chain.
54 */
55void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
56{
57 desc->nr_actions--;
58
59 if (action->flags & IRQF_FORCE_RESUME)
60 desc->force_resume_depth--;
61
62 if (action->flags & IRQF_NO_SUSPEND)
63 desc->no_suspend_depth--;
64}
65
66static bool suspend_device_irq(struct irq_desc *desc, int irq)
67{
68 if (!desc->action || desc->no_suspend_depth)
69 return false;
70
71 if (irqd_is_wakeup_set(&desc->irq_data)) {
72 irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
73 /*
74 * We return true here to force the caller to issue
75 * synchronize_irq(). We need to make sure that the
76 * IRQD_WAKEUP_ARMED is visible before we return from
77 * suspend_device_irqs().
78 */
79 return true;
80 }
81
82 desc->istate |= IRQS_SUSPENDED;
83 __disable_irq(desc, irq);
84
85 /*
86 * Hardware which has no wakeup source configuration facility
87 * requires that the non wakeup interrupts are masked at the
88 * chip level. The chip implementation indicates that with
89 * IRQCHIP_MASK_ON_SUSPEND.
90 */
91 if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
92 mask_irq(desc);
93 return true;
94}
95
16/** 96/**
17 * suspend_device_irqs - disable all currently enabled interrupt lines 97 * suspend_device_irqs - disable all currently enabled interrupt lines
18 * 98 *
19 * During system-wide suspend or hibernation device drivers need to be prevented 99 * During system-wide suspend or hibernation device drivers need to be
20 * from receiving interrupts and this function is provided for this purpose. 100 * prevented from receiving interrupts and this function is provided
21 * It marks all interrupt lines in use, except for the timer ones, as disabled 101 * for this purpose.
22 * and sets the IRQS_SUSPENDED flag for each of them. 102 *
103 * So we disable all interrupts and mark them IRQS_SUSPENDED except
104 * for those which are unused, those which are marked as not
105 * suspendable via an interrupt request with the flag IRQF_NO_SUSPEND
106 * set and those which are marked as active wakeup sources.
107 *
108 * The active wakeup sources are handled by the flow handler entry
109 * code which checks for the IRQD_WAKEUP_ARMED flag, suspends the
110 * interrupt and notifies the pm core about the wakeup.
23 */ 111 */
24void suspend_device_irqs(void) 112void suspend_device_irqs(void)
25{ 113{
@@ -28,18 +116,36 @@ void suspend_device_irqs(void)
28 116
29 for_each_irq_desc(irq, desc) { 117 for_each_irq_desc(irq, desc) {
30 unsigned long flags; 118 unsigned long flags;
119 bool sync;
31 120
32 raw_spin_lock_irqsave(&desc->lock, flags); 121 raw_spin_lock_irqsave(&desc->lock, flags);
33 __disable_irq(desc, irq, true); 122 sync = suspend_device_irq(desc, irq);
34 raw_spin_unlock_irqrestore(&desc->lock, flags); 123 raw_spin_unlock_irqrestore(&desc->lock, flags);
35 }
36 124
37 for_each_irq_desc(irq, desc) 125 if (sync)
38 if (desc->istate & IRQS_SUSPENDED)
39 synchronize_irq(irq); 126 synchronize_irq(irq);
127 }
40} 128}
41EXPORT_SYMBOL_GPL(suspend_device_irqs); 129EXPORT_SYMBOL_GPL(suspend_device_irqs);
42 130
131static void resume_irq(struct irq_desc *desc, int irq)
132{
133 irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
134
135 if (desc->istate & IRQS_SUSPENDED)
136 goto resume;
137
138 /* Force resume the interrupt? */
139 if (!desc->force_resume_depth)
140 return;
141
142 /* Pretend that it got disabled ! */
143 desc->depth++;
144resume:
145 desc->istate &= ~IRQS_SUSPENDED;
146 __enable_irq(desc, irq);
147}
148
43static void resume_irqs(bool want_early) 149static void resume_irqs(bool want_early)
44{ 150{
45 struct irq_desc *desc; 151 struct irq_desc *desc;
@@ -54,7 +160,7 @@ static void resume_irqs(bool want_early)
54 continue; 160 continue;
55 161
56 raw_spin_lock_irqsave(&desc->lock, flags); 162 raw_spin_lock_irqsave(&desc->lock, flags);
57 __enable_irq(desc, irq, true); 163 resume_irq(desc, irq);
58 raw_spin_unlock_irqrestore(&desc->lock, flags); 164 raw_spin_unlock_irqrestore(&desc->lock, flags);
59 } 165 }
60} 166}
@@ -93,38 +199,3 @@ void resume_device_irqs(void)
93 resume_irqs(false); 199 resume_irqs(false);
94} 200}
95EXPORT_SYMBOL_GPL(resume_device_irqs); 201EXPORT_SYMBOL_GPL(resume_device_irqs);
96
97/**
98 * check_wakeup_irqs - check if any wake-up interrupts are pending
99 */
100int check_wakeup_irqs(void)
101{
102 struct irq_desc *desc;
103 int irq;
104
105 for_each_irq_desc(irq, desc) {
106 /*
107 * Only interrupts which are marked as wakeup source
108 * and have not been disabled before the suspend check
109 * can abort suspend.
110 */
111 if (irqd_is_wakeup_set(&desc->irq_data)) {
112 if (desc->depth == 1 && desc->istate & IRQS_PENDING)
113 return -EBUSY;
114 continue;
115 }
116 /*
117 * Check the non wakeup interrupts whether they need
118 * to be masked before finally going into suspend
119 * state. That's for hardware which has no wakeup
120 * source configuration facility. The chip
121 * implementation indicates that with
122 * IRQCHIP_MASK_ON_SUSPEND.
123 */
124 if (desc->istate & IRQS_SUSPENDED &&
125 irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
126 mask_irq(desc);
127 }
128
129 return 0;
130}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index e4e4121fa327..bbef57f5bdfd 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -302,6 +302,10 @@ config PM_GENERIC_DOMAINS_RUNTIME
302 def_bool y 302 def_bool y
303 depends on PM_RUNTIME && PM_GENERIC_DOMAINS 303 depends on PM_RUNTIME && PM_GENERIC_DOMAINS
304 304
305config PM_GENERIC_DOMAINS_OF
306 def_bool y
307 depends on PM_GENERIC_DOMAINS && OF
308
305config CPU_PM 309config CPU_PM
306 bool 310 bool
307 depends on SUSPEND || CPU_IDLE 311 depends on SUSPEND || CPU_IDLE
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 4ee194eb524b..7b323221b9ee 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -129,6 +129,7 @@ int freeze_processes(void)
129 if (!pm_freezing) 129 if (!pm_freezing)
130 atomic_inc(&system_freezing_cnt); 130 atomic_inc(&system_freezing_cnt);
131 131
132 pm_wakeup_clear();
132 printk("Freezing user space processes ... "); 133 printk("Freezing user space processes ... ");
133 pm_freezing = true; 134 pm_freezing = true;
134 error = try_to_freeze_tasks(true); 135 error = try_to_freeze_tasks(true);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f1604d8cf489..791a61892bb5 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -725,6 +725,14 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
725 clear_bit(bit, addr); 725 clear_bit(bit, addr);
726} 726}
727 727
728static void memory_bm_clear_current(struct memory_bitmap *bm)
729{
730 int bit;
731
732 bit = max(bm->cur.node_bit - 1, 0);
733 clear_bit(bit, bm->cur.node->data);
734}
735
728static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) 736static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
729{ 737{
730 void *addr; 738 void *addr;
@@ -1333,23 +1341,39 @@ static struct memory_bitmap copy_bm;
1333 1341
1334void swsusp_free(void) 1342void swsusp_free(void)
1335{ 1343{
1336 struct zone *zone; 1344 unsigned long fb_pfn, fr_pfn;
1337 unsigned long pfn, max_zone_pfn;
1338 1345
1339 for_each_populated_zone(zone) { 1346 if (!forbidden_pages_map || !free_pages_map)
1340 max_zone_pfn = zone_end_pfn(zone); 1347 goto out;
1341 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1348
1342 if (pfn_valid(pfn)) { 1349 memory_bm_position_reset(forbidden_pages_map);
1343 struct page *page = pfn_to_page(pfn); 1350 memory_bm_position_reset(free_pages_map);
1344 1351
1345 if (swsusp_page_is_forbidden(page) && 1352loop:
1346 swsusp_page_is_free(page)) { 1353 fr_pfn = memory_bm_next_pfn(free_pages_map);
1347 swsusp_unset_page_forbidden(page); 1354 fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1348 swsusp_unset_page_free(page); 1355
1349 __free_page(page); 1356 /*
1350 } 1357 * Find the next bit set in both bitmaps. This is guaranteed to
1351 } 1358 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1359 */
1360 do {
1361 if (fb_pfn < fr_pfn)
1362 fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1363 if (fr_pfn < fb_pfn)
1364 fr_pfn = memory_bm_next_pfn(free_pages_map);
1365 } while (fb_pfn != fr_pfn);
1366
1367 if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1368 struct page *page = pfn_to_page(fr_pfn);
1369
1370 memory_bm_clear_current(forbidden_pages_map);
1371 memory_bm_clear_current(free_pages_map);
1372 __free_page(page);
1373 goto loop;
1352 } 1374 }
1375
1376out:
1353 nr_copy_pages = 0; 1377 nr_copy_pages = 0;
1354 nr_meta_pages = 0; 1378 nr_meta_pages = 0;
1355 restore_pblist = NULL; 1379 restore_pblist = NULL;
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 18c62195660f..4ca9a33ff620 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -146,17 +146,29 @@ static int platform_suspend_prepare(suspend_state_t state)
146 146
147static int platform_suspend_prepare_late(suspend_state_t state) 147static int platform_suspend_prepare_late(suspend_state_t state)
148{ 148{
149 return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ?
150 freeze_ops->prepare() : 0;
151}
152
153static int platform_suspend_prepare_noirq(suspend_state_t state)
154{
149 return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ? 155 return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
150 suspend_ops->prepare_late() : 0; 156 suspend_ops->prepare_late() : 0;
151} 157}
152 158
153static void platform_suspend_wake(suspend_state_t state) 159static void platform_resume_noirq(suspend_state_t state)
154{ 160{
155 if (state != PM_SUSPEND_FREEZE && suspend_ops->wake) 161 if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
156 suspend_ops->wake(); 162 suspend_ops->wake();
157} 163}
158 164
159static void platform_suspend_finish(suspend_state_t state) 165static void platform_resume_early(suspend_state_t state)
166{
167 if (state == PM_SUSPEND_FREEZE && freeze_ops->restore)
168 freeze_ops->restore();
169}
170
171static void platform_resume_finish(suspend_state_t state)
160{ 172{
161 if (state != PM_SUSPEND_FREEZE && suspend_ops->finish) 173 if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
162 suspend_ops->finish(); 174 suspend_ops->finish();
@@ -172,7 +184,7 @@ static int platform_suspend_begin(suspend_state_t state)
172 return 0; 184 return 0;
173} 185}
174 186
175static void platform_suspend_end(suspend_state_t state) 187static void platform_resume_end(suspend_state_t state)
176{ 188{
177 if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end) 189 if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
178 freeze_ops->end(); 190 freeze_ops->end();
@@ -180,7 +192,7 @@ static void platform_suspend_end(suspend_state_t state)
180 suspend_ops->end(); 192 suspend_ops->end();
181} 193}
182 194
183static void platform_suspend_recover(suspend_state_t state) 195static void platform_recover(suspend_state_t state)
184{ 196{
185 if (state != PM_SUSPEND_FREEZE && suspend_ops->recover) 197 if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
186 suspend_ops->recover(); 198 suspend_ops->recover();
@@ -265,13 +277,22 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
265 if (error) 277 if (error)
266 goto Platform_finish; 278 goto Platform_finish;
267 279
268 error = dpm_suspend_end(PMSG_SUSPEND); 280 error = dpm_suspend_late(PMSG_SUSPEND);
269 if (error) { 281 if (error) {
270 printk(KERN_ERR "PM: Some devices failed to power down\n"); 282 printk(KERN_ERR "PM: late suspend of devices failed\n");
271 goto Platform_finish; 283 goto Platform_finish;
272 } 284 }
273 error = platform_suspend_prepare_late(state); 285 error = platform_suspend_prepare_late(state);
274 if (error) 286 if (error)
287 goto Devices_early_resume;
288
289 error = dpm_suspend_noirq(PMSG_SUSPEND);
290 if (error) {
291 printk(KERN_ERR "PM: noirq suspend of devices failed\n");
292 goto Platform_early_resume;
293 }
294 error = platform_suspend_prepare_noirq(state);
295 if (error)
275 goto Platform_wake; 296 goto Platform_wake;
276 297
277 if (suspend_test(TEST_PLATFORM)) 298 if (suspend_test(TEST_PLATFORM))
@@ -318,11 +339,17 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
318 enable_nonboot_cpus(); 339 enable_nonboot_cpus();
319 340
320 Platform_wake: 341 Platform_wake:
321 platform_suspend_wake(state); 342 platform_resume_noirq(state);
322 dpm_resume_start(PMSG_RESUME); 343 dpm_resume_noirq(PMSG_RESUME);
344
345 Platform_early_resume:
346 platform_resume_early(state);
347
348 Devices_early_resume:
349 dpm_resume_early(PMSG_RESUME);
323 350
324 Platform_finish: 351 Platform_finish:
325 platform_suspend_finish(state); 352 platform_resume_finish(state);
326 return error; 353 return error;
327} 354}
328 355
@@ -361,14 +388,16 @@ int suspend_devices_and_enter(suspend_state_t state)
361 suspend_test_start(); 388 suspend_test_start();
362 dpm_resume_end(PMSG_RESUME); 389 dpm_resume_end(PMSG_RESUME);
363 suspend_test_finish("resume devices"); 390 suspend_test_finish("resume devices");
391 trace_suspend_resume(TPS("resume_console"), state, true);
364 resume_console(); 392 resume_console();
393 trace_suspend_resume(TPS("resume_console"), state, false);
365 394
366 Close: 395 Close:
367 platform_suspend_end(state); 396 platform_resume_end(state);
368 return error; 397 return error;
369 398
370 Recover_platform: 399 Recover_platform:
371 platform_suspend_recover(state); 400 platform_recover(state);
372 goto Resume_devices; 401 goto Resume_devices;
373} 402}
374 403
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index bd91bc177c93..084452e34a12 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -22,6 +22,8 @@
22#define TEST_SUSPEND_SECONDS 10 22#define TEST_SUSPEND_SECONDS 10
23 23
24static unsigned long suspend_test_start_time; 24static unsigned long suspend_test_start_time;
25static u32 test_repeat_count_max = 1;
26static u32 test_repeat_count_current;
25 27
26void suspend_test_start(void) 28void suspend_test_start(void)
27{ 29{
@@ -74,6 +76,7 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
74 int status; 76 int status;
75 77
76 /* this may fail if the RTC hasn't been initialized */ 78 /* this may fail if the RTC hasn't been initialized */
79repeat:
77 status = rtc_read_time(rtc, &alm.time); 80 status = rtc_read_time(rtc, &alm.time);
78 if (status < 0) { 81 if (status < 0) {
79 printk(err_readtime, dev_name(&rtc->dev), status); 82 printk(err_readtime, dev_name(&rtc->dev), status);
@@ -100,10 +103,21 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
100 if (state == PM_SUSPEND_STANDBY) { 103 if (state == PM_SUSPEND_STANDBY) {
101 printk(info_test, pm_states[state]); 104 printk(info_test, pm_states[state]);
102 status = pm_suspend(state); 105 status = pm_suspend(state);
106 if (status < 0)
107 state = PM_SUSPEND_FREEZE;
103 } 108 }
109 if (state == PM_SUSPEND_FREEZE) {
110 printk(info_test, pm_states[state]);
111 status = pm_suspend(state);
112 }
113
104 if (status < 0) 114 if (status < 0)
105 printk(err_suspend, status); 115 printk(err_suspend, status);
106 116
117 test_repeat_count_current++;
118 if (test_repeat_count_current < test_repeat_count_max)
119 goto repeat;
120
107 /* Some platforms can't detect that the alarm triggered the 121 /* Some platforms can't detect that the alarm triggered the
108 * wakeup, or (accordingly) disable it after it afterwards. 122 * wakeup, or (accordingly) disable it after it afterwards.
109 * It's supposed to give oneshot behavior; cope. 123 * It's supposed to give oneshot behavior; cope.
@@ -137,16 +151,28 @@ static char warn_bad_state[] __initdata =
137static int __init setup_test_suspend(char *value) 151static int __init setup_test_suspend(char *value)
138{ 152{
139 int i; 153 int i;
154 char *repeat;
155 char *suspend_type;
140 156
141 /* "=mem" ==> "mem" */ 157 /* example : "=mem[,N]" ==> "mem[,N]" */
142 value++; 158 value++;
159 suspend_type = strsep(&value, ",");
160 if (!suspend_type)
161 return 0;
162
163 repeat = strsep(&value, ",");
164 if (repeat) {
165 if (kstrtou32(repeat, 0, &test_repeat_count_max))
166 return 0;
167 }
168
143 for (i = 0; pm_labels[i]; i++) 169 for (i = 0; pm_labels[i]; i++)
144 if (!strcmp(pm_labels[i], value)) { 170 if (!strcmp(pm_labels[i], suspend_type)) {
145 test_state_label = pm_labels[i]; 171 test_state_label = pm_labels[i];
146 return 0; 172 return 0;
147 } 173 }
148 174
149 printk(warn_bad_state, value); 175 printk(warn_bad_state, suspend_type);
150 return 0; 176 return 0;
151} 177}
152__setup("test_suspend", setup_test_suspend); 178__setup("test_suspend", setup_test_suspend);