aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu18
-rw-r--r--Documentation/admin-guide/pm/cpufreq.rst18
-rw-r--r--Documentation/admin-guide/pm/cpuidle.rst8
-rw-r--r--Documentation/admin-guide/pm/index.rst2
-rw-r--r--Documentation/admin-guide/pm/intel_epb.rst41
-rw-r--r--Documentation/admin-guide/pm/intel_pstate.rst32
-rw-r--r--Documentation/admin-guide/pm/sleep-states.rst8
-rw-r--r--Documentation/admin-guide/pm/strategies.rst8
-rw-r--r--Documentation/admin-guide/pm/system-wide.rst2
-rw-r--r--Documentation/admin-guide/pm/working-state.rst3
-rw-r--r--Documentation/driver-api/pm/cpuidle.rst7
-rw-r--r--Documentation/driver-api/pm/devices.rst12
-rw-r--r--Documentation/driver-api/pm/index.rst2
-rw-r--r--Documentation/driver-api/pm/notifiers.rst8
-rw-r--r--Documentation/driver-api/pm/types.rst2
-rw-r--r--MAINTAINERS3
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/common.c17
-rw-r--r--arch/x86/kernel/cpu/cpu.h1
-rw-r--r--arch/x86/kernel/cpu/intel.c34
-rw-r--r--arch/x86/kernel/cpu/intel_epb.c216
-rw-r--r--arch/x86/kernel/tsc.c29
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/base/power/domain.c118
-rw-r--r--drivers/base/power/domain_governor.c67
-rw-r--r--drivers/base/power/main.c70
-rw-r--r--drivers/base/power/wakeup.c6
-rw-r--r--drivers/cpufreq/Kconfig4
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c19
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c2
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c22
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c1
-rw-r--r--drivers/cpufreq/cpufreq.c100
-rw-r--r--drivers/cpufreq/cpufreq_governor.c2
-rw-r--r--drivers/cpufreq/cpufreq_stats.c15
-rw-r--r--drivers/cpufreq/freq_table.c3
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c65
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c19
-rw-r--r--drivers/cpufreq/maple-cpufreq.c6
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c2
-rw-r--r--drivers/cpufreq/powernow-k8.c2
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c1
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c2
-rw-r--r--drivers/cpufreq/speedstep-centrino.c2
-rw-r--r--drivers/cpuidle/cpuidle-exynos.c2
-rw-r--r--drivers/cpuidle/cpuidle.c19
-rw-r--r--drivers/devfreq/devfreq-event.c2
-rw-r--r--drivers/devfreq/devfreq.c90
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c2
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c25
-rw-r--r--drivers/devfreq/exynos-bus.c8
-rw-r--r--drivers/devfreq/rk3399_dmc.c73
-rw-r--r--drivers/devfreq/tegra-devfreq.c7
-rw-r--r--drivers/firmware/Kconfig15
-rw-r--r--drivers/firmware/Makefile3
-rw-r--r--drivers/firmware/psci/Kconfig13
-rw-r--r--drivers/firmware/psci/Makefile4
-rw-r--r--drivers/firmware/psci/psci.c (renamed from drivers/firmware/psci.c)110
-rw-r--r--drivers/firmware/psci/psci_checker.c (renamed from drivers/firmware/psci_checker.c)0
-rw-r--r--drivers/opp/core.c54
-rw-r--r--include/linux/cpufreq.h14
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpuidle.h1
-rw-r--r--include/linux/pm_domain.h22
-rw-r--r--include/linux/pm_opp.h8
-rw-r--r--include/linux/suspend.h3
-rw-r--r--include/linux/tick.h7
-rw-r--r--include/soc/rockchip/rk3399_grf.h21
-rw-r--r--include/soc/rockchip/rockchip_sip.h1
-rw-r--r--include/trace/events/devfreq.h40
-rw-r--r--include/uapi/linux/psci.h7
-rw-r--r--kernel/power/hibernate.c5
-rw-r--r--kernel/power/main.c14
-rw-r--r--kernel/power/suspend.c13
-rw-r--r--kernel/power/user.c5
-rw-r--r--kernel/sched/cpufreq_schedutil.c21
-rw-r--r--kernel/time/tick-sched.c12
79 files changed, 1240 insertions, 390 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 5eea46fefcb2..4fb76c0e8d30 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -520,3 +520,21 @@ Description: Control Symetric Multi Threading (SMT)
520 520
521 If control status is "forceoff" or "notsupported" writes 521 If control status is "forceoff" or "notsupported" writes
522 are rejected. 522 are rejected.
523
524What: /sys/devices/system/cpu/cpu#/power/energy_perf_bias
525Date: March 2019
526Contact: linux-pm@vger.kernel.org
527Description: Intel Energy and Performance Bias Hint (EPB)
528
529 EPB for the given CPU in a sliding scale 0 - 15, where a value
530 of 0 corresponds to a hint preference for highest performance
531 and a value of 15 corresponds to the maximum energy savings.
532
533 In order to change the EPB value for the CPU, write either
534 a number in the 0 - 15 sliding scale above, or one of the
535 strings: "performance", "balance-performance", "normal",
536 "balance-power", "power" (that represent values reflected by
537 their meaning), to this attribute.
538
539 This attribute is present for all online CPUs supporting the
540 Intel EPB feature.
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 7eca9026a9ed..0c74a7784964 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -1,3 +1,6 @@
1.. SPDX-License-Identifier: GPL-2.0
2.. include:: <isonum.txt>
3
1.. |struct cpufreq_policy| replace:: :c:type:`struct cpufreq_policy <cpufreq_policy>` 4.. |struct cpufreq_policy| replace:: :c:type:`struct cpufreq_policy <cpufreq_policy>`
2.. |intel_pstate| replace:: :doc:`intel_pstate <intel_pstate>` 5.. |intel_pstate| replace:: :doc:`intel_pstate <intel_pstate>`
3 6
@@ -5,9 +8,10 @@
5CPU Performance Scaling 8CPU Performance Scaling
6======================= 9=======================
7 10
8:: 11:Copyright: |copy| 2017 Intel Corporation
12
13:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
9 14
10 Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
11 15
12The Concept of CPU Performance Scaling 16The Concept of CPU Performance Scaling
13====================================== 17======================================
@@ -396,8 +400,8 @@ RT or deadline scheduling classes, the governor will increase the frequency to
396the allowed maximum (that is, the ``scaling_max_freq`` policy limit). In turn, 400the allowed maximum (that is, the ``scaling_max_freq`` policy limit). In turn,
397if it is invoked by the CFS scheduling class, the governor will use the 401if it is invoked by the CFS scheduling class, the governor will use the
398Per-Entity Load Tracking (PELT) metric for the root control group of the 402Per-Entity Load Tracking (PELT) metric for the root control group of the
399given CPU as the CPU utilization estimate (see the `Per-entity load tracking`_ 403given CPU as the CPU utilization estimate (see the *Per-entity load tracking*
400LWN.net article for a description of the PELT mechanism). Then, the new 404LWN.net article [1]_ for a description of the PELT mechanism). Then, the new
401CPU frequency to apply is computed in accordance with the formula 405CPU frequency to apply is computed in accordance with the formula
402 406
403 f = 1.25 * ``f_0`` * ``util`` / ``max`` 407 f = 1.25 * ``f_0`` * ``util`` / ``max``
@@ -698,4 +702,8 @@ hardware feature (e.g. all Intel ones), even if the
698:c:macro:`CONFIG_X86_ACPI_CPUFREQ_CPB` configuration option is set. 702:c:macro:`CONFIG_X86_ACPI_CPUFREQ_CPB` configuration option is set.
699 703
700 704
701.. _Per-entity load tracking: https://lwn.net/Articles/531853/ 705References
706==========
707
708.. [1] Jonathan Corbet, *Per-entity load tracking*,
709 https://lwn.net/Articles/531853/
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index 9c58b35a81cb..e70b365dbc60 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -1,3 +1,6 @@
1.. SPDX-License-Identifier: GPL-2.0
2.. include:: <isonum.txt>
3
1.. |struct cpuidle_state| replace:: :c:type:`struct cpuidle_state <cpuidle_state>` 4.. |struct cpuidle_state| replace:: :c:type:`struct cpuidle_state <cpuidle_state>`
2.. |cpufreq| replace:: :doc:`CPU Performance Scaling <cpufreq>` 5.. |cpufreq| replace:: :doc:`CPU Performance Scaling <cpufreq>`
3 6
@@ -5,9 +8,10 @@
5CPU Idle Time Management 8CPU Idle Time Management
6======================== 9========================
7 10
8:: 11:Copyright: |copy| 2018 Intel Corporation
12
13:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
9 14
10 Copyright (c) 2018 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
11 15
12Concepts 16Concepts
13======== 17========
diff --git a/Documentation/admin-guide/pm/index.rst b/Documentation/admin-guide/pm/index.rst
index 49237ac73442..39f8f9f81e7a 100644
--- a/Documentation/admin-guide/pm/index.rst
+++ b/Documentation/admin-guide/pm/index.rst
@@ -1,3 +1,5 @@
1.. SPDX-License-Identifier: GPL-2.0
2
1================ 3================
2Power Management 4Power Management
3================ 5================
diff --git a/Documentation/admin-guide/pm/intel_epb.rst b/Documentation/admin-guide/pm/intel_epb.rst
new file mode 100644
index 000000000000..005121167af7
--- /dev/null
+++ b/Documentation/admin-guide/pm/intel_epb.rst
@@ -0,0 +1,41 @@
1.. SPDX-License-Identifier: GPL-2.0
2.. include:: <isonum.txt>
3
4======================================
5Intel Performance and Energy Bias Hint
6======================================
7
8:Copyright: |copy| 2019 Intel Corporation
9
10:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
11
12
13.. kernel-doc:: arch/x86/kernel/cpu/intel_epb.c
14 :doc: overview
15
16Intel Performance and Energy Bias Attribute in ``sysfs``
17========================================================
18
19The Intel Performance and Energy Bias Hint (EPB) value for a given (logical) CPU
20can be checked or updated through a ``sysfs`` attribute (file) under
21:file:`/sys/devices/system/cpu/cpu<N>/power/`, where the CPU number ``<N>``
22is allocated at the system initialization time:
23
24``energy_perf_bias``
25 Shows the current EPB value for the CPU in a sliding scale 0 - 15, where
26 a value of 0 corresponds to a hint preference for highest performance
27 and a value of 15 corresponds to the maximum energy savings.
28
29 In order to update the EPB value for the CPU, this attribute can be
30 written to, either with a number in the 0 - 15 sliding scale above, or
31 with one of the strings: "performance", "balance-performance", "normal",
32 "balance-power", "power" that represent values reflected by their
33 meaning.
34
35 This attribute is present for all online CPUs supporting the EPB
36 feature.
37
38Note that while the EPB interface to the processor is defined at the logical CPU
39level, the physical register backing it may be shared by multiple CPUs (for
40example, SMT siblings or cores in one package). For this reason, updating the
41EPB value for one CPU may cause the EPB values for other CPUs to change.
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index ec0f7c111f65..67e414e34f37 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -1,10 +1,13 @@
1.. SPDX-License-Identifier: GPL-2.0
2.. include:: <isonum.txt>
3
1=============================================== 4===============================================
2``intel_pstate`` CPU Performance Scaling Driver 5``intel_pstate`` CPU Performance Scaling Driver
3=============================================== 6===============================================
4 7
5:: 8:Copyright: |copy| 2017 Intel Corporation
6 9
7 Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com> 10:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
8 11
9 12
10General Information 13General Information
@@ -20,11 +23,10 @@ you have not done that yet.]
20 23
21For the processors supported by ``intel_pstate``, the P-state concept is broader 24For the processors supported by ``intel_pstate``, the P-state concept is broader
22than just an operating frequency or an operating performance point (see the 25than just an operating frequency or an operating performance point (see the
23`LinuxCon Europe 2015 presentation by Kristen Accardi <LCEU2015_>`_ for more 26LinuxCon Europe 2015 presentation by Kristen Accardi [1]_ for more
24information about that). For this reason, the representation of P-states used 27information about that). For this reason, the representation of P-states used
25by ``intel_pstate`` internally follows the hardware specification (for details 28by ``intel_pstate`` internally follows the hardware specification (for details
26refer to `Intel® 64 and IA-32 Architectures Software Developer’s Manual 29refer to Intel Software Developer’s Manual [2]_). However, the ``CPUFreq`` core
27Volume 3: System Programming Guide <SDM_>`_). However, the ``CPUFreq`` core
28uses frequencies for identifying operating performance points of CPUs and 30uses frequencies for identifying operating performance points of CPUs and
29frequencies are involved in the user space interface exposed by it, so 31frequencies are involved in the user space interface exposed by it, so
30``intel_pstate`` maps its internal representation of P-states to frequencies too 32``intel_pstate`` maps its internal representation of P-states to frequencies too
@@ -561,9 +563,9 @@ or to pin every task potentially sensitive to them to a specific CPU.]
561 563
562On the majority of systems supported by ``intel_pstate``, the ACPI tables 564On the majority of systems supported by ``intel_pstate``, the ACPI tables
563provided by the platform firmware contain ``_PSS`` objects returning information 565provided by the platform firmware contain ``_PSS`` objects returning information
564that can be used for CPU performance scaling (refer to the `ACPI specification`_ 566that can be used for CPU performance scaling (refer to the ACPI specification
565for details on the ``_PSS`` objects and the format of the information returned 567[3]_ for details on the ``_PSS`` objects and the format of the information
566by them). 568returned by them).
567 569
568The information returned by the ACPI ``_PSS`` objects is used by the 570The information returned by the ACPI ``_PSS`` objects is used by the
569``acpi-cpufreq`` scaling driver. On systems supported by ``intel_pstate`` 571``acpi-cpufreq`` scaling driver. On systems supported by ``intel_pstate``
@@ -728,6 +730,14 @@ P-state is called, the ``ftrace`` filter can be set to to
728 <idle>-0 [000] ..s. 2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func 730 <idle>-0 [000] ..s. 2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func
729 731
730 732
731.. _LCEU2015: http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf 733References
732.. _SDM: http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html 734==========
733.. _ACPI specification: http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf 735
736.. [1] Kristen Accardi, *Balancing Power and Performance in the Linux Kernel*,
737 http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
738
739.. [2] *Intel® 64 and IA-32 Architectures Software Developer’s Manual Volume 3: System Programming Guide*,
740 http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html
741
742.. [3] *Advanced Configuration and Power Interface Specification*,
743 https://uefi.org/sites/default/files/resources/ACPI_6_3_final_Jan30.pdf
diff --git a/Documentation/admin-guide/pm/sleep-states.rst b/Documentation/admin-guide/pm/sleep-states.rst
index dbf5acd49f35..cd3a28cb81f4 100644
--- a/Documentation/admin-guide/pm/sleep-states.rst
+++ b/Documentation/admin-guide/pm/sleep-states.rst
@@ -1,10 +1,14 @@
1.. SPDX-License-Identifier: GPL-2.0
2.. include:: <isonum.txt>
3
1=================== 4===================
2System Sleep States 5System Sleep States
3=================== 6===================
4 7
5:: 8:Copyright: |copy| 2017 Intel Corporation
9
10:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
6 11
7 Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
8 12
9Sleep states are global low-power states of the entire system in which user 13Sleep states are global low-power states of the entire system in which user
10space code cannot be executed and the overall system activity is significantly 14space code cannot be executed and the overall system activity is significantly
diff --git a/Documentation/admin-guide/pm/strategies.rst b/Documentation/admin-guide/pm/strategies.rst
index afe4d3f831fe..dd0362e32fa5 100644
--- a/Documentation/admin-guide/pm/strategies.rst
+++ b/Documentation/admin-guide/pm/strategies.rst
@@ -1,10 +1,14 @@
1.. SPDX-License-Identifier: GPL-2.0
2.. include:: <isonum.txt>
3
1=========================== 4===========================
2Power Management Strategies 5Power Management Strategies
3=========================== 6===========================
4 7
5:: 8:Copyright: |copy| 2017 Intel Corporation
9
10:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
6 11
7 Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
8 12
9The Linux kernel supports two major high-level power management strategies. 13The Linux kernel supports two major high-level power management strategies.
10 14
diff --git a/Documentation/admin-guide/pm/system-wide.rst b/Documentation/admin-guide/pm/system-wide.rst
index 0c81e4c5de39..2b1f987b34f0 100644
--- a/Documentation/admin-guide/pm/system-wide.rst
+++ b/Documentation/admin-guide/pm/system-wide.rst
@@ -1,3 +1,5 @@
1.. SPDX-License-Identifier: GPL-2.0
2
1============================ 3============================
2System-Wide Power Management 4System-Wide Power Management
3============================ 5============================
diff --git a/Documentation/admin-guide/pm/working-state.rst b/Documentation/admin-guide/pm/working-state.rst
index b6cef9b5e961..fc298eb1234b 100644
--- a/Documentation/admin-guide/pm/working-state.rst
+++ b/Documentation/admin-guide/pm/working-state.rst
@@ -1,3 +1,5 @@
1.. SPDX-License-Identifier: GPL-2.0
2
1============================== 3==============================
2Working-State Power Management 4Working-State Power Management
3============================== 5==============================
@@ -8,3 +10,4 @@ Working-State Power Management
8 cpuidle 10 cpuidle
9 cpufreq 11 cpufreq
10 intel_pstate 12 intel_pstate
13 intel_epb
diff --git a/Documentation/driver-api/pm/cpuidle.rst b/Documentation/driver-api/pm/cpuidle.rst
index 5842ab621a58..006cf6db40c6 100644
--- a/Documentation/driver-api/pm/cpuidle.rst
+++ b/Documentation/driver-api/pm/cpuidle.rst
@@ -1,3 +1,6 @@
1.. SPDX-License-Identifier: GPL-2.0
2.. include:: <isonum.txt>
3
1.. |struct cpuidle_governor| replace:: :c:type:`struct cpuidle_governor <cpuidle_governor>` 4.. |struct cpuidle_governor| replace:: :c:type:`struct cpuidle_governor <cpuidle_governor>`
2.. |struct cpuidle_device| replace:: :c:type:`struct cpuidle_device <cpuidle_device>` 5.. |struct cpuidle_device| replace:: :c:type:`struct cpuidle_device <cpuidle_device>`
3.. |struct cpuidle_driver| replace:: :c:type:`struct cpuidle_driver <cpuidle_driver>` 6.. |struct cpuidle_driver| replace:: :c:type:`struct cpuidle_driver <cpuidle_driver>`
@@ -7,9 +10,9 @@
7CPU Idle Time Management 10CPU Idle Time Management
8======================== 11========================
9 12
10:: 13:Copyright: |copy| 2019 Intel Corporation
11 14
12 Copyright (c) 2019 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com> 15:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
13 16
14 17
15CPU Idle Time Management Subsystem 18CPU Idle Time Management Subsystem
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index 090c151aa86b..30835683616a 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -1,3 +1,6 @@
1.. SPDX-License-Identifier: GPL-2.0
2.. include:: <isonum.txt>
3
1.. |struct dev_pm_ops| replace:: :c:type:`struct dev_pm_ops <dev_pm_ops>` 4.. |struct dev_pm_ops| replace:: :c:type:`struct dev_pm_ops <dev_pm_ops>`
2.. |struct dev_pm_domain| replace:: :c:type:`struct dev_pm_domain <dev_pm_domain>` 5.. |struct dev_pm_domain| replace:: :c:type:`struct dev_pm_domain <dev_pm_domain>`
3.. |struct bus_type| replace:: :c:type:`struct bus_type <bus_type>` 6.. |struct bus_type| replace:: :c:type:`struct bus_type <bus_type>`
@@ -12,11 +15,12 @@
12Device Power Management Basics 15Device Power Management Basics
13============================== 16==============================
14 17
15:: 18:Copyright: |copy| 2010-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
19:Copyright: |copy| 2010 Alan Stern <stern@rowland.harvard.edu>
20:Copyright: |copy| 2016 Intel Corporation
21
22:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
16 23
17 Copyright (c) 2010-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
18 Copyright (c) 2010 Alan Stern <stern@rowland.harvard.edu>
19 Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
20 24
21Most of the code in Linux is device drivers, so most of the Linux power 25Most of the code in Linux is device drivers, so most of the Linux power
22management (PM) code is also driver-specific. Most drivers will do very 26management (PM) code is also driver-specific. Most drivers will do very
diff --git a/Documentation/driver-api/pm/index.rst b/Documentation/driver-api/pm/index.rst
index 56975c6bc789..c2a9ef8d115c 100644
--- a/Documentation/driver-api/pm/index.rst
+++ b/Documentation/driver-api/pm/index.rst
@@ -1,3 +1,5 @@
1.. SPDX-License-Identifier: GPL-2.0
2
1=============================== 3===============================
2CPU and Device Power Management 4CPU and Device Power Management
3=============================== 5===============================
diff --git a/Documentation/driver-api/pm/notifiers.rst b/Documentation/driver-api/pm/notifiers.rst
index 62f860026992..186435c43b77 100644
--- a/Documentation/driver-api/pm/notifiers.rst
+++ b/Documentation/driver-api/pm/notifiers.rst
@@ -1,10 +1,14 @@
1.. SPDX-License-Identifier: GPL-2.0
2.. include:: <isonum.txt>
3
1============================= 4=============================
2Suspend/Hibernation Notifiers 5Suspend/Hibernation Notifiers
3============================= 6=============================
4 7
5:: 8:Copyright: |copy| 2016 Intel Corporation
9
10:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
6 11
7 Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
8 12
9There are some operations that subsystems or drivers may want to carry out 13There are some operations that subsystems or drivers may want to carry out
10before hibernation/suspend or after restore/resume, but they require the system 14before hibernation/suspend or after restore/resume, but they require the system
diff --git a/Documentation/driver-api/pm/types.rst b/Documentation/driver-api/pm/types.rst
index 3ebdecc54104..73a231caf764 100644
--- a/Documentation/driver-api/pm/types.rst
+++ b/Documentation/driver-api/pm/types.rst
@@ -1,3 +1,5 @@
1.. SPDX-License-Identifier: GPL-2.0
2
1================================== 3==================================
2Device Power Management Data Types 4Device Power Management Data Types
3================================== 5==================================
diff --git a/MAINTAINERS b/MAINTAINERS
index d846ccd81235..bd40a852207b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4553,6 +4553,7 @@ S: Maintained
4553F: drivers/devfreq/ 4553F: drivers/devfreq/
4554F: include/linux/devfreq.h 4554F: include/linux/devfreq.h
4555F: Documentation/devicetree/bindings/devfreq/ 4555F: Documentation/devicetree/bindings/devfreq/
4556F: include/trace/events/devfreq.h
4556 4557
4557DEVICE FREQUENCY EVENT (DEVFREQ-EVENT) 4558DEVICE FREQUENCY EVENT (DEVFREQ-EVENT)
4558M: Chanwoo Choi <cw00.choi@samsung.com> 4559M: Chanwoo Choi <cw00.choi@samsung.com>
@@ -12416,7 +12417,7 @@ M: Mark Rutland <mark.rutland@arm.com>
12416M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> 12417M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
12417L: linux-arm-kernel@lists.infradead.org 12418L: linux-arm-kernel@lists.infradead.org
12418S: Maintained 12419S: Maintained
12419F: drivers/firmware/psci*.c 12420F: drivers/firmware/psci/
12420F: include/linux/psci.h 12421F: include/linux/psci.h
12421F: include/uapi/linux/psci.h 12422F: include/uapi/linux/psci.h
12422 12423
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index cfd24f9f7614..1796d2bdcaaa 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -28,7 +28,7 @@ obj-y += cpuid-deps.o
28obj-$(CONFIG_PROC_FS) += proc.o 28obj-$(CONFIG_PROC_FS) += proc.o
29obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o 29obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
30 30
31obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o 31obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o intel_epb.o
32obj-$(CONFIG_CPU_SUP_AMD) += amd.o 32obj-$(CONFIG_CPU_SUP_AMD) += amd.o
33obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o 33obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o
34obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o 34obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 37f7d438a6ef..37640544e12f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1824,23 +1824,6 @@ void cpu_init(void)
1824} 1824}
1825#endif 1825#endif
1826 1826
1827static void bsp_resume(void)
1828{
1829 if (this_cpu->c_bsp_resume)
1830 this_cpu->c_bsp_resume(&boot_cpu_data);
1831}
1832
1833static struct syscore_ops cpu_syscore_ops = {
1834 .resume = bsp_resume,
1835};
1836
1837static int __init init_cpu_syscore(void)
1838{
1839 register_syscore_ops(&cpu_syscore_ops);
1840 return 0;
1841}
1842core_initcall(init_cpu_syscore);
1843
1844/* 1827/*
1845 * The microcode loader calls this upon late microcode load to recheck features, 1828 * The microcode loader calls this upon late microcode load to recheck features,
1846 * only when microcode has been updated. Caller holds microcode_mutex and CPU 1829 * only when microcode has been updated. Caller holds microcode_mutex and CPU
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 5eb946b9a9f3..c0e2407abdd6 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -14,7 +14,6 @@ struct cpu_dev {
14 void (*c_init)(struct cpuinfo_x86 *); 14 void (*c_init)(struct cpuinfo_x86 *);
15 void (*c_identify)(struct cpuinfo_x86 *); 15 void (*c_identify)(struct cpuinfo_x86 *);
16 void (*c_detect_tlb)(struct cpuinfo_x86 *); 16 void (*c_detect_tlb)(struct cpuinfo_x86 *);
17 void (*c_bsp_resume)(struct cpuinfo_x86 *);
18 int c_x86_vendor; 17 int c_x86_vendor;
19#ifdef CONFIG_X86_32 18#ifdef CONFIG_X86_32
20 /* Optional vendor specific routine to obtain the cache size. */ 19 /* Optional vendor specific routine to obtain the cache size. */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3142fd7a9b32..f17c1a714779 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -596,36 +596,6 @@ detect_keyid_bits:
596 c->x86_phys_bits -= keyid_bits; 596 c->x86_phys_bits -= keyid_bits;
597} 597}
598 598
599static void init_intel_energy_perf(struct cpuinfo_x86 *c)
600{
601 u64 epb;
602
603 /*
604 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
605 * (x86_energy_perf_policy(8) is available to change it at run-time.)
606 */
607 if (!cpu_has(c, X86_FEATURE_EPB))
608 return;
609
610 rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
611 if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
612 return;
613
614 pr_info_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
615 pr_info_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
616 epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
617 wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
618}
619
620static void intel_bsp_resume(struct cpuinfo_x86 *c)
621{
622 /*
623 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
624 * so reinitialize it properly like during bootup:
625 */
626 init_intel_energy_perf(c);
627}
628
629static void init_cpuid_fault(struct cpuinfo_x86 *c) 599static void init_cpuid_fault(struct cpuinfo_x86 *c)
630{ 600{
631 u64 msr; 601 u64 msr;
@@ -763,8 +733,6 @@ static void init_intel(struct cpuinfo_x86 *c)
763 if (cpu_has(c, X86_FEATURE_TME)) 733 if (cpu_has(c, X86_FEATURE_TME))
764 detect_tme(c); 734 detect_tme(c);
765 735
766 init_intel_energy_perf(c);
767
768 init_intel_misc_features(c); 736 init_intel_misc_features(c);
769} 737}
770 738
@@ -1023,9 +991,7 @@ static const struct cpu_dev intel_cpu_dev = {
1023 .c_detect_tlb = intel_detect_tlb, 991 .c_detect_tlb = intel_detect_tlb,
1024 .c_early_init = early_init_intel, 992 .c_early_init = early_init_intel,
1025 .c_init = init_intel, 993 .c_init = init_intel,
1026 .c_bsp_resume = intel_bsp_resume,
1027 .c_x86_vendor = X86_VENDOR_INTEL, 994 .c_x86_vendor = X86_VENDOR_INTEL,
1028}; 995};
1029 996
1030cpu_dev_register(intel_cpu_dev); 997cpu_dev_register(intel_cpu_dev);
1031
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
new file mode 100644
index 000000000000..f4dd73396f28
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -0,0 +1,216 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Intel Performance and Energy Bias Hint support.
4 *
5 * Copyright (C) 2019 Intel Corporation
6 *
7 * Author:
8 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
9 */
10
11#include <linux/cpuhotplug.h>
12#include <linux/cpu.h>
13#include <linux/device.h>
14#include <linux/kernel.h>
15#include <linux/string.h>
16#include <linux/syscore_ops.h>
17#include <linux/pm.h>
18
19#include <asm/cpufeature.h>
20#include <asm/msr.h>
21
22/**
23 * DOC: overview
24 *
25 * The Performance and Energy Bias Hint (EPB) allows software to specify its
26 * preference with respect to the power-performance tradeoffs present in the
27 * processor. Generally, the EPB is expected to be set by user space (directly
28 * via sysfs or with the help of the x86_energy_perf_policy tool), but there are
29 * two reasons for the kernel to update it.
30 *
31 * First, there are systems where the platform firmware resets the EPB during
 32 * system-wide transitions from sleep states back into the working state,
 33 * effectively causing the previous EPB updates by user space to be lost.
34 * Thus the kernel needs to save the current EPB values for all CPUs during
35 * system-wide transitions to sleep states and restore them on the way back to
36 * the working state. That can be achieved by saving EPB for secondary CPUs
37 * when they are taken offline during transitions into system sleep states and
38 * for the boot CPU in a syscore suspend operation, so that it can be restored
39 * for the boot CPU in a syscore resume operation and for the other CPUs when
40 * they are brought back online. However, CPUs that are already offline when
41 * a system-wide PM transition is started are not taken offline again, but their
42 * EPB values may still be reset by the platform firmware during the transition,
43 * so in fact it is necessary to save the EPB of any CPU taken offline and to
44 * restore it when the given CPU goes back online at all times.
45 *
46 * Second, on many systems the initial EPB value coming from the platform
47 * firmware is 0 ('performance') and at least on some of them that is because
 48 * the platform firmware does not initialize EPB at all, with the assumption
 49 * that the OS will do that anyway. That is sometimes problematic, as it may
50 * the system battery to drain too fast, for example, so it is better to adjust
51 * it on CPU bring-up and if the initial EPB value for a given CPU is 0, the
52 * kernel changes it to 6 ('normal').
53 */
54
55static DEFINE_PER_CPU(u8, saved_epb);
56
57#define EPB_MASK 0x0fULL
58#define EPB_SAVED 0x10ULL
59#define MAX_EPB EPB_MASK
60
61static int intel_epb_save(void)
62{
63 u64 epb;
64
65 rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
66 /*
67 * Ensure that saved_epb will always be nonzero after this write even if
68 * the EPB value read from the MSR is 0.
69 */
70 this_cpu_write(saved_epb, (epb & EPB_MASK) | EPB_SAVED);
71
72 return 0;
73}
74
75static void intel_epb_restore(void)
76{
77 u64 val = this_cpu_read(saved_epb);
78 u64 epb;
79
80 rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
81 if (val) {
82 val &= EPB_MASK;
83 } else {
84 /*
85 * Because intel_epb_save() has not run for the current CPU yet,
86 * it is going online for the first time, so if its EPB value is
87 * 0 ('performance') at this point, assume that it has not been
88 * initialized by the platform firmware and set it to 6
89 * ('normal').
90 */
91 val = epb & EPB_MASK;
92 if (val == ENERGY_PERF_BIAS_PERFORMANCE) {
93 val = ENERGY_PERF_BIAS_NORMAL;
94 pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
95 }
96 }
97 wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
98}
99
100static struct syscore_ops intel_epb_syscore_ops = {
101 .suspend = intel_epb_save,
102 .resume = intel_epb_restore,
103};
104
105static const char * const energy_perf_strings[] = {
106 "performance",
107 "balance-performance",
108 "normal",
109 "balance-power",
110 "power"
111};
112static const u8 energ_perf_values[] = {
113 ENERGY_PERF_BIAS_PERFORMANCE,
114 ENERGY_PERF_BIAS_BALANCE_PERFORMANCE,
115 ENERGY_PERF_BIAS_NORMAL,
116 ENERGY_PERF_BIAS_BALANCE_POWERSAVE,
117 ENERGY_PERF_BIAS_POWERSAVE
118};
119
120static ssize_t energy_perf_bias_show(struct device *dev,
121 struct device_attribute *attr,
122 char *buf)
123{
124 unsigned int cpu = dev->id;
125 u64 epb;
126 int ret;
127
128 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
129 if (ret < 0)
130 return ret;
131
132 return sprintf(buf, "%llu\n", epb);
133}
134
135static ssize_t energy_perf_bias_store(struct device *dev,
136 struct device_attribute *attr,
137 const char *buf, size_t count)
138{
139 unsigned int cpu = dev->id;
140 u64 epb, val;
141 int ret;
142
143 ret = __sysfs_match_string(energy_perf_strings,
144 ARRAY_SIZE(energy_perf_strings), buf);
145 if (ret >= 0)
146 val = energ_perf_values[ret];
147 else if (kstrtou64(buf, 0, &val) || val > MAX_EPB)
148 return -EINVAL;
149
150 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
151 if (ret < 0)
152 return ret;
153
154 ret = wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS,
155 (epb & ~EPB_MASK) | val);
156 if (ret < 0)
157 return ret;
158
159 return count;
160}
161
162static DEVICE_ATTR_RW(energy_perf_bias);
163
164static struct attribute *intel_epb_attrs[] = {
165 &dev_attr_energy_perf_bias.attr,
166 NULL
167};
168
169static const struct attribute_group intel_epb_attr_group = {
170 .name = power_group_name,
171 .attrs = intel_epb_attrs
172};
173
174static int intel_epb_online(unsigned int cpu)
175{
176 struct device *cpu_dev = get_cpu_device(cpu);
177
178 intel_epb_restore();
179 if (!cpuhp_tasks_frozen)
180 sysfs_merge_group(&cpu_dev->kobj, &intel_epb_attr_group);
181
182 return 0;
183}
184
185static int intel_epb_offline(unsigned int cpu)
186{
187 struct device *cpu_dev = get_cpu_device(cpu);
188
189 if (!cpuhp_tasks_frozen)
190 sysfs_unmerge_group(&cpu_dev->kobj, &intel_epb_attr_group);
191
192 intel_epb_save();
193 return 0;
194}
195
196static __init int intel_epb_init(void)
197{
198 int ret;
199
200 if (!boot_cpu_has(X86_FEATURE_EPB))
201 return -ENODEV;
202
203 ret = cpuhp_setup_state(CPUHP_AP_X86_INTEL_EPB_ONLINE,
204 "x86/intel/epb:online", intel_epb_online,
205 intel_epb_offline);
206 if (ret < 0)
207 goto err_out_online;
208
209 register_syscore_ops(&intel_epb_syscore_ops);
210 return 0;
211
212err_out_online:
213 cpuhp_remove_state(CPUHP_AP_X86_INTEL_EPB_ONLINE);
214 return ret;
215}
216subsys_initcall(intel_epb_init);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index aab0c82e0a0d..15b5e98a86f9 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -185,8 +185,7 @@ static void __init cyc2ns_init_boot_cpu(void)
185/* 185/*
186 * Secondary CPUs do not run through tsc_init(), so set up 186 * Secondary CPUs do not run through tsc_init(), so set up
187 * all the scale factors for all CPUs, assuming the same 187 * all the scale factors for all CPUs, assuming the same
188 * speed as the bootup CPU. (cpufreq notifiers will fix this 188 * speed as the bootup CPU.
189 * up if their speed diverges)
190 */ 189 */
191static void __init cyc2ns_init_secondary_cpus(void) 190static void __init cyc2ns_init_secondary_cpus(void)
192{ 191{
@@ -940,12 +939,12 @@ void tsc_restore_sched_clock_state(void)
940} 939}
941 940
942#ifdef CONFIG_CPU_FREQ 941#ifdef CONFIG_CPU_FREQ
943/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency 942/*
943 * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
944 * changes. 944 * changes.
945 * 945 *
946 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's 946 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
947 * not that important because current Opteron setups do not support 947 * as unstable and give up in those cases.
948 * scaling on SMP anyroads.
949 * 948 *
950 * Should fix up last_tsc too. Currently gettimeofday in the 949 * Should fix up last_tsc too. Currently gettimeofday in the
951 * first tick after the change will be slightly wrong. 950 * first tick after the change will be slightly wrong.
@@ -959,22 +958,22 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
959 void *data) 958 void *data)
960{ 959{
961 struct cpufreq_freqs *freq = data; 960 struct cpufreq_freqs *freq = data;
962 unsigned long *lpj;
963 961
964 lpj = &boot_cpu_data.loops_per_jiffy; 962 if (num_online_cpus() > 1) {
965#ifdef CONFIG_SMP 963 mark_tsc_unstable("cpufreq changes on SMP");
966 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 964 return 0;
967 lpj = &cpu_data(freq->cpu).loops_per_jiffy; 965 }
968#endif
969 966
970 if (!ref_freq) { 967 if (!ref_freq) {
971 ref_freq = freq->old; 968 ref_freq = freq->old;
972 loops_per_jiffy_ref = *lpj; 969 loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
973 tsc_khz_ref = tsc_khz; 970 tsc_khz_ref = tsc_khz;
974 } 971 }
972
975 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || 973 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
976 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { 974 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
977 *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); 975 boot_cpu_data.loops_per_jiffy =
976 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
978 977
979 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); 978 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
980 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 979 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a303fd0e108c..c73d3a62799a 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -181,7 +181,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
181 acpi_processor_ppc_ost(pr->handle, 0); 181 acpi_processor_ppc_ost(pr->handle, 0);
182 } 182 }
183 if (ret >= 0) 183 if (ret >= 0)
184 cpufreq_update_policy(pr->id); 184 cpufreq_update_limits(pr->id);
185} 185}
186 186
187int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) 187int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 96a6dc9d305c..3d899e8abd58 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -22,6 +22,7 @@
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/suspend.h> 23#include <linux/suspend.h>
24#include <linux/export.h> 24#include <linux/export.h>
25#include <linux/cpu.h>
25 26
26#include "power.h" 27#include "power.h"
27 28
@@ -128,6 +129,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
128#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE) 129#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
129#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON) 130#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
130#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP) 131#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
132#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
131 133
132static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev, 134static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
133 const struct generic_pm_domain *genpd) 135 const struct generic_pm_domain *genpd)
@@ -391,11 +393,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
391 if (unlikely(!genpd->set_performance_state)) 393 if (unlikely(!genpd->set_performance_state))
392 return -EINVAL; 394 return -EINVAL;
393 395
394 if (unlikely(!dev->power.subsys_data || 396 if (WARN_ON(!dev->power.subsys_data ||
395 !dev->power.subsys_data->domain_data)) { 397 !dev->power.subsys_data->domain_data))
396 WARN_ON(1);
397 return -EINVAL; 398 return -EINVAL;
398 }
399 399
400 genpd_lock(genpd); 400 genpd_lock(genpd);
401 401
@@ -1396,8 +1396,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1396 1396
1397#endif /* CONFIG_PM_SLEEP */ 1397#endif /* CONFIG_PM_SLEEP */
1398 1398
1399static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, 1399static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
1400 struct gpd_timing_data *td)
1401{ 1400{
1402 struct generic_pm_domain_data *gpd_data; 1401 struct generic_pm_domain_data *gpd_data;
1403 int ret; 1402 int ret;
@@ -1412,9 +1411,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1412 goto err_put; 1411 goto err_put;
1413 } 1412 }
1414 1413
1415 if (td)
1416 gpd_data->td = *td;
1417
1418 gpd_data->base.dev = dev; 1414 gpd_data->base.dev = dev;
1419 gpd_data->td.constraint_changed = true; 1415 gpd_data->td.constraint_changed = true;
1420 gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; 1416 gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
@@ -1454,8 +1450,57 @@ static void genpd_free_dev_data(struct device *dev,
1454 dev_pm_put_subsys_data(dev); 1450 dev_pm_put_subsys_data(dev);
1455} 1451}
1456 1452
1453static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1454 int cpu, bool set, unsigned int depth)
1455{
1456 struct gpd_link *link;
1457
1458 if (!genpd_is_cpu_domain(genpd))
1459 return;
1460
1461 list_for_each_entry(link, &genpd->slave_links, slave_node) {
1462 struct generic_pm_domain *master = link->master;
1463
1464 genpd_lock_nested(master, depth + 1);
1465 genpd_update_cpumask(master, cpu, set, depth + 1);
1466 genpd_unlock(master);
1467 }
1468
1469 if (set)
1470 cpumask_set_cpu(cpu, genpd->cpus);
1471 else
1472 cpumask_clear_cpu(cpu, genpd->cpus);
1473}
1474
1475static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1476{
1477 if (cpu >= 0)
1478 genpd_update_cpumask(genpd, cpu, true, 0);
1479}
1480
1481static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1482{
1483 if (cpu >= 0)
1484 genpd_update_cpumask(genpd, cpu, false, 0);
1485}
1486
1487static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1488{
1489 int cpu;
1490
1491 if (!genpd_is_cpu_domain(genpd))
1492 return -1;
1493
1494 for_each_possible_cpu(cpu) {
1495 if (get_cpu_device(cpu) == dev)
1496 return cpu;
1497 }
1498
1499 return -1;
1500}
1501
1457static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, 1502static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1458 struct gpd_timing_data *td) 1503 struct device *base_dev)
1459{ 1504{
1460 struct generic_pm_domain_data *gpd_data; 1505 struct generic_pm_domain_data *gpd_data;
1461 int ret; 1506 int ret;
@@ -1465,16 +1510,19 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1465 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) 1510 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1466 return -EINVAL; 1511 return -EINVAL;
1467 1512
1468 gpd_data = genpd_alloc_dev_data(dev, td); 1513 gpd_data = genpd_alloc_dev_data(dev);
1469 if (IS_ERR(gpd_data)) 1514 if (IS_ERR(gpd_data))
1470 return PTR_ERR(gpd_data); 1515 return PTR_ERR(gpd_data);
1471 1516
1517 gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1518
1472 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; 1519 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1473 if (ret) 1520 if (ret)
1474 goto out; 1521 goto out;
1475 1522
1476 genpd_lock(genpd); 1523 genpd_lock(genpd);
1477 1524
1525 genpd_set_cpumask(genpd, gpd_data->cpu);
1478 dev_pm_domain_set(dev, &genpd->domain); 1526 dev_pm_domain_set(dev, &genpd->domain);
1479 1527
1480 genpd->device_count++; 1528 genpd->device_count++;
@@ -1502,7 +1550,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1502 int ret; 1550 int ret;
1503 1551
1504 mutex_lock(&gpd_list_lock); 1552 mutex_lock(&gpd_list_lock);
1505 ret = genpd_add_device(genpd, dev, NULL); 1553 ret = genpd_add_device(genpd, dev, dev);
1506 mutex_unlock(&gpd_list_lock); 1554 mutex_unlock(&gpd_list_lock);
1507 1555
1508 return ret; 1556 return ret;
@@ -1532,6 +1580,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
1532 genpd->device_count--; 1580 genpd->device_count--;
1533 genpd->max_off_time_changed = true; 1581 genpd->max_off_time_changed = true;
1534 1582
1583 genpd_clear_cpumask(genpd, gpd_data->cpu);
1535 dev_pm_domain_set(dev, NULL); 1584 dev_pm_domain_set(dev, NULL);
1536 1585
1537 list_del_init(&pdd->list_node); 1586 list_del_init(&pdd->list_node);
@@ -1686,6 +1735,12 @@ out:
1686} 1735}
1687EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); 1736EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1688 1737
1738static void genpd_free_default_power_state(struct genpd_power_state *states,
1739 unsigned int state_count)
1740{
1741 kfree(states);
1742}
1743
1689static int genpd_set_default_power_state(struct generic_pm_domain *genpd) 1744static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1690{ 1745{
1691 struct genpd_power_state *state; 1746 struct genpd_power_state *state;
@@ -1696,7 +1751,7 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1696 1751
1697 genpd->states = state; 1752 genpd->states = state;
1698 genpd->state_count = 1; 1753 genpd->state_count = 1;
1699 genpd->free = state; 1754 genpd->free_states = genpd_free_default_power_state;
1700 1755
1701 return 0; 1756 return 0;
1702} 1757}
@@ -1762,11 +1817,18 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
1762 if (genpd_is_always_on(genpd) && !genpd_status_on(genpd)) 1817 if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
1763 return -EINVAL; 1818 return -EINVAL;
1764 1819
1820 if (genpd_is_cpu_domain(genpd) &&
1821 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1822 return -ENOMEM;
1823
1765 /* Use only one "off" state if there were no states declared */ 1824 /* Use only one "off" state if there were no states declared */
1766 if (genpd->state_count == 0) { 1825 if (genpd->state_count == 0) {
1767 ret = genpd_set_default_power_state(genpd); 1826 ret = genpd_set_default_power_state(genpd);
1768 if (ret) 1827 if (ret) {
1828 if (genpd_is_cpu_domain(genpd))
1829 free_cpumask_var(genpd->cpus);
1769 return ret; 1830 return ret;
1831 }
1770 } else if (!gov && genpd->state_count > 1) { 1832 } else if (!gov && genpd->state_count > 1) {
1771 pr_warn("%s: no governor for states\n", genpd->name); 1833 pr_warn("%s: no governor for states\n", genpd->name);
1772 } 1834 }
@@ -1812,7 +1874,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
1812 list_del(&genpd->gpd_list_node); 1874 list_del(&genpd->gpd_list_node);
1813 genpd_unlock(genpd); 1875 genpd_unlock(genpd);
1814 cancel_work_sync(&genpd->power_off_work); 1876 cancel_work_sync(&genpd->power_off_work);
1815 kfree(genpd->free); 1877 if (genpd_is_cpu_domain(genpd))
1878 free_cpumask_var(genpd->cpus);
1879 if (genpd->free_states)
1880 genpd->free_states(genpd->states, genpd->state_count);
1881
1816 pr_debug("%s: removed %s\n", __func__, genpd->name); 1882 pr_debug("%s: removed %s\n", __func__, genpd->name);
1817 1883
1818 return 0; 1884 return 0;
@@ -2190,7 +2256,7 @@ int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2190 goto out; 2256 goto out;
2191 } 2257 }
2192 2258
2193 ret = genpd_add_device(genpd, dev, NULL); 2259 ret = genpd_add_device(genpd, dev, dev);
2194 2260
2195out: 2261out:
2196 mutex_unlock(&gpd_list_lock); 2262 mutex_unlock(&gpd_list_lock);
@@ -2274,6 +2340,7 @@ EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2274 2340
2275static void genpd_release_dev(struct device *dev) 2341static void genpd_release_dev(struct device *dev)
2276{ 2342{
2343 of_node_put(dev->of_node);
2277 kfree(dev); 2344 kfree(dev);
2278} 2345}
2279 2346
@@ -2335,14 +2402,14 @@ static void genpd_dev_pm_sync(struct device *dev)
2335 genpd_queue_power_off_work(pd); 2402 genpd_queue_power_off_work(pd);
2336} 2403}
2337 2404
2338static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, 2405static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2339 unsigned int index, bool power_on) 2406 unsigned int index, bool power_on)
2340{ 2407{
2341 struct of_phandle_args pd_args; 2408 struct of_phandle_args pd_args;
2342 struct generic_pm_domain *pd; 2409 struct generic_pm_domain *pd;
2343 int ret; 2410 int ret;
2344 2411
2345 ret = of_parse_phandle_with_args(np, "power-domains", 2412 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2346 "#power-domain-cells", index, &pd_args); 2413 "#power-domain-cells", index, &pd_args);
2347 if (ret < 0) 2414 if (ret < 0)
2348 return ret; 2415 return ret;
@@ -2354,12 +2421,12 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
2354 mutex_unlock(&gpd_list_lock); 2421 mutex_unlock(&gpd_list_lock);
2355 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 2422 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2356 __func__, PTR_ERR(pd)); 2423 __func__, PTR_ERR(pd));
2357 return driver_deferred_probe_check_state(dev); 2424 return driver_deferred_probe_check_state(base_dev);
2358 } 2425 }
2359 2426
2360 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 2427 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2361 2428
2362 ret = genpd_add_device(pd, dev, NULL); 2429 ret = genpd_add_device(pd, dev, base_dev);
2363 mutex_unlock(&gpd_list_lock); 2430 mutex_unlock(&gpd_list_lock);
2364 2431
2365 if (ret < 0) { 2432 if (ret < 0) {
@@ -2410,7 +2477,7 @@ int genpd_dev_pm_attach(struct device *dev)
2410 "#power-domain-cells") != 1) 2477 "#power-domain-cells") != 1)
2411 return 0; 2478 return 0;
2412 2479
2413 return __genpd_dev_pm_attach(dev, dev->of_node, 0, true); 2480 return __genpd_dev_pm_attach(dev, dev, 0, true);
2414} 2481}
2415EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2482EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2416 2483
@@ -2440,10 +2507,10 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2440 if (!dev->of_node) 2507 if (!dev->of_node)
2441 return NULL; 2508 return NULL;
2442 2509
2443 /* Deal only with devices using multiple PM domains. */ 2510 /* Verify that the index is within a valid range. */
2444 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", 2511 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2445 "#power-domain-cells"); 2512 "#power-domain-cells");
2446 if (num_domains < 2 || index >= num_domains) 2513 if (index >= num_domains)
2447 return NULL; 2514 return NULL;
2448 2515
2449 /* Allocate and register device on the genpd bus. */ 2516 /* Allocate and register device on the genpd bus. */
@@ -2454,15 +2521,16 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2454 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); 2521 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2455 virt_dev->bus = &genpd_bus_type; 2522 virt_dev->bus = &genpd_bus_type;
2456 virt_dev->release = genpd_release_dev; 2523 virt_dev->release = genpd_release_dev;
2524 virt_dev->of_node = of_node_get(dev->of_node);
2457 2525
2458 ret = device_register(virt_dev); 2526 ret = device_register(virt_dev);
2459 if (ret) { 2527 if (ret) {
2460 kfree(virt_dev); 2528 put_device(virt_dev);
2461 return ERR_PTR(ret); 2529 return ERR_PTR(ret);
2462 } 2530 }
2463 2531
2464 /* Try to attach the device to the PM domain at the specified index. */ 2532 /* Try to attach the device to the PM domain at the specified index. */
2465 ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false); 2533 ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2466 if (ret < 1) { 2534 if (ret < 1) {
2467 device_unregister(virt_dev); 2535 device_unregister(virt_dev);
2468 return ret ? ERR_PTR(ret) : NULL; 2536 return ret ? ERR_PTR(ret) : NULL;
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 4d07e38a8247..7912bc957244 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -10,6 +10,9 @@
10#include <linux/pm_domain.h> 10#include <linux/pm_domain.h>
11#include <linux/pm_qos.h> 11#include <linux/pm_qos.h>
12#include <linux/hrtimer.h> 12#include <linux/hrtimer.h>
13#include <linux/cpuidle.h>
14#include <linux/cpumask.h>
15#include <linux/ktime.h>
13 16
14static int dev_update_qos_constraint(struct device *dev, void *data) 17static int dev_update_qos_constraint(struct device *dev, void *data)
15{ 18{
@@ -210,8 +213,10 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
210 struct generic_pm_domain *genpd = pd_to_genpd(pd); 213 struct generic_pm_domain *genpd = pd_to_genpd(pd);
211 struct gpd_link *link; 214 struct gpd_link *link;
212 215
213 if (!genpd->max_off_time_changed) 216 if (!genpd->max_off_time_changed) {
217 genpd->state_idx = genpd->cached_power_down_state_idx;
214 return genpd->cached_power_down_ok; 218 return genpd->cached_power_down_ok;
219 }
215 220
216 /* 221 /*
217 * We have to invalidate the cached results for the masters, so 222 * We have to invalidate the cached results for the masters, so
@@ -236,6 +241,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
236 genpd->state_idx--; 241 genpd->state_idx--;
237 } 242 }
238 243
244 genpd->cached_power_down_state_idx = genpd->state_idx;
239 return genpd->cached_power_down_ok; 245 return genpd->cached_power_down_ok;
240} 246}
241 247
@@ -244,6 +250,65 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
244 return false; 250 return false;
245} 251}
246 252
253#ifdef CONFIG_CPU_IDLE
254static bool cpu_power_down_ok(struct dev_pm_domain *pd)
255{
256 struct generic_pm_domain *genpd = pd_to_genpd(pd);
257 struct cpuidle_device *dev;
258 ktime_t domain_wakeup, next_hrtimer;
259 s64 idle_duration_ns;
260 int cpu, i;
261
262 /* Validate dev PM QoS constraints. */
263 if (!default_power_down_ok(pd))
264 return false;
265
266 if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
267 return true;
268
269 /*
270 * Find the next wakeup for any of the online CPUs within the PM domain
271 * and its subdomains. Note, we only need the genpd->cpus, as it already
272 * contains a mask of all CPUs from subdomains.
273 */
274 domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
275 for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
276 dev = per_cpu(cpuidle_devices, cpu);
277 if (dev) {
278 next_hrtimer = READ_ONCE(dev->next_hrtimer);
279 if (ktime_before(next_hrtimer, domain_wakeup))
280 domain_wakeup = next_hrtimer;
281 }
282 }
283
284 /* The minimum idle duration is from now - until the next wakeup. */
285 idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
286 if (idle_duration_ns <= 0)
287 return false;
288
289 /*
290 * Find the deepest idle state that has its residency value satisfied
291 * and by also taking into account the power off latency for the state.
292 * Start at the state picked by the dev PM QoS constraint validation.
293 */
294 i = genpd->state_idx;
295 do {
296 if (idle_duration_ns >= (genpd->states[i].residency_ns +
297 genpd->states[i].power_off_latency_ns)) {
298 genpd->state_idx = i;
299 return true;
300 }
301 } while (--i >= 0);
302
303 return false;
304}
305
306struct dev_power_governor pm_domain_cpu_gov = {
307 .suspend_ok = default_suspend_ok,
308 .power_down_ok = cpu_power_down_ok,
309};
310#endif
311
247struct dev_power_governor simple_qos_governor = { 312struct dev_power_governor simple_qos_governor = {
248 .suspend_ok = default_suspend_ok, 313 .suspend_ok = default_suspend_ok,
249 .power_down_ok = default_power_down_ok, 314 .power_down_ok = default_power_down_ok,
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f80d298de3fa..43e863cc0c1b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -478,7 +478,7 @@ struct dpm_watchdog {
478 478
479/** 479/**
480 * dpm_watchdog_handler - Driver suspend / resume watchdog handler. 480 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
481 * @data: Watchdog object address. 481 * @t: The timer that PM watchdog depends on.
482 * 482 *
483 * Called when a driver has timed out suspending or resuming. 483 * Called when a driver has timed out suspending or resuming.
484 * There's not much we can do here to recover so panic() to 484 * There's not much we can do here to recover so panic() to
@@ -706,6 +706,19 @@ static bool is_async(struct device *dev)
706 && !pm_trace_is_enabled(); 706 && !pm_trace_is_enabled();
707} 707}
708 708
709static bool dpm_async_fn(struct device *dev, async_func_t func)
710{
711 reinit_completion(&dev->power.completion);
712
713 if (is_async(dev)) {
714 get_device(dev);
715 async_schedule(func, dev);
716 return true;
717 }
718
719 return false;
720}
721
709static void async_resume_noirq(void *data, async_cookie_t cookie) 722static void async_resume_noirq(void *data, async_cookie_t cookie)
710{ 723{
711 struct device *dev = (struct device *)data; 724 struct device *dev = (struct device *)data;
@@ -732,13 +745,8 @@ void dpm_noirq_resume_devices(pm_message_t state)
732 * in case the starting of async threads is 745 * in case the starting of async threads is
733 * delayed by non-async resuming devices. 746 * delayed by non-async resuming devices.
734 */ 747 */
735 list_for_each_entry(dev, &dpm_noirq_list, power.entry) { 748 list_for_each_entry(dev, &dpm_noirq_list, power.entry)
736 reinit_completion(&dev->power.completion); 749 dpm_async_fn(dev, async_resume_noirq);
737 if (is_async(dev)) {
738 get_device(dev);
739 async_schedule_dev(async_resume_noirq, dev);
740 }
741 }
742 750
743 while (!list_empty(&dpm_noirq_list)) { 751 while (!list_empty(&dpm_noirq_list)) {
744 dev = to_device(dpm_noirq_list.next); 752 dev = to_device(dpm_noirq_list.next);
@@ -889,13 +897,8 @@ void dpm_resume_early(pm_message_t state)
889 * in case the starting of async threads is 897 * in case the starting of async threads is
890 * delayed by non-async resuming devices. 898 * delayed by non-async resuming devices.
891 */ 899 */
892 list_for_each_entry(dev, &dpm_late_early_list, power.entry) { 900 list_for_each_entry(dev, &dpm_late_early_list, power.entry)
893 reinit_completion(&dev->power.completion); 901 dpm_async_fn(dev, async_resume_early);
894 if (is_async(dev)) {
895 get_device(dev);
896 async_schedule_dev(async_resume_early, dev);
897 }
898 }
899 902
900 while (!list_empty(&dpm_late_early_list)) { 903 while (!list_empty(&dpm_late_early_list)) {
901 dev = to_device(dpm_late_early_list.next); 904 dev = to_device(dpm_late_early_list.next);
@@ -1053,13 +1056,8 @@ void dpm_resume(pm_message_t state)
1053 pm_transition = state; 1056 pm_transition = state;
1054 async_error = 0; 1057 async_error = 0;
1055 1058
1056 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { 1059 list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1057 reinit_completion(&dev->power.completion); 1060 dpm_async_fn(dev, async_resume);
1058 if (is_async(dev)) {
1059 get_device(dev);
1060 async_schedule_dev(async_resume, dev);
1061 }
1062 }
1063 1061
1064 while (!list_empty(&dpm_suspended_list)) { 1062 while (!list_empty(&dpm_suspended_list)) {
1065 dev = to_device(dpm_suspended_list.next); 1063 dev = to_device(dpm_suspended_list.next);
@@ -1373,13 +1371,9 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
1373 1371
1374static int device_suspend_noirq(struct device *dev) 1372static int device_suspend_noirq(struct device *dev)
1375{ 1373{
1376 reinit_completion(&dev->power.completion); 1374 if (dpm_async_fn(dev, async_suspend_noirq))
1377
1378 if (is_async(dev)) {
1379 get_device(dev);
1380 async_schedule_dev(async_suspend_noirq, dev);
1381 return 0; 1375 return 0;
1382 } 1376
1383 return __device_suspend_noirq(dev, pm_transition, false); 1377 return __device_suspend_noirq(dev, pm_transition, false);
1384} 1378}
1385 1379
@@ -1576,13 +1570,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
1576 1570
1577static int device_suspend_late(struct device *dev) 1571static int device_suspend_late(struct device *dev)
1578{ 1572{
1579 reinit_completion(&dev->power.completion); 1573 if (dpm_async_fn(dev, async_suspend_late))
1580
1581 if (is_async(dev)) {
1582 get_device(dev);
1583 async_schedule_dev(async_suspend_late, dev);
1584 return 0; 1574 return 0;
1585 }
1586 1575
1587 return __device_suspend_late(dev, pm_transition, false); 1576 return __device_suspend_late(dev, pm_transition, false);
1588} 1577}
@@ -1747,6 +1736,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1747 if (dev->power.syscore) 1736 if (dev->power.syscore)
1748 goto Complete; 1737 goto Complete;
1749 1738
1739 /* Avoid direct_complete to let wakeup_path propagate. */
1740 if (device_may_wakeup(dev) || dev->power.wakeup_path)
1741 dev->power.direct_complete = false;
1742
1750 if (dev->power.direct_complete) { 1743 if (dev->power.direct_complete) {
1751 if (pm_runtime_status_suspended(dev)) { 1744 if (pm_runtime_status_suspended(dev)) {
1752 pm_runtime_disable(dev); 1745 pm_runtime_disable(dev);
@@ -1842,13 +1835,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
1842 1835
1843static int device_suspend(struct device *dev) 1836static int device_suspend(struct device *dev)
1844{ 1837{
1845 reinit_completion(&dev->power.completion); 1838 if (dpm_async_fn(dev, async_suspend))
1846
1847 if (is_async(dev)) {
1848 get_device(dev);
1849 async_schedule_dev(async_suspend, dev);
1850 return 0; 1839 return 0;
1851 }
1852 1840
1853 return __device_suspend(dev, pm_transition, false); 1841 return __device_suspend(dev, pm_transition, false);
1854} 1842}
@@ -2069,8 +2057,8 @@ EXPORT_SYMBOL_GPL(__suspend_report_result);
2069 2057
2070/** 2058/**
2071 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. 2059 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2072 * @dev: Device to wait for.
2073 * @subordinate: Device that needs to wait for @dev. 2060 * @subordinate: Device that needs to wait for @dev.
2061 * @dev: Device to wait for.
2074 */ 2062 */
2075int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) 2063int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2076{ 2064{
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index bb1ae175fae1..23c243a4c675 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -804,7 +804,7 @@ void pm_print_active_wakeup_sources(void)
804 srcuidx = srcu_read_lock(&wakeup_srcu); 804 srcuidx = srcu_read_lock(&wakeup_srcu);
805 list_for_each_entry_rcu(ws, &wakeup_sources, entry) { 805 list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
806 if (ws->active) { 806 if (ws->active) {
807 pr_debug("active wakeup source: %s\n", ws->name); 807 pm_pr_dbg("active wakeup source: %s\n", ws->name);
808 active = 1; 808 active = 1;
809 } else if (!active && 809 } else if (!active &&
810 (!last_activity_ws || 810 (!last_activity_ws ||
@@ -815,7 +815,7 @@ void pm_print_active_wakeup_sources(void)
815 } 815 }
816 816
817 if (!active && last_activity_ws) 817 if (!active && last_activity_ws)
818 pr_debug("last active wakeup source: %s\n", 818 pm_pr_dbg("last active wakeup source: %s\n",
819 last_activity_ws->name); 819 last_activity_ws->name);
820 srcu_read_unlock(&wakeup_srcu, srcuidx); 820 srcu_read_unlock(&wakeup_srcu, srcuidx);
821} 821}
@@ -845,7 +845,7 @@ bool pm_wakeup_pending(void)
845 raw_spin_unlock_irqrestore(&events_lock, flags); 845 raw_spin_unlock_irqrestore(&events_lock, flags);
846 846
847 if (ret) { 847 if (ret) {
848 pr_debug("Wakeup pending, aborting suspend\n"); 848 pm_pr_dbg("Wakeup pending, aborting suspend\n");
849 pm_print_active_wakeup_sources(); 849 pm_print_active_wakeup_sources();
850 } 850 }
851 851
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index b22e6bba71f1..4d2b33a30292 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -26,10 +26,6 @@ config CPU_FREQ_GOV_COMMON
26 select IRQ_WORK 26 select IRQ_WORK
27 bool 27 bool
28 28
29config CPU_FREQ_BOOST_SW
30 bool
31 depends on THERMAL
32
33config CPU_FREQ_STAT 29config CPU_FREQ_STAT
34 bool "CPU frequency transition statistics" 30 bool "CPU frequency transition statistics"
35 help 31 help
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index c72258a44ba4..73bb2aafb1a8 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -366,7 +366,7 @@ static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *dat
366 366
367 val = drv_read(data, mask); 367 val = drv_read(data, mask);
368 368
369 pr_debug("get_cur_val = %u\n", val); 369 pr_debug("%s = %u\n", __func__, val);
370 370
371 return val; 371 return val;
372} 372}
@@ -378,7 +378,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
378 unsigned int freq; 378 unsigned int freq;
379 unsigned int cached_freq; 379 unsigned int cached_freq;
380 380
381 pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); 381 pr_debug("%s (%d)\n", __func__, cpu);
382 382
383 policy = cpufreq_cpu_get_raw(cpu); 383 policy = cpufreq_cpu_get_raw(cpu);
384 if (unlikely(!policy)) 384 if (unlikely(!policy))
@@ -458,8 +458,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
458 if (acpi_pstate_strict) { 458 if (acpi_pstate_strict) {
459 if (!check_freqs(policy, mask, 459 if (!check_freqs(policy, mask,
460 policy->freq_table[index].frequency)) { 460 policy->freq_table[index].frequency)) {
461 pr_debug("acpi_cpufreq_target failed (%d)\n", 461 pr_debug("%s (%d)\n", __func__, policy->cpu);
462 policy->cpu);
463 result = -EAGAIN; 462 result = -EAGAIN;
464 } 463 }
465 } 464 }
@@ -573,7 +572,7 @@ static int cpufreq_boost_down_prep(unsigned int cpu)
573static int __init acpi_cpufreq_early_init(void) 572static int __init acpi_cpufreq_early_init(void)
574{ 573{
575 unsigned int i; 574 unsigned int i;
576 pr_debug("acpi_cpufreq_early_init\n"); 575 pr_debug("%s\n", __func__);
577 576
578 acpi_perf_data = alloc_percpu(struct acpi_processor_performance); 577 acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
579 if (!acpi_perf_data) { 578 if (!acpi_perf_data) {
@@ -657,7 +656,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
657 static int blacklisted; 656 static int blacklisted;
658#endif 657#endif
659 658
660 pr_debug("acpi_cpufreq_cpu_init\n"); 659 pr_debug("%s\n", __func__);
661 660
662#ifdef CONFIG_SMP 661#ifdef CONFIG_SMP
663 if (blacklisted) 662 if (blacklisted)
@@ -856,7 +855,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
856{ 855{
857 struct acpi_cpufreq_data *data = policy->driver_data; 856 struct acpi_cpufreq_data *data = policy->driver_data;
858 857
859 pr_debug("acpi_cpufreq_cpu_exit\n"); 858 pr_debug("%s\n", __func__);
860 859
861 policy->fast_switch_possible = false; 860 policy->fast_switch_possible = false;
862 policy->driver_data = NULL; 861 policy->driver_data = NULL;
@@ -881,7 +880,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
881{ 880{
882 struct acpi_cpufreq_data *data = policy->driver_data; 881 struct acpi_cpufreq_data *data = policy->driver_data;
883 882
884 pr_debug("acpi_cpufreq_resume\n"); 883 pr_debug("%s\n", __func__);
885 884
886 data->resume = 1; 885 data->resume = 1;
887 886
@@ -954,7 +953,7 @@ static int __init acpi_cpufreq_init(void)
954 if (cpufreq_get_current_driver()) 953 if (cpufreq_get_current_driver())
955 return -EEXIST; 954 return -EEXIST;
956 955
957 pr_debug("acpi_cpufreq_init\n"); 956 pr_debug("%s\n", __func__);
958 957
959 ret = acpi_cpufreq_early_init(); 958 ret = acpi_cpufreq_early_init();
960 if (ret) 959 if (ret)
@@ -991,7 +990,7 @@ static int __init acpi_cpufreq_init(void)
991 990
992static void __exit acpi_cpufreq_exit(void) 991static void __exit acpi_cpufreq_exit(void)
993{ 992{
994 pr_debug("acpi_cpufreq_exit\n"); 993 pr_debug("%s\n", __func__);
995 994
996 acpi_cpufreq_boost_exit(); 995 acpi_cpufreq_boost_exit();
997 996
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 4ac7c3cf34be..6927a8c0e748 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -124,7 +124,7 @@ static int __init amd_freq_sensitivity_init(void)
124 PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL); 124 PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
125 125
126 if (!pcidev) { 126 if (!pcidev) {
127 if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK)) 127 if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
128 return -ENODEV; 128 return -ENODEV;
129 } 129 }
130 130
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 75491fc841a6..0df16eb1eb3c 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -359,11 +359,11 @@ static int __init armada37xx_cpufreq_driver_init(void)
359 struct armada_37xx_dvfs *dvfs; 359 struct armada_37xx_dvfs *dvfs;
360 struct platform_device *pdev; 360 struct platform_device *pdev;
361 unsigned long freq; 361 unsigned long freq;
362 unsigned int cur_frequency; 362 unsigned int cur_frequency, base_frequency;
363 struct regmap *nb_pm_base, *avs_base; 363 struct regmap *nb_pm_base, *avs_base;
364 struct device *cpu_dev; 364 struct device *cpu_dev;
365 int load_lvl, ret; 365 int load_lvl, ret;
366 struct clk *clk; 366 struct clk *clk, *parent;
367 367
368 nb_pm_base = 368 nb_pm_base =
369 syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm"); 369 syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
@@ -399,6 +399,22 @@ static int __init armada37xx_cpufreq_driver_init(void)
399 return PTR_ERR(clk); 399 return PTR_ERR(clk);
400 } 400 }
401 401
402 parent = clk_get_parent(clk);
403 if (IS_ERR(parent)) {
404 dev_err(cpu_dev, "Cannot get parent clock for CPU0\n");
405 clk_put(clk);
406 return PTR_ERR(parent);
407 }
408
409 /* Get parent CPU frequency */
410 base_frequency = clk_get_rate(parent);
411
412 if (!base_frequency) {
413 dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n");
414 clk_put(clk);
415 return -EINVAL;
416 }
417
402 /* Get nominal (current) CPU frequency */ 418 /* Get nominal (current) CPU frequency */
403 cur_frequency = clk_get_rate(clk); 419 cur_frequency = clk_get_rate(clk);
404 if (!cur_frequency) { 420 if (!cur_frequency) {
@@ -431,7 +447,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
431 for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR; 447 for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
432 load_lvl++) { 448 load_lvl++) {
433 unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000; 449 unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
434 freq = cur_frequency / dvfs->divider[load_lvl]; 450 freq = base_frequency / dvfs->divider[load_lvl];
435 ret = dev_pm_opp_add(cpu_dev, freq, u_volt); 451 ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
436 if (ret) 452 if (ret)
437 goto remove_opp; 453 goto remove_opp;
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index b3f4bd647e9b..988ebc326bdb 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -132,6 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
132 of_node_put(node); 132 of_node_put(node);
133 return -ENODEV; 133 return -ENODEV;
134 } 134 }
135 of_node_put(node);
135 136
136 nb_cpus = num_possible_cpus(); 137 nb_cpus = num_possible_cpus();
137 freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL); 138 freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e10922709d13..7ea217c88c2e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -34,11 +34,6 @@
34 34
35static LIST_HEAD(cpufreq_policy_list); 35static LIST_HEAD(cpufreq_policy_list);
36 36
37static inline bool policy_is_inactive(struct cpufreq_policy *policy)
38{
39 return cpumask_empty(policy->cpus);
40}
41
42/* Macros to iterate over CPU policies */ 37/* Macros to iterate over CPU policies */
43#define for_each_suitable_policy(__policy, __active) \ 38#define for_each_suitable_policy(__policy, __active) \
44 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \ 39 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
@@ -250,6 +245,51 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
250} 245}
251EXPORT_SYMBOL_GPL(cpufreq_cpu_put); 246EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
252 247
248/**
249 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
250 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
251 */
252void cpufreq_cpu_release(struct cpufreq_policy *policy)
253{
254 if (WARN_ON(!policy))
255 return;
256
257 lockdep_assert_held(&policy->rwsem);
258
259 up_write(&policy->rwsem);
260
261 cpufreq_cpu_put(policy);
262}
263
264/**
265 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
266 * @cpu: CPU to find the policy for.
267 *
268 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
269 * if the policy returned by it is not NULL, acquire its rwsem for writing.
270 * Return the policy if it is active or release it and return NULL otherwise.
271 *
272 * The policy returned by this function has to be released with the help of
273 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
274 * counter properly.
275 */
276struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
277{
278 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
279
280 if (!policy)
281 return NULL;
282
283 down_write(&policy->rwsem);
284
285 if (policy_is_inactive(policy)) {
286 cpufreq_cpu_release(policy);
287 return NULL;
288 }
289
290 return policy;
291}
292
253/********************************************************************* 293/*********************************************************************
254 * EXTERNALLY AFFECTING FREQUENCY CHANGES * 294 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
255 *********************************************************************/ 295 *********************************************************************/
@@ -669,9 +709,6 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
669 return ret; 709 return ret;
670} 710}
671 711
672static int cpufreq_set_policy(struct cpufreq_policy *policy,
673 struct cpufreq_policy *new_policy);
674
675/** 712/**
676 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access 713 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
677 */ 714 */
@@ -857,11 +894,9 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
857{ 894{
858 unsigned int limit; 895 unsigned int limit;
859 int ret; 896 int ret;
860 if (cpufreq_driver->bios_limit) { 897 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
861 ret = cpufreq_driver->bios_limit(policy->cpu, &limit); 898 if (!ret)
862 if (!ret) 899 return sprintf(buf, "%u\n", limit);
863 return sprintf(buf, "%u\n", limit);
864 }
865 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); 900 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
866} 901}
867 902
@@ -1098,6 +1133,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1098 cpufreq_global_kobject, "policy%u", cpu); 1133 cpufreq_global_kobject, "policy%u", cpu);
1099 if (ret) { 1134 if (ret) {
1100 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); 1135 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1136 kobject_put(&policy->kobj);
1101 goto err_free_real_cpus; 1137 goto err_free_real_cpus;
1102 } 1138 }
1103 1139
@@ -1550,7 +1586,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1550{ 1586{
1551 unsigned int ret_freq = 0; 1587 unsigned int ret_freq = 0;
1552 1588
1553 if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get) 1589 if (unlikely(policy_is_inactive(policy)))
1554 return ret_freq; 1590 return ret_freq;
1555 1591
1556 ret_freq = cpufreq_driver->get(policy->cpu); 1592 ret_freq = cpufreq_driver->get(policy->cpu);
@@ -1588,7 +1624,8 @@ unsigned int cpufreq_get(unsigned int cpu)
1588 1624
1589 if (policy) { 1625 if (policy) {
1590 down_read(&policy->rwsem); 1626 down_read(&policy->rwsem);
1591 ret_freq = __cpufreq_get(policy); 1627 if (cpufreq_driver->get)
1628 ret_freq = __cpufreq_get(policy);
1592 up_read(&policy->rwsem); 1629 up_read(&policy->rwsem);
1593 1630
1594 cpufreq_cpu_put(policy); 1631 cpufreq_cpu_put(policy);
@@ -2229,8 +2266,8 @@ EXPORT_SYMBOL(cpufreq_get_policy);
2229 * 2266 *
2230 * The cpuinfo part of @policy is not updated by this function. 2267 * The cpuinfo part of @policy is not updated by this function.
2231 */ 2268 */
2232static int cpufreq_set_policy(struct cpufreq_policy *policy, 2269int cpufreq_set_policy(struct cpufreq_policy *policy,
2233 struct cpufreq_policy *new_policy) 2270 struct cpufreq_policy *new_policy)
2234{ 2271{
2235 struct cpufreq_governor *old_gov; 2272 struct cpufreq_governor *old_gov;
2236 int ret; 2273 int ret;
@@ -2337,17 +2374,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2337 */ 2374 */
2338void cpufreq_update_policy(unsigned int cpu) 2375void cpufreq_update_policy(unsigned int cpu)
2339{ 2376{
2340 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 2377 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2341 struct cpufreq_policy new_policy; 2378 struct cpufreq_policy new_policy;
2342 2379
2343 if (!policy) 2380 if (!policy)
2344 return; 2381 return;
2345 2382
2346 down_write(&policy->rwsem);
2347
2348 if (policy_is_inactive(policy))
2349 goto unlock;
2350
2351 /* 2383 /*
2352 * BIOS might change freq behind our back 2384 * BIOS might change freq behind our back
2353 * -> ask driver for current freq and notify governors about a change 2385 * -> ask driver for current freq and notify governors about a change
@@ -2364,12 +2396,26 @@ void cpufreq_update_policy(unsigned int cpu)
2364 cpufreq_set_policy(policy, &new_policy); 2396 cpufreq_set_policy(policy, &new_policy);
2365 2397
2366unlock: 2398unlock:
2367 up_write(&policy->rwsem); 2399 cpufreq_cpu_release(policy);
2368
2369 cpufreq_cpu_put(policy);
2370} 2400}
2371EXPORT_SYMBOL(cpufreq_update_policy); 2401EXPORT_SYMBOL(cpufreq_update_policy);
2372 2402
2403/**
2404 * cpufreq_update_limits - Update policy limits for a given CPU.
2405 * @cpu: CPU to update the policy limits for.
2406 *
2407 * Invoke the driver's ->update_limits callback if present or call
2408 * cpufreq_update_policy() for @cpu.
2409 */
2410void cpufreq_update_limits(unsigned int cpu)
2411{
2412 if (cpufreq_driver->update_limits)
2413 cpufreq_driver->update_limits(cpu);
2414 else
2415 cpufreq_update_policy(cpu);
2416}
2417EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2418
2373/********************************************************************* 2419/*********************************************************************
2374 * BOOST * 2420 * BOOST *
2375 *********************************************************************/ 2421 *********************************************************************/
@@ -2426,7 +2472,7 @@ int cpufreq_boost_trigger_state(int state)
2426 2472
2427static bool cpufreq_boost_supported(void) 2473static bool cpufreq_boost_supported(void)
2428{ 2474{
2429 return likely(cpufreq_driver) && cpufreq_driver->set_boost; 2475 return cpufreq_driver->set_boost;
2430} 2476}
2431 2477
2432static int create_boost_sysfs_file(void) 2478static int create_boost_sysfs_file(void)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ffa9adeaba31..9d1d9bf02710 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
459 /* Failure, so roll back. */ 459 /* Failure, so roll back. */
460 pr_err("initialization failed (dbs_data kobject init error %d)\n", ret); 460 pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
461 461
462 kobject_put(&dbs_data->attr_set.kobj);
463
462 policy->governor_data = NULL; 464 policy->governor_data = NULL;
463 465
464 if (!have_governor_per_policy()) 466 if (!have_governor_per_policy())
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index e2db5581489a..08b192eb22c6 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -14,7 +14,6 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16 16
17static DEFINE_SPINLOCK(cpufreq_stats_lock);
18 17
19struct cpufreq_stats { 18struct cpufreq_stats {
20 unsigned int total_trans; 19 unsigned int total_trans;
@@ -23,6 +22,7 @@ struct cpufreq_stats {
23 unsigned int state_num; 22 unsigned int state_num;
24 unsigned int last_index; 23 unsigned int last_index;
25 u64 *time_in_state; 24 u64 *time_in_state;
25 spinlock_t lock;
26 unsigned int *freq_table; 26 unsigned int *freq_table;
27 unsigned int *trans_table; 27 unsigned int *trans_table;
28}; 28};
@@ -39,12 +39,12 @@ static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
39{ 39{
40 unsigned int count = stats->max_state; 40 unsigned int count = stats->max_state;
41 41
42 spin_lock(&cpufreq_stats_lock); 42 spin_lock(&stats->lock);
43 memset(stats->time_in_state, 0, count * sizeof(u64)); 43 memset(stats->time_in_state, 0, count * sizeof(u64));
44 memset(stats->trans_table, 0, count * count * sizeof(int)); 44 memset(stats->trans_table, 0, count * count * sizeof(int));
45 stats->last_time = get_jiffies_64(); 45 stats->last_time = get_jiffies_64();
46 stats->total_trans = 0; 46 stats->total_trans = 0;
47 spin_unlock(&cpufreq_stats_lock); 47 spin_unlock(&stats->lock);
48} 48}
49 49
50static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) 50static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
@@ -62,9 +62,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
62 if (policy->fast_switch_enabled) 62 if (policy->fast_switch_enabled)
63 return 0; 63 return 0;
64 64
65 spin_lock(&cpufreq_stats_lock); 65 spin_lock(&stats->lock);
66 cpufreq_stats_update(stats); 66 cpufreq_stats_update(stats);
67 spin_unlock(&cpufreq_stats_lock); 67 spin_unlock(&stats->lock);
68 68
69 for (i = 0; i < stats->state_num; i++) { 69 for (i = 0; i < stats->state_num; i++) {
70 len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i], 70 len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -211,6 +211,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
211 stats->state_num = i; 211 stats->state_num = i;
212 stats->last_time = get_jiffies_64(); 212 stats->last_time = get_jiffies_64();
213 stats->last_index = freq_table_get_index(stats, policy->cur); 213 stats->last_index = freq_table_get_index(stats, policy->cur);
214 spin_lock_init(&stats->lock);
214 215
215 policy->stats = stats; 216 policy->stats = stats;
216 ret = sysfs_create_group(&policy->kobj, &stats_attr_group); 217 ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
@@ -242,11 +243,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
242 if (old_index == -1 || new_index == -1 || old_index == new_index) 243 if (old_index == -1 || new_index == -1 || old_index == new_index)
243 return; 244 return;
244 245
245 spin_lock(&cpufreq_stats_lock); 246 spin_lock(&stats->lock);
246 cpufreq_stats_update(stats); 247 cpufreq_stats_update(stats);
247 248
248 stats->last_index = new_index; 249 stats->last_index = new_index;
249 stats->trans_table[old_index * stats->max_state + new_index]++; 250 stats->trans_table[old_index * stats->max_state + new_index]++;
250 stats->total_trans++; 251 stats->total_trans++;
251 spin_unlock(&cpufreq_stats_lock); 252 spin_unlock(&stats->lock);
252} 253}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 3a8cc99e6815..e7be0af3199f 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -290,9 +290,6 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
290 290
291struct freq_attr *cpufreq_generic_attr[] = { 291struct freq_attr *cpufreq_generic_attr[] = {
292 &cpufreq_freq_attr_scaling_available_freqs, 292 &cpufreq_freq_attr_scaling_available_freqs,
293#ifdef CONFIG_CPU_FREQ_BOOST_SW
294 &cpufreq_freq_attr_scaling_boost_freqs,
295#endif
296 NULL, 293 NULL,
297}; 294};
298EXPORT_SYMBOL_GPL(cpufreq_generic_attr); 295EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index a4ff09f91c8f..3e17560b1efe 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -388,11 +388,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
388 ret = imx6ul_opp_check_speed_grading(cpu_dev); 388 ret = imx6ul_opp_check_speed_grading(cpu_dev);
389 if (ret) { 389 if (ret) {
390 if (ret == -EPROBE_DEFER) 390 if (ret == -EPROBE_DEFER)
391 return ret; 391 goto put_node;
392 392
393 dev_err(cpu_dev, "failed to read ocotp: %d\n", 393 dev_err(cpu_dev, "failed to read ocotp: %d\n",
394 ret); 394 ret);
395 return ret; 395 goto put_node;
396 } 396 }
397 } else { 397 } else {
398 imx6q_opp_check_speed_grading(cpu_dev); 398 imx6q_opp_check_speed_grading(cpu_dev);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2986119dd31f..34b54df41aaa 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct vid_data {
179 * based on the MSR_IA32_MISC_ENABLE value and whether or 179 * based on the MSR_IA32_MISC_ENABLE value and whether or
180 * not the maximum reported turbo P-state is different from 180 * not the maximum reported turbo P-state is different from
181 * the maximum reported non-turbo one. 181 * the maximum reported non-turbo one.
182 * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
182 * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo 183 * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
183 * P-state capacity. 184 * P-state capacity.
184 * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo 185 * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -187,6 +188,7 @@ struct vid_data {
187struct global_params { 188struct global_params {
188 bool no_turbo; 189 bool no_turbo;
189 bool turbo_disabled; 190 bool turbo_disabled;
191 bool turbo_disabled_mf;
190 int max_perf_pct; 192 int max_perf_pct;
191 int min_perf_pct; 193 int min_perf_pct;
192}; 194};
@@ -525,7 +527,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
525 u64 epb; 527 u64 epb;
526 int ret; 528 int ret;
527 529
528 if (!static_cpu_has(X86_FEATURE_EPB)) 530 if (!boot_cpu_has(X86_FEATURE_EPB))
529 return -ENXIO; 531 return -ENXIO;
530 532
531 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 533 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -539,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
539{ 541{
540 s16 epp; 542 s16 epp;
541 543
542 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 544 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
543 /* 545 /*
544 * When hwp_req_data is 0, means that caller didn't read 546 * When hwp_req_data is 0, means that caller didn't read
545 * MSR_HWP_REQUEST, so need to read and get EPP. 547 * MSR_HWP_REQUEST, so need to read and get EPP.
@@ -564,7 +566,7 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
564 u64 epb; 566 u64 epb;
565 int ret; 567 int ret;
566 568
567 if (!static_cpu_has(X86_FEATURE_EPB)) 569 if (!boot_cpu_has(X86_FEATURE_EPB))
568 return -ENXIO; 570 return -ENXIO;
569 571
570 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 572 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -612,7 +614,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
612 if (epp < 0) 614 if (epp < 0)
613 return epp; 615 return epp;
614 616
615 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 617 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
616 if (epp == HWP_EPP_PERFORMANCE) 618 if (epp == HWP_EPP_PERFORMANCE)
617 return 1; 619 return 1;
618 if (epp <= HWP_EPP_BALANCE_PERFORMANCE) 620 if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
@@ -621,7 +623,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
621 return 3; 623 return 3;
622 else 624 else
623 return 4; 625 return 4;
624 } else if (static_cpu_has(X86_FEATURE_EPB)) { 626 } else if (boot_cpu_has(X86_FEATURE_EPB)) {
625 /* 627 /*
626 * Range: 628 * Range:
627 * 0x00-0x03 : Performance 629 * 0x00-0x03 : Performance
@@ -649,7 +651,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
649 651
650 mutex_lock(&intel_pstate_limits_lock); 652 mutex_lock(&intel_pstate_limits_lock);
651 653
652 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 654 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
653 u64 value; 655 u64 value;
654 656
655 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value); 657 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
@@ -824,7 +826,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
824 epp = cpu_data->epp_powersave; 826 epp = cpu_data->epp_powersave;
825 } 827 }
826update_epp: 828update_epp:
827 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 829 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
828 value &= ~GENMASK_ULL(31, 24); 830 value &= ~GENMASK_ULL(31, 24);
829 value |= (u64)epp << 24; 831 value |= (u64)epp << 24;
830 } else { 832 } else {
@@ -849,7 +851,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
849 value |= HWP_MIN_PERF(min_perf); 851 value |= HWP_MIN_PERF(min_perf);
850 852
851 /* Set EPP/EPB to min */ 853 /* Set EPP/EPB to min */
852 if (static_cpu_has(X86_FEATURE_HWP_EPP)) 854 if (boot_cpu_has(X86_FEATURE_HWP_EPP))
853 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); 855 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
854 else 856 else
855 intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE); 857 intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
@@ -897,6 +899,48 @@ static void intel_pstate_update_policies(void)
897 cpufreq_update_policy(cpu); 899 cpufreq_update_policy(cpu);
898} 900}
899 901
902static void intel_pstate_update_max_freq(unsigned int cpu)
903{
904 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
905 struct cpufreq_policy new_policy;
906 struct cpudata *cpudata;
907
908 if (!policy)
909 return;
910
911 cpudata = all_cpu_data[cpu];
912 policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
913 cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
914
915 memcpy(&new_policy, policy, sizeof(*policy));
916 new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
917 new_policy.min = min(policy->user_policy.min, new_policy.max);
918
919 cpufreq_set_policy(policy, &new_policy);
920
921 cpufreq_cpu_release(policy);
922}
923
924static void intel_pstate_update_limits(unsigned int cpu)
925{
926 mutex_lock(&intel_pstate_driver_lock);
927
928 update_turbo_state();
929 /*
930 * If turbo has been turned on or off globally, policy limits for
931 * all CPUs need to be updated to reflect that.
932 */
933 if (global.turbo_disabled_mf != global.turbo_disabled) {
934 global.turbo_disabled_mf = global.turbo_disabled;
935 for_each_possible_cpu(cpu)
936 intel_pstate_update_max_freq(cpu);
937 } else {
938 cpufreq_update_policy(cpu);
939 }
940
941 mutex_unlock(&intel_pstate_driver_lock);
942}
943
900/************************** sysfs begin ************************/ 944/************************** sysfs begin ************************/
901#define show_one(file_name, object) \ 945#define show_one(file_name, object) \
902 static ssize_t show_##file_name \ 946 static ssize_t show_##file_name \
@@ -1197,7 +1241,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
1197static void intel_pstate_hwp_enable(struct cpudata *cpudata) 1241static void intel_pstate_hwp_enable(struct cpudata *cpudata)
1198{ 1242{
1199 /* First disable HWP notification interrupt as we don't process them */ 1243 /* First disable HWP notification interrupt as we don't process them */
1200 if (static_cpu_has(X86_FEATURE_HWP_NOTIFY)) 1244 if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
1201 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 1245 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
1202 1246
1203 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 1247 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
@@ -2138,6 +2182,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
2138 /* cpuinfo and default policy values */ 2182 /* cpuinfo and default policy values */
2139 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 2183 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
2140 update_turbo_state(); 2184 update_turbo_state();
2185 global.turbo_disabled_mf = global.turbo_disabled;
2141 policy->cpuinfo.max_freq = global.turbo_disabled ? 2186 policy->cpuinfo.max_freq = global.turbo_disabled ?
2142 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 2187 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
2143 policy->cpuinfo.max_freq *= cpu->pstate.scaling; 2188 policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2182,6 +2227,7 @@ static struct cpufreq_driver intel_pstate = {
2182 .init = intel_pstate_cpu_init, 2227 .init = intel_pstate_cpu_init,
2183 .exit = intel_pstate_cpu_exit, 2228 .exit = intel_pstate_cpu_exit,
2184 .stop_cpu = intel_pstate_stop_cpu, 2229 .stop_cpu = intel_pstate_stop_cpu,
2230 .update_limits = intel_pstate_update_limits,
2185 .name = "intel_pstate", 2231 .name = "intel_pstate",
2186}; 2232};
2187 2233
@@ -2316,6 +2362,7 @@ static struct cpufreq_driver intel_cpufreq = {
2316 .init = intel_cpufreq_cpu_init, 2362 .init = intel_cpufreq_cpu_init,
2317 .exit = intel_pstate_cpu_exit, 2363 .exit = intel_pstate_cpu_exit,
2318 .stop_cpu = intel_cpufreq_stop_cpu, 2364 .stop_cpu = intel_cpufreq_stop_cpu,
2365 .update_limits = intel_pstate_update_limits,
2319 .name = "intel_cpufreq", 2366 .name = "intel_cpufreq",
2320}; 2367};
2321 2368
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index c2dd43f3f5d8..8d63a6dc8383 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
124 priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk"); 124 priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
125 if (IS_ERR(priv.cpu_clk)) { 125 if (IS_ERR(priv.cpu_clk)) {
126 dev_err(priv.dev, "Unable to get cpuclk\n"); 126 dev_err(priv.dev, "Unable to get cpuclk\n");
127 return PTR_ERR(priv.cpu_clk); 127 err = PTR_ERR(priv.cpu_clk);
128 goto out_node;
128 } 129 }
129 130
130 err = clk_prepare_enable(priv.cpu_clk); 131 err = clk_prepare_enable(priv.cpu_clk);
131 if (err) { 132 if (err) {
132 dev_err(priv.dev, "Unable to prepare cpuclk\n"); 133 dev_err(priv.dev, "Unable to prepare cpuclk\n");
133 return err; 134 goto out_node;
134 } 135 }
135 136
136 kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000; 137 kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
161 goto out_ddr; 162 goto out_ddr;
162 } 163 }
163 164
164 of_node_put(np);
165 np = NULL;
166
167 err = cpufreq_register_driver(&kirkwood_cpufreq_driver); 165 err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
168 if (!err) 166 if (err) {
169 return 0; 167 dev_err(priv.dev, "Failed to register cpufreq driver\n");
168 goto out_powersave;
169 }
170 170
171 dev_err(priv.dev, "Failed to register cpufreq driver\n"); 171 of_node_put(np);
172 return 0;
172 173
174out_powersave:
173 clk_disable_unprepare(priv.powersave_clk); 175 clk_disable_unprepare(priv.powersave_clk);
174out_ddr: 176out_ddr:
175 clk_disable_unprepare(priv.ddr_clk); 177 clk_disable_unprepare(priv.ddr_clk);
176out_cpu: 178out_cpu:
177 clk_disable_unprepare(priv.cpu_clk); 179 clk_disable_unprepare(priv.cpu_clk);
180out_node:
178 of_node_put(np); 181 of_node_put(np);
179 182
180 return err; 183 return err;
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index d9df89392b84..a94355723ef8 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -210,7 +210,7 @@ static int __init maple_cpufreq_init(void)
210 */ 210 */
211 valp = of_get_property(cpunode, "clock-frequency", NULL); 211 valp = of_get_property(cpunode, "clock-frequency", NULL);
212 if (!valp) 212 if (!valp)
213 return -ENODEV; 213 goto bail_noprops;
214 max_freq = (*valp)/1000; 214 max_freq = (*valp)/1000;
215 maple_cpu_freqs[0].frequency = max_freq; 215 maple_cpu_freqs[0].frequency = max_freq;
216 maple_cpu_freqs[1].frequency = max_freq/2; 216 maple_cpu_freqs[1].frequency = max_freq/2;
@@ -231,10 +231,6 @@ static int __init maple_cpufreq_init(void)
231 231
232 rc = cpufreq_register_driver(&maple_cpufreq_driver); 232 rc = cpufreq_register_driver(&maple_cpufreq_driver);
233 233
234 of_node_put(cpunode);
235
236 return rc;
237
238bail_noprops: 234bail_noprops:
239 of_node_put(cpunode); 235 of_node_put(cpunode);
240 236
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 75dfbd2a58ea..c7710c149de8 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
146 146
147 cpu = of_get_cpu_node(policy->cpu, NULL); 147 cpu = of_get_cpu_node(policy->cpu, NULL);
148 148
149 of_node_put(cpu);
149 if (!cpu) 150 if (!cpu)
150 goto out; 151 goto out;
151 152
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 52f0d91d30c1..9b4ce2eb8222 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
552 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); 552 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
553 if (volt_gpio_np) 553 if (volt_gpio_np)
554 voltage_gpio = read_gpio(volt_gpio_np); 554 voltage_gpio = read_gpio(volt_gpio_np);
555 of_node_put(volt_gpio_np);
555 if (!voltage_gpio){ 556 if (!voltage_gpio){
556 pr_err("missing cpu-vcore-select gpio\n"); 557 pr_err("missing cpu-vcore-select gpio\n");
557 return 1; 558 return 1;
@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
588 if (volt_gpio_np) 589 if (volt_gpio_np)
589 voltage_gpio = read_gpio(volt_gpio_np); 590 voltage_gpio = read_gpio(volt_gpio_np);
590 591
592 of_node_put(volt_gpio_np);
591 pvr = mfspr(SPRN_PVR); 593 pvr = mfspr(SPRN_PVR);
592 has_cpu_l2lve = !((pvr & 0xf00) == 0x100); 594 has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
593 595
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index fb77b39a4ce3..3c12e03fa343 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1178,7 +1178,7 @@ static int powernowk8_init(void)
1178 unsigned int i, supported_cpus = 0; 1178 unsigned int i, supported_cpus = 0;
1179 int ret; 1179 int ret;
1180 1180
1181 if (static_cpu_has(X86_FEATURE_HW_PSTATE)) { 1181 if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
1182 __request_acpi_cpufreq(); 1182 __request_acpi_cpufreq();
1183 return -ENODEV; 1183 return -ENODEV;
1184 } 1184 }
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 41a0f0be3f9f..8414c3a4ea08 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
86 if (!cbe_get_cpu_pmd_regs(policy->cpu) || 86 if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
87 !cbe_get_cpu_mic_tm_regs(policy->cpu)) { 87 !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
88 pr_info("invalid CBE regs pointers for cpufreq\n"); 88 pr_info("invalid CBE regs pointers for cpufreq\n");
89 of_node_put(cpu);
89 return -EINVAL; 90 return -EINVAL;
90 } 91 }
91 92
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 4295e5476264..71b640c8c1a5 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -280,10 +280,12 @@ static const struct of_device_id node_matches[] __initconst = {
280 280
281 { .compatible = "fsl,ls1012a-clockgen", }, 281 { .compatible = "fsl,ls1012a-clockgen", },
282 { .compatible = "fsl,ls1021a-clockgen", }, 282 { .compatible = "fsl,ls1021a-clockgen", },
283 { .compatible = "fsl,ls1028a-clockgen", },
283 { .compatible = "fsl,ls1043a-clockgen", }, 284 { .compatible = "fsl,ls1043a-clockgen", },
284 { .compatible = "fsl,ls1046a-clockgen", }, 285 { .compatible = "fsl,ls1046a-clockgen", },
285 { .compatible = "fsl,ls1088a-clockgen", }, 286 { .compatible = "fsl,ls1088a-clockgen", },
286 { .compatible = "fsl,ls2080a-clockgen", }, 287 { .compatible = "fsl,ls2080a-clockgen", },
288 { .compatible = "fsl,lx2160a-clockgen", },
287 { .compatible = "fsl,p4080-clockgen", }, 289 { .compatible = "fsl,p4080-clockgen", },
288 { .compatible = "fsl,qoriq-clockgen-1.0", }, 290 { .compatible = "fsl,qoriq-clockgen-1.0", },
289 { .compatible = "fsl,qoriq-clockgen-2.0", }, 291 { .compatible = "fsl,qoriq-clockgen-2.0", },
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index a1fb735685db..e086b2dd4072 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -412,7 +412,7 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
412} 412}
413 413
414/** 414/**
415 * centrino_setpolicy - set a new CPUFreq policy 415 * centrino_target - set a new CPUFreq policy
416 * @policy: new policy 416 * @policy: new policy
417 * @index: index of target frequency 417 * @index: index of target frequency
418 * 418 *
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
index 0171a6e190d7..f7199a35cbb6 100644
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -84,7 +84,7 @@ static struct cpuidle_driver exynos_idle_driver = {
84 [1] = { 84 [1] = {
85 .enter = exynos_enter_lowpower, 85 .enter = exynos_enter_lowpower,
86 .exit_latency = 300, 86 .exit_latency = 300,
87 .target_residency = 100000, 87 .target_residency = 10000,
88 .name = "C1", 88 .name = "C1",
89 .desc = "ARM power down", 89 .desc = "ARM power down",
90 }, 90 },
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7f108309e871..0f4b7c45df3e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -328,9 +328,23 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
328int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, 328int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
329 int index) 329 int index)
330{ 330{
331 int ret = 0;
332
333 /*
334 * Store the next hrtimer, which becomes either next tick or the next
335 * timer event, whatever expires first. Additionally, to make this data
336 * useful for consumers outside cpuidle, we rely on that the governor's
337 * ->select() callback have decided, whether to stop the tick or not.
338 */
339 WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());
340
331 if (cpuidle_state_is_coupled(drv, index)) 341 if (cpuidle_state_is_coupled(drv, index))
332 return cpuidle_enter_state_coupled(dev, drv, index); 342 ret = cpuidle_enter_state_coupled(dev, drv, index);
333 return cpuidle_enter_state(dev, drv, index); 343 else
344 ret = cpuidle_enter_state(dev, drv, index);
345
346 WRITE_ONCE(dev->next_hrtimer, 0);
347 return ret;
334} 348}
335 349
336/** 350/**
@@ -511,6 +525,7 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
511{ 525{
512 memset(dev->states_usage, 0, sizeof(dev->states_usage)); 526 memset(dev->states_usage, 0, sizeof(dev->states_usage));
513 dev->last_residency = 0; 527 dev->last_residency = 0;
528 dev->next_hrtimer = 0;
514} 529}
515 530
516/** 531/**
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index d67242d87744..87e93406d7cd 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -240,7 +240,7 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
240 } 240 }
241 241
242 list_for_each_entry(edev, &devfreq_event_list, node) { 242 list_for_each_entry(edev, &devfreq_event_list, node) {
243 if (!strcmp(edev->desc->name, node->name)) 243 if (of_node_name_eq(node, edev->desc->name))
244 goto out; 244 goto out;
245 } 245 }
246 edev = NULL; 246 edev = NULL;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 0ae3de76833b..6b6991f0e873 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -29,6 +29,9 @@
29#include <linux/of.h> 29#include <linux/of.h>
30#include "governor.h" 30#include "governor.h"
31 31
32#define CREATE_TRACE_POINTS
33#include <trace/events/devfreq.h>
34
32static struct class *devfreq_class; 35static struct class *devfreq_class;
33 36
34/* 37/*
@@ -228,7 +231,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
228 * if is not found. This can happen when both drivers (the governor driver 231 * if is not found. This can happen when both drivers (the governor driver
229 * and the driver that call devfreq_add_device) are built as modules. 232 * and the driver that call devfreq_add_device) are built as modules.
230 * devfreq_list_lock should be held by the caller. Returns the matched 233 * devfreq_list_lock should be held by the caller. Returns the matched
231 * governor's pointer. 234 * governor's pointer or an error pointer.
232 */ 235 */
233static struct devfreq_governor *try_then_request_governor(const char *name) 236static struct devfreq_governor *try_then_request_governor(const char *name)
234{ 237{
@@ -254,7 +257,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
254 /* Restore previous state before return */ 257 /* Restore previous state before return */
255 mutex_lock(&devfreq_list_lock); 258 mutex_lock(&devfreq_list_lock);
256 if (err) 259 if (err)
257 return NULL; 260 return ERR_PTR(err);
258 261
259 governor = find_devfreq_governor(name); 262 governor = find_devfreq_governor(name);
260 } 263 }
@@ -394,6 +397,8 @@ static void devfreq_monitor(struct work_struct *work)
394 queue_delayed_work(devfreq_wq, &devfreq->work, 397 queue_delayed_work(devfreq_wq, &devfreq->work,
395 msecs_to_jiffies(devfreq->profile->polling_ms)); 398 msecs_to_jiffies(devfreq->profile->polling_ms));
396 mutex_unlock(&devfreq->lock); 399 mutex_unlock(&devfreq->lock);
400
401 trace_devfreq_monitor(devfreq);
397} 402}
398 403
399/** 404/**
@@ -528,7 +533,7 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
528 mutex_lock(&devfreq->lock); 533 mutex_lock(&devfreq->lock);
529 if (!devfreq->stop_polling) 534 if (!devfreq->stop_polling)
530 queue_delayed_work(devfreq_wq, &devfreq->work, 535 queue_delayed_work(devfreq_wq, &devfreq->work,
531 msecs_to_jiffies(devfreq->profile->polling_ms)); 536 msecs_to_jiffies(devfreq->profile->polling_ms));
532 } 537 }
533out: 538out:
534 mutex_unlock(&devfreq->lock); 539 mutex_unlock(&devfreq->lock);
@@ -537,7 +542,7 @@ EXPORT_SYMBOL(devfreq_interval_update);
537 542
538/** 543/**
539 * devfreq_notifier_call() - Notify that the device frequency requirements 544 * devfreq_notifier_call() - Notify that the device frequency requirements
540 * has been changed out of devfreq framework. 545 * has been changed out of devfreq framework.
541 * @nb: the notifier_block (supposed to be devfreq->nb) 546 * @nb: the notifier_block (supposed to be devfreq->nb)
542 * @type: not used 547 * @type: not used
543 * @devp: not used 548 * @devp: not used
@@ -651,7 +656,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
651 mutex_unlock(&devfreq->lock); 656 mutex_unlock(&devfreq->lock);
652 err = set_freq_table(devfreq); 657 err = set_freq_table(devfreq);
653 if (err < 0) 658 if (err < 0)
654 goto err_out; 659 goto err_dev;
655 mutex_lock(&devfreq->lock); 660 mutex_lock(&devfreq->lock);
656 } 661 }
657 662
@@ -683,16 +688,27 @@ struct devfreq *devfreq_add_device(struct device *dev,
683 goto err_out; 688 goto err_out;
684 } 689 }
685 690
686 devfreq->trans_table = 691 devfreq->trans_table = devm_kzalloc(&devfreq->dev,
687 devm_kzalloc(&devfreq->dev, 692 array3_size(sizeof(unsigned int),
688 array3_size(sizeof(unsigned int), 693 devfreq->profile->max_state,
689 devfreq->profile->max_state, 694 devfreq->profile->max_state),
690 devfreq->profile->max_state), 695 GFP_KERNEL);
691 GFP_KERNEL); 696 if (!devfreq->trans_table) {
697 mutex_unlock(&devfreq->lock);
698 err = -ENOMEM;
699 goto err_devfreq;
700 }
701
692 devfreq->time_in_state = devm_kcalloc(&devfreq->dev, 702 devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
693 devfreq->profile->max_state, 703 devfreq->profile->max_state,
694 sizeof(unsigned long), 704 sizeof(unsigned long),
695 GFP_KERNEL); 705 GFP_KERNEL);
706 if (!devfreq->time_in_state) {
707 mutex_unlock(&devfreq->lock);
708 err = -ENOMEM;
709 goto err_devfreq;
710 }
711
696 devfreq->last_stat_updated = jiffies; 712 devfreq->last_stat_updated = jiffies;
697 713
698 srcu_init_notifier_head(&devfreq->transition_notifier_list); 714 srcu_init_notifier_head(&devfreq->transition_notifier_list);
@@ -726,7 +742,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
726 742
727err_init: 743err_init:
728 mutex_unlock(&devfreq_list_lock); 744 mutex_unlock(&devfreq_list_lock);
729 745err_devfreq:
730 devfreq_remove_device(devfreq); 746 devfreq_remove_device(devfreq);
731 devfreq = NULL; 747 devfreq = NULL;
732err_dev: 748err_dev:
@@ -1113,7 +1129,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1113 struct devfreq *df = to_devfreq(dev); 1129 struct devfreq *df = to_devfreq(dev);
1114 int ret; 1130 int ret;
1115 char str_governor[DEVFREQ_NAME_LEN + 1]; 1131 char str_governor[DEVFREQ_NAME_LEN + 1];
1116 struct devfreq_governor *governor; 1132 const struct devfreq_governor *governor, *prev_governor;
1117 1133
1118 ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor); 1134 ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
1119 if (ret != 1) 1135 if (ret != 1)
@@ -1142,12 +1158,24 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1142 goto out; 1158 goto out;
1143 } 1159 }
1144 } 1160 }
1161 prev_governor = df->governor;
1145 df->governor = governor; 1162 df->governor = governor;
1146 strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN); 1163 strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
1147 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL); 1164 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1148 if (ret) 1165 if (ret) {
1149 dev_warn(dev, "%s: Governor %s not started(%d)\n", 1166 dev_warn(dev, "%s: Governor %s not started(%d)\n",
1150 __func__, df->governor->name, ret); 1167 __func__, df->governor->name, ret);
1168 df->governor = prev_governor;
1169 strncpy(df->governor_name, prev_governor->name,
1170 DEVFREQ_NAME_LEN);
1171 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1172 if (ret) {
1173 dev_err(dev,
1174 "%s: reverting to Governor %s failed (%d)\n",
1175 __func__, df->governor_name, ret);
1176 df->governor = NULL;
1177 }
1178 }
1151out: 1179out:
1152 mutex_unlock(&devfreq_list_lock); 1180 mutex_unlock(&devfreq_list_lock);
1153 1181
@@ -1172,7 +1200,7 @@ static ssize_t available_governors_show(struct device *d,
1172 */ 1200 */
1173 if (df->governor->immutable) { 1201 if (df->governor->immutable) {
1174 count = scnprintf(&buf[count], DEVFREQ_NAME_LEN, 1202 count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
1175 "%s ", df->governor_name); 1203 "%s ", df->governor_name);
1176 /* 1204 /*
1177 * The devfreq device shows the registered governor except for 1205 * The devfreq device shows the registered governor except for
1178 * immutable governors such as passive governor . 1206 * immutable governors such as passive governor .
@@ -1485,8 +1513,8 @@ EXPORT_SYMBOL(devfreq_recommended_opp);
1485 1513
1486/** 1514/**
1487 * devfreq_register_opp_notifier() - Helper function to get devfreq notified 1515 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1488 * for any changes in the OPP availability 1516 * for any changes in the OPP availability
1489 * changes 1517 * changes
1490 * @dev: The devfreq user device. (parent of devfreq) 1518 * @dev: The devfreq user device. (parent of devfreq)
1491 * @devfreq: The devfreq object. 1519 * @devfreq: The devfreq object.
1492 */ 1520 */
@@ -1498,8 +1526,8 @@ EXPORT_SYMBOL(devfreq_register_opp_notifier);
1498 1526
1499/** 1527/**
1500 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq 1528 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1501 * notified for any changes in the OPP 1529 * notified for any changes in the OPP
1502 * availability changes anymore. 1530 * availability changes anymore.
1503 * @dev: The devfreq user device. (parent of devfreq) 1531 * @dev: The devfreq user device. (parent of devfreq)
1504 * @devfreq: The devfreq object. 1532 * @devfreq: The devfreq object.
1505 * 1533 *
@@ -1518,8 +1546,8 @@ static void devm_devfreq_opp_release(struct device *dev, void *res)
1518} 1546}
1519 1547
1520/** 1548/**
1521 * devm_ devfreq_register_opp_notifier() 1549 * devm_devfreq_register_opp_notifier() - Resource-managed
1522 * - Resource-managed devfreq_register_opp_notifier() 1550 * devfreq_register_opp_notifier()
1523 * @dev: The devfreq user device. (parent of devfreq) 1551 * @dev: The devfreq user device. (parent of devfreq)
1524 * @devfreq: The devfreq object. 1552 * @devfreq: The devfreq object.
1525 */ 1553 */
@@ -1547,8 +1575,8 @@ int devm_devfreq_register_opp_notifier(struct device *dev,
1547EXPORT_SYMBOL(devm_devfreq_register_opp_notifier); 1575EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
1548 1576
1549/** 1577/**
1550 * devm_devfreq_unregister_opp_notifier() 1578 * devm_devfreq_unregister_opp_notifier() - Resource-managed
1551 * - Resource-managed devfreq_unregister_opp_notifier() 1579 * devfreq_unregister_opp_notifier()
1552 * @dev: The devfreq user device. (parent of devfreq) 1580 * @dev: The devfreq user device. (parent of devfreq)
1553 * @devfreq: The devfreq object. 1581 * @devfreq: The devfreq object.
1554 */ 1582 */
@@ -1567,8 +1595,8 @@ EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1567 * @list: DEVFREQ_TRANSITION_NOTIFIER. 1595 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1568 */ 1596 */
1569int devfreq_register_notifier(struct devfreq *devfreq, 1597int devfreq_register_notifier(struct devfreq *devfreq,
1570 struct notifier_block *nb, 1598 struct notifier_block *nb,
1571 unsigned int list) 1599 unsigned int list)
1572{ 1600{
1573 int ret = 0; 1601 int ret = 0;
1574 1602
@@ -1674,9 +1702,9 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier);
1674 * @list: DEVFREQ_TRANSITION_NOTIFIER. 1702 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1675 */ 1703 */
1676void devm_devfreq_unregister_notifier(struct device *dev, 1704void devm_devfreq_unregister_notifier(struct device *dev,
1677 struct devfreq *devfreq, 1705 struct devfreq *devfreq,
1678 struct notifier_block *nb, 1706 struct notifier_block *nb,
1679 unsigned int list) 1707 unsigned int list)
1680{ 1708{
1681 WARN_ON(devres_release(dev, devm_devfreq_notifier_release, 1709 WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
1682 devm_devfreq_dev_match, devfreq)); 1710 devm_devfreq_dev_match, devfreq));
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index c61de0bdf053..c2ea94957501 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -529,7 +529,7 @@ static int of_get_devfreq_events(struct device_node *np,
529 if (!ppmu_events[i].name) 529 if (!ppmu_events[i].name)
530 continue; 530 continue;
531 531
532 if (!of_node_cmp(node->name, ppmu_events[i].name)) 532 if (of_node_name_eq(node, ppmu_events[i].name))
533 break; 533 break;
534 } 534 }
535 535
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 22b113363ffc..a436ec4901bb 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -26,6 +26,8 @@
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/of.h> 27#include <linux/of.h>
28 28
29#include <soc/rockchip/rk3399_grf.h>
30
29#define RK3399_DMC_NUM_CH 2 31#define RK3399_DMC_NUM_CH 2
30 32
31/* DDRMON_CTRL */ 33/* DDRMON_CTRL */
@@ -43,18 +45,6 @@
43#define DDRMON_CH1_COUNT_NUM 0x3c 45#define DDRMON_CH1_COUNT_NUM 0x3c
44#define DDRMON_CH1_DFI_ACCESS_NUM 0x40 46#define DDRMON_CH1_DFI_ACCESS_NUM 0x40
45 47
46/* pmu grf */
47#define PMUGRF_OS_REG2 0x308
48#define DDRTYPE_SHIFT 13
49#define DDRTYPE_MASK 7
50
51enum {
52 DDR3 = 3,
53 LPDDR3 = 6,
54 LPDDR4 = 7,
55 UNUSED = 0xFF
56};
57
58struct dmc_usage { 48struct dmc_usage {
59 u32 access; 49 u32 access;
60 u32 total; 50 u32 total;
@@ -83,16 +73,17 @@ static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
83 u32 ddr_type; 73 u32 ddr_type;
84 74
85 /* get ddr type */ 75 /* get ddr type */
86 regmap_read(info->regmap_pmu, PMUGRF_OS_REG2, &val); 76 regmap_read(info->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
87 ddr_type = (val >> DDRTYPE_SHIFT) & DDRTYPE_MASK; 77 ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
78 RK3399_PMUGRF_DDRTYPE_MASK;
88 79
89 /* clear DDRMON_CTRL setting */ 80 /* clear DDRMON_CTRL setting */
90 writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL); 81 writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL);
91 82
92 /* set ddr type to dfi */ 83 /* set ddr type to dfi */
93 if (ddr_type == LPDDR3) 84 if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR3)
94 writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL); 85 writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL);
95 else if (ddr_type == LPDDR4) 86 else if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4)
96 writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL); 87 writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL);
97 88
98 /* enable count, use software mode */ 89 /* enable count, use software mode */
@@ -211,7 +202,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
211 if (IS_ERR(data->clk)) { 202 if (IS_ERR(data->clk)) {
212 dev_err(dev, "Cannot get the clk dmc_clk\n"); 203 dev_err(dev, "Cannot get the clk dmc_clk\n");
213 return PTR_ERR(data->clk); 204 return PTR_ERR(data->clk);
214 }; 205 }
215 206
216 /* try to find the optional reference to the pmu syscon */ 207 /* try to find the optional reference to the pmu syscon */
217 node = of_parse_phandle(np, "rockchip,pmu", 0); 208 node = of_parse_phandle(np, "rockchip,pmu", 0);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index c25658b26598..486cc5b422f1 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -514,6 +514,13 @@ err:
514 return ret; 514 return ret;
515} 515}
516 516
517static void exynos_bus_shutdown(struct platform_device *pdev)
518{
519 struct exynos_bus *bus = dev_get_drvdata(&pdev->dev);
520
521 devfreq_suspend_device(bus->devfreq);
522}
523
517#ifdef CONFIG_PM_SLEEP 524#ifdef CONFIG_PM_SLEEP
518static int exynos_bus_resume(struct device *dev) 525static int exynos_bus_resume(struct device *dev)
519{ 526{
@@ -556,6 +563,7 @@ MODULE_DEVICE_TABLE(of, exynos_bus_of_match);
556 563
557static struct platform_driver exynos_bus_platdrv = { 564static struct platform_driver exynos_bus_platdrv = {
558 .probe = exynos_bus_probe, 565 .probe = exynos_bus_probe,
566 .shutdown = exynos_bus_shutdown,
559 .driver = { 567 .driver = {
560 .name = "exynos-bus", 568 .name = "exynos-bus",
561 .pm = &exynos_bus_pm, 569 .pm = &exynos_bus_pm,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index e795ad2b3f6b..567c034d0301 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -18,14 +18,17 @@
18#include <linux/devfreq.h> 18#include <linux/devfreq.h>
19#include <linux/devfreq-event.h> 19#include <linux/devfreq-event.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/mfd/syscon.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/of.h> 23#include <linux/of.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/pm_opp.h> 25#include <linux/pm_opp.h>
26#include <linux/regmap.h>
25#include <linux/regulator/consumer.h> 27#include <linux/regulator/consumer.h>
26#include <linux/rwsem.h> 28#include <linux/rwsem.h>
27#include <linux/suspend.h> 29#include <linux/suspend.h>
28 30
31#include <soc/rockchip/rk3399_grf.h>
29#include <soc/rockchip/rockchip_sip.h> 32#include <soc/rockchip/rockchip_sip.h>
30 33
31struct dram_timing { 34struct dram_timing {
@@ -69,8 +72,11 @@ struct rk3399_dmcfreq {
69 struct mutex lock; 72 struct mutex lock;
70 struct dram_timing timing; 73 struct dram_timing timing;
71 struct regulator *vdd_center; 74 struct regulator *vdd_center;
75 struct regmap *regmap_pmu;
72 unsigned long rate, target_rate; 76 unsigned long rate, target_rate;
73 unsigned long volt, target_volt; 77 unsigned long volt, target_volt;
78 unsigned int odt_dis_freq;
79 int odt_pd_arg0, odt_pd_arg1;
74}; 80};
75 81
76static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq, 82static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
@@ -80,6 +86,8 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
80 struct dev_pm_opp *opp; 86 struct dev_pm_opp *opp;
81 unsigned long old_clk_rate = dmcfreq->rate; 87 unsigned long old_clk_rate = dmcfreq->rate;
82 unsigned long target_volt, target_rate; 88 unsigned long target_volt, target_rate;
89 struct arm_smccc_res res;
90 bool odt_enable = false;
83 int err; 91 int err;
84 92
85 opp = devfreq_recommended_opp(dev, freq, flags); 93 opp = devfreq_recommended_opp(dev, freq, flags);
@@ -95,6 +103,19 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
95 103
96 mutex_lock(&dmcfreq->lock); 104 mutex_lock(&dmcfreq->lock);
97 105
106 if (target_rate >= dmcfreq->odt_dis_freq)
107 odt_enable = true;
108
109 /*
110 * This makes a SMC call to the TF-A to set the DDR PD (power-down)
111 * timings and to enable or disable the ODT (on-die termination)
112 * resistors.
113 */
114 arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
115 dmcfreq->odt_pd_arg1,
116 ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
117 odt_enable, 0, 0, 0, &res);
118
98 /* 119 /*
99 * If frequency scaling from low to high, adjust voltage first. 120 * If frequency scaling from low to high, adjust voltage first.
100 * If frequency scaling from high to low, adjust frequency first. 121 * If frequency scaling from high to low, adjust frequency first.
@@ -294,11 +315,13 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
294{ 315{
295 struct arm_smccc_res res; 316 struct arm_smccc_res res;
296 struct device *dev = &pdev->dev; 317 struct device *dev = &pdev->dev;
297 struct device_node *np = pdev->dev.of_node; 318 struct device_node *np = pdev->dev.of_node, *node;
298 struct rk3399_dmcfreq *data; 319 struct rk3399_dmcfreq *data;
299 int ret, index, size; 320 int ret, index, size;
300 uint32_t *timing; 321 uint32_t *timing;
301 struct dev_pm_opp *opp; 322 struct dev_pm_opp *opp;
323 u32 ddr_type;
324 u32 val;
302 325
303 data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL); 326 data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
304 if (!data) 327 if (!data)
@@ -322,7 +345,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
322 345
323 dev_err(dev, "Cannot get the clk dmc_clk\n"); 346 dev_err(dev, "Cannot get the clk dmc_clk\n");
324 return PTR_ERR(data->dmc_clk); 347 return PTR_ERR(data->dmc_clk);
325 }; 348 }
326 349
327 data->edev = devfreq_event_get_edev_by_phandle(dev, 0); 350 data->edev = devfreq_event_get_edev_by_phandle(dev, 0);
328 if (IS_ERR(data->edev)) 351 if (IS_ERR(data->edev))
@@ -354,11 +377,57 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
354 } 377 }
355 } 378 }
356 379
380 node = of_parse_phandle(np, "rockchip,pmu", 0);
381 if (node) {
382 data->regmap_pmu = syscon_node_to_regmap(node);
383 if (IS_ERR(data->regmap_pmu))
384 return PTR_ERR(data->regmap_pmu);
385 }
386
387 regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
388 ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
389 RK3399_PMUGRF_DDRTYPE_MASK;
390
391 switch (ddr_type) {
392 case RK3399_PMUGRF_DDRTYPE_DDR3:
393 data->odt_dis_freq = data->timing.ddr3_odt_dis_freq;
394 break;
395 case RK3399_PMUGRF_DDRTYPE_LPDDR3:
396 data->odt_dis_freq = data->timing.lpddr3_odt_dis_freq;
397 break;
398 case RK3399_PMUGRF_DDRTYPE_LPDDR4:
399 data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
400 break;
401 default:
402 return -EINVAL;
403 };
404
357 arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0, 405 arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
358 ROCKCHIP_SIP_CONFIG_DRAM_INIT, 406 ROCKCHIP_SIP_CONFIG_DRAM_INIT,
359 0, 0, 0, 0, &res); 407 0, 0, 0, 0, &res);
360 408
361 /* 409 /*
410 * In TF-A there is a platform SIP call to set the PD (power-down)
411 * timings and to enable or disable the ODT (on-die termination).
412 * This call needs three arguments as follows:
413 *
414 * arg0:
415 * bit[0-7] : sr_idle
416 * bit[8-15] : sr_mc_gate_idle
417 * bit[16-31] : standby idle
418 * arg1:
419 * bit[0-11] : pd_idle
420 * bit[16-27] : srpd_lite_idle
421 * arg2:
422 * bit[0] : odt enable
423 */
424 data->odt_pd_arg0 = (data->timing.sr_idle & 0xff) |
425 ((data->timing.sr_mc_gate_idle & 0xff) << 8) |
426 ((data->timing.standby_idle & 0xffff) << 16);
427 data->odt_pd_arg1 = (data->timing.pd_idle & 0xfff) |
428 ((data->timing.srpd_lite_idle & 0xfff) << 16);
429
430 /*
362 * We add a devfreq driver to our parent since it has a device tree node 431 * We add a devfreq driver to our parent since it has a device tree node
363 * with operating points. 432 * with operating points.
364 */ 433 */
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index c59d2eee5d30..c89ba7b834ff 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -573,10 +573,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
573static int tegra_governor_event_handler(struct devfreq *devfreq, 573static int tegra_governor_event_handler(struct devfreq *devfreq,
574 unsigned int event, void *data) 574 unsigned int event, void *data)
575{ 575{
576 struct tegra_devfreq *tegra; 576 struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
577 int ret = 0;
578
579 tegra = dev_get_drvdata(devfreq->dev.parent);
580 577
581 switch (event) { 578 switch (event) {
582 case DEVFREQ_GOV_START: 579 case DEVFREQ_GOV_START:
@@ -600,7 +597,7 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
600 break; 597 break;
601 } 598 }
602 599
603 return ret; 600 return 0;
604} 601}
605 602
606static struct devfreq_governor tegra_devfreq_governor = { 603static struct devfreq_governor tegra_devfreq_governor = {
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index cac16c4b0df3..7b655f6156fb 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -5,20 +5,6 @@
5 5
6menu "Firmware Drivers" 6menu "Firmware Drivers"
7 7
8config ARM_PSCI_FW
9 bool
10
11config ARM_PSCI_CHECKER
12 bool "ARM PSCI checker"
13 depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
14 help
15 Run the PSCI checker during startup. This checks that hotplug and
16 suspend operations work correctly when using PSCI.
17
18 The torture tests may interfere with the PSCI checker by turning CPUs
19 on and off through hotplug, so for now torture tests and PSCI checker
20 are mutually exclusive.
21
22config ARM_SCMI_PROTOCOL 8config ARM_SCMI_PROTOCOL
23 bool "ARM System Control and Management Interface (SCMI) Message Protocol" 9 bool "ARM System Control and Management Interface (SCMI) Message Protocol"
24 depends on ARM || ARM64 || COMPILE_TEST 10 depends on ARM || ARM64 || COMPILE_TEST
@@ -270,6 +256,7 @@ config TI_SCI_PROTOCOL
270config HAVE_ARM_SMCCC 256config HAVE_ARM_SMCCC
271 bool 257 bool
272 258
259source "drivers/firmware/psci/Kconfig"
273source "drivers/firmware/broadcom/Kconfig" 260source "drivers/firmware/broadcom/Kconfig"
274source "drivers/firmware/google/Kconfig" 261source "drivers/firmware/google/Kconfig"
275source "drivers/firmware/efi/Kconfig" 262source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 80feb635120f..9a3909a22682 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,8 +2,6 @@
2# 2#
3# Makefile for the linux kernel. 3# Makefile for the linux kernel.
4# 4#
5obj-$(CONFIG_ARM_PSCI_FW) += psci.o
6obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
7obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o 5obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
8obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o 6obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
9obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o 7obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
@@ -25,6 +23,7 @@ CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQU
25obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o 23obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
26 24
27obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/ 25obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
26obj-y += psci/
28obj-y += broadcom/ 27obj-y += broadcom/
29obj-y += meson/ 28obj-y += meson/
30obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ 29obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
diff --git a/drivers/firmware/psci/Kconfig b/drivers/firmware/psci/Kconfig
new file mode 100644
index 000000000000..26a3b32bf7ab
--- /dev/null
+++ b/drivers/firmware/psci/Kconfig
@@ -0,0 +1,13 @@
1config ARM_PSCI_FW
2 bool
3
4config ARM_PSCI_CHECKER
5 bool "ARM PSCI checker"
6 depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
7 help
8 Run the PSCI checker during startup. This checks that hotplug and
9 suspend operations work correctly when using PSCI.
10
11 The torture tests may interfere with the PSCI checker by turning CPUs
12 on and off through hotplug, so for now torture tests and PSCI checker
13 are mutually exclusive.
diff --git a/drivers/firmware/psci/Makefile b/drivers/firmware/psci/Makefile
new file mode 100644
index 000000000000..1956b882470f
--- /dev/null
+++ b/drivers/firmware/psci/Makefile
@@ -0,0 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
2#
3obj-$(CONFIG_ARM_PSCI_FW) += psci.o
4obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci/psci.c
index c80ec1d03274..fe090ef43d28 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -88,6 +88,7 @@ static u32 psci_function_id[PSCI_FN_MAX];
88 PSCI_1_0_EXT_POWER_STATE_TYPE_MASK) 88 PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
89 89
90static u32 psci_cpu_suspend_feature; 90static u32 psci_cpu_suspend_feature;
91static bool psci_system_reset2_supported;
91 92
92static inline bool psci_has_ext_power_state(void) 93static inline bool psci_has_ext_power_state(void)
93{ 94{
@@ -95,6 +96,11 @@ static inline bool psci_has_ext_power_state(void)
95 PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK; 96 PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
96} 97}
97 98
99static inline bool psci_has_osi_support(void)
100{
101 return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
102}
103
98static inline bool psci_power_state_loses_context(u32 state) 104static inline bool psci_power_state_loses_context(u32 state)
99{ 105{
100 const u32 mask = psci_has_ext_power_state() ? 106 const u32 mask = psci_has_ext_power_state() ?
@@ -253,7 +259,17 @@ static int get_set_conduit_method(struct device_node *np)
253 259
254static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd) 260static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
255{ 261{
256 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); 262 if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
263 psci_system_reset2_supported) {
264 /*
265 * reset_type[31] = 0 (architectural)
266 * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
267 * cookie = 0 (ignored by the implementation)
268 */
269 invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
270 } else {
271 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
272 }
257} 273}
258 274
259static void psci_sys_poweroff(void) 275static void psci_sys_poweroff(void)
@@ -270,9 +286,26 @@ static int __init psci_features(u32 psci_func_id)
270#ifdef CONFIG_CPU_IDLE 286#ifdef CONFIG_CPU_IDLE
271static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); 287static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
272 288
289static int psci_dt_parse_state_node(struct device_node *np, u32 *state)
290{
291 int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
292
293 if (err) {
294 pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
295 return err;
296 }
297
298 if (!psci_power_state_is_valid(*state)) {
299 pr_warn("Invalid PSCI power state %#x\n", *state);
300 return -EINVAL;
301 }
302
303 return 0;
304}
305
273static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) 306static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
274{ 307{
275 int i, ret, count = 0; 308 int i, ret = 0, count = 0;
276 u32 *psci_states; 309 u32 *psci_states;
277 struct device_node *state_node; 310 struct device_node *state_node;
278 311
@@ -291,29 +324,16 @@ static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
291 return -ENOMEM; 324 return -ENOMEM;
292 325
293 for (i = 0; i < count; i++) { 326 for (i = 0; i < count; i++) {
294 u32 state;
295
296 state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); 327 state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
328 ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
329 of_node_put(state_node);
297 330
298 ret = of_property_read_u32(state_node, 331 if (ret)
299 "arm,psci-suspend-param",
300 &state);
301 if (ret) {
302 pr_warn(" * %pOF missing arm,psci-suspend-param property\n",
303 state_node);
304 of_node_put(state_node);
305 goto free_mem; 332 goto free_mem;
306 }
307 333
308 of_node_put(state_node); 334 pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
309 pr_debug("psci-power-state %#x index %d\n", state, i);
310 if (!psci_power_state_is_valid(state)) {
311 pr_warn("Invalid PSCI power state %#x\n", state);
312 ret = -EINVAL;
313 goto free_mem;
314 }
315 psci_states[i] = state;
316 } 335 }
336
317 /* Idle states parsed correctly, initialize per-cpu pointer */ 337 /* Idle states parsed correctly, initialize per-cpu pointer */
318 per_cpu(psci_power_state, cpu) = psci_states; 338 per_cpu(psci_power_state, cpu) = psci_states;
319 return 0; 339 return 0;
@@ -451,6 +471,16 @@ static const struct platform_suspend_ops psci_suspend_ops = {
451 .enter = psci_system_suspend_enter, 471 .enter = psci_system_suspend_enter,
452}; 472};
453 473
474static void __init psci_init_system_reset2(void)
475{
476 int ret;
477
478 ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));
479
480 if (ret != PSCI_RET_NOT_SUPPORTED)
481 psci_system_reset2_supported = true;
482}
483
454static void __init psci_init_system_suspend(void) 484static void __init psci_init_system_suspend(void)
455{ 485{
456 int ret; 486 int ret;
@@ -588,6 +618,7 @@ static int __init psci_probe(void)
588 psci_init_smccc(); 618 psci_init_smccc();
589 psci_init_cpu_suspend(); 619 psci_init_cpu_suspend();
590 psci_init_system_suspend(); 620 psci_init_system_suspend();
621 psci_init_system_reset2();
591 } 622 }
592 623
593 return 0; 624 return 0;
@@ -605,9 +636,9 @@ static int __init psci_0_2_init(struct device_node *np)
605 int err; 636 int err;
606 637
607 err = get_set_conduit_method(np); 638 err = get_set_conduit_method(np);
608
609 if (err) 639 if (err)
610 goto out_put_node; 640 return err;
641
611 /* 642 /*
612 * Starting with v0.2, the PSCI specification introduced a call 643 * Starting with v0.2, the PSCI specification introduced a call
613 * (PSCI_VERSION) that allows probing the firmware version, so 644 * (PSCI_VERSION) that allows probing the firmware version, so
@@ -615,11 +646,7 @@ static int __init psci_0_2_init(struct device_node *np)
615 * can be carried out according to the specific version reported 646 * can be carried out according to the specific version reported
616 * by firmware 647 * by firmware
617 */ 648 */
618 err = psci_probe(); 649 return psci_probe();
619
620out_put_node:
621 of_node_put(np);
622 return err;
623} 650}
624 651
625/* 652/*
@@ -631,9 +658,8 @@ static int __init psci_0_1_init(struct device_node *np)
631 int err; 658 int err;
632 659
633 err = get_set_conduit_method(np); 660 err = get_set_conduit_method(np);
634
635 if (err) 661 if (err)
636 goto out_put_node; 662 return err;
637 663
638 pr_info("Using PSCI v0.1 Function IDs from DT\n"); 664 pr_info("Using PSCI v0.1 Function IDs from DT\n");
639 665
@@ -657,15 +683,27 @@ static int __init psci_0_1_init(struct device_node *np)
657 psci_ops.migrate = psci_migrate; 683 psci_ops.migrate = psci_migrate;
658 } 684 }
659 685
660out_put_node: 686 return 0;
661 of_node_put(np); 687}
662 return err; 688
689static int __init psci_1_0_init(struct device_node *np)
690{
691 int err;
692
693 err = psci_0_2_init(np);
694 if (err)
695 return err;
696
697 if (psci_has_osi_support())
698 pr_info("OSI mode supported.\n");
699
700 return 0;
663} 701}
664 702
665static const struct of_device_id psci_of_match[] __initconst = { 703static const struct of_device_id psci_of_match[] __initconst = {
666 { .compatible = "arm,psci", .data = psci_0_1_init}, 704 { .compatible = "arm,psci", .data = psci_0_1_init},
667 { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, 705 { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
668 { .compatible = "arm,psci-1.0", .data = psci_0_2_init}, 706 { .compatible = "arm,psci-1.0", .data = psci_1_0_init},
669 {}, 707 {},
670}; 708};
671 709
@@ -674,6 +712,7 @@ int __init psci_dt_init(void)
674 struct device_node *np; 712 struct device_node *np;
675 const struct of_device_id *matched_np; 713 const struct of_device_id *matched_np;
676 psci_initcall_t init_fn; 714 psci_initcall_t init_fn;
715 int ret;
677 716
678 np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); 717 np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
679 718
@@ -681,7 +720,10 @@ int __init psci_dt_init(void)
681 return -ENODEV; 720 return -ENODEV;
682 721
683 init_fn = (psci_initcall_t)matched_np->data; 722 init_fn = (psci_initcall_t)matched_np->data;
684 return init_fn(np); 723 ret = init_fn(np);
724
725 of_node_put(np);
726 return ret;
685} 727}
686 728
687#ifdef CONFIG_ACPI 729#ifdef CONFIG_ACPI
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 346943657962..346943657962 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 0420f7e8ad5b..0e7703fe733f 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -526,6 +526,60 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
526} 526}
527EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); 527EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
528 528
529/**
530 * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
531 * target voltage.
532 * @dev: Device for which we do this operation.
533 * @u_volt: Target voltage.
534 *
535 * Search for OPP with highest (ceil) frequency and has voltage <= u_volt.
536 *
537 * Return: matching *opp, else returns ERR_PTR in case of error which should be
538 * handled using IS_ERR.
539 *
540 * Error return values can be:
541 * EINVAL: bad parameters
542 *
543 * The callers are required to call dev_pm_opp_put() for the returned OPP after
544 * use.
545 */
546struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
547 unsigned long u_volt)
548{
549 struct opp_table *opp_table;
550 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
551
552 if (!dev || !u_volt) {
553 dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
554 u_volt);
555 return ERR_PTR(-EINVAL);
556 }
557
558 opp_table = _find_opp_table(dev);
559 if (IS_ERR(opp_table))
560 return ERR_CAST(opp_table);
561
562 mutex_lock(&opp_table->lock);
563
564 list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
565 if (temp_opp->available) {
566 if (temp_opp->supplies[0].u_volt > u_volt)
567 break;
568 opp = temp_opp;
569 }
570 }
571
572 /* Increment the reference count of OPP */
573 if (!IS_ERR(opp))
574 dev_pm_opp_get(opp);
575
576 mutex_unlock(&opp_table->lock);
577 dev_pm_opp_put_opp_table(opp_table);
578
579 return opp;
580}
581EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
582
529static int _set_opp_voltage(struct device *dev, struct regulator *reg, 583static int _set_opp_voltage(struct device *dev, struct regulator *reg,
530 struct dev_pm_opp_supply *supply) 584 struct dev_pm_opp_supply *supply)
531{ 585{
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index b160e98076e3..684caf067003 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
178static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { } 178static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
179#endif 179#endif
180 180
181static inline bool policy_is_inactive(struct cpufreq_policy *policy)
182{
183 return cpumask_empty(policy->cpus);
184}
185
181static inline bool policy_is_shared(struct cpufreq_policy *policy) 186static inline bool policy_is_shared(struct cpufreq_policy *policy)
182{ 187{
183 return cpumask_weight(policy->cpus) > 1; 188 return cpumask_weight(policy->cpus) > 1;
@@ -193,8 +198,14 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu);
193void disable_cpufreq(void); 198void disable_cpufreq(void);
194 199
195u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); 200u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
201
202struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
203void cpufreq_cpu_release(struct cpufreq_policy *policy);
196int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); 204int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
205int cpufreq_set_policy(struct cpufreq_policy *policy,
206 struct cpufreq_policy *new_policy);
197void cpufreq_update_policy(unsigned int cpu); 207void cpufreq_update_policy(unsigned int cpu);
208void cpufreq_update_limits(unsigned int cpu);
198bool have_governor_per_policy(void); 209bool have_governor_per_policy(void);
199struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); 210struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
200void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); 211void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
@@ -322,6 +333,9 @@ struct cpufreq_driver {
322 /* should be defined, if possible */ 333 /* should be defined, if possible */
323 unsigned int (*get)(unsigned int cpu); 334 unsigned int (*get)(unsigned int cpu);
324 335
336 /* Called to update policy limits on firmware notifications. */
337 void (*update_limits)(unsigned int cpu);
338
325 /* optional */ 339 /* optional */
326 int (*bios_limit)(int cpu, unsigned int *limit); 340 int (*bios_limit)(int cpu, unsigned int *limit);
327 341
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e78281d07b70..dbfdd0fadbef 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -147,6 +147,7 @@ enum cpuhp_state {
147 CPUHP_AP_X86_VDSO_VMA_ONLINE, 147 CPUHP_AP_X86_VDSO_VMA_ONLINE,
148 CPUHP_AP_IRQ_AFFINITY_ONLINE, 148 CPUHP_AP_IRQ_AFFINITY_ONLINE,
149 CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, 149 CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
150 CPUHP_AP_X86_INTEL_EPB_ONLINE,
150 CPUHP_AP_PERF_ONLINE, 151 CPUHP_AP_PERF_ONLINE,
151 CPUHP_AP_PERF_X86_ONLINE, 152 CPUHP_AP_PERF_X86_ONLINE,
152 CPUHP_AP_PERF_X86_UNCORE_ONLINE, 153 CPUHP_AP_PERF_X86_UNCORE_ONLINE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 3b39472324a3..bb9a0db89f1a 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -83,6 +83,7 @@ struct cpuidle_device {
83 unsigned int use_deepest_state:1; 83 unsigned int use_deepest_state:1;
84 unsigned int poll_time_limit:1; 84 unsigned int poll_time_limit:1;
85 unsigned int cpu; 85 unsigned int cpu;
86 ktime_t next_hrtimer;
86 87
87 int last_residency; 88 int last_residency;
88 struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; 89 struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 1ed5874bcee0..0e8e356bed6a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -16,6 +16,7 @@
16#include <linux/of.h> 16#include <linux/of.h>
17#include <linux/notifier.h> 17#include <linux/notifier.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/cpumask.h>
19 20
20/* 21/*
21 * Flags to control the behaviour of a genpd. 22 * Flags to control the behaviour of a genpd.
@@ -42,11 +43,22 @@
42 * GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered 43 * GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered
43 * on, in case any of its attached devices is used 44 * on, in case any of its attached devices is used
44 * in the wakeup path to serve system wakeups. 45 * in the wakeup path to serve system wakeups.
46 *
47 * GENPD_FLAG_CPU_DOMAIN: Instructs genpd that it should expect to get
48 * devices attached, which may belong to CPUs or
49 * possibly have subdomains with CPUs attached.
50 * This flag enables the genpd backend driver to
51 * deploy idle power management support for CPUs
52 * and groups of CPUs. Note that, the backend
53 * driver must then comply with the so called,
54 * last-man-standing algorithm, for the CPUs in the
55 * PM domain.
45 */ 56 */
46#define GENPD_FLAG_PM_CLK (1U << 0) 57#define GENPD_FLAG_PM_CLK (1U << 0)
47#define GENPD_FLAG_IRQ_SAFE (1U << 1) 58#define GENPD_FLAG_IRQ_SAFE (1U << 1)
48#define GENPD_FLAG_ALWAYS_ON (1U << 2) 59#define GENPD_FLAG_ALWAYS_ON (1U << 2)
49#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) 60#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
61#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
50 62
51enum gpd_status { 63enum gpd_status {
52 GPD_STATE_ACTIVE = 0, /* PM domain is active */ 64 GPD_STATE_ACTIVE = 0, /* PM domain is active */
@@ -69,6 +81,7 @@ struct genpd_power_state {
69 s64 residency_ns; 81 s64 residency_ns;
70 struct fwnode_handle *fwnode; 82 struct fwnode_handle *fwnode;
71 ktime_t idle_time; 83 ktime_t idle_time;
84 void *data;
72}; 85};
73 86
74struct genpd_lock_ops; 87struct genpd_lock_ops;
@@ -93,6 +106,7 @@ struct generic_pm_domain {
93 unsigned int suspended_count; /* System suspend device counter */ 106 unsigned int suspended_count; /* System suspend device counter */
94 unsigned int prepared_count; /* Suspend counter of prepared devices */ 107 unsigned int prepared_count; /* Suspend counter of prepared devices */
95 unsigned int performance_state; /* Aggregated max performance state */ 108 unsigned int performance_state; /* Aggregated max performance state */
109 cpumask_var_t cpus; /* A cpumask of the attached CPUs */
96 int (*power_off)(struct generic_pm_domain *domain); 110 int (*power_off)(struct generic_pm_domain *domain);
97 int (*power_on)(struct generic_pm_domain *domain); 111 int (*power_on)(struct generic_pm_domain *domain);
98 struct opp_table *opp_table; /* OPP table of the genpd */ 112 struct opp_table *opp_table; /* OPP table of the genpd */
@@ -104,15 +118,17 @@ struct generic_pm_domain {
104 s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ 118 s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
105 bool max_off_time_changed; 119 bool max_off_time_changed;
106 bool cached_power_down_ok; 120 bool cached_power_down_ok;
121 bool cached_power_down_state_idx;
107 int (*attach_dev)(struct generic_pm_domain *domain, 122 int (*attach_dev)(struct generic_pm_domain *domain,
108 struct device *dev); 123 struct device *dev);
109 void (*detach_dev)(struct generic_pm_domain *domain, 124 void (*detach_dev)(struct generic_pm_domain *domain,
110 struct device *dev); 125 struct device *dev);
111 unsigned int flags; /* Bit field of configs for genpd */ 126 unsigned int flags; /* Bit field of configs for genpd */
112 struct genpd_power_state *states; 127 struct genpd_power_state *states;
128 void (*free_states)(struct genpd_power_state *states,
129 unsigned int state_count);
113 unsigned int state_count; /* number of states */ 130 unsigned int state_count; /* number of states */
114 unsigned int state_idx; /* state that genpd will go to when off */ 131 unsigned int state_idx; /* state that genpd will go to when off */
115 void *free; /* Free the state that was allocated for default */
116 ktime_t on_time; 132 ktime_t on_time;
117 ktime_t accounting_time; 133 ktime_t accounting_time;
118 const struct genpd_lock_ops *lock_ops; 134 const struct genpd_lock_ops *lock_ops;
@@ -159,6 +175,7 @@ struct generic_pm_domain_data {
159 struct pm_domain_data base; 175 struct pm_domain_data base;
160 struct gpd_timing_data td; 176 struct gpd_timing_data td;
161 struct notifier_block nb; 177 struct notifier_block nb;
178 int cpu;
162 unsigned int performance_state; 179 unsigned int performance_state;
163 void *data; 180 void *data;
164}; 181};
@@ -187,6 +204,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
187 204
188extern struct dev_power_governor simple_qos_governor; 205extern struct dev_power_governor simple_qos_governor;
189extern struct dev_power_governor pm_domain_always_on_gov; 206extern struct dev_power_governor pm_domain_always_on_gov;
207#ifdef CONFIG_CPU_IDLE
208extern struct dev_power_governor pm_domain_cpu_gov;
209#endif
190#else 210#else
191 211
192static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) 212static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 24c757a32a7b..b150fe97ce5a 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -102,6 +102,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
102 102
103struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, 103struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
104 unsigned long *freq); 104 unsigned long *freq);
105struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
106 unsigned long u_volt);
105 107
106struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, 108struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
107 unsigned long *freq); 109 unsigned long *freq);
@@ -207,6 +209,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
207 return ERR_PTR(-ENOTSUPP); 209 return ERR_PTR(-ENOTSUPP);
208} 210}
209 211
212static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
213 unsigned long u_volt)
214{
215 return ERR_PTR(-ENOTSUPP);
216}
217
210static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, 218static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
211 unsigned long *freq) 219 unsigned long *freq)
212{ 220{
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 3f529ad9a9d2..6b3ea9ea6a9e 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -425,6 +425,7 @@ void restore_processor_state(void);
425/* kernel/power/main.c */ 425/* kernel/power/main.c */
426extern int register_pm_notifier(struct notifier_block *nb); 426extern int register_pm_notifier(struct notifier_block *nb);
427extern int unregister_pm_notifier(struct notifier_block *nb); 427extern int unregister_pm_notifier(struct notifier_block *nb);
428extern void ksys_sync_helper(void);
428 429
429#define pm_notifier(fn, pri) { \ 430#define pm_notifier(fn, pri) { \
430 static struct notifier_block fn##_nb = \ 431 static struct notifier_block fn##_nb = \
@@ -462,6 +463,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
462 return 0; 463 return 0;
463} 464}
464 465
466static inline void ksys_sync_helper(void) {}
467
465#define pm_notifier(fn, pri) do { (void)(fn); } while (0) 468#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
466 469
467static inline bool pm_wakeup_pending(void) { return false; } 470static inline bool pm_wakeup_pending(void) { return false; }
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 76acb48acdb7..f92a10b5e112 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -128,6 +128,7 @@ extern void tick_nohz_idle_enter(void);
128extern void tick_nohz_idle_exit(void); 128extern void tick_nohz_idle_exit(void);
129extern void tick_nohz_irq_exit(void); 129extern void tick_nohz_irq_exit(void);
130extern bool tick_nohz_idle_got_tick(void); 130extern bool tick_nohz_idle_got_tick(void);
131extern ktime_t tick_nohz_get_next_hrtimer(void);
131extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next); 132extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
132extern unsigned long tick_nohz_get_idle_calls(void); 133extern unsigned long tick_nohz_get_idle_calls(void);
133extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); 134extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
@@ -151,7 +152,11 @@ static inline void tick_nohz_idle_restart_tick(void) { }
151static inline void tick_nohz_idle_enter(void) { } 152static inline void tick_nohz_idle_enter(void) { }
152static inline void tick_nohz_idle_exit(void) { } 153static inline void tick_nohz_idle_exit(void) { }
153static inline bool tick_nohz_idle_got_tick(void) { return false; } 154static inline bool tick_nohz_idle_got_tick(void) { return false; }
154 155static inline ktime_t tick_nohz_get_next_hrtimer(void)
156{
157 /* Next wake up is the tick period, assume it starts now */
158 return ktime_add(ktime_get(), TICK_NSEC);
159}
155static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) 160static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
156{ 161{
157 *delta_next = TICK_NSEC; 162 *delta_next = TICK_NSEC;
diff --git a/include/soc/rockchip/rk3399_grf.h b/include/soc/rockchip/rk3399_grf.h
new file mode 100644
index 000000000000..3eebabcb2812
--- /dev/null
+++ b/include/soc/rockchip/rk3399_grf.h
@@ -0,0 +1,21 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Rockchip General Register Files definitions
4 *
5 * Copyright (c) 2018, Collabora Ltd.
6 * Author: Enric Balletbo i Serra <enric.balletbo@collabora.com>
7 */
8
9#ifndef __SOC_RK3399_GRF_H
10#define __SOC_RK3399_GRF_H
11
12/* PMU GRF Registers */
13#define RK3399_PMUGRF_OS_REG2 0x308
14#define RK3399_PMUGRF_DDRTYPE_SHIFT 13
15#define RK3399_PMUGRF_DDRTYPE_MASK 7
16#define RK3399_PMUGRF_DDRTYPE_DDR3 3
17#define RK3399_PMUGRF_DDRTYPE_LPDDR2 5
18#define RK3399_PMUGRF_DDRTYPE_LPDDR3 6
19#define RK3399_PMUGRF_DDRTYPE_LPDDR4 7
20
21#endif
diff --git a/include/soc/rockchip/rockchip_sip.h b/include/soc/rockchip/rockchip_sip.h
index 7e28092c4d3d..ad9482c56797 100644
--- a/include/soc/rockchip/rockchip_sip.h
+++ b/include/soc/rockchip/rockchip_sip.h
@@ -23,5 +23,6 @@
23#define ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE 0x05 23#define ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE 0x05
24#define ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ 0x06 24#define ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ 0x06
25#define ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM 0x07 25#define ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM 0x07
26#define ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD 0x08
26 27
27#endif 28#endif
diff --git a/include/trace/events/devfreq.h b/include/trace/events/devfreq.h
new file mode 100644
index 000000000000..cf5b8772175d
--- /dev/null
+++ b/include/trace/events/devfreq.h
@@ -0,0 +1,40 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM devfreq
4
5#if !defined(_TRACE_DEVFREQ_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_DEVFREQ_H
7
8#include <linux/devfreq.h>
9#include <linux/tracepoint.h>
10
11TRACE_EVENT(devfreq_monitor,
12 TP_PROTO(struct devfreq *devfreq),
13
14 TP_ARGS(devfreq),
15
16 TP_STRUCT__entry(
17 __field(unsigned long, freq)
18 __field(unsigned long, busy_time)
19 __field(unsigned long, total_time)
20 __field(unsigned int, polling_ms)
21 __string(dev_name, dev_name(&devfreq->dev))
22 ),
23
24 TP_fast_assign(
25 __entry->freq = devfreq->previous_freq;
26 __entry->busy_time = devfreq->last_status.busy_time;
27 __entry->total_time = devfreq->last_status.total_time;
28 __entry->polling_ms = devfreq->profile->polling_ms;
29 __assign_str(dev_name, dev_name(&devfreq->dev));
30 ),
31
32 TP_printk("dev_name=%s freq=%lu polling_ms=%u load=%lu",
33 __get_str(dev_name), __entry->freq, __entry->polling_ms,
34 __entry->total_time == 0 ? 0 :
35 (100 * __entry->busy_time) / __entry->total_time)
36);
37#endif /* _TRACE_DEVFREQ_H */
38
39/* This part must be outside protection */
40#include <trace/define_trace.h>
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index b3bcabe380da..2fcad1dd0b0e 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -49,8 +49,11 @@
49 49
50#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10) 50#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10)
51#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14) 51#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14)
52#define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15)
53#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
52 54
53#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14) 55#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
56#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
54 57
55/* PSCI v0.2 power state encoding for CPU_SUSPEND function */ 58/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
56#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff 59#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
@@ -97,6 +100,10 @@
97#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK \ 100#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK \
98 (0x1 << PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT) 101 (0x1 << PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT)
99 102
103#define PSCI_1_0_OS_INITIATED BIT(0)
104#define PSCI_1_0_SUSPEND_MODE_PC 0
105#define PSCI_1_0_SUSPEND_MODE_OSI 1
106
100/* PSCI return values (inclusive of all PSCI versions) */ 107/* PSCI return values (inclusive of all PSCI versions) */
101#define PSCI_RET_SUCCESS 0 108#define PSCI_RET_SUCCESS 0
102#define PSCI_RET_NOT_SUPPORTED -1 109#define PSCI_RET_NOT_SUPPORTED -1
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index cfc7a57049e4..c8c272df7154 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -14,7 +14,6 @@
14 14
15#include <linux/export.h> 15#include <linux/export.h>
16#include <linux/suspend.h> 16#include <linux/suspend.h>
17#include <linux/syscalls.h>
18#include <linux/reboot.h> 17#include <linux/reboot.h>
19#include <linux/string.h> 18#include <linux/string.h>
20#include <linux/device.h> 19#include <linux/device.h>
@@ -709,9 +708,7 @@ int hibernate(void)
709 goto Exit; 708 goto Exit;
710 } 709 }
711 710
712 pr_info("Syncing filesystems ... \n"); 711 ksys_sync_helper();
713 ksys_sync();
714 pr_info("done.\n");
715 712
716 error = freeze_processes(); 713 error = freeze_processes();
717 if (error) 714 if (error)
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 98e76cad128b..4f43e724f6eb 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -16,6 +16,7 @@
16#include <linux/debugfs.h> 16#include <linux/debugfs.h>
17#include <linux/seq_file.h> 17#include <linux/seq_file.h>
18#include <linux/suspend.h> 18#include <linux/suspend.h>
19#include <linux/syscalls.h>
19 20
20#include "power.h" 21#include "power.h"
21 22
@@ -51,6 +52,19 @@ void unlock_system_sleep(void)
51} 52}
52EXPORT_SYMBOL_GPL(unlock_system_sleep); 53EXPORT_SYMBOL_GPL(unlock_system_sleep);
53 54
55void ksys_sync_helper(void)
56{
57 ktime_t start;
58 long elapsed_msecs;
59
60 start = ktime_get();
61 ksys_sync();
62 elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start));
63 pr_info("Filesystems sync: %ld.%03ld seconds\n",
64 elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC);
65}
66EXPORT_SYMBOL_GPL(ksys_sync_helper);
67
54/* Routines for PM-transition notifications */ 68/* Routines for PM-transition notifications */
55 69
56static BLOCKING_NOTIFIER_HEAD(pm_chain_head); 70static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 59b6def23046..ef908c134b34 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -17,7 +17,6 @@
17#include <linux/console.h> 17#include <linux/console.h>
18#include <linux/cpu.h> 18#include <linux/cpu.h>
19#include <linux/cpuidle.h> 19#include <linux/cpuidle.h>
20#include <linux/syscalls.h>
21#include <linux/gfp.h> 20#include <linux/gfp.h>
22#include <linux/io.h> 21#include <linux/io.h>
23#include <linux/kernel.h> 22#include <linux/kernel.h>
@@ -568,13 +567,11 @@ static int enter_state(suspend_state_t state)
568 if (state == PM_SUSPEND_TO_IDLE) 567 if (state == PM_SUSPEND_TO_IDLE)
569 s2idle_begin(); 568 s2idle_begin();
570 569
571#ifndef CONFIG_SUSPEND_SKIP_SYNC 570 if (!IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC)) {
572 trace_suspend_resume(TPS("sync_filesystems"), 0, true); 571 trace_suspend_resume(TPS("sync_filesystems"), 0, true);
573 pr_info("Syncing filesystems ... "); 572 ksys_sync_helper();
574 ksys_sync(); 573 trace_suspend_resume(TPS("sync_filesystems"), 0, false);
575 pr_cont("done.\n"); 574 }
576 trace_suspend_resume(TPS("sync_filesystems"), 0, false);
577#endif
578 575
579 pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]); 576 pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
580 pm_suspend_clear_flags(); 577 pm_suspend_clear_flags();
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 2d8b60a3c86b..cb24e840a3e6 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/suspend.h> 12#include <linux/suspend.h>
13#include <linux/syscalls.h>
14#include <linux/reboot.h> 13#include <linux/reboot.h>
15#include <linux/string.h> 14#include <linux/string.h>
16#include <linux/device.h> 15#include <linux/device.h>
@@ -228,9 +227,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
228 if (data->frozen) 227 if (data->frozen)
229 break; 228 break;
230 229
231 printk("Syncing filesystems ... "); 230 ksys_sync_helper();
232 ksys_sync();
233 printk("done.\n");
234 231
235 error = freeze_processes(); 232 error = freeze_processes();
236 if (error) 233 if (error)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3638d2377e3c..5403479073b0 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -13,6 +13,8 @@
13#include <linux/sched/cpufreq.h> 13#include <linux/sched/cpufreq.h>
14#include <trace/events/power.h> 14#include <trace/events/power.h>
15 15
16#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
17
16struct sugov_tunables { 18struct sugov_tunables {
17 struct gov_attr_set attr_set; 19 struct gov_attr_set attr_set;
18 unsigned int rate_limit_us; 20 unsigned int rate_limit_us;
@@ -51,7 +53,6 @@ struct sugov_cpu {
51 u64 last_update; 53 u64 last_update;
52 54
53 unsigned long bw_dl; 55 unsigned long bw_dl;
54 unsigned long min;
55 unsigned long max; 56 unsigned long max;
56 57
57 /* The field below is for single-CPU policies only: */ 58 /* The field below is for single-CPU policies only: */
@@ -291,8 +292,8 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
291 * 292 *
292 * The IO wait boost of a task is disabled after a tick since the last update 293 * The IO wait boost of a task is disabled after a tick since the last update
293 * of a CPU. If a new IO wait boost is requested after more then a tick, then 294 * of a CPU. If a new IO wait boost is requested after more then a tick, then
294 * we enable the boost starting from the minimum frequency, which improves 295 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
295 * energy efficiency by ignoring sporadic wakeups from IO. 296 * efficiency by ignoring sporadic wakeups from IO.
296 */ 297 */
297static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time, 298static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
298 bool set_iowait_boost) 299 bool set_iowait_boost)
@@ -303,7 +304,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
303 if (delta_ns <= TICK_NSEC) 304 if (delta_ns <= TICK_NSEC)
304 return false; 305 return false;
305 306
306 sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0; 307 sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
307 sg_cpu->iowait_boost_pending = set_iowait_boost; 308 sg_cpu->iowait_boost_pending = set_iowait_boost;
308 309
309 return true; 310 return true;
@@ -317,8 +318,9 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
317 * 318 *
318 * Each time a task wakes up after an IO operation, the CPU utilization can be 319 * Each time a task wakes up after an IO operation, the CPU utilization can be
319 * boosted to a certain utilization which doubles at each "frequent and 320 * boosted to a certain utilization which doubles at each "frequent and
320 * successive" wakeup from IO, ranging from the utilization of the minimum 321 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
321 * OPP to the utilization of the maximum OPP. 322 * of the maximum OPP.
323 *
322 * To keep doubling, an IO boost has to be requested at least once per tick, 324 * To keep doubling, an IO boost has to be requested at least once per tick,
323 * otherwise we restart from the utilization of the minimum OPP. 325 * otherwise we restart from the utilization of the minimum OPP.
324 */ 326 */
@@ -349,7 +351,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
349 } 351 }
350 352
351 /* First wakeup after IO: start with minimum boost */ 353 /* First wakeup after IO: start with minimum boost */
352 sg_cpu->iowait_boost = sg_cpu->min; 354 sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
353} 355}
354 356
355/** 357/**
@@ -389,7 +391,7 @@ static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
389 * No boost pending; reduce the boost value. 391 * No boost pending; reduce the boost value.
390 */ 392 */
391 sg_cpu->iowait_boost >>= 1; 393 sg_cpu->iowait_boost >>= 1;
392 if (sg_cpu->iowait_boost < sg_cpu->min) { 394 if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
393 sg_cpu->iowait_boost = 0; 395 sg_cpu->iowait_boost = 0;
394 return util; 396 return util;
395 } 397 }
@@ -827,9 +829,6 @@ static int sugov_start(struct cpufreq_policy *policy)
827 memset(sg_cpu, 0, sizeof(*sg_cpu)); 829 memset(sg_cpu, 0, sizeof(*sg_cpu));
828 sg_cpu->cpu = cpu; 830 sg_cpu->cpu = cpu;
829 sg_cpu->sg_policy = sg_policy; 831 sg_cpu->sg_policy = sg_policy;
830 sg_cpu->min =
831 (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
832 policy->cpuinfo.max_freq;
833 } 832 }
834 833
835 for_each_cpu(cpu, policy->cpus) { 834 for_each_cpu(cpu, policy->cpus) {
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index bdf00c763ee3..f4ee1a3428ae 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1038,6 +1038,18 @@ bool tick_nohz_idle_got_tick(void)
1038} 1038}
1039 1039
1040/** 1040/**
1041 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
1042 * or the tick, whatever that expires first. Note that, if the tick has been
1043 * stopped, it returns the next hrtimer.
1044 *
1045 * Called from power state control code with interrupts disabled
1046 */
1047ktime_t tick_nohz_get_next_hrtimer(void)
1048{
1049 return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
1050}
1051
1052/**
1041 * tick_nohz_get_sleep_length - return the expected length of the current sleep 1053 * tick_nohz_get_sleep_length - return the expected length of the current sleep
1042 * @delta_next: duration until the next event if the tick cannot be stopped 1054 * @delta_next: duration until the next event if the tick cannot be stopped
1043 * 1055 *