author	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-06 15:59:46 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-06 15:59:46 -0500
commit	ef8006846a3a97d9d8bf49e63dba948d0d2dbbf0 (patch)
tree	5929135c14913ba7aebe538567d0550b80a61d73
parent	8dcd175bc3d50b78413c56d5b17d4bddd77412ef (diff)
parent	1271d6d576b7e7c80519de211f250cfd4eebca1a (diff)
Merge tag 'pm-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
 "These are PM-runtime framework changes to use ktime instead of
  jiffies for accounting, a new PM core flag to mark devices that don't
  need any form of power management, cpuidle updates including driver
  API documentation and a new governor, cpufreq updates including a new
  driver for Armada 8K, thermal cleanups and more, some energy-aware
  scheduling (EAS) enabling changes, new chips support in the
  intel_idle and RAPL drivers and assorted cleanups in some other
  places.

  Specifics:

   - Update the PM-runtime framework to use ktime instead of jiffies
     for accounting (Thara Gopinath, Vincent Guittot)

   - Optimize the autosuspend code in the PM-runtime framework somewhat
     (Ladislav Michl)

   - Add a PM core flag to mark devices that don't need any form of
     power management (Sudeep Holla)

   - Introduce driver API documentation for cpuidle and add a new
     cpuidle governor for tickless systems (Rafael Wysocki)

   - Add Jacobsville support to the intel_idle driver (Zhang Rui)

   - Clean up a cpuidle core header file and the cpuidle-dt and ACPI
     processor-idle drivers (Yangtao Li, Joseph Lo, Yazen Ghannam)

   - Add new cpufreq driver for Armada 8K (Gregory Clement)

   - Fix and clean up cpufreq core (Rafael Wysocki, Viresh Kumar, Amit
     Kucheria)

   - Add support for light-weight tear-down and bring-up of CPUs to the
     cpufreq core and use it in the cpufreq-dt driver (Viresh Kumar)

   - Fix cpu_cooling Kconfig dependencies, add support for CPU cooling
     auto-registration to the cpufreq core and use it in multiple
     cpufreq drivers (Amit Kucheria)

   - Fix some minor issues and do some cleanups in the davinci,
     e_powersaver, ap806, s5pv210, qcom and kryo cpufreq drivers
     (Bartosz Golaszewski, Gustavo Silva, Julia Lawall, Paweł Chmiel,
     Taniya Das, Viresh Kumar)

   - Add a Hisilicon CPPC quirk to the cppc_cpufreq driver (Xiongfeng
     Wang)

   - Clean up the intel_pstate and acpi-cpufreq drivers (Erwan Velu,
     Rafael Wysocki)

   - Clean up multiple cpufreq drivers (Yangtao Li)

   - Update cpufreq-related MAINTAINERS entries (Baruch Siach, Lukas
     Bulwahn)

   - Add support for exposing the Energy Model via debugfs and make
     multiple cpufreq drivers register an Energy Model to support
     energy-aware scheduling (Quentin Perret, Dietmar Eggemann,
     Matthias Kaehlcke)

   - Add Ice Lake mobile and Jacobsville support to the Intel RAPL
     power-capping driver (Gayatri Kammela, Zhang Rui)

   - Add a power estimation helper to the operating performance points
     (OPP) framework and clean up a core function in it (Quentin
     Perret, Viresh Kumar)

   - Make minor improvements in the generic power domains (genpd), OPP
     and system suspend frameworks and in the PM core (Aditya Pakki,
     Douglas Anderson, Greg Kroah-Hartman, Rafael Wysocki, Yangtao Li)"

* tag 'pm-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (80 commits)
  cpufreq: kryo: Release OPP tables on module removal
  cpufreq: ap806: add missing of_node_put after of_device_is_available
  cpufreq: acpi-cpufreq: Report if CPU doesn't support boost technologies
  cpufreq: Pass updated policy to driver ->setpolicy() callback
  cpufreq: Fix two debug messages in cpufreq_set_policy()
  cpufreq: Reorder and simplify cpufreq_update_policy()
  cpufreq: Add kerneldoc comments for two core functions
  PM / core: Add support to skip power management in device/driver model
  cpufreq: intel_pstate: Rework iowait boosting to be less aggressive
  cpufreq: intel_pstate: Eliminate intel_pstate_get_base_pstate()
  cpufreq: intel_pstate: Avoid redundant initialization of local vars
  powercap/intel_rapl: add Ice Lake mobile
  ACPI / processor: Set P_LVL{2,3} idle state descriptions
  cpufreq / cppc: Work around for Hisilicon CPPC cpufreq
  ACPI / CPPC: Add a helper to get desired performance
  cpufreq: davinci: move configuration to include/linux/platform_data
  cpufreq: speedstep: convert BUG() to BUG_ON()
  cpufreq: powernv: fix missing check of return value in init_powernv_pstates()
  cpufreq: longhaul: remove unneeded semicolon
  cpufreq: pcc-cpufreq: remove unneeded semicolon
  ...
 Documentation/admin-guide/pm/cpuidle.rst      | 104
 Documentation/cpuidle/driver.txt              |  37
 Documentation/cpuidle/governor.txt            |  28
 Documentation/driver-api/pm/cpuidle.rst       | 282
 Documentation/driver-api/pm/index.rst         |   7
 MAINTAINERS                                   |  14
 arch/arm/mach-davinci/da850.c                 |   2
 arch/arm/mach-davinci/include/mach/cpufreq.h  |  26
 drivers/acpi/cppc_acpi.c                      |  42
 drivers/acpi/processor_idle.c                 |   7
 drivers/base/cpu.c                            |   1
 drivers/base/power/clock_ops.c                |  13
 drivers/base/power/common.c                   |   2
 drivers/base/power/domain.c                   |  13
 drivers/base/power/main.c                     |  11
 drivers/base/power/runtime.c                  |  70
 drivers/base/power/sysfs.c                    |  17
 drivers/base/power/wakeup.c                   |   2
 drivers/cpufreq/Kconfig                       |   3
 drivers/cpufreq/Kconfig.arm                   |  16
 drivers/cpufreq/Makefile                      |   1
 drivers/cpufreq/acpi-cpufreq.c                |   4
 drivers/cpufreq/arm_big_little.c              |   2
 drivers/cpufreq/armada-8k-cpufreq.c           | 206
 drivers/cpufreq/cppc_cpufreq.c                |  65
 drivers/cpufreq/cpufreq-dt.c                  |  33
 drivers/cpufreq/cpufreq.c                     | 134
 drivers/cpufreq/cpufreq_stats.c               |  16
 drivers/cpufreq/davinci-cpufreq.c             |   5
 drivers/cpufreq/e_powersaver.c                |   5
 drivers/cpufreq/imx6q-cpufreq.c               |  25
 drivers/cpufreq/intel_pstate.c                | 105
 drivers/cpufreq/longhaul.c                    |   2
 drivers/cpufreq/mediatek-cpufreq.c            |  16
 drivers/cpufreq/omap-cpufreq.c                |   4
 drivers/cpufreq/pcc-cpufreq.c                 |   2
 drivers/cpufreq/powernv-cpufreq.c             |  10
 drivers/cpufreq/qcom-cpufreq-hw.c             |  53
 drivers/cpufreq/qcom-cpufreq-kryo.c           |  22
 drivers/cpufreq/qoriq-cpufreq.c               |  15
 drivers/cpufreq/s5pv210-cpufreq.c             |  67
 drivers/cpufreq/scmi-cpufreq.c                |  53
 drivers/cpufreq/scpi-cpufreq.c                |  17
 drivers/cpufreq/speedstep-ich.c               |   3
 drivers/cpufreq/tegra124-cpufreq.c            |   2
 drivers/cpuidle/Kconfig                       |  11
 drivers/cpuidle/dt_idle_states.c              |  15
 drivers/cpuidle/governors/Makefile            |   1
 drivers/cpuidle/governors/teo.c               | 444
 drivers/gpu/drm/i915/i915_pmu.c               |  16
 drivers/gpu/drm/i915/i915_pmu.h               |   4
 drivers/idle/intel_idle.c                     |   1
 drivers/opp/core.c                            |  22
 drivers/opp/debugfs.c                         | 110
 drivers/opp/of.c                              |  99
 drivers/opp/opp.h                             |  15
 drivers/powercap/intel_rapl.c                 |   2
 drivers/thermal/Kconfig                       |   1
 include/acpi/cppc_acpi.h                      |   1
 include/linux/cpufreq.h                       |  48
 include/linux/cpuidle.h                       |   8
 include/linux/device.h                        |  10
 include/linux/platform_data/davinci-cpufreq.h |  19
 include/linux/pm.h                            |   7
 include/linux/pm_domain.h                     |   8
 include/linux/pm_opp.h                        |   6
 include/linux/pm_runtime.h                    |   2
 kernel/power/energy_model.c                   |  57
 kernel/power/qos.c                            |   8
 69 files changed, 1921 insertions(+), 558 deletions(-)
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index 106379e2619f..9c58b35a81cb 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -155,14 +155,14 @@ governor uses that information depends on what algorithm is implemented by it
 and that is the primary reason for having more than one governor in the
 ``CPUIdle`` subsystem.
 
-There are two ``CPUIdle`` governors available, ``menu`` and ``ladder``.  Which
-of them is used depends on the configuration of the kernel and in particular on
-whether or not the scheduler tick can be `stopped by the idle
-loop <idle-cpus-and-tick_>`_.  It is possible to change the governor at run time
-if the ``cpuidle_sysfs_switch`` command line parameter has been passed to the
-kernel, but that is not safe in general, so it should not be done on production
-systems (that may change in the future, though).  The name of the ``CPUIdle``
-governor currently used by the kernel can be read from the
+There are three ``CPUIdle`` governors available, ``menu``, `TEO <teo-gov_>`_
+and ``ladder``.  Which of them is used by default depends on the configuration
+of the kernel and in particular on whether or not the scheduler tick can be
+`stopped by the idle loop <idle-cpus-and-tick_>`_.  It is possible to change the
+governor at run time if the ``cpuidle_sysfs_switch`` command line parameter has
+been passed to the kernel, but that is not safe in general, so it should not be
+done on production systems (that may change in the future, though).  The name of
+the ``CPUIdle`` governor currently used by the kernel can be read from the
 :file:`current_governor_ro` (or :file:`current_governor` if
 ``cpuidle_sysfs_switch`` is present in the kernel command line) file under
 :file:`/sys/devices/system/cpu/cpuidle/` in ``sysfs``.
@@ -256,6 +256,8 @@ the ``menu`` governor by default and if it is not tickless, the default
 ``CPUIdle`` governor on it will be ``ladder``.
 
 
+.. _menu-gov:
+
 The ``menu`` Governor
 =====================
 
@@ -333,6 +335,92 @@ that time, the governor may need to select a shallower state with a suitable
 target residency.
 
 
+.. _teo-gov:
+
+The Timer Events Oriented (TEO) Governor
+========================================
+
+The timer events oriented (TEO) governor is an alternative ``CPUIdle`` governor
+for tickless systems.  It follows the same basic strategy as the ``menu`` `one
+<menu-gov_>`_: it always tries to find the deepest idle state suitable for the
+given conditions.  However, it applies a different approach to that problem.
+
+First, it does not use sleep length correction factors, but instead it attempts
+to correlate the observed idle duration values with the available idle states
+and use that information to pick up the idle state that is most likely to
+"match" the upcoming CPU idle interval.  Second, it does not take the tasks
+that were running on the given CPU in the past and are waiting on some I/O
+operations to complete now at all (there is no guarantee that they will run on
+the same CPU when they become runnable again) and the pattern detection code in
+it avoids taking timer wakeups into account.  It also only uses idle duration
+values less than the current time till the closest timer (with the scheduler
+tick excluded) for that purpose.
+
+Like in the ``menu`` governor `case <menu-gov_>`_, the first step is to obtain
+the *sleep length*, which is the time until the closest timer event with the
+assumption that the scheduler tick will be stopped (that also is the upper bound
+on the time until the next CPU wakeup).  That value is then used to preselect an
+idle state on the basis of three metrics maintained for each idle state provided
+by the ``CPUIdle`` driver: ``hits``, ``misses`` and ``early_hits``.
+
+The ``hits`` and ``misses`` metrics measure the likelihood that a given idle
+state will "match" the observed (post-wakeup) idle duration if it "matches" the
+sleep length.  They both are subject to decay (after a CPU wakeup) every time
+the target residency of the idle state corresponding to them is less than or
+equal to the sleep length and the target residency of the next idle state is
+greater than the sleep length (that is, when the idle state corresponding to
+them "matches" the sleep length).  The ``hits`` metric is increased if the
+former condition is satisfied and the target residency of the given idle state
+is less than or equal to the observed idle duration and the target residency of
+the next idle state is greater than the observed idle duration at the same time
+(that is, it is increased when the given idle state "matches" both the sleep
+length and the observed idle duration).  In turn, the ``misses`` metric is
+increased when the given idle state "matches" the sleep length only and the
+observed idle duration is too short for its target residency.
+
+The ``early_hits`` metric measures the likelihood that a given idle state will
+"match" the observed (post-wakeup) idle duration if it does not "match" the
+sleep length.  It is subject to decay on every CPU wakeup and it is increased
+when the idle state corresponding to it "matches" the observed (post-wakeup)
+idle duration and the target residency of the next idle state is less than or
+equal to the sleep length (i.e. the idle state "matching" the sleep length is
+deeper than the given one).
+
+The governor walks the list of idle states provided by the ``CPUIdle`` driver
+and finds the last (deepest) one with the target residency less than or equal
+to the sleep length.  Then, the ``hits`` and ``misses`` metrics of that idle
+state are compared with each other and it is preselected if the ``hits`` one is
+greater (which means that that idle state is likely to "match" the observed idle
+duration after CPU wakeup).  If the ``misses`` one is greater, the governor
+preselects the shallower idle state with the maximum ``early_hits`` metric
+(or if there are multiple shallower idle states with equal ``early_hits``
+metric which also is the maximum, the shallowest of them will be preselected).
+[If there is a wakeup latency constraint coming from the `PM QoS framework
+<cpu-pm-qos_>`_ which is hit before reaching the deepest idle state with the
+target residency within the sleep length, the deepest idle state with the exit
+latency within the constraint is preselected without consulting the ``hits``,
+``misses`` and ``early_hits`` metrics.]
+
+Next, the governor takes several idle duration values observed most recently
+into consideration and if at least a half of them are greater than or equal to
+the target residency of the preselected idle state, that idle state becomes the
+final candidate to ask for.  Otherwise, the average of the most recent idle
+duration values below the target residency of the preselected idle state is
+computed and the governor walks the idle states shallower than the preselected
+one and finds the deepest of them with the target residency within that average.
+That idle state is then taken as the final candidate to ask for.
+
+Still, at this point the governor may need to refine the idle state selection if
+it has not decided to `stop the scheduler tick <idle-cpus-and-tick_>`_.  That
+generally happens if the target residency of the idle state selected so far is
+less than the tick period and the tick has not been stopped already (in a
+previous iteration of the idle loop).  Then, like in the ``menu`` governor
+`case <menu-gov_>`_, the sleep length used in the previous computations may not
+reflect the real time until the closest timer event and if it really is greater
+than that time, a shallower state with a suitable target residency may need to
+be selected.
+
+
 .. _idle-states-representation:
 
 Representation of Idle States
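
To make the preselection logic described above concrete, here is a minimal
sketch of that step in plain C. This is a simplified userspace model written
for this text, not the kernel's drivers/cpuidle/governors/teo.c; all names
(teo_model_state, teo_model_preselect) are illustrative.

#include <stddef.h>

/* Simplified model of TEO preselection: pick the deepest state whose
 * target residency fits within the sleep length; if its misses outweigh
 * its hits, fall back to the shallower state with the maximum early_hits
 * count (the shallowest one on ties).
 */
struct teo_model_state {
	unsigned int target_residency_us;
	unsigned int hits, misses, early_hits;
};

static size_t teo_model_preselect(const struct teo_model_state *st,
				  size_t count, unsigned int sleep_length_us)
{
	size_t idx = 0, best = 0;
	unsigned int max_early = 0;
	size_t i;

	/* Deepest state with target residency within the sleep length. */
	for (i = 1; i < count; i++)
		if (st[i].target_residency_us <= sleep_length_us)
			idx = i;

	if (st[idx].hits >= st[idx].misses)
		return idx;	/* likely to "match" the idle duration too */

	/* Strict '>' keeps the shallowest state on early_hits ties. */
	for (i = 0; i < idx; i++) {
		if (st[i].early_hits > max_early) {
			max_early = st[i].early_hits;
			best = i;
		}
	}
	return best;
}

The model deliberately omits the PM QoS latency cap and the recent-idle-duration
refinement that the text describes as later steps.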
diff --git a/Documentation/cpuidle/driver.txt b/Documentation/cpuidle/driver.txt
deleted file mode 100644
index 1b0d81d92583..000000000000
--- a/Documentation/cpuidle/driver.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
-	Supporting multiple CPU idle levels in kernel
-
-			cpuidle drivers
-
-
-
-
-cpuidle driver hooks into the cpuidle infrastructure and handles the
-architecture/platform dependent part of CPU idle states. Driver
-provides the platform idle state detection capability and also
-has mechanisms in place to support actual entry-exit into CPU idle states.
-
-cpuidle driver initializes the cpuidle_device structure for each CPU device
-and registers with cpuidle using cpuidle_register_device.
-
-If all the idle states are the same, the wrapper function cpuidle_register
-could be used instead.
-
-It can also support the dynamic changes (like battery <-> AC), by using
-cpuidle_pause_and_lock, cpuidle_disable_device and cpuidle_enable_device,
-cpuidle_resume_and_unlock.
-
-Interfaces:
-extern int cpuidle_register(struct cpuidle_driver *drv,
-                            const struct cpumask *const coupled_cpus);
-extern int cpuidle_unregister(struct cpuidle_driver *drv);
-extern int cpuidle_register_driver(struct cpuidle_driver *drv);
-extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
-extern int cpuidle_register_device(struct cpuidle_device *dev);
-extern void cpuidle_unregister_device(struct cpuidle_device *dev);
-
-extern void cpuidle_pause_and_lock(void);
-extern void cpuidle_resume_and_unlock(void);
-extern int cpuidle_enable_device(struct cpuidle_device *dev);
-extern void cpuidle_disable_device(struct cpuidle_device *dev);
diff --git a/Documentation/cpuidle/governor.txt b/Documentation/cpuidle/governor.txt
deleted file mode 100644
index d9020f5e847b..000000000000
--- a/Documentation/cpuidle/governor.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
-
-	Supporting multiple CPU idle levels in kernel
-
-			cpuidle governors
-
-
-
-
-cpuidle governor is policy routine that decides what idle state to enter at
-any given time. cpuidle core uses different callbacks to the governor.
-
-* enable() to enable governor for a particular device
-* disable() to disable governor for a particular device
-* select() to select an idle state to enter
-* reflect() called after returning from the idle state, which can be used
-  by the governor for some record keeping.
-
-More than one governor can be registered at the same time and
-users can switch between drivers using /sysfs interface (when enabled).
-More than one governor part is supported for developers to easily experiment
-with different governors. By default, most optimal governor based on your
-kernel configuration and platform will be selected by cpuidle.
-
-Interfaces:
-extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-struct cpuidle_governor
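
For reference, a minimal sketch of a governor built on the interfaces listed
in the (now removed) text above. This is an illustrative stub written for this
text, not code from the merge; the callback bodies do nothing useful and the
"stub" names are hypothetical.

#include <linux/cpuidle.h>

static int stub_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	return 0;	/* always pick the shallowest state */
}

static void stub_reflect(struct cpuidle_device *dev, int index)
{
	/* record keeping for future select() decisions would go here */
}

static struct cpuidle_governor stub_governor = {
	.name	 = "stub",
	.rating	 = 10,		/* low rating: never auto-selected */
	.select	 = stub_select,	/* the only mandatory callback */
	.reflect = stub_reflect,
};

static int __init stub_governor_init(void)
{
	return cpuidle_register_governor(&stub_governor);
}

Because registered governors cannot be unregistered, a real governor of this
shape would be built into the kernel rather than into a module.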
diff --git a/Documentation/driver-api/pm/cpuidle.rst b/Documentation/driver-api/pm/cpuidle.rst
new file mode 100644
index 000000000000..5842ab621a58
--- /dev/null
+++ b/Documentation/driver-api/pm/cpuidle.rst
@@ -0,0 +1,282 @@
+.. |struct cpuidle_governor| replace:: :c:type:`struct cpuidle_governor <cpuidle_governor>`
+.. |struct cpuidle_device| replace:: :c:type:`struct cpuidle_device <cpuidle_device>`
+.. |struct cpuidle_driver| replace:: :c:type:`struct cpuidle_driver <cpuidle_driver>`
+.. |struct cpuidle_state| replace:: :c:type:`struct cpuidle_state <cpuidle_state>`
+
+========================
+CPU Idle Time Management
+========================
+
+::
+
+ Copyright (c) 2019 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+
+CPU Idle Time Management Subsystem
+==================================
+
+Every time one of the logical CPUs in the system (the entities that appear to
+fetch and execute instructions: hardware threads, if present, or processor
+cores) is idle after an interrupt or equivalent wakeup event, which means that
+there are no tasks to run on it except for the special "idle" task associated
+with it, there is an opportunity to save energy for the processor that it
+belongs to.  That can be done by making the idle logical CPU stop fetching
+instructions from memory and putting some of the processor's functional units
+depended on by it into an idle state in which they will draw less power.
+
+However, there may be multiple different idle states that can be used in such a
+situation in principle, so it may be necessary to find the most suitable one
+(from the kernel perspective) and ask the processor to use (or "enter") that
+particular idle state.  That is the role of the CPU idle time management
+subsystem in the kernel, called ``CPUIdle``.
+
+The design of ``CPUIdle`` is modular and based on the code duplication avoidance
+principle, so the generic code that in principle need not depend on the hardware
+or platform design details in it is separate from the code that interacts with
+the hardware.  It generally is divided into three categories of functional
+units: *governors* responsible for selecting idle states to ask the processor
+to enter, *drivers* that pass the governors' decisions on to the hardware and
+the *core* providing a common framework for them.
+
+
+CPU Idle Time Governors
+=======================
+
+A CPU idle time (``CPUIdle``) governor is a bundle of policy code invoked when
+one of the logical CPUs in the system turns out to be idle.  Its role is to
+select an idle state to ask the processor to enter in order to save some energy.
+
+``CPUIdle`` governors are generic and each of them can be used on any hardware
+platform that the Linux kernel can run on.  For this reason, data structures
+operated on by them cannot depend on any hardware architecture or platform
+design details as well.
+
+The governor itself is represented by a |struct cpuidle_governor| object
+containing four callback pointers, :c:member:`enable`, :c:member:`disable`,
+:c:member:`select`, :c:member:`reflect`, a :c:member:`rating` field described
+below, and a name (string) used for identifying it.
+
+For the governor to be available at all, that object needs to be registered
+with the ``CPUIdle`` core by calling :c:func:`cpuidle_register_governor()` with
+a pointer to it passed as the argument.  If successful, that causes the core to
+add the governor to the global list of available governors and, if it is the
+only one in the list (that is, the list was empty before) or the value of its
+:c:member:`rating` field is greater than the value of that field for the
+governor currently in use, or the name of the new governor was passed to the
+kernel as the value of the ``cpuidle.governor=`` command line parameter, the new
+governor will be used from that point on (there can be only one ``CPUIdle``
+governor in use at a time).  Also, if ``cpuidle_sysfs_switch`` is passed to the
+kernel in the command line, user space can choose the ``CPUIdle`` governor to
+use at run time via ``sysfs``.
+
+Once registered, ``CPUIdle`` governors cannot be unregistered, so it is not
+practical to put them into loadable kernel modules.
+
+The interface between ``CPUIdle`` governors and the core consists of four
+callbacks:
+
+:c:member:`enable`
+	::
+
+	  int (*enable) (struct cpuidle_driver *drv, struct cpuidle_device *dev);
+
+	The role of this callback is to prepare the governor for handling the
+	(logical) CPU represented by the |struct cpuidle_device| object pointed
+	to by the ``dev`` argument.  The |struct cpuidle_driver| object pointed
+	to by the ``drv`` argument represents the ``CPUIdle`` driver to be used
+	with that CPU (among other things, it should contain the list of
+	|struct cpuidle_state| objects representing idle states that the
+	processor holding the given CPU can be asked to enter).
+
+	It may fail, in which case it is expected to return a negative error
+	code, and that causes the kernel to run the architecture-specific
+	default code for idle CPUs on the CPU in question instead of ``CPUIdle``
+	until the ``->enable()`` governor callback is invoked for that CPU
+	again.
+
+:c:member:`disable`
+	::
+
+	  void (*disable) (struct cpuidle_driver *drv, struct cpuidle_device *dev);
+
+	Called to make the governor stop handling the (logical) CPU represented
+	by the |struct cpuidle_device| object pointed to by the ``dev``
+	argument.
+
+	It is expected to reverse any changes made by the ``->enable()``
+	callback when it was last invoked for the target CPU, free all memory
+	allocated by that callback and so on.
+
+:c:member:`select`
+	::
+
+	  int (*select) (struct cpuidle_driver *drv, struct cpuidle_device *dev,
+			 bool *stop_tick);
+
+	Called to select an idle state for the processor holding the (logical)
+	CPU represented by the |struct cpuidle_device| object pointed to by the
+	``dev`` argument.
+
+	The list of idle states to take into consideration is represented by the
+	:c:member:`states` array of |struct cpuidle_state| objects held by the
+	|struct cpuidle_driver| object pointed to by the ``drv`` argument (which
+	represents the ``CPUIdle`` driver to be used with the CPU at hand).  The
+	value returned by this callback is interpreted as an index into that
+	array (unless it is a negative error code).
+
+	The ``stop_tick`` argument is used to indicate whether or not to stop
+	the scheduler tick before asking the processor to enter the selected
+	idle state.  When the ``bool`` variable pointed to by it (which is set
+	to ``true`` before invoking this callback) is cleared to ``false``, the
+	processor will be asked to enter the selected idle state without
+	stopping the scheduler tick on the given CPU (if the tick has been
+	stopped on that CPU already, however, it will not be restarted before
+	asking the processor to enter the idle state).
+
+	This callback is mandatory (i.e. the :c:member:`select` callback pointer
+	in |struct cpuidle_governor| must not be ``NULL`` for the registration
+	of the governor to succeed).
+
+:c:member:`reflect`
+	::
+
+	  void (*reflect) (struct cpuidle_device *dev, int index);
+
+	Called to allow the governor to evaluate the accuracy of the idle state
+	selection made by the ``->select()`` callback (when it was invoked last
+	time) and possibly use the result of that to improve the accuracy of
+	idle state selections in the future.
+
+In addition, ``CPUIdle`` governors are required to take power management
+quality of service (PM QoS) constraints on the processor wakeup latency into
+account when selecting idle states.  In order to obtain the current effective
+PM QoS wakeup latency constraint for a given CPU, a ``CPUIdle`` governor is
+expected to pass the number of the CPU to
+:c:func:`cpuidle_governor_latency_req()`.  Then, the governor's ``->select()``
+callback must not return the index of an idle state whose
+:c:member:`exit_latency` value is greater than the number returned by that
+function.
+
+
+CPU Idle Time Management Drivers
+================================
+
+CPU idle time management (``CPUIdle``) drivers provide an interface between the
+other parts of ``CPUIdle`` and the hardware.
+
+First of all, a ``CPUIdle`` driver has to populate the :c:member:`states` array
+of |struct cpuidle_state| objects included in the |struct cpuidle_driver| object
+representing it.  Going forward this array will represent the list of available
+idle states that the processor hardware can be asked to enter shared by all of
+the logical CPUs handled by the given driver.
+
+The entries in the :c:member:`states` array are expected to be sorted by the
+value of the :c:member:`target_residency` field in |struct cpuidle_state| in
+the ascending order (that is, index 0 should correspond to the idle state with
+the minimum value of :c:member:`target_residency`).  [Since the
+:c:member:`target_residency` value is expected to reflect the "depth" of the
+idle state represented by the |struct cpuidle_state| object holding it, this
+sorting order should be the same as the ascending sorting order by the idle
+state "depth".]
+
+Three fields in |struct cpuidle_state| are used by the existing ``CPUIdle``
+governors for computations related to idle state selection:
+
+:c:member:`target_residency`
+	Minimum time to spend in this idle state including the time needed to
+	enter it (which may be substantial) to save more energy than could
+	be saved by staying in a shallower idle state for the same amount of
+	time, in microseconds.
+
+:c:member:`exit_latency`
+	Maximum time it will take a CPU asking the processor to enter this idle
+	state to start executing the first instruction after a wakeup from it,
+	in microseconds.
+
+:c:member:`flags`
+	Flags representing idle state properties.  Currently, governors only use
+	the ``CPUIDLE_FLAG_POLLING`` flag which is set if the given object
+	does not represent a real idle state, but an interface to a software
+	"loop" that can be used in order to avoid asking the processor to enter
+	any idle state at all.  [There are other flags used by the ``CPUIdle``
+	core in special situations.]
+
+The :c:member:`enter` callback pointer in |struct cpuidle_state|, which must not
+be ``NULL``, points to the routine to execute in order to ask the processor to
+enter this particular idle state:
+
+::
+
+  void (*enter) (struct cpuidle_device *dev, struct cpuidle_driver *drv,
+		 int index);
+
+The first two arguments of it point to the |struct cpuidle_device| object
+representing the logical CPU running this callback and the
+|struct cpuidle_driver| object representing the driver itself, respectively,
+and the last one is an index of the |struct cpuidle_state| entry in the driver's
+:c:member:`states` array representing the idle state to ask the processor to
+enter.
+
+The analogous ``->enter_s2idle()`` callback in |struct cpuidle_state| is used
+only for implementing the suspend-to-idle system-wide power management feature.
+The difference between it and ``->enter()`` is that it must not re-enable
+interrupts at any point (even temporarily) or attempt to change the states of
+clock event devices, which the ``->enter()`` callback may do sometimes.
+
+Once the :c:member:`states` array has been populated, the number of valid
+entries in it has to be stored in the :c:member:`state_count` field of the
+|struct cpuidle_driver| object representing the driver.  Moreover, if any
+entries in the :c:member:`states` array represent "coupled" idle states (that
+is, idle states that can only be asked for if multiple related logical CPUs are
+idle), the :c:member:`safe_state_index` field in |struct cpuidle_driver| needs
+to be the index of an idle state that is not "coupled" (that is, one that can be
+asked for if only one logical CPU is idle).
+
+In addition to that, if the given ``CPUIdle`` driver is only going to handle a
+subset of logical CPUs in the system, the :c:member:`cpumask` field in its
+|struct cpuidle_driver| object must point to the set (mask) of CPUs that will be
+handled by it.
+
+A ``CPUIdle`` driver can only be used after it has been registered.  If there
+are no "coupled" idle state entries in the driver's :c:member:`states` array,
+that can be accomplished by passing the driver's |struct cpuidle_driver| object
+to :c:func:`cpuidle_register_driver()`.  Otherwise, :c:func:`cpuidle_register()`
+should be used for this purpose.
+
+However, it also is necessary to register |struct cpuidle_device| objects for
+all of the logical CPUs to be handled by the given ``CPUIdle`` driver with the
+help of :c:func:`cpuidle_register_device()` after the driver has been registered
+and :c:func:`cpuidle_register_driver()`, unlike :c:func:`cpuidle_register()`,
+does not do that automatically.  For this reason, the drivers that use
+:c:func:`cpuidle_register_driver()` to register themselves must also take care
+of registering the |struct cpuidle_device| objects as needed, so it is generally
+recommended to use :c:func:`cpuidle_register()` for ``CPUIdle`` driver
+registration in all cases.
+
+The registration of a |struct cpuidle_device| object causes the ``CPUIdle``
+``sysfs`` interface to be created and the governor's ``->enable()`` callback to
+be invoked for the logical CPU represented by it, so it must take place after
+registering the driver that will handle the CPU in question.
+
+``CPUIdle`` drivers and |struct cpuidle_device| objects can be unregistered
+when they are not necessary any more which allows some resources associated with
+them to be released.  Due to dependencies between them, all of the
+|struct cpuidle_device| objects representing CPUs handled by the given
+``CPUIdle`` driver must be unregistered, with the help of
+:c:func:`cpuidle_unregister_device()`, before calling
+:c:func:`cpuidle_unregister_driver()` to unregister the driver.  Alternatively,
+:c:func:`cpuidle_unregister()` can be called to unregister a ``CPUIdle`` driver
+along with all of the |struct cpuidle_device| objects representing CPUs handled
+by it.
+
+``CPUIdle`` drivers can respond to runtime system configuration changes that
+lead to modifications of the list of available processor idle states (which can
+happen, for example, when the system's power source is switched from AC to
+battery or the other way around).  Upon a notification of such a change,
+a ``CPUIdle`` driver is expected to call :c:func:`cpuidle_pause_and_lock()` to
+turn ``CPUIdle`` off temporarily and then :c:func:`cpuidle_disable_device()` for
+all of the |struct cpuidle_device| objects representing CPUs affected by that
+change.  Next, it can update its :c:member:`states` array in accordance with
+the new configuration of the system, call :c:func:`cpuidle_enable_device()` for
+all of the relevant |struct cpuidle_device| objects and invoke
+:c:func:`cpuidle_resume_and_unlock()` to allow ``CPUIdle`` to be used again.
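
As a companion to the driver-registration text in the new document above, a
minimal sketch of a single-state driver registered with cpuidle_register().
This is illustrative only, not code from the merge: the "sketch_" names are
hypothetical, the ->enter() body is a placeholder (cpu_do_idle() assumes an
arm-style platform), and the int-returning enter signature follows
include/linux/cpuidle.h.

#include <linux/cpuidle.h>
#include <linux/module.h>

static int sketch_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();		/* placeholder: arch-specific idle entry */
	return index;		/* index of the state actually entered */
}

static struct cpuidle_driver sketch_idle_driver = {
	.name = "sketch_idle",
	.owner = THIS_MODULE,
	.states[0] = {
		.enter			= sketch_enter,
		.exit_latency		= 1,	/* microseconds */
		.target_residency	= 1,	/* microseconds */
		.name			= "WFI",
		.desc			= "wait for interrupt",
	},
	.state_count = 1,
};

static int __init sketch_idle_init(void)
{
	/* No "coupled" states here, and cpuidle_register() also
	 * registers the per-CPU struct cpuidle_device objects, as
	 * recommended by the text above.
	 */
	return cpuidle_register(&sketch_idle_driver, NULL);
}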
diff --git a/Documentation/driver-api/pm/index.rst b/Documentation/driver-api/pm/index.rst
index 2f6d0e9cf6b7..56975c6bc789 100644
--- a/Documentation/driver-api/pm/index.rst
+++ b/Documentation/driver-api/pm/index.rst
@@ -1,9 +1,10 @@
1======================= 1===============================
2Device Power Management 2CPU and Device Power Management
3======================= 3===============================
4 4
5.. toctree:: 5.. toctree::
6 6
7 cpuidle
7 devices 8 devices
8 notifiers 9 notifiers
9 types 10 types
diff --git a/MAINTAINERS b/MAINTAINERS
index c7d3e51c7064..a3ccc414a1e7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1736,6 +1736,7 @@ F:	arch/arm/configs/mvebu_*_defconfig
 F:	arch/arm/mach-mvebu/
 F:	arch/arm64/boot/dts/marvell/armada*
 F:	drivers/cpufreq/armada-37xx-cpufreq.c
+F:	drivers/cpufreq/armada-8k-cpufreq.c
 F:	drivers/cpufreq/mvebu-cpufreq.c
 F:	drivers/irqchip/irq-armada-370-xp.c
 F:	drivers/irqchip/irq-mvebu-*
@@ -3994,7 +3995,7 @@ M:	Viresh Kumar <viresh.kumar@linaro.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
-T:	git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git (For ARM Updates)
 B:	https://bugzilla.kernel.org
 F:	Documentation/admin-guide/pm/cpufreq.rst
 F:	Documentation/admin-guide/pm/intel_pstate.rst
@@ -4054,6 +4055,7 @@ S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 B:	https://bugzilla.kernel.org
 F:	Documentation/admin-guide/pm/cpuidle.rst
+F:	Documentation/driver-api/pm/cpuidle.rst
 F:	drivers/cpuidle/*
 F:	include/linux/cpuidle.h
 
@@ -12679,11 +12681,11 @@ F:	Documentation/media/v4l-drivers/qcom_camss.rst
 F:	drivers/media/platform/qcom/camss/
 
 QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
-M:	Ilia Lin <ilia.lin@gmail.com>
+M:	Ilia Lin <ilia.lin@kernel.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
 F:	drivers/cpufreq/qcom-cpufreq-kryo.c
 
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
 M:	Timur Tabi <timur@kernel.org>
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 8a50956a9181..67ab71ba3ad3 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -22,6 +22,7 @@
 #include <linux/mfd/da8xx-cfgchip.h>
 #include <linux/platform_data/clk-da8xx-cfgchip.h>
 #include <linux/platform_data/clk-davinci-pll.h>
+#include <linux/platform_data/davinci-cpufreq.h>
 #include <linux/platform_data/gpio-davinci.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
@@ -30,7 +31,6 @@
 #include <asm/mach/map.h>
 
 #include <mach/common.h>
-#include <mach/cpufreq.h>
 #include <mach/cputype.h>
 #include <mach/da8xx.h>
 #include <mach/pm.h>
diff --git a/arch/arm/mach-davinci/include/mach/cpufreq.h b/arch/arm/mach-davinci/include/mach/cpufreq.h
deleted file mode 100644
index 3c089cfb6cd6..000000000000
--- a/arch/arm/mach-davinci/include/mach/cpufreq.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * TI DaVinci CPUFreq platform support.
- *
- * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#ifndef _MACH_DAVINCI_CPUFREQ_H
-#define _MACH_DAVINCI_CPUFREQ_H
-
-#include <linux/cpufreq.h>
-
-struct davinci_cpufreq_config {
-	struct cpufreq_frequency_table *freq_table;
-	int (*set_voltage) (unsigned int index);
-	int (*init) (void);
-};
-
-#endif
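
A brief sketch of board code consuming the relocated header. This is
hypothetical: it assumes the struct layout shown in the deleted header above
carries over unchanged to include/linux/platform_data/davinci-cpufreq.h, and
the "board_" callbacks are illustrative stubs.

#include <linux/platform_data/davinci-cpufreq.h>

static int board_set_voltage(unsigned int index)
{
	/* program the board's regulator for the given OPP; stubbed */
	return 0;
}

static struct davinci_cpufreq_config board_cpufreq_config = {
	.freq_table  = NULL,	/* filled in from the board's frequency table */
	.set_voltage = board_set_voltage,
	.init	     = NULL,
};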
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 217a782c3e55..1b207fca1420 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1051,6 +1051,48 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 }
 
 /**
+ * cppc_get_desired_perf - Get the value of desired performance register.
+ * @cpunum: CPU from which to get desired performance.
+ * @desired_perf: address of a variable to store the returned desired performance
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+{
+	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+	struct cpc_register_resource *desired_reg;
+	struct cppc_pcc_data *pcc_ss_data = NULL;
+
+	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+
+	if (CPC_IN_PCC(desired_reg)) {
+		int ret = 0;
+
+		if (pcc_ss_id < 0)
+			return -EIO;
+
+		pcc_ss_data = pcc_data[pcc_ss_id];
+
+		down_write(&pcc_ss_data->pcc_lock);
+
+		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
+			cpc_read(cpunum, desired_reg, desired_perf);
+		else
+			ret = -EIO;
+
+		up_write(&pcc_ss_data->pcc_lock);
+
+		return ret;
+	}
+
+	cpc_read(cpunum, desired_reg, desired_perf);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
+
+/**
  * cppc_get_perf_caps - Get a CPUs performance capabilities.
  * @cpunum: CPU from which to get capabilities info.
  * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
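
A short usage sketch for the new helper (the caller is hypothetical, e.g. a
cpufreq driver's ->get() path; per the kerneldoc above, the helper returns 0
on success or -EIO):

#include <acpi/cppc_acpi.h>

static unsigned int example_read_desired_perf(int cpu)
{
	u64 desired_perf;

	if (cppc_get_desired_perf(cpu, &desired_perf))
		return 0;	/* -EIO: register could not be read */

	return (unsigned int)desired_perf;
}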
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b2131c4ea124..98d4ec5bf450 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -282,6 +282,13 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 			 pr->power.states[ACPI_STATE_C2].address,
 			 pr->power.states[ACPI_STATE_C3].address));
 
+	snprintf(pr->power.states[ACPI_STATE_C2].desc,
+		 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
+		 pr->power.states[ACPI_STATE_C2].address);
+	snprintf(pr->power.states[ACPI_STATE_C3].desc,
+		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
+		 pr->power.states[ACPI_STATE_C3].address);
+
 	return 0;
 }
 
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index eb9443d5bae1..6ce93a52bf3f 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -427,6 +427,7 @@ __cpu_device_create(struct device *parent, void *drvdata,
 	dev->parent = parent;
 	dev->groups = groups;
 	dev->release = device_create_release;
+	device_set_pm_not_required(dev);
 	dev_set_drvdata(dev, drvdata);
 
 	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
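
A minimal sketch of how another caller might use the new flag (hypothetical
helper name; it mirrors the __cpu_device_create() hunk above, where the flag
is set before the device is added so that device_pm_add() and dpm_sysfs_add()
skip the device):

#include <linux/device.h>

/* Hypothetical: add a device that needs no form of power management.
 * The caller is assumed to have done device_initialize() already.
 */
static int example_add_pm_less_device(struct device *dev)
{
	device_set_pm_not_required(dev);
	return device_add(dev);
}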
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 5a42ae4078c2..365ad751ce0f 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -65,10 +65,15 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
 	if (IS_ERR(ce->clk)) {
 		ce->status = PCE_STATUS_ERROR;
 	} else {
-		clk_prepare(ce->clk);
-		ce->status = PCE_STATUS_ACQUIRED;
-		dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
-			ce->clk, ce->con_id);
+		if (clk_prepare(ce->clk)) {
+			ce->status = PCE_STATUS_ERROR;
+			dev_err(dev, "clk_prepare() failed\n");
+		} else {
+			ce->status = PCE_STATUS_ACQUIRED;
+			dev_dbg(dev,
+				"Clock %pC con_id %s managed by runtime PM.\n",
+				ce->clk, ce->con_id);
+		}
 	}
 }
 
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index b413951c6abc..22aedb28aad7 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
  * For a detailed function description, see dev_pm_domain_attach_by_id().
  */
 struct device *dev_pm_domain_attach_by_name(struct device *dev,
-					    char *name)
+					    const char *name)
 {
 	if (dev->pm_domain)
 		return ERR_PTR(-EEXIST);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 500de1dee967..2c334c01fc43 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2483,7 +2483,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
  * power-domain-names DT property. For further description see
  * genpd_dev_pm_attach_by_id().
  */
-struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
 {
 	int index;
 
@@ -2948,18 +2948,11 @@ static int __init genpd_debug_init(void)
 
 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
 
-	if (!genpd_debugfs_dir)
-		return -ENOMEM;
-
-	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
-			genpd_debugfs_dir, NULL, &summary_fops);
-	if (!d)
-		return -ENOMEM;
+	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
+			    NULL, &summary_fops);
 
 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
 		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
-		if (!d)
-			return -ENOMEM;
 
 		debugfs_create_file("current_state", 0444,
 				    d, genpd, &status_fops);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0992e67e862b..893ae464bfd6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -124,6 +124,10 @@ void device_pm_unlock(void)
  */
 void device_pm_add(struct device *dev)
 {
+	/* Skip PM setup/initialization. */
+	if (device_pm_not_required(dev))
+		return;
+
 	pr_debug("PM: Adding info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	device_pm_check_callbacks(dev);
@@ -142,6 +146,9 @@ void device_pm_add(struct device *dev)
  */
 void device_pm_remove(struct device *dev)
 {
+	if (device_pm_not_required(dev))
+		return;
+
 	pr_debug("PM: Removing info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
@@ -1741,8 +1748,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.direct_complete) {
 		if (pm_runtime_status_suspended(dev)) {
 			pm_runtime_disable(dev);
-			if (pm_runtime_status_suspended(dev))
+			if (pm_runtime_status_suspended(dev)) {
+				pm_dev_dbg(dev, state, "direct-complete ");
 				goto Complete;
+			}
 
 			pm_runtime_enable(dev);
 		}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index ccd296dbb95c..78937c45278c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -66,20 +66,30 @@ static int rpm_suspend(struct device *dev, int rpmflags);
  */
 void update_pm_runtime_accounting(struct device *dev)
 {
-	unsigned long now = jiffies;
-	unsigned long delta;
+	u64 now, last, delta;
 
-	delta = now - dev->power.accounting_timestamp;
+	if (dev->power.disable_depth > 0)
+		return;
 
+	last = dev->power.accounting_timestamp;
+
+	now = ktime_get_mono_fast_ns();
 	dev->power.accounting_timestamp = now;
 
-	if (dev->power.disable_depth > 0)
+	/*
+	 * Because ktime_get_mono_fast_ns() is not monotonic during
+	 * timekeeping updates, ensure that 'now' is after the last saved
+	 * timestamp.
+	 */
+	if (now < last)
 		return;
 
+	delta = now - last;
+
 	if (dev->power.runtime_status == RPM_SUSPENDED)
-		dev->power.suspended_jiffies += delta;
+		dev->power.suspended_time += delta;
 	else
-		dev->power.active_jiffies += delta;
+		dev->power.active_time += delta;
 }
 
 static void __update_runtime_status(struct device *dev, enum rpm_status status)
@@ -88,6 +98,22 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
 	dev->power.runtime_status = status;
 }
 
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+	u64 time;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	update_pm_runtime_accounting(dev);
+	time = dev->power.suspended_time;
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return time;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
 /**
  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  * @dev: Device to handle.
@@ -129,24 +155,21 @@ static void pm_runtime_cancel_pending(struct device *dev)
 u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
 	int autosuspend_delay;
-	u64 last_busy, expires = 0;
-	u64 now = ktime_get_mono_fast_ns();
+	u64 expires;
 
 	if (!dev->power.use_autosuspend)
-		goto out;
+		return 0;
 
 	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 	if (autosuspend_delay < 0)
-		goto out;
-
-	last_busy = READ_ONCE(dev->power.last_busy);
+		return 0;
 
-	expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
-	if (expires <= now)
-		expires = 0;	/* Already expired. */
+	expires = READ_ONCE(dev->power.last_busy);
+	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+	if (expires > ktime_get_mono_fast_ns())
+		return expires;	/* Expires in the future */
 
- out:
-	return expires;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 
@@ -1276,6 +1299,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
 		pm_runtime_put_noidle(dev);
 	}
 
+	/* Update time accounting before disabling PM-runtime. */
+	update_pm_runtime_accounting(dev);
+
 	if (!dev->power.disable_depth++)
 		__pm_runtime_barrier(dev);
 
@@ -1294,10 +1320,15 @@ void pm_runtime_enable(struct device *dev)
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
-	if (dev->power.disable_depth > 0)
+	if (dev->power.disable_depth > 0) {
 		dev->power.disable_depth--;
-	else
+
+		/* About to enable runtime pm, set accounting_timestamp to now */
+		if (!dev->power.disable_depth)
+			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+	} else {
 		dev_warn(dev, "Unbalanced %s!\n", __func__);
+	}
 
 	WARN(!dev->power.disable_depth &&
 	     dev->power.runtime_status == RPM_SUSPENDED &&
@@ -1494,7 +1525,6 @@ void pm_runtime_init(struct device *dev)
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
-	dev->power.accounting_timestamp = jiffies;
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
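
A short sketch of a consumer of the newly exported helper (the caller is
hypothetical; it converts the nanosecond counter to milliseconds the same way
the sysfs attributes in the next diff do):

#include <linux/pm_runtime.h>
#include <linux/ktime.h>
#include <asm/div64.h>

/* Hypothetical: report how long @dev has been runtime-suspended, in ms. */
static u64 example_suspended_ms(struct device *dev)
{
	u64 ns = pm_runtime_suspended_time(dev);

	do_div(ns, NSEC_PER_MSEC);
	return ns;
}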
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d713738ce796..c6bf76124184 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -125,9 +125,12 @@ static ssize_t runtime_active_time_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
 	int ret;
+	u64 tmp;
 	spin_lock_irq(&dev->power.lock);
 	update_pm_runtime_accounting(dev);
-	ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+	tmp = dev->power.active_time;
+	do_div(tmp, NSEC_PER_MSEC);
+	ret = sprintf(buf, "%llu\n", tmp);
 	spin_unlock_irq(&dev->power.lock);
 	return ret;
 }
@@ -138,10 +141,12 @@ static ssize_t runtime_suspended_time_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
 	int ret;
+	u64 tmp;
 	spin_lock_irq(&dev->power.lock);
 	update_pm_runtime_accounting(dev);
-	ret = sprintf(buf, "%i\n",
-		jiffies_to_msecs(dev->power.suspended_jiffies));
+	tmp = dev->power.suspended_time;
+	do_div(tmp, NSEC_PER_MSEC);
+	ret = sprintf(buf, "%llu\n", tmp);
 	spin_unlock_irq(&dev->power.lock);
 	return ret;
 }
@@ -648,6 +653,10 @@ int dpm_sysfs_add(struct device *dev)
 {
 	int rc;
 
+	/* No need to create PM sysfs if explicitly disabled. */
+	if (device_pm_not_required(dev))
+		return 0;
+
 	rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
 	if (rc)
 		return rc;
@@ -727,6 +736,8 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+	if (device_pm_not_required(dev))
+		return;
 	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
 	dev_pm_qos_constraints_destroy(dev);
 	rpm_sysfs_remove(dev);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5fa1898755a3..f1fee72ed970 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -783,7 +783,7 @@ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
 EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
 
 /**
- * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
  * @dev: Device the wakeup event is related to.
  * @msec: Anticipated event processing time (in milliseconds).
  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 608af20a3494..b22e6bba71f1 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -207,8 +207,6 @@ comment "CPU frequency scaling drivers"
 config CPUFREQ_DT
 	tristate "Generic DT based cpufreq driver"
 	depends on HAVE_CLK && OF
-	# if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
-	depends on !CPU_THERMAL || THERMAL
 	select CPUFREQ_DT_PLATDEV
 	select PM_OPP
 	help
@@ -327,7 +325,6 @@ endif
 config QORIQ_CPUFREQ
 	tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
 	depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64)
-	depends on !CPU_THERMAL || THERMAL
 	select CLK_QORIQ
 	help
 	  This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1a6778e81f90..179a1d302f48 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -25,12 +25,21 @@ config ARM_ARMADA_37XX_CPUFREQ
 	  This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
 	  The Armada 37xx PMU supports 4 frequency and VDD levels.
 
+config ARM_ARMADA_8K_CPUFREQ
+	tristate "Armada 8K CPUFreq driver"
+	depends on ARCH_MVEBU && CPUFREQ_DT
+	help
+	  This enables the CPUFreq driver support for Marvell
+	  Armada8k SOCs.
+	  Armada8K device has the AP806 which supports scaling
+	  to any full integer divider.
+
+	  If in doubt, say N.
+
 # big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
 	depends on ARM_CPU_TOPOLOGY && HAVE_CLK
-	# if CPU_THERMAL is on and THERMAL=m, ARM_BIT_LITTLE_CPUFREQ cannot be =y
-	depends on !CPU_THERMAL || THERMAL
 	select PM_OPP
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -38,7 +47,6 @@ config ARM_BIG_LITTLE_CPUFREQ
 config ARM_SCPI_CPUFREQ
 	tristate "SCPI based CPUfreq driver"
 	depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
-	depends on !CPU_THERMAL || THERMAL
 	help
 	  This adds the CPUfreq driver support for ARM platforms using SCPI
 	  protocol for CPU power management.
@@ -93,7 +101,6 @@ config ARM_KIRKWOOD_CPUFREQ
 config ARM_MEDIATEK_CPUFREQ
 	tristate "CPU Frequency scaling support for MediaTek SoCs"
 	depends on ARCH_MEDIATEK && REGULATOR
-	depends on !CPU_THERMAL || THERMAL
 	select PM_OPP
 	help
 	  This adds the CPUFreq driver support for MediaTek SoCs.
@@ -233,7 +240,6 @@ config ARM_SA1110_CPUFREQ
 config ARM_SCMI_CPUFREQ
 	tristate "SCMI based CPUfreq driver"
 	depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
-	depends on !CPU_THERMAL || THERMAL
 	select PM_OPP
 	help
 	  This adds the CPUfreq driver support for ARM platforms using SCMI
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 08c071be2491..689b26c6f949 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
 obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)	+= arm_big_little.o
 
 obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ)	+= armada-37xx-cpufreq.o
+obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ)	+= armada-8k-cpufreq.o
 obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ)	+= brcmstb-avs-cpufreq.o
 obj-$(CONFIG_ACPI_CPPC_CPUFREQ)		+= cppc_cpufreq.o
 obj-$(CONFIG_ARCH_DAVINCI)		+= davinci-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d62fd374d5c7..c72258a44ba4 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -916,8 +916,10 @@ static void __init acpi_cpufreq_boost_init(void)
 {
 	int ret;
 
-	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
+	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
+		pr_debug("Boost capabilities not present in the processor\n");
 		return;
+	}
 
 	acpi_cpufreq_driver.set_boost = set_boost;
 	acpi_cpufreq_driver.boost_enabled = boost_state(0);
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index cf62a1f64dd7..7fe52fcddcf1 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -487,6 +487,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency =
 				arm_bL_ops->get_transition_latency(cpu_dev);
 
+	dev_pm_opp_of_register_em(policy->cpus);
+
 	if (is_bL_switching_enabled())
 		per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
 
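
Note: dev_pm_opp_of_register_em() is the new OPP helper that registers
an Energy Model for the policy's CPUs, which energy-aware scheduling
(EAS) consumes. The same one-line pattern recurs in several drivers in
this pull; a sketch of where it sits in a driver (hypothetical foo_*
names):

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		/* ... clock, OPP and frequency-table setup ... */
		dev_pm_opp_of_register_em(policy->cpus);	/* EAS model */
		return 0;
	}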
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
new file mode 100644
index 000000000000..b3f4bd647e9b
--- /dev/null
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPUFreq support for Armada 8K
+ *
+ * Copyright (C) 2018 Marvell
+ *
+ * Omri Itach <omrii@marvell.com>
+ * Gregory Clement <gregory.clement@bootlin.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+/*
+ * Setup the opps list with the divider for the max frequency, that
+ * will be filled at runtime.
+ */
+static const int opps_div[] __initconst = {1, 2, 3, 4};
+
+static struct platform_device *armada_8k_pdev;
+
+struct freq_table {
+	struct device *cpu_dev;
+	unsigned int freq[ARRAY_SIZE(opps_div)];
+};
+
+/* If the CPUs share the same clock, then they are in the same cluster. */
+static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
+					      struct cpumask *cpumask)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct device *cpu_dev;
+		struct clk *clk;
+
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_warn("Failed to get cpu%d device\n", cpu);
+			continue;
+		}
+
+		clk = clk_get(cpu_dev, 0);
+		if (IS_ERR(clk)) {
+			pr_warn("Cannot get clock for CPU %d\n", cpu);
+		} else {
+			if (clk_is_match(clk, cur_clk))
+				cpumask_set_cpu(cpu, cpumask);
+
+			clk_put(clk);
+		}
+	}
+}
+
+static int __init armada_8k_add_opp(struct clk *clk, struct device *cpu_dev,
+				    struct freq_table *freq_tables,
+				    int opps_index)
+{
+	unsigned int cur_frequency;
+	unsigned int freq;
+	int i, ret;
+
+	/* Get nominal (current) CPU frequency. */
+	cur_frequency = clk_get_rate(clk);
+	if (!cur_frequency) {
+		dev_err(cpu_dev, "Failed to get clock rate for this CPU\n");
+		return -EINVAL;
+	}
+
+	freq_tables[opps_index].cpu_dev = cpu_dev;
+
+	for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
+		freq = cur_frequency / opps_div[i];
+
+		ret = dev_pm_opp_add(cpu_dev, freq, 0);
+		if (ret)
+			return ret;
+
+		freq_tables[opps_index].freq[i] = freq;
+	}
+
+	return 0;
+}
+
+static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
+{
+	int opps_index, nb_cpus = num_possible_cpus();
+
+	for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) {
+		int i;
+
+		/* If cpu_dev is NULL then we reached the end of the array */
+		if (!freq_tables[opps_index].cpu_dev)
+			break;
+
+		for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
+			/*
+			 * A 0Hz frequency is not valid, this meant
+			 * that it was not yet initialized so there is
+			 * no more opp to free
+			 */
+			if (freq_tables[opps_index].freq[i] == 0)
+				break;
+
+			dev_pm_opp_remove(freq_tables[opps_index].cpu_dev,
+					  freq_tables[opps_index].freq[i]);
+		}
+	}
+
+	kfree(freq_tables);
+}
+
+static int __init armada_8k_cpufreq_init(void)
+{
+	int ret = 0, opps_index = 0, cpu, nb_cpus;
+	struct freq_table *freq_tables;
+	struct device_node *node;
+	struct cpumask cpus;
+
+	node = of_find_compatible_node(NULL, NULL, "marvell,ap806-cpu-clock");
+	if (!node || !of_device_is_available(node)) {
+		of_node_put(node);
+		return -ENODEV;
+	}
+
+	nb_cpus = num_possible_cpus();
+	freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
+	cpumask_copy(&cpus, cpu_possible_mask);
+
+	/*
+	 * For each CPU, this loop registers the operating points
+	 * supported (which are the nominal CPU frequency and full integer
+	 * divisions of it).
+	 */
+	for_each_cpu(cpu, &cpus) {
+		struct cpumask shared_cpus;
+		struct device *cpu_dev;
+		struct clk *clk;
+
+		cpu_dev = get_cpu_device(cpu);
+
+		if (!cpu_dev) {
+			pr_err("Cannot get CPU %d\n", cpu);
+			continue;
+		}
+
+		clk = clk_get(cpu_dev, 0);
+
+		if (IS_ERR(clk)) {
+			pr_err("Cannot get clock for CPU %d\n", cpu);
+			ret = PTR_ERR(clk);
+			goto remove_opp;
+		}
+
+		ret = armada_8k_add_opp(clk, cpu_dev, freq_tables, opps_index);
+		if (ret) {
+			clk_put(clk);
+			goto remove_opp;
+		}
+
+		opps_index++;
+		cpumask_clear(&shared_cpus);
+		armada_8k_get_sharing_cpus(clk, &shared_cpus);
+		dev_pm_opp_set_sharing_cpus(cpu_dev, &shared_cpus);
+		cpumask_andnot(&cpus, &cpus, &shared_cpus);
+		clk_put(clk);
+	}
+
+	armada_8k_pdev = platform_device_register_simple("cpufreq-dt", -1,
+							 NULL, 0);
+	ret = PTR_ERR_OR_ZERO(armada_8k_pdev);
+	if (ret)
+		goto remove_opp;
+
+	platform_set_drvdata(armada_8k_pdev, freq_tables);
+
+	return 0;
+
+remove_opp:
+	armada_8k_cpufreq_free_table(freq_tables);
+	return ret;
+}
+module_init(armada_8k_cpufreq_init);
+
+static void __exit armada_8k_cpufreq_exit(void)
+{
+	struct freq_table *freq_tables = platform_get_drvdata(armada_8k_pdev);
+
+	platform_device_unregister(armada_8k_pdev);
+	armada_8k_cpufreq_free_table(freq_tables);
+}
+module_exit(armada_8k_cpufreq_exit);
+
+MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>");
+MODULE_DESCRIPTION("Armada 8K cpufreq driver");
+MODULE_LICENSE("GPL");
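
Note: the driver above derives each cluster's operating points from the
nominal clock rate and the full-integer dividers in opps_div[]. For a
hypothetical 2 GHz AP806 cluster, the table registered through
dev_pm_opp_add() would be:

	2000000000 / 1 = 2000000000 Hz	(nominal frequency)
	2000000000 / 2 = 1000000000 Hz
	2000000000 / 3 =  666666666 Hz	(integer division)
	2000000000 / 4 =  500000000 Hz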
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index fd25c21cee72..2ae978d27e61 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -42,6 +42,66 @@
  */
 static struct cppc_cpudata **all_cpu_data;
 
+struct cppc_workaround_oem_info {
+	char oem_id[ACPI_OEM_ID_SIZE +1];
+	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+	u32 oem_revision;
+};
+
+static bool apply_hisi_workaround;
+
+static struct cppc_workaround_oem_info wa_info[] = {
+	{
+		.oem_id		= "HISI ",
+		.oem_table_id	= "HIP07 ",
+		.oem_revision	= 0,
+	}, {
+		.oem_id		= "HISI ",
+		.oem_table_id	= "HIP08 ",
+		.oem_revision	= 0,
+	}
+};
+
+static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
+					     unsigned int perf);
+
+/*
+ * HISI platform does not support delivered performance counter and
+ * reference performance counter. It can calculate the performance using the
+ * platform specific mechanism. We reuse the desired performance register to
+ * store the real performance calculated by the platform.
+ */
+static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum)
+{
+	struct cppc_cpudata *cpudata = all_cpu_data[cpunum];
+	u64 desired_perf;
+	int ret;
+
+	ret = cppc_get_desired_perf(cpunum, &desired_perf);
+	if (ret < 0)
+		return -EIO;
+
+	return cppc_cpufreq_perf_to_khz(cpudata, desired_perf);
+}
+
+static void cppc_check_hisi_workaround(void)
+{
+	struct acpi_table_header *tbl;
+	acpi_status status = AE_OK;
+	int i;
+
+	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
+	if (ACPI_FAILURE(status) || !tbl)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+		    wa_info[i].oem_revision == tbl->oem_revision)
+			apply_hisi_workaround = true;
+	}
+}
+
 /* Callback function used to retrieve the max frequency from DMI */
 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
 {
@@ -334,6 +394,9 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
 	struct cppc_cpudata *cpu = all_cpu_data[cpunum];
 	int ret;
 
+	if (apply_hisi_workaround)
+		return hisi_cppc_cpufreq_get_rate(cpunum);
+
 	ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
 	if (ret)
 		return ret;
@@ -386,6 +449,8 @@ static int __init cppc_cpufreq_init(void)
 		goto out;
 	}
 
+	cppc_check_hisi_workaround();
+
 	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
 	if (ret)
 		goto out;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index e58bfcb1169e..bde28878725b 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -13,7 +13,6 @@
 
 #include <linux/clk.h>
 #include <linux/cpu.h>
-#include <linux/cpu_cooling.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/err.h>
@@ -30,7 +29,6 @@
 struct private_data {
 	struct opp_table *opp_table;
 	struct device *cpu_dev;
-	struct thermal_cooling_device *cdev;
 	const char *reg_name;
 	bool have_static_opps;
 };
@@ -280,6 +278,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = transition_latency;
 	policy->dvfs_possible_from_any_cpu = true;
 
+	dev_pm_opp_of_register_em(policy->cpus);
+
 	return 0;
 
 out_free_cpufreq_table:
@@ -297,11 +297,25 @@ out_put_clk:
 	return ret;
 }
 
+static int cpufreq_online(struct cpufreq_policy *policy)
+{
+	/* We did light-weight tear down earlier, nothing to do here */
+	return 0;
+}
+
+static int cpufreq_offline(struct cpufreq_policy *policy)
+{
+	/*
+	 * Preserve policy->driver_data and don't free resources on light-weight
+	 * tear down.
+	 */
+	return 0;
+}
+
 static int cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct private_data *priv = policy->driver_data;
 
-	cpufreq_cooling_unregister(priv->cdev);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	if (priv->have_static_opps)
 		dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
@@ -314,21 +328,16 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct private_data *priv = policy->driver_data;
-
-	priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
 static struct cpufreq_driver dt_cpufreq_driver = {
-	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+		 CPUFREQ_IS_COOLING_DEV,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = set_target,
 	.get = cpufreq_generic_get,
 	.init = cpufreq_init,
 	.exit = cpufreq_exit,
-	.ready = cpufreq_ready,
+	.online = cpufreq_online,
+	.offline = cpufreq_offline,
 	.name = "cpufreq-dt",
 	.attr = cpufreq_dt_attr,
 	.suspend = cpufreq_generic_suspend,
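
Note: with the new light-weight callbacks, cpufreq-dt keeps its
resources across CPU offline/online cycles; the core only falls back to
->exit() when the policy itself is finally freed. A sketch of the driver
shape this enables (hypothetical foo_* names):

	static struct cpufreq_driver foo_driver = {
		.init	 = foo_init,	/* full setup on first online */
		.online	 = foo_online,	/* cheap re-activation */
		.offline = foo_offline,	/* cheap deactivation, keeps data */
		.exit	 = foo_exit,	/* full teardown */
	};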
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e35a886e00bc..0e626b00053b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -19,6 +19,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpu_cooling.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/init.h>
@@ -545,13 +546,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
 static ssize_t show_boost(struct kobject *kobj,
-			  struct attribute *attr, char *buf)
+			  struct kobj_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
 }
 
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
 			   const char *buf, size_t count)
 {
 	int ret, enable;
 
@@ -1200,28 +1201,39 @@ static int cpufreq_online(unsigned int cpu)
 		return -ENOMEM;
 	}
 
-	cpumask_copy(policy->cpus, cpumask_of(cpu));
+	if (!new_policy && cpufreq_driver->online) {
+		ret = cpufreq_driver->online(policy);
+		if (ret) {
+			pr_debug("%s: %d: initialization failed\n", __func__,
+				 __LINE__);
+			goto out_exit_policy;
+		}
 
-	/* call driver. From then on the cpufreq must be able
-	 * to accept all calls to ->verify and ->setpolicy for this CPU
-	 */
-	ret = cpufreq_driver->init(policy);
-	if (ret) {
-		pr_debug("initialization failed\n");
-		goto out_free_policy;
-	}
+		/* Recover policy->cpus using related_cpus */
+		cpumask_copy(policy->cpus, policy->related_cpus);
+	} else {
+		cpumask_copy(policy->cpus, cpumask_of(cpu));
 
-	ret = cpufreq_table_validate_and_sort(policy);
-	if (ret)
-		goto out_exit_policy;
+		/*
+		 * Call driver. From then on the cpufreq must be able
+		 * to accept all calls to ->verify and ->setpolicy for this CPU.
+		 */
+		ret = cpufreq_driver->init(policy);
+		if (ret) {
+			pr_debug("%s: %d: initialization failed\n", __func__,
+				 __LINE__);
+			goto out_free_policy;
+		}
 
-	down_write(&policy->rwsem);
+		ret = cpufreq_table_validate_and_sort(policy);
+		if (ret)
+			goto out_exit_policy;
 
-	if (new_policy) {
 		/* related_cpus should at least include policy->cpus. */
 		cpumask_copy(policy->related_cpus, policy->cpus);
 	}
 
+	down_write(&policy->rwsem);
 	/*
 	 * affected cpus must always be the one, which are online. We aren't
 	 * managing offline cpus here.
@@ -1305,8 +1317,6 @@ static int cpufreq_online(unsigned int cpu)
 	if (ret) {
 		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
 		       __func__, cpu, ret);
-		/* cpufreq_policy_free() will notify based on this */
-		new_policy = false;
 		goto out_destroy_policy;
 	}
 
@@ -1318,6 +1328,10 @@ static int cpufreq_online(unsigned int cpu)
 	if (cpufreq_driver->ready)
 		cpufreq_driver->ready(policy);
 
+	if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
+	    cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV)
+		policy->cdev = of_cpufreq_cooling_register(policy);
+
 	pr_debug("initialization complete\n");
 
 	return 0;
@@ -1405,6 +1419,12 @@ static int cpufreq_offline(unsigned int cpu)
 		goto unlock;
 	}
 
+	if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
+	    cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) {
+		cpufreq_cooling_unregister(policy->cdev);
+		policy->cdev = NULL;
+	}
+
 	if (cpufreq_driver->stop_cpu)
 		cpufreq_driver->stop_cpu(policy);
 
@@ -1412,11 +1432,12 @@ static int cpufreq_offline(unsigned int cpu)
 	cpufreq_exit_governor(policy);
 
 	/*
-	 * Perform the ->exit() even during light-weight tear-down,
-	 * since this is a core component, and is essential for the
-	 * subsequent light-weight ->init() to succeed.
+	 * Perform the ->offline() during light-weight tear-down, as
+	 * that allows fast recovery when the CPU comes back.
 	 */
-	if (cpufreq_driver->exit) {
+	if (cpufreq_driver->offline) {
+		cpufreq_driver->offline(policy);
+	} else if (cpufreq_driver->exit) {
 		cpufreq_driver->exit(policy);
 		policy->freq_table = NULL;
 	}
@@ -1445,8 +1466,13 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	cpumask_clear_cpu(cpu, policy->real_cpus);
 	remove_cpu_dev_symlink(policy, dev);
 
-	if (cpumask_empty(policy->real_cpus))
+	if (cpumask_empty(policy->real_cpus)) {
+		/* We did light-weight exit earlier, do full tear down now */
+		if (cpufreq_driver->offline)
+			cpufreq_driver->exit(policy);
+
 		cpufreq_policy_free(policy);
+	}
 }
 
 /**
@@ -2192,12 +2218,25 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_get_policy);
 
-/*
- * policy : current policy.
- * new_policy: policy to be set.
+/**
+ * cpufreq_set_policy - Modify cpufreq policy parameters.
+ * @policy: Policy object to modify.
+ * @new_policy: New policy data.
+ *
+ * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
+ * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
+ * the driver's ->verify() callback again and run the notifiers for it again
+ * with the CPUFREQ_NOTIFY value.  Next, copy the min and max parameters
+ * of @new_policy to @policy and either invoke the driver's ->setpolicy()
+ * callback (if present) or carry out a governor update for @policy.  That is,
+ * run the current governor's ->limits() callback (if the governor field in
+ * @new_policy points to the same object as the one in @policy) or replace the
+ * governor for @policy with the new one stored in @new_policy.
+ *
+ * The cpuinfo part of @policy is not updated by this function.
  */
 static int cpufreq_set_policy(struct cpufreq_policy *policy,
 			      struct cpufreq_policy *new_policy)
 {
 	struct cpufreq_governor *old_gov;
 	int ret;
@@ -2247,11 +2286,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	if (cpufreq_driver->setpolicy) {
 		policy->policy = new_policy->policy;
 		pr_debug("setting range\n");
-		return cpufreq_driver->setpolicy(new_policy);
+		return cpufreq_driver->setpolicy(policy);
 	}
 
 	if (new_policy->governor == policy->governor) {
-		pr_debug("cpufreq: governor limits update\n");
+		pr_debug("governor limits update\n");
 		cpufreq_governor_limits(policy);
 		return 0;
 	}
@@ -2272,7 +2311,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	if (!ret) {
 		ret = cpufreq_start_governor(policy);
 		if (!ret) {
-			pr_debug("cpufreq: governor change\n");
+			pr_debug("governor change\n");
 			sched_cpufreq_governor_change(policy, old_gov);
 			return 0;
 		}
@@ -2293,11 +2332,14 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 }
 
 /**
- * cpufreq_update_policy - re-evaluate an existing cpufreq policy
- * @cpu: CPU which shall be re-evaluated
+ * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
+ * @cpu: CPU to re-evaluate the policy for.
  *
- * Useful for policy notifiers which have different necessities
- * at different times.
+ * Update the current frequency for the cpufreq policy of @cpu and use
+ * cpufreq_set_policy() to re-apply the min and max limits saved in the
+ * user_policy sub-structure of that policy, which triggers the evaluation
+ * of policy notifiers and the cpufreq driver's ->verify() callback for the
+ * policy in question, among other things.
  */
 void cpufreq_update_policy(unsigned int cpu)
 {
@@ -2312,23 +2354,18 @@ void cpufreq_update_policy(unsigned int cpu)
 	if (policy_is_inactive(policy))
 		goto unlock;
 
-	pr_debug("updating policy for CPU %u\n", cpu);
-	memcpy(&new_policy, policy, sizeof(*policy));
-	new_policy.min = policy->user_policy.min;
-	new_policy.max = policy->user_policy.max;
-
 	/*
 	 * BIOS might change freq behind our back
 	 * -> ask driver for current freq and notify governors about a change
 	 */
-	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
-		if (cpufreq_suspended)
-			goto unlock;
+	if (cpufreq_driver->get && !cpufreq_driver->setpolicy &&
+	    (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy))))
+		goto unlock;
 
-		new_policy.cur = cpufreq_update_current_freq(policy);
-		if (WARN_ON(!new_policy.cur))
-			goto unlock;
-	}
+	pr_debug("updating policy for CPU %u\n", cpu);
+	memcpy(&new_policy, policy, sizeof(*policy));
+	new_policy.min = policy->user_policy.min;
+	new_policy.max = policy->user_policy.max;
 
 	cpufreq_set_policy(policy, &new_policy);
 
@@ -2479,7 +2516,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 						    driver_data->target) ||
 	     (driver_data->setpolicy && (driver_data->target_index ||
 		    driver_data->target)) ||
-	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
+	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
+	     (!driver_data->online != !driver_data->offline))
 		return -EINVAL;
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
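
Note: the new term in the cpufreq_register_driver() sanity check uses
"!x != !y" as a boolean XOR, so a driver supplying only one of the new
callbacks is rejected at registration time. Sketch of the condition:

	/* True (driver rejected) when exactly one callback is set. */
	(!driver_data->online != !driver_data->offline)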
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1572129844a5..e2db5581489a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -31,26 +31,27 @@ static void cpufreq_stats_update(struct cpufreq_stats *stats)
 {
 	unsigned long long cur_time = get_jiffies_64();
 
-	spin_lock(&cpufreq_stats_lock);
 	stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
 	stats->last_time = cur_time;
-	spin_unlock(&cpufreq_stats_lock);
 }
 
 static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
 {
 	unsigned int count = stats->max_state;
 
+	spin_lock(&cpufreq_stats_lock);
 	memset(stats->time_in_state, 0, count * sizeof(u64));
 	memset(stats->trans_table, 0, count * count * sizeof(int));
 	stats->last_time = get_jiffies_64();
 	stats->total_trans = 0;
+	spin_unlock(&cpufreq_stats_lock);
 }
 
 static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
 {
 	return sprintf(buf, "%d\n", policy->stats->total_trans);
 }
+cpufreq_freq_attr_ro(total_trans);
 
 static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 {
@@ -61,7 +62,10 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 	if (policy->fast_switch_enabled)
 		return 0;
 
+	spin_lock(&cpufreq_stats_lock);
 	cpufreq_stats_update(stats);
+	spin_unlock(&cpufreq_stats_lock);
+
 	for (i = 0; i < stats->state_num; i++) {
 		len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
 			(unsigned long long)
@@ -69,6 +73,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 	}
 	return len;
 }
+cpufreq_freq_attr_ro(time_in_state);
 
 static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
 			   size_t count)
@@ -77,6 +82,7 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
 	cpufreq_stats_clear_table(policy->stats);
 	return count;
 }
+cpufreq_freq_attr_wo(reset);
 
 static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 {
@@ -126,10 +132,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 }
 cpufreq_freq_attr_ro(trans_table);
 
-cpufreq_freq_attr_ro(total_trans);
-cpufreq_freq_attr_ro(time_in_state);
-cpufreq_freq_attr_wo(reset);
-
 static struct attribute *default_attrs[] = {
 	&total_trans.attr,
 	&time_in_state.attr,
@@ -240,9 +242,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
 	if (old_index == -1 || new_index == -1 || old_index == new_index)
 		return;
 
+	spin_lock(&cpufreq_stats_lock);
 	cpufreq_stats_update(stats);
 
 	stats->last_index = new_index;
 	stats->trans_table[old_index * stats->max_state + new_index]++;
 	stats->total_trans++;
+	spin_unlock(&cpufreq_stats_lock);
 }
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index d54a27c99121..940fe85db97a 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -23,13 +23,10 @@
 #include <linux/init.h>
 #include <linux/err.h>
 #include <linux/clk.h>
+#include <linux/platform_data/davinci-cpufreq.h>
 #include <linux/platform_device.h>
 #include <linux/export.h>
 
-#include <mach/hardware.h>
-#include <mach/cpufreq.h>
-#include <mach/common.h>
-
 struct davinci_cpufreq {
 	struct device *dev;
 	struct clk *armclk;
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 60bea302abbe..2d3ef208dd70 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -323,9 +323,8 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
 		states = 2;
 
 	/* Allocate private data and frequency table for current cpu */
-	centaur = kzalloc(sizeof(*centaur)
-		    + (states + 1) * sizeof(struct cpufreq_frequency_table),
-		    GFP_KERNEL);
+	centaur = kzalloc(struct_size(centaur, freq_table, states + 1),
+			  GFP_KERNEL);
 	if (!centaur)
 		return -ENOMEM;
 	eps_cpu[0] = centaur;
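
Note: struct_size() from <linux/overflow.h> replaces the open-coded
size computation for a struct with a trailing array; it evaluates to
roughly sizeof(*p) + n * sizeof(p->member[0]), but saturates at
SIZE_MAX on arithmetic overflow so the allocation fails cleanly instead
of being undersized. The general pattern:

	/* Overflow-safe allocation of a struct plus n trailing elements. */
	p = kzalloc(struct_size(p, member, n), GFP_KERNEL);
	if (!p)
		return -ENOMEM;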
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 9fedf627e000..a4ff09f91c8f 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -9,7 +9,6 @@
 #include <linux/clk.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/nvmem-consumer.h>
@@ -52,7 +51,6 @@ static struct clk_bulk_data clks[] = {
 };
 
 static struct device *cpu_dev;
-static struct thermal_cooling_device *cdev;
 static bool free_opp;
 static struct cpufreq_frequency_table *freq_table;
 static unsigned int max_freq;
@@ -193,16 +191,6 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 	return 0;
 }
 
-static void imx6q_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	cdev = of_cpufreq_cooling_register(policy);
-
-	if (!cdev)
-		dev_err(cpu_dev,
-			"running cpufreq without cooling device: %ld\n",
-			PTR_ERR(cdev));
-}
-
 static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
 {
 	int ret;
@@ -210,26 +198,19 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
 	policy->clk = clks[ARM].clk;
 	ret = cpufreq_generic_init(policy, freq_table, transition_latency);
 	policy->suspend_freq = max_freq;
+	dev_pm_opp_of_register_em(policy->cpus);
 
 	return ret;
 }
 
-static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_cooling_unregister(cdev);
-
-	return 0;
-}
-
 static struct cpufreq_driver imx6q_cpufreq_driver = {
-	.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+		 CPUFREQ_IS_COOLING_DEV,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = imx6q_set_target,
 	.get = cpufreq_generic_get,
 	.init = imx6q_cpufreq_init,
-	.exit = imx6q_cpufreq_exit,
 	.name = "imx6q-cpufreq",
-	.ready = imx6q_cpufreq_ready,
 	.attr = cpufreq_generic_attr,
 	.suspend = cpufreq_generic_suspend,
 };
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dd66decf2087..002f5169d4eb 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -50,6 +50,8 @@
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
+#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
+
 #define EXT_BITS 6
 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
@@ -895,7 +897,7 @@ static void intel_pstate_update_policies(void)
 /************************** sysfs begin ************************/
 #define show_one(file_name, object) \
 	static ssize_t show_##file_name \
-	(struct kobject *kobj, struct attribute *attr, char *buf) \
+	(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
 	{ \
 		return sprintf(buf, "%u\n", global.object); \
 	}
@@ -904,7 +906,7 @@ static ssize_t intel_pstate_show_status(char *buf);
 static int intel_pstate_update_status(const char *buf, size_t size);
 
 static ssize_t show_status(struct kobject *kobj,
-			   struct attribute *attr, char *buf)
+			   struct kobj_attribute *attr, char *buf)
 {
 	ssize_t ret;
 
@@ -915,7 +917,7 @@ static ssize_t show_status(struct kobject *kobj,
 	return ret;
 }
 
-static ssize_t store_status(struct kobject *a, struct attribute *b,
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
 			    const char *buf, size_t count)
 {
 	char *p = memchr(buf, '\n', count);
@@ -929,7 +931,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
 }
 
 static ssize_t show_turbo_pct(struct kobject *kobj,
-			      struct attribute *attr, char *buf)
+			      struct kobj_attribute *attr, char *buf)
 {
 	struct cpudata *cpu;
 	int total, no_turbo, turbo_pct;
@@ -955,7 +957,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
 }
 
 static ssize_t show_num_pstates(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	struct cpudata *cpu;
 	int total;
@@ -976,7 +978,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
 }
 
 static ssize_t show_no_turbo(struct kobject *kobj,
-			     struct attribute *attr, char *buf)
+			     struct kobj_attribute *attr, char *buf)
 {
 	ssize_t ret;
 
@@ -998,7 +1000,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
 	return ret;
 }
 
-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 			      const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1045,7 +1047,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 	return count;
 }
 
-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
 				  const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1075,7 +1077,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	return count;
 }
 
-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
 				  const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1107,12 +1109,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 }
 
 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%u\n", hwp_boost);
 }
 
-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+				       struct kobj_attribute *b,
 				       const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1444,12 +1447,6 @@ static int knl_get_turbo_pstate(void)
 	return ret;
 }
 
-static int intel_pstate_get_base_pstate(struct cpudata *cpu)
-{
-	return global.no_turbo || global.turbo_disabled ?
-			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
-}
-
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
@@ -1470,11 +1467,9 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
 
 static void intel_pstate_max_within_limits(struct cpudata *cpu)
 {
-	int pstate;
+	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
 
 	update_turbo_state();
-	pstate = intel_pstate_get_base_pstate(cpu);
-	pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
 	intel_pstate_set_pstate(cpu, pstate);
 }
 
@@ -1678,17 +1673,14 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu)
 static inline int32_t get_target_pstate(struct cpudata *cpu)
 {
 	struct sample *sample = &cpu->sample;
-	int32_t busy_frac, boost;
+	int32_t busy_frac;
 	int target, avg_pstate;
 
 	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
 			   sample->tsc);
 
-	boost = cpu->iowait_boost;
-	cpu->iowait_boost >>= 1;
-
-	if (busy_frac < boost)
-		busy_frac = boost;
+	if (busy_frac < cpu->iowait_boost)
+		busy_frac = cpu->iowait_boost;
 
 	sample->busy_scaled = busy_frac * 100;
 
@@ -1715,11 +1707,9 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
 
 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
 {
-	int max_pstate = intel_pstate_get_base_pstate(cpu);
-	int min_pstate;
+	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
+	int max_pstate = max(min_pstate, cpu->max_perf_ratio);
 
-	min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
-	max_pstate = max(min_pstate, cpu->max_perf_ratio);
 	return clamp_t(int, pstate, min_pstate, max_pstate);
 }
 
1725 1715
@@ -1767,29 +1757,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
 	if (smp_processor_id() != cpu->cpu)
 		return;
 
+	delta_ns = time - cpu->last_update;
 	if (flags & SCHED_CPUFREQ_IOWAIT) {
-		cpu->iowait_boost = int_tofp(1);
-		cpu->last_update = time;
-		/*
-		 * The last time the busy was 100% so P-state was max anyway
-		 * so avoid overhead of computation.
-		 */
-		if (fp_toint(cpu->sample.busy_scaled) == 100)
-			return;
-
-		goto set_pstate;
+		/* Start over if the CPU may have been idle. */
+		if (delta_ns > TICK_NSEC) {
+			cpu->iowait_boost = ONE_EIGHTH_FP;
+		} else if (cpu->iowait_boost) {
+			cpu->iowait_boost <<= 1;
+			if (cpu->iowait_boost > int_tofp(1))
+				cpu->iowait_boost = int_tofp(1);
+		} else {
+			cpu->iowait_boost = ONE_EIGHTH_FP;
+		}
 	} else if (cpu->iowait_boost) {
 		/* Clear iowait_boost if the CPU may have been idle. */
-		delta_ns = time - cpu->last_update;
 		if (delta_ns > TICK_NSEC)
 			cpu->iowait_boost = 0;
+		else
+			cpu->iowait_boost >>= 1;
 	}
 	cpu->last_update = time;
 	delta_ns = time - cpu->sample.time;
 	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
 		return;
 
-set_pstate:
 	if (intel_pstate_sample(cpu, time))
 		intel_pstate_adjust_pstate(cpu);
 }
@@ -1976,7 +1967,8 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 	if (hwp_active) {
 		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
 	} else {
-		max_state = intel_pstate_get_base_pstate(cpu);
+		max_state = global.no_turbo || global.turbo_disabled ?
+			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 		turbo_max = cpu->pstate.turbo_pstate;
 	}
 
@@ -2475,6 +2467,7 @@ static bool __init intel_pstate_no_acpi_pss(void)
 		kfree(pss);
 	}
 
+	pr_debug("ACPI _PSS not found\n");
 	return true;
 }
 
@@ -2485,9 +2478,14 @@ static bool __init intel_pstate_no_acpi_pcch(void)
 
 	status = acpi_get_handle(NULL, "\\_SB", &handle);
 	if (ACPI_FAILURE(status))
-		return true;
+		goto not_found;
+
+	if (acpi_has_method(handle, "PCCH"))
+		return false;
 
-	return !acpi_has_method(handle, "PCCH");
+not_found:
+	pr_debug("ACPI PCCH not found\n");
+	return true;
 }
 
 static bool __init intel_pstate_has_acpi_ppc(void)
@@ -2502,6 +2500,7 @@ static bool __init intel_pstate_has_acpi_ppc(void)
 		if (acpi_has_method(pr->handle, "_PPC"))
 			return true;
 	}
+	pr_debug("ACPI _PPC not found\n");
 	return false;
 }
 
@@ -2539,8 +2538,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
 	if (id) {
 		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
-		if ( misc_pwr & (1 << 8))
+		if (misc_pwr & (1 << 8)) {
+			pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
 			return true;
+		}
 	}
 
 	idx = acpi_match_platform_list(plat_info);
@@ -2606,22 +2607,28 @@ static int __init intel_pstate_init(void)
 		}
 	} else {
 		id = x86_match_cpu(intel_pstate_cpu_ids);
-		if (!id)
+		if (!id) {
+			pr_info("CPU ID not supported\n");
 			return -ENODEV;
+		}
 
 		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
 	}
 
-	if (intel_pstate_msrs_not_valid())
+	if (intel_pstate_msrs_not_valid()) {
+		pr_info("Invalid MSRs\n");
 		return -ENODEV;
+	}
 
 hwp_cpu_matched:
 	/*
 	 * The Intel pstate driver will be ignored if the platform
 	 * firmware has its own power management modes.
 	 */
-	if (intel_pstate_platform_pwr_mgmt_exists())
+	if (intel_pstate_platform_pwr_mgmt_exists()) {
+		pr_info("P-states controlled by the platform\n");
 		return -ENODEV;
+	}
 
 	if (!hwp_active && hwp_only)
 		return -ENOTSUPP;
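
Note: the reworked iowait boosting above no longer jumps straight to
the maximum P-state on the first iowait wakeup. The boost now starts at
one eighth of full scale and doubles on each iowait wakeup arriving
within a tick, capped at full scale. The resulting progression for
back-to-back I/O wakeups (fixed point, int_tofp(1) == full scale):

	ONE_EIGHTH_FP -> 1/4 -> 1/2 -> int_tofp(1) -> int_tofp(1) -> ...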
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 279bd9e9fa95..fb546e0d0356 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -851,7 +851,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
 	case TYPE_POWERSAVER:
 		pr_cont("Powersaver supported\n");
 		break;
-	};
+	}
 
 	/* Doesn't hurt */
 	longhaul_setup_southbridge();
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index eb8920d39818..48e9829274c6 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -14,7 +14,6 @@
 
 #include <linux/clk.h>
 #include <linux/cpu.h>
-#include <linux/cpu_cooling.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
@@ -48,7 +47,6 @@ struct mtk_cpu_dvfs_info {
 	struct regulator *sram_reg;
 	struct clk *cpu_clk;
 	struct clk *inter_clk;
-	struct thermal_cooling_device *cdev;
 	struct list_head list_head;
 	int intermediate_voltage;
 	bool need_voltage_tracking;
@@ -307,13 +305,6 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
 
 #define DYNAMIC_POWER "dynamic-power-coefficient"
 
-static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct mtk_cpu_dvfs_info *info = policy->driver_data;
-
-	info->cdev = of_cpufreq_cooling_register(policy);
-}
-
 static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
 {
 	struct device *cpu_dev;
@@ -465,6 +456,8 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
 	policy->driver_data = info;
 	policy->clk = info->cpu_clk;
 
+	dev_pm_opp_of_register_em(policy->cpus);
+
 	return 0;
 }
 
@@ -472,7 +465,6 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct mtk_cpu_dvfs_info *info = policy->driver_data;
 
-	cpufreq_cooling_unregister(info->cdev);
 	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
 
 	return 0;
@@ -480,13 +472,13 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver mtk_cpufreq_driver = {
 	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
-		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+		 CPUFREQ_IS_COOLING_DEV,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = mtk_cpufreq_set_target,
 	.get = cpufreq_generic_get,
 	.init = mtk_cpufreq_init,
 	.exit = mtk_cpufreq_exit,
-	.ready = mtk_cpufreq_ready,
 	.name = "mtk-cpufreq",
 	.attr = cpufreq_generic_attr,
 };
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 71e81bbf031b..68052b74d28f 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -133,8 +133,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
 
 	/* FIXME: what's the actual transition time? */
 	result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
-	if (!result)
+	if (!result) {
+		dev_pm_opp_of_register_em(policy->cpus);
 		return 0;
+	}
 
 	freq_table_free();
 fail:
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 099a849396f6..1e5e64643c3a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -268,7 +268,7 @@ static int pcc_get_offset(int cpu)
 	if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
 		ret = -ENODEV;
 		goto out_free;
-	};
+	}
 
 	offset = &(pccp->package.elements[0]);
 	if (!offset || offset->type != ACPI_TYPE_INTEGER) {
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 7e7ad3879c4e..d2230812fa4b 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -244,6 +244,7 @@ static int init_powernv_pstates(void)
 	u32 len_ids, len_freqs;
 	u32 pstate_min, pstate_max, pstate_nominal;
 	u32 pstate_turbo, pstate_ultra_turbo;
+	int rc = -ENODEV;
 
 	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
 	if (!power_mgt) {
@@ -327,8 +328,11 @@ next:
 		powernv_freqs[i].frequency = freq * 1000; /* kHz */
 		powernv_freqs[i].driver_data = id & 0xFF;
 
-		revmap_data = (struct pstate_idx_revmap_data *)
-			      kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+		revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+		if (!revmap_data) {
+			rc = -ENOMEM;
+			goto out;
+		}
 
 		revmap_data->pstate_id = id & 0xFF;
 		revmap_data->cpufreq_table_idx = i;
@@ -357,7 +361,7 @@ next:
 	return 0;
 out:
 	of_node_put(power_mgt);
-	return -ENODEV;
+	return rc;
 }
 
 /* Returns the CPU frequency corresponding to the pstate_id. */
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index d83939a1b3d4..4b0b50403901 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -10,18 +10,21 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
+#include <linux/pm_opp.h>
 #include <linux/slab.h>
 
 #define LUT_MAX_ENTRIES			40U
 #define LUT_SRC				GENMASK(31, 30)
 #define LUT_L_VAL			GENMASK(7, 0)
 #define LUT_CORE_COUNT			GENMASK(18, 16)
+#define LUT_VOLT			GENMASK(11, 0)
 #define LUT_ROW_SIZE			32
 #define CLK_HW_DIV			2
 
 /* Register offsets */
 #define REG_ENABLE			0x0
-#define REG_LUT_TABLE			0x110
+#define REG_FREQ_LUT			0x110
+#define REG_VOLT_LUT			0x114
 #define REG_PERF_STATE			0x920
 
 static unsigned long cpu_hw_rate, xo_rate;
@@ -70,11 +73,12 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
 	return policy->freq_table[index].frequency;
 }
 
-static int qcom_cpufreq_hw_read_lut(struct device *dev,
+static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
 				    struct cpufreq_policy *policy,
 				    void __iomem *base)
 {
 	u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq;
+	u32 volt;
 	unsigned int max_cores = cpumask_weight(policy->cpus);
 	struct cpufreq_frequency_table *table;
 
@@ -83,23 +87,28 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
 		return -ENOMEM;
 
 	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
-		data = readl_relaxed(base + REG_LUT_TABLE + i * LUT_ROW_SIZE);
+		data = readl_relaxed(base + REG_FREQ_LUT +
+				     i * LUT_ROW_SIZE);
 		src = FIELD_GET(LUT_SRC, data);
 		lval = FIELD_GET(LUT_L_VAL, data);
 		core_count = FIELD_GET(LUT_CORE_COUNT, data);
 
+		data = readl_relaxed(base + REG_VOLT_LUT +
+				     i * LUT_ROW_SIZE);
+		volt = FIELD_GET(LUT_VOLT, data) * 1000;
+
 		if (src)
 			freq = xo_rate * lval / 1000;
 		else
 			freq = cpu_hw_rate / 1000;
 
-		/* Ignore boosts in the middle of the table */
-		if (core_count != max_cores) {
-			table[i].frequency = CPUFREQ_ENTRY_INVALID;
-		} else {
+		if (freq != prev_freq && core_count == max_cores) {
 			table[i].frequency = freq;
-			dev_dbg(dev, "index=%d freq=%d, core_count %d\n", i,
+			dev_pm_opp_add(cpu_dev, freq * 1000, volt);
+			dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
 				freq, core_count);
+		} else {
+			table[i].frequency = CPUFREQ_ENTRY_INVALID;
 		}
 
 		/*
@@ -116,6 +125,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
 		if (prev_cc != max_cores) {
 			prev->frequency = prev_freq;
 			prev->flags = CPUFREQ_BOOST_FREQ;
+			dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt);
 		}
 
 		break;
@@ -127,6 +137,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
 
 	table[i].frequency = CPUFREQ_TABLE_END;
 	policy->freq_table = table;
+	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
 
 	return 0;
 }
@@ -159,10 +170,18 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 	struct device *dev = &global_pdev->dev;
 	struct of_phandle_args args;
 	struct device_node *cpu_np;
+	struct device *cpu_dev;
 	struct resource *res;
 	void __iomem *base;
 	int ret, index;
 
+	cpu_dev = get_cpu_device(policy->cpu);
+	if (!cpu_dev) {
+		pr_err("%s: failed to get cpu%d device\n", __func__,
+		       policy->cpu);
+		return -ENODEV;
+	}
+
 	cpu_np = of_cpu_device_node_get(policy->cpu);
 	if (!cpu_np)
 		return -EINVAL;
@@ -199,12 +218,21 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 
 	policy->driver_data = base + REG_PERF_STATE;
 
-	ret = qcom_cpufreq_hw_read_lut(dev, policy, base);
+	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy, base);
 	if (ret) {
 		dev_err(dev, "Domain-%d failed to read LUT\n", index);
 		goto error;
 	}
 
+	ret = dev_pm_opp_get_opp_count(cpu_dev);
+	if (ret <= 0) {
+		dev_err(cpu_dev, "Failed to add OPPs\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	dev_pm_opp_of_register_em(policy->cpus);
+
 	policy->fast_switch_possible = true;
 
 	return 0;
@@ -215,8 +243,10 @@ error:
 
 static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
 {
+	struct device *cpu_dev = get_cpu_device(policy->cpu);
 	void __iomem *base = policy->driver_data - REG_PERF_STATE;
 
+	dev_pm_opp_remove_all_dynamic(cpu_dev);
 	kfree(policy->freq_table);
 	devm_iounmap(&global_pdev->dev, base);
 
@@ -231,7 +261,8 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = {
 
 static struct cpufreq_driver cpufreq_qcom_hw_driver = {
 	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
-		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+		 CPUFREQ_IS_COOLING_DEV,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = qcom_cpufreq_hw_target_index,
 	.get = qcom_cpufreq_hw_get,
@@ -296,7 +327,7 @@ static int __init qcom_cpufreq_hw_init(void)
 {
 	return platform_driver_register(&qcom_cpufreq_hw_driver);
 }
-subsys_initcall(qcom_cpufreq_hw_init);
+device_initcall(qcom_cpufreq_hw_init);
 
 static void __exit qcom_cpufreq_hw_exit(void)
 {
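As a side note on the LUT changes above: each 32-bit frequency-LUT word packs a clock-source selector, an L value and a core count, and the companion voltage word carries millivolts in its low 12 bits, all pulled apart with FIELD_GET(). A stand-alone user-space sketch of that decoding might look like the following; the register values, the crystal rate and the fallback rate are invented for illustration, and FIELD_GET() is open-coded so the program builds outside the kernel.

#include <stdint.h>
#include <stdio.h>

/* Shifts/masks mirroring the GENMASK()s in the driver above. */
#define LUT_SRC_SHIFT        30
#define LUT_SRC_MASK         0x3u
#define LUT_L_VAL_MASK       0xffu
#define LUT_CORE_COUNT_SHIFT 16
#define LUT_CORE_COUNT_MASK  0x7u
#define LUT_VOLT_MASK        0xfffu

int main(void)
{
	/* Invented LUT words: src = 1 (XO-derived), 4 cores, L value 75. */
	uint32_t freq_word = (1u << LUT_SRC_SHIFT) |
			     (4u << LUT_CORE_COUNT_SHIFT) | 75u;
	uint32_t volt_word = 900u;		/* 900 mV in the low 12 bits */
	unsigned long long xo_rate = 19200000;	/* 19.2 MHz crystal, in Hz */
	unsigned long long cpu_hw_rate = 300000000 / 2; /* invented Hz / CLK_HW_DIV */

	unsigned int src = (freq_word >> LUT_SRC_SHIFT) & LUT_SRC_MASK;
	unsigned int lval = freq_word & LUT_L_VAL_MASK;
	unsigned int cores = (freq_word >> LUT_CORE_COUNT_SHIFT) & LUT_CORE_COUNT_MASK;
	unsigned int volt_uv = (volt_word & LUT_VOLT_MASK) * 1000;

	/* Same arithmetic as the read_lut() loop: the result is in kHz. */
	unsigned long long freq = src ? xo_rate * lval / 1000 : cpu_hw_rate / 1000;

	printf("freq=%llu kHz, core_count=%u, volt=%u uV\n", freq, cores, volt_uv);
	return 0;
}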
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index 2a3675c24032..dd64dcf89c74 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -42,7 +42,7 @@ enum _msm8996_version {
 	NUM_OF_MSM8996_VERSIONS,
 };
 
-struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
 
 static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 {
@@ -75,7 +75,7 @@ static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 
 static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 {
-	struct opp_table *opp_tables[NR_CPUS] = {0};
+	struct opp_table **opp_tables;
 	enum _msm8996_version msm8996_version;
 	struct nvmem_cell *speedbin_nvmem;
 	struct device_node *np;
@@ -133,6 +133,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 	}
 	kfree(speedbin);
 
+	opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL);
+	if (!opp_tables)
+		return -ENOMEM;
+
 	for_each_possible_cpu(cpu) {
 		cpu_dev = get_cpu_device(cpu);
 		if (NULL == cpu_dev) {
@@ -151,8 +155,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 
 	cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
 							  NULL, 0);
-	if (!IS_ERR(cpufreq_dt_pdev))
+	if (!IS_ERR(cpufreq_dt_pdev)) {
+		platform_set_drvdata(pdev, opp_tables);
 		return 0;
+	}
 
 	ret = PTR_ERR(cpufreq_dt_pdev);
 	dev_err(cpu_dev, "Failed to register platform device\n");
@@ -163,13 +169,23 @@ free_opp:
 			break;
 		dev_pm_opp_put_supported_hw(opp_tables[cpu]);
 	}
+	kfree(opp_tables);
 
 	return ret;
 }
 
 static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
 {
+	struct opp_table **opp_tables = platform_get_drvdata(pdev);
+	unsigned int cpu;
+
 	platform_device_unregister(cpufreq_dt_pdev);
+
+	for_each_possible_cpu(cpu)
+		dev_pm_opp_put_supported_hw(opp_tables[cpu]);
+
+	kfree(opp_tables);
+
 	return 0;
 }
 
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 3d773f64b4df..4295e5476264 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -13,7 +13,6 @@
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -31,7 +30,6 @@
 struct cpu_data {
 	struct clk **pclk;
 	struct cpufreq_frequency_table *table;
-	struct thermal_cooling_device *cdev;
 };
 
 /*
@@ -239,7 +237,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
 	struct cpu_data *data = policy->driver_data;
 
-	cpufreq_cooling_unregister(data->cdev);
 	kfree(data->pclk);
 	kfree(data->table);
 	kfree(data);
@@ -258,23 +255,15 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
 	return clk_set_parent(policy->clk, parent);
 }
 
-
-static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct cpu_data *cpud = policy->driver_data;
-
-	cpud->cdev = of_cpufreq_cooling_register(policy);
-}
-
 static struct cpufreq_driver qoriq_cpufreq_driver = {
 	.name = "qoriq_cpufreq",
-	.flags = CPUFREQ_CONST_LOOPS,
+	.flags = CPUFREQ_CONST_LOOPS |
+		 CPUFREQ_IS_COOLING_DEV,
 	.init = qoriq_cpufreq_cpu_init,
 	.exit = qoriq_cpufreq_cpu_exit,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = qoriq_cpufreq_target,
 	.get = cpufreq_generic_get,
-	.ready = qoriq_cpufreq_ready,
 	.attr = cpufreq_generic_attr,
 };
 
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index dbecd7667db2..5b4289460bc9 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -584,7 +584,7 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
 static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 {
 	struct device_node *np;
-	int id;
+	int id, result = 0;
 
 	/*
 	 * HACK: This is a temporary workaround to get access to clock
@@ -594,18 +594,39 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 	 * this whole driver as soon as S5PV210 gets migrated to use
 	 * cpufreq-dt driver.
 	 */
+	arm_regulator = regulator_get(NULL, "vddarm");
+	if (IS_ERR(arm_regulator)) {
+		if (PTR_ERR(arm_regulator) == -EPROBE_DEFER)
+			pr_debug("vddarm regulator not ready, defer\n");
+		else
+			pr_err("failed to get regulator vddarm\n");
+		return PTR_ERR(arm_regulator);
+	}
+
+	int_regulator = regulator_get(NULL, "vddint");
+	if (IS_ERR(int_regulator)) {
+		if (PTR_ERR(int_regulator) == -EPROBE_DEFER)
+			pr_debug("vddint regulator not ready, defer\n");
+		else
+			pr_err("failed to get regulator vddint\n");
+		result = PTR_ERR(int_regulator);
+		goto err_int_regulator;
+	}
+
 	np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
 	if (!np) {
 		pr_err("%s: failed to find clock controller DT node\n",
 		       __func__);
-		return -ENODEV;
+		result = -ENODEV;
+		goto err_clock;
 	}
 
 	clk_base = of_iomap(np, 0);
 	of_node_put(np);
 	if (!clk_base) {
 		pr_err("%s: failed to map clock registers\n", __func__);
-		return -EFAULT;
+		result = -EFAULT;
+		goto err_clock;
 	}
 
 	for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
@@ -614,7 +635,8 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 			pr_err("%s: failed to get alias of dmc node '%pOFn'\n",
 			       __func__, np);
 			of_node_put(np);
-			return id;
+			result = id;
+			goto err_clk_base;
 		}
 
 		dmc_base[id] = of_iomap(np, 0);
@@ -622,33 +644,40 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 			pr_err("%s: failed to map dmc%d registers\n",
 			       __func__, id);
 			of_node_put(np);
-			return -EFAULT;
+			result = -EFAULT;
+			goto err_dmc;
 		}
 	}
 
 	for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
 		if (!dmc_base[id]) {
 			pr_err("%s: failed to find dmc%d node\n", __func__, id);
-			return -ENODEV;
+			result = -ENODEV;
+			goto err_dmc;
 		}
 	}
 
-	arm_regulator = regulator_get(NULL, "vddarm");
-	if (IS_ERR(arm_regulator)) {
-		pr_err("failed to get regulator vddarm\n");
-		return PTR_ERR(arm_regulator);
-	}
-
-	int_regulator = regulator_get(NULL, "vddint");
-	if (IS_ERR(int_regulator)) {
-		pr_err("failed to get regulator vddint\n");
-		regulator_put(arm_regulator);
-		return PTR_ERR(int_regulator);
-	}
-
 	register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
 
 	return cpufreq_register_driver(&s5pv210_driver);
+
+err_dmc:
+	for (id = 0; id < ARRAY_SIZE(dmc_base); ++id)
+		if (dmc_base[id]) {
+			iounmap(dmc_base[id]);
+			dmc_base[id] = NULL;
+		}
+
+err_clk_base:
+	iounmap(clk_base);
+
+err_clock:
+	regulator_put(int_regulator);
+
+err_int_regulator:
+	regulator_put(arm_regulator);
+
+	return result;
 }
 
 static struct platform_driver s5pv210_cpufreq_platdrv = {
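The probe rework above replaces scattered early returns with a single result variable and a goto chain that unwinds resources in reverse order of acquisition. A minimal user-space sketch of the same pattern follows; the acquire()/release() helpers and resource names are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *name) { printf("get %s\n", name); return malloc(1); }
static void release(const char *name, void *p) { printf("put %s\n", name); free(p); }

static int probe(void)
{
	int result = 0;
	void *a, *b, *c;

	a = acquire("vddarm");
	if (!a)
		return -1;

	b = acquire("vddint");
	if (!b) {
		result = -1;
		goto err_a;
	}

	c = acquire("clk_base");
	if (!c) {
		result = -1;
		goto err_b;
	}

	return 0;	/* success: a, b and c stay held, like in the driver */

err_b:
	release("vddint", b);
err_a:
	release("vddarm", a);
	return result;
}

int main(void) { return probe(); }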
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 9ed46d188cb5..e6182c89df79 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -11,7 +11,7 @@
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
+#include <linux/energy_model.h>
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/pm_opp.h>
@@ -22,7 +22,6 @@
 struct scmi_data {
 	int domain_id;
 	struct device *cpu_dev;
-	struct thermal_cooling_device *cdev;
 };
 
 static const struct scmi_handle *handle;
@@ -103,13 +102,42 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 	return 0;
 }
 
+static int __maybe_unused
+scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu)
+{
+	struct device *cpu_dev = get_cpu_device(cpu);
+	unsigned long Hz;
+	int ret, domain;
+
+	if (!cpu_dev) {
+		pr_err("failed to get cpu%d device\n", cpu);
+		return -ENODEV;
+	}
+
+	domain = handle->perf_ops->device_domain_id(cpu_dev);
+	if (domain < 0)
+		return domain;
+
+	/* Get the power cost of the performance domain. */
+	Hz = *KHz * 1000;
+	ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power);
+	if (ret)
+		return ret;
+
+	/* The EM framework specifies the frequency in KHz. */
+	*KHz = Hz / 1000;
+
+	return 0;
+}
+
 static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 {
-	int ret;
+	int ret, nr_opp;
 	unsigned int latency;
 	struct device *cpu_dev;
 	struct scmi_data *priv;
 	struct cpufreq_frequency_table *freq_table;
+	struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
 
 	cpu_dev = get_cpu_device(policy->cpu);
 	if (!cpu_dev) {
@@ -136,8 +164,8 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 		return ret;
 	}
 
-	ret = dev_pm_opp_get_opp_count(cpu_dev);
-	if (ret <= 0) {
+	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+	if (nr_opp <= 0) {
 		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
 		ret = -EPROBE_DEFER;
 		goto out_free_opp;
@@ -171,6 +199,9 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = latency;
 
 	policy->fast_switch_possible = true;
+
+	em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+
 	return 0;
 
 out_free_priv:
@@ -185,7 +216,6 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct scmi_data *priv = policy->driver_data;
 
-	cpufreq_cooling_unregister(priv->cdev);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
 	kfree(priv);
@@ -193,17 +223,11 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void scmi_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct scmi_data *priv = policy->driver_data;
-
-	priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
 static struct cpufreq_driver scmi_cpufreq_driver = {
 	.name = "scmi",
 	.flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
-		 CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+		 CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+		 CPUFREQ_IS_COOLING_DEV,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.attr = cpufreq_generic_attr,
 	.target_index = scmi_cpufreq_set_target,
@@ -211,7 +235,6 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
 	.get = scmi_cpufreq_get_rate,
 	.init = scmi_cpufreq_init,
 	.exit = scmi_cpufreq_exit,
-	.ready = scmi_cpufreq_ready,
 };
 
 static int scmi_cpufreq_probe(struct scmi_device *sdev)
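The new scmi_get_cpu_power() above follows the energy-model callback contract: given a requested frequency in kHz, report a supported operating point and its power cost, updating the frequency to the point actually chosen. A rough user-space sketch of that contract is below; the OPP table is invented, and the round-up direction is an assumption based on the EM core scanning performance states from lowest to highest.

#include <stddef.h>
#include <stdio.h>

struct opp { unsigned long khz; unsigned long mw; };

/* Invented operating points: frequency in kHz, power cost in mW. */
static const struct opp opps[] = {
	{  500000,  80 },
	{ 1000000, 220 },
	{ 1500000, 450 },
};

/*
 * Same shape as an EM_DATA_CB() callback: fill *power with the cost of the
 * lowest supported operating point at or above *khz, update *khz to match.
 */
static int get_cpu_power(unsigned long *power, unsigned long *khz, int cpu)
{
	size_t i;

	(void)cpu;	/* a real callback would map the CPU to its perf domain */

	for (i = 0; i < sizeof(opps) / sizeof(opps[0]); i++) {
		if (opps[i].khz >= *khz) {
			*khz = opps[i].khz;
			*power = opps[i].mw;
			return 0;
		}
	}
	return -1;	/* above the highest operating point */
}

int main(void)
{
	unsigned long khz = 600000, mw = 0;

	if (!get_cpu_power(&mw, &khz, 0))
		printf("EM entry: %lu kHz -> %lu mW\n", khz, mw);
	return 0;
}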
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 99449738faa4..3f49427766b8 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -22,7 +22,6 @@
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
@@ -34,7 +33,6 @@
 struct scpi_data {
 	struct clk *clk;
 	struct device *cpu_dev;
-	struct thermal_cooling_device *cdev;
 };
 
 static struct scpi_ops *scpi_ops;
@@ -170,6 +168,9 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = latency;
 
 	policy->fast_switch_possible = false;
+
+	dev_pm_opp_of_register_em(policy->cpus);
+
 	return 0;
 
 out_free_cpufreq_table:
@@ -186,7 +187,6 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct scpi_data *priv = policy->driver_data;
 
-	cpufreq_cooling_unregister(priv->cdev);
 	clk_put(priv->clk);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	kfree(priv);
@@ -195,23 +195,16 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct scpi_data *priv = policy->driver_data;
-
-	priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
 static struct cpufreq_driver scpi_cpufreq_driver = {
 	.name = "scpi-cpufreq",
 	.flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
-		 CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+		 CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+		 CPUFREQ_IS_COOLING_DEV,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.attr = cpufreq_generic_attr,
 	.get = scpi_cpufreq_get_rate,
 	.init = scpi_cpufreq_init,
 	.exit = scpi_cpufreq_exit,
-	.ready = scpi_cpufreq_ready,
 	.target_index = scpi_cpufreq_set_target,
 };
 
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index fbbcb88db061..5d8a09b82efb 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -243,8 +243,7 @@ static unsigned int speedstep_get(unsigned int cpu)
 	unsigned int speed;
 
 	/* You're supposed to ensure CPU is online. */
-	if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
-		BUG();
+	BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1));
 
 	pr_debug("detected %u kHz as current frequency\n", speed);
 	return speed;
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index ba3795e13ac6..5e748c8a5c9a 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -118,6 +118,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, priv);
 
+	of_node_put(np);
+
 	return 0;
 
 out_put_pllp_clk:
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 7e48eb5bf0a7..8caccbbd7353 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -4,7 +4,7 @@ config CPU_IDLE
 	bool "CPU idle PM support"
 	default y if ACPI || PPC_PSERIES
 	select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE)
-	select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE)
+	select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE) && !CPU_IDLE_GOV_TEO
 	help
 	  CPU idle is a generic framework for supporting software-controlled
 	  idle processor power management. It includes modular cross-platform
@@ -23,6 +23,15 @@ config CPU_IDLE_GOV_LADDER
 config CPU_IDLE_GOV_MENU
 	bool "Menu governor (for tickless system)"
 
+config CPU_IDLE_GOV_TEO
+	bool "Timer events oriented (TEO) governor (for tickless systems)"
+	help
+	  This governor implements a simplified idle state selection method
+	  focused on timer events and does not do any interactivity boosting.
+
+	  Some workloads benefit from using it and it generally should be safe
+	  to use.  Say Y here if you are not happy with the alternatives.
+
 config DT_IDLE_STATES
 	bool
 
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 53342b7f1010..add9569636b5 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -22,16 +22,12 @@
 #include "dt_idle_states.h"
 
 static int init_state_node(struct cpuidle_state *idle_state,
-			   const struct of_device_id *matches,
+			   const struct of_device_id *match_id,
 			   struct device_node *state_node)
 {
 	int err;
-	const struct of_device_id *match_id;
 	const char *desc;
 
-	match_id = of_match_node(matches, state_node);
-	if (!match_id)
-		return -ENODEV;
 	/*
 	 * CPUidle drivers are expected to initialize the const void *data
 	 * pointer of the passed in struct of_device_id array to the idle
@@ -160,6 +156,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
 {
 	struct cpuidle_state *idle_state;
 	struct device_node *state_node, *cpu_node;
+	const struct of_device_id *match_id;
 	int i, err = 0;
 	const cpumask_t *cpumask;
 	unsigned int state_idx = start_idx;
@@ -180,6 +177,12 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
 		if (!state_node)
 			break;
 
+		match_id = of_match_node(matches, state_node);
+		if (!match_id) {
+			err = -ENODEV;
+			break;
+		}
+
 		if (!of_device_is_available(state_node)) {
 			of_node_put(state_node);
 			continue;
@@ -198,7 +201,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
 		}
 
 		idle_state = &drv->states[state_idx++];
-		err = init_state_node(idle_state, matches, state_node);
+		err = init_state_node(idle_state, match_id, state_node);
 		if (err) {
 			pr_err("Parsing idle state node %pOF failed with err %d\n",
 			       state_node, err);
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile
index 1b512722689f..4d8aff5248a8 100644
--- a/drivers/cpuidle/governors/Makefile
+++ b/drivers/cpuidle/governors/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
 obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
+obj-$(CONFIG_CPU_IDLE_GOV_TEO) += teo.o
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
new file mode 100644
index 000000000000..7d05efdbd3c6
--- /dev/null
+++ b/drivers/cpuidle/governors/teo.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Timer events oriented CPU idle governor
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * The idea of this governor is based on the observation that on many systems
+ * timer events are two or more orders of magnitude more frequent than any
+ * other interrupts, so they are likely to be the most significant source of CPU
+ * wakeups from idle states. Moreover, information about what happened in the
+ * (relatively recent) past can be used to estimate whether or not the deepest
+ * idle state with target residency within the time to the closest timer is
+ * likely to be suitable for the upcoming idle time of the CPU and, if not, then
+ * which of the shallower idle states to choose.
+ *
+ * Of course, non-timer wakeup sources are more important in some use cases and
+ * they can be covered by taking a few most recent idle time intervals of the
+ * CPU into account. However, even in that case it is not necessary to consider
+ * idle duration values greater than the time till the closest timer, as the
+ * patterns that they may belong to produce average values close enough to
+ * the time till the closest timer (sleep length) anyway.
+ *
+ * Thus this governor estimates whether or not the upcoming idle time of the CPU
+ * is likely to be significantly shorter than the sleep length and selects an
+ * idle state for it in accordance with that, as follows:
+ *
+ * - Find an idle state on the basis of the sleep length and state statistics
+ *   collected over time:
+ *
+ *   o Find the deepest idle state whose target residency is less than or equal
+ *     to the sleep length.
+ *
+ *   o Select it if it matched both the sleep length and the observed idle
+ *     duration in the past more often than it matched the sleep length alone
+ *     (i.e. the observed idle duration was significantly shorter than the sleep
+ *     length matched by it).
+ *
+ *   o Otherwise, select the shallower state with the greatest matched "early"
+ *     wakeups metric.
+ *
+ * - If the majority of the most recent idle duration values are below the
+ *   target residency of the idle state selected so far, use those values to
+ *   compute the new expected idle duration and find an idle state matching it
+ *   (which has to be shallower than the one selected so far).
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/sched/clock.h>
+#include <linux/tick.h>
+
+/*
+ * The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
+ * is used for decreasing metrics on a regular basis.
+ */
+#define PULSE		1024
+#define DECAY_SHIFT	3
+
+/*
+ * Number of the most recent idle duration values to take into consideration for
+ * the detection of wakeup patterns.
+ */
+#define INTERVALS	8
+
+/**
+ * struct teo_idle_state - Idle state data used by the TEO cpuidle governor.
+ * @early_hits: "Early" CPU wakeups "matching" this state.
+ * @hits: "On time" CPU wakeups "matching" this state.
+ * @misses: CPU wakeups "missing" this state.
+ *
+ * A CPU wakeup is "matched" by a given idle state if the idle duration measured
+ * after the wakeup is between the target residency of that state and the target
+ * residency of the next one (or if this is the deepest available idle state, it
+ * "matches" a CPU wakeup when the measured idle duration is at least equal to
+ * its target residency).
+ *
+ * Also, from the TEO governor perspective, a CPU wakeup from idle is "early" if
+ * it occurs significantly earlier than the closest expected timer event (that
+ * is, early enough to match an idle state shallower than the one matching the
+ * time till the closest timer event). Otherwise, the wakeup is "on time", or
+ * it is a "hit".
+ *
+ * A "miss" occurs when the given state doesn't match the wakeup, but it matches
+ * the time till the closest timer event used for idle state selection.
+ */
+struct teo_idle_state {
+	unsigned int early_hits;
+	unsigned int hits;
+	unsigned int misses;
+};
+
+/**
+ * struct teo_cpu - CPU data used by the TEO cpuidle governor.
+ * @time_span_ns: Time between idle state selection and post-wakeup update.
+ * @sleep_length_ns: Time till the closest timer event (at the selection time).
+ * @states: Idle states data corresponding to this CPU.
+ * @last_state: Idle state entered by the CPU last time.
+ * @interval_idx: Index of the most recent saved idle interval.
+ * @intervals: Saved idle duration values.
+ */
+struct teo_cpu {
+	u64 time_span_ns;
+	u64 sleep_length_ns;
+	struct teo_idle_state states[CPUIDLE_STATE_MAX];
+	int last_state;
+	int interval_idx;
+	unsigned int intervals[INTERVALS];
+};
+
+static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
+
+/**
+ * teo_update - Update CPU data after wakeup.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ */
+static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+{
+	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+	unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);
+	int i, idx_hit = -1, idx_timer = -1;
+	unsigned int measured_us;
+
+	if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
+		/*
+		 * One of the safety nets has triggered or this was a timer
+		 * wakeup (or equivalent).
+		 */
+		measured_us = sleep_length_us;
+	} else {
+		unsigned int lat = drv->states[cpu_data->last_state].exit_latency;
+
+		measured_us = ktime_to_us(cpu_data->time_span_ns);
+		/*
+		 * The delay between the wakeup and the first instruction
+		 * executed by the CPU is not likely to be worst-case every
+		 * time, so take 1/2 of the exit latency as a very rough
+		 * approximation of the average of it.
+		 */
+		if (measured_us >= lat)
+			measured_us -= lat / 2;
+		else
+			measured_us /= 2;
+	}
+
+	/*
+	 * Decay the "early hits" metric for all of the states and find the
+	 * states matching the sleep length and the measured idle duration.
+	 */
+	for (i = 0; i < drv->state_count; i++) {
+		unsigned int early_hits = cpu_data->states[i].early_hits;
+
+		cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
+
+		if (drv->states[i].target_residency <= sleep_length_us) {
+			idx_timer = i;
+			if (drv->states[i].target_residency <= measured_us)
+				idx_hit = i;
+		}
+	}
+
+	/*
+	 * Update the "hits" and "misses" data for the state matching the sleep
+	 * length. If it matches the measured idle duration too, this is a hit,
+	 * so increase the "hits" metric for it then. Otherwise, this is a
+	 * miss, so increase the "misses" metric for it. In the latter case
+	 * also increase the "early hits" metric for the state that actually
+	 * matches the measured idle duration.
+	 */
+	if (idx_timer >= 0) {
+		unsigned int hits = cpu_data->states[idx_timer].hits;
+		unsigned int misses = cpu_data->states[idx_timer].misses;
+
+		hits -= hits >> DECAY_SHIFT;
+		misses -= misses >> DECAY_SHIFT;
+
+		if (idx_timer > idx_hit) {
+			misses += PULSE;
+			if (idx_hit >= 0)
+				cpu_data->states[idx_hit].early_hits += PULSE;
+		} else {
+			hits += PULSE;
+		}
+
+		cpu_data->states[idx_timer].misses = misses;
+		cpu_data->states[idx_timer].hits = hits;
+	}
+
+	/*
+	 * If the total time span between idle state selection and the "reflect"
+	 * callback is greater than or equal to the sleep length determined at
+	 * the idle state selection time, the wakeup is likely to be due to a
+	 * timer event.
+	 */
+	if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns)
+		measured_us = UINT_MAX;
+
+	/*
+	 * Save idle duration values corresponding to non-timer wakeups for
+	 * pattern detection.
+	 */
+	cpu_data->intervals[cpu_data->interval_idx++] = measured_us;
+	if (cpu_data->interval_idx > INTERVALS)
+		cpu_data->interval_idx = 0;
+}
+
+/**
+ * teo_find_shallower_state - Find shallower idle state matching given duration.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ * @state_idx: Index of the capping idle state.
+ * @duration_us: Idle duration value to match.
+ */
+static int teo_find_shallower_state(struct cpuidle_driver *drv,
+				    struct cpuidle_device *dev, int state_idx,
+				    unsigned int duration_us)
+{
+	int i;
+
+	for (i = state_idx - 1; i >= 0; i--) {
+		if (drv->states[i].disabled || dev->states_usage[i].disable)
+			continue;
+
+		state_idx = i;
+		if (drv->states[i].target_residency <= duration_us)
+			break;
+	}
+	return state_idx;
+}
+
+/**
+ * teo_select - Selects the next idle state to enter.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ * @stop_tick: Indication on whether or not to stop the scheduler tick.
+ */
+static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+		      bool *stop_tick)
+{
+	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+	int latency_req = cpuidle_governor_latency_req(dev->cpu);
+	unsigned int duration_us, count;
+	int max_early_idx, idx, i;
+	ktime_t delta_tick;
+
+	if (cpu_data->last_state >= 0) {
+		teo_update(drv, dev);
+		cpu_data->last_state = -1;
+	}
+
+	cpu_data->time_span_ns = local_clock();
+
+	cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick);
+	duration_us = ktime_to_us(cpu_data->sleep_length_ns);
+
+	count = 0;
+	max_early_idx = -1;
+	idx = -1;
+
+	for (i = 0; i < drv->state_count; i++) {
+		struct cpuidle_state *s = &drv->states[i];
+		struct cpuidle_state_usage *su = &dev->states_usage[i];
+
+		if (s->disabled || su->disable) {
+			/*
+			 * If the "early hits" metric of a disabled state is
+			 * greater than the current maximum, it should be taken
+			 * into account, because it would be a mistake to select
+			 * a deeper state with lower "early hits" metric. The
+			 * index cannot be changed to point to it, however, so
+			 * just increase the max count alone and let the index
+			 * still point to a shallower idle state.
+			 */
+			if (max_early_idx >= 0 &&
+			    count < cpu_data->states[i].early_hits)
+				count = cpu_data->states[i].early_hits;
+
+			continue;
+		}
+
+		if (idx < 0)
+			idx = i; /* first enabled state */
+
+		if (s->target_residency > duration_us)
+			break;
+
+		if (s->exit_latency > latency_req) {
+			/*
+			 * If we break out of the loop for latency reasons, use
+			 * the target residency of the selected state as the
+			 * expected idle duration to avoid stopping the tick
+			 * as long as that target residency is low enough.
+			 */
+			duration_us = drv->states[idx].target_residency;
+			goto refine;
+		}
+
+		idx = i;
+
+		if (count < cpu_data->states[i].early_hits &&
+		    !(tick_nohz_tick_stopped() &&
+		      drv->states[i].target_residency < TICK_USEC)) {
+			count = cpu_data->states[i].early_hits;
+			max_early_idx = i;
+		}
+	}
+
+	/*
+	 * If the "hits" metric of the idle state matching the sleep length is
+	 * greater than its "misses" metric, that is the one to use. Otherwise,
+	 * it is more likely that one of the shallower states will match the
+	 * idle duration observed after wakeup, so take the one with the maximum
+	 * "early hits" metric, but if that cannot be determined, just use the
+	 * state selected so far.
+	 */
+	if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses &&
+	    max_early_idx >= 0) {
+		idx = max_early_idx;
+		duration_us = drv->states[idx].target_residency;
+	}
+
+refine:
+	if (idx < 0) {
+		idx = 0; /* No states enabled. Must use 0. */
+	} else if (idx > 0) {
+		u64 sum = 0;
+
+		count = 0;
+
+		/*
+		 * Count and sum the most recent idle duration values less than
+		 * the target residency of the state selected so far, find the
+		 * max.
+		 */
+		for (i = 0; i < INTERVALS; i++) {
+			unsigned int val = cpu_data->intervals[i];
+
+			if (val >= drv->states[idx].target_residency)
+				continue;
+
+			count++;
+			sum += val;
+		}
+
+		/*
+		 * Give up unless the majority of the most recent idle duration
+		 * values are in the interesting range.
+		 */
+		if (count > INTERVALS / 2) {
+			unsigned int avg_us = div64_u64(sum, count);
+
+			/*
+			 * Avoid spending too much time in an idle state that
+			 * would be too shallow.
+			 */
+			if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
+				idx = teo_find_shallower_state(drv, dev, idx, avg_us);
+				duration_us = avg_us;
+			}
+		}
+	}
+
+	/*
+	 * Don't stop the tick if the selected state is a polling one or if the
+	 * expected idle duration is shorter than the tick period length.
+	 */
+	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+	    duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
+		unsigned int delta_tick_us = ktime_to_us(delta_tick);
+
+		*stop_tick = false;
+
+		/*
+		 * The tick is not going to be stopped, so if the target
+		 * residency of the state to be returned is not within the time
+		 * till the closest timer including the tick, try to correct
+		 * that.
+		 */
+		if (idx > 0 && drv->states[idx].target_residency > delta_tick_us)
+			idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us);
+	}
+
+	return idx;
+}
+
+/**
+ * teo_reflect - Note that governor data for the CPU need to be updated.
+ * @dev: Target CPU.
+ * @state: Entered state.
+ */
+static void teo_reflect(struct cpuidle_device *dev, int state)
+{
+	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+
+	cpu_data->last_state = state;
+	/*
+	 * If the wakeup was not "natural", but triggered by one of the safety
+	 * nets, assume that the CPU might have been idle for the entire sleep
+	 * length time.
+	 */
+	if (dev->poll_time_limit ||
+	    (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
+		dev->poll_time_limit = false;
+		cpu_data->time_span_ns = cpu_data->sleep_length_ns;
+	} else {
+		cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns;
+	}
+}
+
+/**
+ * teo_enable_device - Initialize the governor's data for the target CPU.
+ * @drv: cpuidle driver (not used).
+ * @dev: Target CPU.
+ */
+static int teo_enable_device(struct cpuidle_driver *drv,
+			     struct cpuidle_device *dev)
+{
+	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+	int i;
+
+	memset(cpu_data, 0, sizeof(*cpu_data));
+
+	for (i = 0; i < INTERVALS; i++)
+		cpu_data->intervals[i] = UINT_MAX;
+
+	return 0;
+}
+
+static struct cpuidle_governor teo_governor = {
+	.name = "teo",
+	.rating = 19,
+	.enable = teo_enable_device,
+	.select = teo_select,
+	.reflect = teo_reflect,
+};
+
+static int __init teo_governor_init(void)
+{
+	return cpuidle_register_governor(&teo_governor);
+}
+
+postcore_initcall(teo_governor_init);
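For intuition about the PULSE/DECAY_SHIFT bookkeeping in teo_update() above: each hits/misses/early_hits counter is an exponentially decaying sum that loses 1/8 of its value on every update and gains PULSE when its event occurs, so a counter whose event happens on every wakeup saturates near PULSE << DECAY_SHIFT (8192). A stand-alone illustration follows; the every-4th-wakeup pattern is invented for the demonstration.

#include <stdio.h>

#define PULSE		1024
#define DECAY_SHIFT	3

int main(void)
{
	unsigned int hits = 0, misses = 0;
	int wakeup;

	for (wakeup = 1; wakeup <= 40; wakeup++) {
		/* Decay both metrics by 1/8, as teo_update() does. */
		hits -= hits >> DECAY_SHIFT;
		misses -= misses >> DECAY_SHIFT;

		/* Pretend every 4th wakeup came in earlier than the timer. */
		if (wakeup % 4)
			hits += PULSE;
		else
			misses += PULSE;

		if (wakeup % 10 == 0)
			printf("wakeup %2d: hits=%u misses=%u\n",
			       wakeup, hits, misses);
	}
	return 0;
}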
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 017fc602a10e..cf7c66bb3ed9 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -5,6 +5,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/pm_runtime.h>
 #include "i915_pmu.h"
 #include "intel_ringbuffer.h"
 #include "i915_drv.h"
@@ -478,7 +479,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		 * counter value.
 		 */
 		spin_lock_irqsave(&i915->pmu.lock, flags);
-		spin_lock(&kdev->power.lock);
 
 		/*
 		 * After the above branch intel_runtime_pm_get_if_in_use failed
@@ -491,16 +491,13 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		 * suspended and if not we cannot do better than report the last
 		 * known RC6 value.
 		 */
-		if (kdev->power.runtime_status == RPM_SUSPENDED) {
-			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
-				i915->pmu.suspended_jiffies_last =
-					kdev->power.suspended_jiffies;
+		if (pm_runtime_status_suspended(kdev)) {
+			val = pm_runtime_suspended_time(kdev);
 
-			val = kdev->power.suspended_jiffies -
-			      i915->pmu.suspended_jiffies_last;
-			val += jiffies - kdev->power.accounting_timestamp;
+			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+				i915->pmu.suspended_time_last = val;
 
-			val = jiffies_to_nsecs(val);
+			val -= i915->pmu.suspended_time_last;
 			val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 
 			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
@@ -510,7 +507,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
 			val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 		}
 
-		spin_unlock(&kdev->power.lock);
 		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index b3728c5f13e7..4fc4f2478301 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -97,9 +97,9 @@ struct i915_pmu {
 	 */
 	struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
 	/**
-	 * @suspended_jiffies_last: Cached suspend time from PM core.
+	 * @suspended_time_last: Cached suspend time from PM core.
 	 */
-	unsigned long suspended_jiffies_last;
+	u64 suspended_time_last;
 	/**
 	 * @i915_attr: Memory block holding device attributes.
 	 */
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 8b5d85c91e9d..b8647b5c3d4d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1103,6 +1103,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt),
 	INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt),
 	INTEL_CPU_FAM6(ATOM_GOLDMONT_X, idle_cpu_dnv),
+	INTEL_CPU_FAM6(ATOM_TREMONT_X, idle_cpu_dnv),
 	{}
 };
 
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e06a0ab05ad6..d7f97167cac3 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -551,9 +551,8 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
 	return ret;
 }
 
-static inline int
-_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
-			  unsigned long old_freq, unsigned long freq)
+static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+					    unsigned long freq)
 {
 	int ret;
 
@@ -590,7 +589,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
 	}
 
 	/* Change frequency */
-	ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
+	ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
 	if (ret)
 		goto restore_voltage;
 
@@ -604,7 +603,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
 	return 0;
 
 restore_freq:
-	if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
+	if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
 		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
 			__func__, old_freq);
 restore_voltage:
@@ -777,7 +776,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 					     opp->supplies);
 	} else {
 		/* Only frequency scaling */
-		ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+		ret = _generic_set_opp_clk_only(dev, clk, freq);
 	}
 
 	/* Scaling down? Configure required OPPs after frequency */
@@ -811,7 +810,6 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
 					struct opp_table *opp_table)
 {
 	struct opp_device *opp_dev;
-	int ret;
 
 	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
 	if (!opp_dev)
@@ -823,10 +821,7 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
 	list_add(&opp_dev->node, &opp_table->dev_list);
 
 	/* Create debugfs entries for the opp_table */
-	ret = opp_debug_register(opp_dev, opp_table);
-	if (ret)
-		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
-			__func__, ret);
+	opp_debug_register(opp_dev, opp_table);
 
 	return opp_dev;
 }
@@ -1247,10 +1242,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
 	new_opp->opp_table = opp_table;
 	kref_init(&new_opp->kref);
 
-	ret = opp_debug_create_one(new_opp, opp_table);
-	if (ret)
-		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
-			__func__, ret);
+	opp_debug_create_one(new_opp, opp_table);
 
 	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
 		new_opp->available = false;
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index e6828e5f81b0..a1c57fe14de4 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -35,7 +35,7 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
 	debugfs_remove_recursive(opp->dentry);
 }
 
-static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
+static void opp_debug_create_supplies(struct dev_pm_opp *opp,
 				      struct opp_table *opp_table,
 				      struct dentry *pdentry)
 {
@@ -50,30 +50,21 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
 		/* Create per-opp directory */
 		d = debugfs_create_dir(name, pdentry);
 
-		if (!d)
-			return false;
-
-		if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d,
-					  &opp->supplies[i].u_volt))
-			return false;
-
-		if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d,
-					  &opp->supplies[i].u_volt_min))
-			return false;
-
-		if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d,
-					  &opp->supplies[i].u_volt_max))
-			return false;
-
-		if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
-					  &opp->supplies[i].u_amp))
-			return false;
+		debugfs_create_ulong("u_volt_target", S_IRUGO, d,
+				     &opp->supplies[i].u_volt);
+
+		debugfs_create_ulong("u_volt_min", S_IRUGO, d,
+				     &opp->supplies[i].u_volt_min);
+
+		debugfs_create_ulong("u_volt_max", S_IRUGO, d,
+				     &opp->supplies[i].u_volt_max);
+
+		debugfs_create_ulong("u_amp", S_IRUGO, d,
+				     &opp->supplies[i].u_amp);
 	}
-
-	return true;
 }
 
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
+void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
 {
 	struct dentry *pdentry = opp_table->dentry;
 	struct dentry *d;
@@ -95,40 +86,23 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
 
 	/* Create per-opp directory */
 	d = debugfs_create_dir(name, pdentry);
-	if (!d)
-		return -ENOMEM;
-
-	if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
-		return -ENOMEM;
-
-	if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
-		return -ENOMEM;
-
-	if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
-		return -ENOMEM;
-
-	if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
-		return -ENOMEM;
-
-	if (!debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate))
-		return -ENOMEM;
 
-	if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
-		return -ENOMEM;
+	debugfs_create_bool("available", S_IRUGO, d, &opp->available);
+	debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic);
+	debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
+	debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
+	debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
+	debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate);
+	debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+			     &opp->clock_latency_ns);
 
-	if (!opp_debug_create_supplies(opp, opp_table, d))
-		return -ENOMEM;
-
-	if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
-				  &opp->clock_latency_ns))
-		return -ENOMEM;
+	opp_debug_create_supplies(opp, opp_table, d);
 
 	opp->dentry = d;
-	return 0;
 }
 
-static int opp_list_debug_create_dir(struct opp_device *opp_dev,
+static void opp_list_debug_create_dir(struct opp_device *opp_dev,
 				      struct opp_table *opp_table)
 {
 	const struct device *dev = opp_dev->dev;
 	struct dentry *d;
@@ -137,36 +111,21 @@ static int opp_list_debug_create_dir(struct opp_device *opp_dev,
 
 	/* Create device specific directory */
 	d = debugfs_create_dir(opp_table->dentry_name, rootdir);
-	if (!d) {
-		dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
-		return -ENOMEM;
-	}
 
 	opp_dev->dentry = d;
 	opp_table->dentry = d;
-
-	return 0;
 }
 
-static int opp_list_debug_create_link(struct opp_device *opp_dev,
+static void opp_list_debug_create_link(struct opp_device *opp_dev,
 				      struct opp_table *opp_table)
 {
-	const struct device *dev = opp_dev->dev;
 	char name[NAME_MAX];
-	struct dentry *d;
 
 	opp_set_dev_name(opp_dev->dev, name);
 
 	/* Create device specific directory link */
-	d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
-	if (!d) {
-		dev_err(dev, "%s: Failed to create link\n", __func__);
-		return -ENOMEM;
-	}
-
-	opp_dev->dentry = d;
-
-	return 0;
+	opp_dev->dentry = debugfs_create_symlink(name, rootdir,
						 opp_table->dentry_name);
 }
 
 /**
@@ -177,20 +136,13 @@ static int opp_list_debug_create_link(struct opp_device *opp_dev,
  * Dynamically adds device specific directory in debugfs 'opp' directory. If the
  * device-opp is shared with other devices, then links will be created for all
  * devices except the first.
- *
- * Return: 0 on success, otherwise negative error.
  */
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
+void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
 {
-	if (!rootdir) {
-		pr_debug("%s: Uninitialized rootdir\n", __func__);
-		return -EINVAL;
-	}
-
 	if (opp_table->dentry)
-		return opp_list_debug_create_link(opp_dev, opp_table);
-
-	return opp_list_debug_create_dir(opp_dev, opp_table);
+		opp_list_debug_create_link(opp_dev, opp_table);
+	else
+		opp_list_debug_create_dir(opp_dev, opp_table);
 }
 
 static void opp_migrate_dentry(struct opp_device *opp_dev,
@@ -252,10 +204,6 @@ static int __init opp_debug_init(void)
 {
 	/* Create /sys/kernel/debug/opp directory */
 	rootdir = debugfs_create_dir("opp", NULL);
-	if (!rootdir) {
-		pr_err("%s: Failed to create root directory\n", __func__);
-		return -ENOMEM;
-	}
 
 	return 0;
 }
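These conversions follow the debugfs rule that callers should not check creation results: the helpers tolerate a NULL or error parent dentry, and a debugfs failure must never change driver behaviour. A minimal sketch of the resulting idiom, with hypothetical names for illustration:

#include <linux/debugfs.h>

static struct dentry *example_root;
static unsigned long example_rate;

static void example_debugfs_setup(void)
{
	struct dentry *d;

	/*
	 * No return-value checks: the creation helpers accept an
	 * error/NULL parent gracefully, and a debugfs failure must
	 * never affect the code being instrumented.
	 */
	d = debugfs_create_dir("example", example_root);
	debugfs_create_ulong("rate_hz", 0444, d, &example_rate);
}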
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 1779f2c93291..62504b18f198 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -20,6 +20,7 @@
 #include <linux/pm_domain.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/energy_model.h>
 
 #include "opp.h"
 
@@ -1049,3 +1050,101 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
 	return of_node_get(opp->np);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
+
+/*
+ * Callback function provided to the Energy Model framework upon registration.
+ * This computes the power estimated by @CPU at @kHz if it is the frequency
+ * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
+ * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
+ * frequency and @mW to the associated power. The power is estimated as
+ * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively
+ * the voltage and frequency of the OPP.
+ *
+ * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power
+ * calculation failed because of missing parameters, 0 otherwise.
+ */
+static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
+					 int cpu)
+{
+	struct device *cpu_dev;
+	struct dev_pm_opp *opp;
+	struct device_node *np;
+	unsigned long mV, Hz;
+	u32 cap;
+	u64 tmp;
+	int ret;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return -ENODEV;
+
+	np = of_node_get(cpu_dev->of_node);
+	if (!np)
+		return -EINVAL;
+
+	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+	of_node_put(np);
+	if (ret)
+		return -EINVAL;
+
+	Hz = *kHz * 1000;
+	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+	if (IS_ERR(opp))
+		return -EINVAL;
+
+	mV = dev_pm_opp_get_voltage(opp) / 1000;
+	dev_pm_opp_put(opp);
+	if (!mV)
+		return -EINVAL;
+
+	tmp = (u64)cap * mV * mV * (Hz / 1000000);
+	do_div(tmp, 1000000000);
+
+	*mW = (unsigned long)tmp;
+	*kHz = Hz / 1000;
+
+	return 0;
+}
+
+/**
+ * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
+ * @cpus	: CPUs for which an Energy Model has to be registered
+ *
+ * This checks whether the "dynamic-power-coefficient" devicetree property has
+ * been specified, and tries to register an Energy Model with it if it has.
+ */
+void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+	struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power);
+	int ret, nr_opp, cpu = cpumask_first(cpus);
+	struct device *cpu_dev;
+	struct device_node *np;
+	u32 cap;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return;
+
+	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+	if (nr_opp <= 0)
+		return;
+
+	np = of_node_get(cpu_dev->of_node);
+	if (!np)
+		return;
+
+	/*
+	 * Register an EM only if the 'dynamic-power-coefficient' property is
+	 * set in devicetree. It is assumed the voltage values are known if that
+	 * property is set since it is useless otherwise. If voltages are not
+	 * known, just let the EM registration fail with an error to alert the
+	 * user about the inconsistent configuration.
+	 */
+	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+	of_node_put(np);
+	if (ret || !cap)
+		return;
+
+	em_register_perf_domain(cpus, nr_opp, &em_cb);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
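The fixed-point math above works because the DT "dynamic-power-coefficient" C is in uW/MHz/V^2, the voltage in mV and the frequency in Hz, so one division by 10^9 converts mV^2 to V^2 (10^6) and uW to mW (10^3). A self-contained illustration of the same arithmetic with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cap = 100;		/* C, uW/MHz/V^2 (hypothetical) */
	uint64_t mV = 1000;		/* 1.0 V */
	uint64_t Hz = 1000000000;	/* 1 GHz */
	uint64_t mW;

	mW = cap * mV * mV * (Hz / 1000000);	/* uW/MHz/V^2 * mV^2 * MHz */
	mW /= 1000000000;			/* scale down to mW */

	printf("estimated power: %llu mW\n", (unsigned long long)mW);
	/* prints: estimated power: 100 mW */
	return 0;
}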
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 4458175aa661..569b3525aa67 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -238,18 +238,17 @@ static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
 
 #ifdef CONFIG_DEBUG_FS
 void opp_debug_remove_one(struct dev_pm_opp *opp);
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
 void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
 #else
 static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
 
-static inline int opp_debug_create_one(struct dev_pm_opp *opp,
-				       struct opp_table *opp_table)
-{ return 0; }
-static inline int opp_debug_register(struct opp_device *opp_dev,
-				     struct opp_table *opp_table)
-{ return 0; }
+static inline void opp_debug_create_one(struct dev_pm_opp *opp,
+					struct opp_table *opp_table) { }
+
+static inline void opp_debug_register(struct opp_device *opp_dev,
+				      struct opp_table *opp_table) { }
 
 static inline void opp_debug_unregister(struct opp_device *opp_dev,
 					struct opp_table *opp_table)
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 6cdb2c14eee4..4347f15165f8 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1156,6 +1156,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
 	INTEL_CPU_FAM6(KABYLAKE_MOBILE,		rapl_defaults_core),
 	INTEL_CPU_FAM6(KABYLAKE_DESKTOP,	rapl_defaults_core),
 	INTEL_CPU_FAM6(CANNONLAKE_MOBILE,	rapl_defaults_core),
+	INTEL_CPU_FAM6(ICELAKE_MOBILE,		rapl_defaults_core),
 
 	INTEL_CPU_FAM6(ATOM_SILVERMONT,		rapl_defaults_byt),
 	INTEL_CPU_FAM6(ATOM_AIRMONT,		rapl_defaults_cht),
@@ -1164,6 +1165,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
 	INTEL_CPU_FAM6(ATOM_GOLDMONT,		rapl_defaults_core),
 	INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS,	rapl_defaults_core),
 	INTEL_CPU_FAM6(ATOM_GOLDMONT_X,		rapl_defaults_core),
+	INTEL_CPU_FAM6(ATOM_TREMONT_X,		rapl_defaults_core),
 
 	INTEL_CPU_FAM6(XEON_PHI_KNL,		rapl_defaults_hsw_server),
 	INTEL_CPU_FAM6(XEON_PHI_KNM,		rapl_defaults_hsw_server),
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 30323426902e..58bb7d72dc2b 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -152,6 +152,7 @@ config CPU_THERMAL
 	bool "generic cpu cooling support"
 	depends on CPU_FREQ
 	depends on THERMAL_OF
+	depends on THERMAL=y
 	help
 	  This implements the generic cpu cooling mechanism through frequency
 	  reduction. An ACPI version of this already exists
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 4f34734e7f36..ba6fd7202775 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -137,6 +137,7 @@ struct cppc_cpudata {
 	cpumask_var_t shared_cpu_map;
 };
 
+extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
 extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
 extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
 extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
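cppc_get_desired_perf() backs the Hisilicon CPPC quirk mentioned in the merge text: on parts with unreliable feedback counters, a frequency query can fall back to reading the desired-performance register. A hedged sketch of such a fallback; the name and structure are illustrative, not the exact driver code:

#include <acpi/cppc_acpi.h>

/*
 * Hypothetical ->get()-style fallback: report the last requested
 * performance level instead of the (unreliable) feedback counters.
 */
static u64 example_cppc_get_desired(int cpu)
{
	u64 desired_perf;

	if (cppc_get_desired_perf(cpu, &desired_perf))
		return 0;

	return desired_perf;
}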
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index c86d6d8bdfed..b160e98076e3 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -151,6 +151,9 @@ struct cpufreq_policy {
 
 	/* For cpufreq driver's internal use */
 	void			*driver_data;
+
+	/* Pointer to the cooling device if used for thermal mitigation */
+	struct thermal_cooling_device *cdev;
 };
 
 /* Only for ACPI */
@@ -254,20 +257,12 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 static struct freq_attr _name =			\
 __ATTR(_name, 0200, NULL, store_##_name)
 
-struct global_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct kobject *kobj,
-			struct attribute *attr, char *buf);
-	ssize_t (*store)(struct kobject *a, struct attribute *b,
-			 const char *c, size_t count);
-};
-
 #define define_one_global_ro(_name)		\
-static struct global_attr _name =		\
+static struct kobj_attribute _name =		\
 __ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_one_global_rw(_name)		\
-static struct global_attr _name =		\
+static struct kobj_attribute _name =		\
 __ATTR(_name, 0644, show_##_name, store_##_name)
 
 
@@ -330,6 +325,8 @@ struct cpufreq_driver {
 	/* optional */
 	int		(*bios_limit)(int cpu, unsigned int *limit);
 
+	int		(*online)(struct cpufreq_policy *policy);
+	int		(*offline)(struct cpufreq_policy *policy);
 	int		(*exit)(struct cpufreq_policy *policy);
 	void		(*stop_cpu)(struct cpufreq_policy *policy);
 	int		(*suspend)(struct cpufreq_policy *policy);
@@ -346,14 +343,15 @@ struct cpufreq_driver {
 };
 
 /* flags */
-#define CPUFREQ_STICKY		(1 << 0)	/* driver isn't removed even if
-						   all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS	(1 << 1)	/* loops_per_jiffy or other
-						   kernel "constants" aren't
-						   affected by frequency
-						   transitions */
-#define CPUFREQ_PM_NO_WARN	(1 << 2)	/* don't warn on suspend/resume
-						   speed mismatches */
+
+/* driver isn't removed even if all ->init() calls failed */
+#define CPUFREQ_STICKY				BIT(0)
+
+/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
+#define CPUFREQ_CONST_LOOPS			BIT(1)
+
+/* don't warn on suspend/resume speed mismatches */
+#define CPUFREQ_PM_NO_WARN			BIT(2)
 
 /*
  * This should be set by platforms having multiple clock-domains, i.e.
@@ -361,14 +359,14 @@ struct cpufreq_driver {
 * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
 * governor with different tunables for different clusters.
 */
-#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY	BIT(3)
 
 /*
  * Driver will do POSTCHANGE notifications from outside of their ->target()
  * routine and so must set cpufreq_driver->flags with this flag, so that core
  * can handle them specially.
  */
-#define CPUFREQ_ASYNC_NOTIFICATION  (1 << 4)
+#define CPUFREQ_ASYNC_NOTIFICATION		BIT(4)
 
 /*
  * Set by drivers which want cpufreq core to check if CPU is running at a
@@ -377,13 +375,19 @@ struct cpufreq_driver {
 * from the table. And if that fails, we will stop further boot process by
 * issuing a BUG_ON().
 */
-#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5)
+#define CPUFREQ_NEED_INITIAL_FREQ_CHECK	BIT(5)
 
 /*
  * Set by drivers to disallow use of governors with "dynamic_switching" flag
  * set.
  */
-#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING (1 << 6)
+#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING	BIT(6)
+
+/*
+ * Set by drivers that want the core to automatically register the cpufreq
+ * driver as a thermal cooling device.
+ */
+#define CPUFREQ_IS_COOLING_DEV			BIT(7)
 
 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
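Together, the new ->online()/->offline() callbacks and CPUFREQ_IS_COOLING_DEV let a driver opt in to light-weight CPU bring-up/tear-down and to automatic cooling-device registration (the core then manages policy->cdev). A minimal sketch of a driver opting in; the names and empty callbacks are placeholders, not a real driver:

#include <linux/cpufreq.h>

static int example_online(struct cpufreq_policy *policy)
{
	/* Light-weight bring-up: resources from ->init() still exist */
	return 0;
}

static int example_offline(struct cpufreq_policy *policy)
{
	/* Light-weight tear-down: keep resources for a later ->online() */
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.flags		= CPUFREQ_STICKY | CPUFREQ_IS_COOLING_DEV,
	.online		= example_online,
	.offline	= example_offline,
	/* .init, .exit, .target_index, ... as usual */
};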
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 4dff74f48d4b..3b39472324a3 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -69,11 +69,9 @@ struct cpuidle_state {
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_NONE       (0x00)
-#define CPUIDLE_FLAG_POLLING	(0x01) /* polling state */
-#define CPUIDLE_FLAG_COUPLED	(0x02) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP (0x04)  /* timer is stopped on this state */
-
-#define CPUIDLE_DRIVER_FLAGS_MASK	(0xFFFF0000)
+#define CPUIDLE_FLAG_POLLING	BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED	BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
 
 struct cpuidle_device_kobj;
 struct cpuidle_state_kobj;
diff --git a/include/linux/device.h b/include/linux/device.h
index 4d2f13e8c540..d88d2362e8c3 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1165,6 +1165,16 @@ static inline bool device_async_suspend_enabled(struct device *dev)
 	return !!dev->power.async_suspend;
 }
 
+static inline bool device_pm_not_required(struct device *dev)
+{
+	return dev->power.no_pm;
+}
+
+static inline void device_set_pm_not_required(struct device *dev)
+{
+	dev->power.no_pm = true;
+}
+
 static inline void dev_pm_syscore_device(struct device *dev, bool val)
 {
 #ifdef CONFIG_PM_SLEEP
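device_set_pm_not_required() must be called before the device is registered, so the PM core can skip the device entirely instead of walking empty suspend/resume callpaths. A hedged sketch with a hypothetical registration helper:

#include <linux/device.h>

/* Hypothetical registration of a device that needs no PM handling */
static int example_register(struct device *dev)
{
	device_initialize(dev);

	/* Must be set before device_add(): tells the PM core to skip us */
	device_set_pm_not_required(dev);

	return device_add(dev);
}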
diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h
new file mode 100644
index 000000000000..3fbf9f2793b5
--- /dev/null
+++ b/include/linux/platform_data/davinci-cpufreq.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI DaVinci CPUFreq platform support.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
+ */
+
+#ifndef _MACH_DAVINCI_CPUFREQ_H
+#define _MACH_DAVINCI_CPUFREQ_H
+
+#include <linux/cpufreq.h>
+
+struct davinci_cpufreq_config {
+	struct cpufreq_frequency_table *freq_table;
+	int (*set_voltage)(unsigned int index);
+	int (*init)(void);
+};
+
+#endif /* _MACH_DAVINCI_CPUFREQ_H */
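This header moves the config struct out of mach-davinci so the cpufreq driver can build without machine headers; board code fills it in and hands it over as platform data. A hedged sketch of such board code, with made-up table values:

#include <linux/platform_data/davinci-cpufreq.h>

/* Frequencies in kHz; values are hypothetical */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .driver_data = 0, .frequency = 300000 },
	{ .driver_data = 1, .frequency = 456000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static struct davinci_cpufreq_config example_cpufreq_config = {
	.freq_table = example_freq_table,
	/* .set_voltage / .init are optional board callbacks */
};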
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 0bd9de116826..06f7ed893928 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -592,6 +592,7 @@ struct dev_pm_info {
 	bool			is_suspended:1;	/* Ditto */
 	bool			is_noirq_suspended:1;
 	bool			is_late_suspended:1;
+	bool			no_pm:1;
 	bool			early_init:1;	/* Owned by the PM core */
 	bool			direct_complete:1;	/* Owned by the PM core */
 	u32			driver_flags;
@@ -633,9 +634,9 @@ struct dev_pm_info {
 	int			runtime_error;
 	int			autosuspend_delay;
 	u64			last_busy;
-	unsigned long		active_jiffies;
-	unsigned long		suspended_jiffies;
-	unsigned long		accounting_timestamp;
+	u64			active_time;
+	u64			suspended_time;
+	u64			accounting_timestamp;
 #endif
 	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
 	void (*set_latency_tolerance)(struct device *, s32);
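Switching the accounting fields to u64 nanoseconds removes the HZ dependency and the wrap-around issues of jiffies counters. A sketch of how an update step might look under this scheme; the real PM-runtime code differs in detail:

#include <linux/pm.h>
#include <linux/timekeeping.h>

/* Hypothetical ktime-based accounting step */
static void example_update_accounting(struct dev_pm_info *power, bool suspended)
{
	u64 now = ktime_get_mono_fast_ns();
	u64 delta = now - power->accounting_timestamp;

	power->accounting_timestamp = now;

	if (suspended)
		power->suspended_time += delta;
	else
		power->active_time += delta;
}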
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index dd364abb649a..1ed5874bcee0 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -271,7 +271,7 @@ int genpd_dev_pm_attach(struct device *dev);
 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index);
 struct device *genpd_dev_pm_attach_by_name(struct device *dev,
-					   char *name);
+					   const char *name);
 #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
 static inline int of_genpd_add_provider_simple(struct device_node *np,
					       struct generic_pm_domain *genpd)
@@ -324,7 +324,7 @@ static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
 }
 
 static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
-							  char *name)
+							  const char *name)
 {
 	return NULL;
 }
@@ -341,7 +341,7 @@ int dev_pm_domain_attach(struct device *dev, bool power_on);
 struct device *dev_pm_domain_attach_by_id(struct device *dev,
					  unsigned int index);
 struct device *dev_pm_domain_attach_by_name(struct device *dev,
-					    char *name);
+					    const char *name);
 void dev_pm_domain_detach(struct device *dev, bool power_off);
 void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
 #else
@@ -355,7 +355,7 @@ static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
 	return NULL;
 }
 static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
-							   char *name)
+							   const char *name)
 {
 	return NULL;
 }
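Constifying the name parameter lets callers pass string literals without casts. A hedged sketch of a caller; the "perf" domain name is illustrative:

#include <linux/err.h>
#include <linux/pm_domain.h>

static int example_attach(struct device *dev)
{
	struct device *virt_dev;

	/* A string literal is now fine: the parameter is const char * */
	virt_dev = dev_pm_domain_attach_by_name(dev, "perf");
	if (IS_ERR(virt_dev))
		return PTR_ERR(virt_dev);
	if (!virt_dev)
		return -ENODEV;

	return 0;
}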
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 900359342965..24c757a32a7b 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -334,6 +334,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
 struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
 struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
 int of_get_required_opp_performance_state(struct device_node *np, int index);
+void dev_pm_opp_of_register_em(struct cpumask *cpus);
 #else
 static inline int dev_pm_opp_of_add_table(struct device *dev)
 {
@@ -372,6 +373,11 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
 {
 	return NULL;
 }
+
+static inline void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+}
+
 static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
 {
 	return -ENOTSUPP;
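A cpufreq driver typically calls dev_pm_opp_of_register_em() from its ->init() callback once the OPP table is in place; the empty stub above makes the call safe even without OF support. A minimal sketch under those assumptions:

#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* ... OPP table and policy->freq_table set up first ... */

	/*
	 * Nothing to check: registration is silently skipped when the
	 * "dynamic-power-coefficient" DT property is absent.
	 */
	dev_pm_opp_of_register_em(policy->cpus);

	return 0;
}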
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index fed5be706bc9..9dc6eebf62d2 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -113,6 +113,8 @@ static inline bool pm_runtime_is_irq_safe(struct device *dev)
 	return dev->power.irq_safe;
 }
 
+extern u64 pm_runtime_suspended_time(struct device *dev);
+
 #else /* !CONFIG_PM */
 
 static inline bool queue_pm_work(struct work_struct *work) { return false; }
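pm_runtime_suspended_time() exposes the new nanosecond counter to other subsystems. The value is cumulative, so a consumer diffs two readings to learn how long the device spent suspended in between; a hedged sketch of that pattern:

#include <linux/pm_runtime.h>

/* Hypothetical consumer: delta of suspended time between two readings */
static u64 example_suspended_delta(struct device *dev, u64 *last_ns)
{
	u64 now = pm_runtime_suspended_time(dev);
	u64 delta = now - *last_ns;

	*last_ns = now;
	return delta;
}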
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index d9dc2c38764a..7d66ee68aaaf 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -10,6 +10,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/debugfs.h>
 #include <linux/energy_model.h>
 #include <linux/sched/topology.h>
 #include <linux/slab.h>
@@ -23,6 +24,60 @@ static DEFINE_PER_CPU(struct em_perf_domain *, em_data);
  */
 static DEFINE_MUTEX(em_pd_mutex);
 
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *rootdir;
+
+static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd)
+{
+	struct dentry *d;
+	char name[24];
+
+	snprintf(name, sizeof(name), "cs:%lu", cs->frequency);
+
+	/* Create per-cs directory */
+	d = debugfs_create_dir(name, pd);
+	debugfs_create_ulong("frequency", 0444, d, &cs->frequency);
+	debugfs_create_ulong("power", 0444, d, &cs->power);
+	debugfs_create_ulong("cost", 0444, d, &cs->cost);
+}
+
+static int em_debug_cpus_show(struct seq_file *s, void *unused)
+{
+	seq_printf(s, "%*pbl\n", cpumask_pr_args(to_cpumask(s->private)));
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
+
+static void em_debug_create_pd(struct em_perf_domain *pd, int cpu)
+{
+	struct dentry *d;
+	char name[8];
+	int i;
+
+	snprintf(name, sizeof(name), "pd%d", cpu);
+
+	/* Create the directory of the performance domain */
+	d = debugfs_create_dir(name, rootdir);
+
+	debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops);
+
+	/* Create a sub-directory for each capacity state */
+	for (i = 0; i < pd->nr_cap_states; i++)
+		em_debug_create_cs(&pd->table[i], d);
+}
+
+static int __init em_debug_init(void)
+{
+	/* Create /sys/kernel/debug/energy_model directory */
+	rootdir = debugfs_create_dir("energy_model", NULL);
+
+	return 0;
+}
+core_initcall(em_debug_init);
+#else /* CONFIG_DEBUG_FS */
+static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
+#endif
 static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
 					   struct em_data_callback *cb)
 {
@@ -102,6 +157,8 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
 	pd->nr_cap_states = nr_states;
 	cpumask_copy(to_cpumask(pd->cpus), span);
 
+	em_debug_create_pd(pd, cpu);
+
 	return pd;
 
 free_cs_table:
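With the hunks above, each performance domain shows up under /sys/kernel/debug/energy_model as pd<cpu>/ with a "cpus" mask and one cs:<frequency>/ directory per capacity state. DEFINE_SHOW_ATTRIBUTE() is the stock seq_file helper used for the "cpus" file; a minimal sketch of the same pattern outside this file, with hypothetical names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "hello from debugfs\n");
	return 0;
}
/* Expands to example_open() plus example_fops wired to example_show() */
DEFINE_SHOW_ATTRIBUTE(example);

static void example_debugfs_setup(void)
{
	debugfs_create_file("example", 0444, NULL, NULL, &example_fops);
}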
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index b7a82502857a..9d22131afc1e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -582,10 +582,8 @@ static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
 	qos->pm_qos_power_miscdev.name = qos->name;
 	qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
 
-	if (d) {
-		(void)debugfs_create_file(qos->name, S_IRUGO, d,
-					  (void *)qos, &pm_qos_debug_fops);
-	}
+	debugfs_create_file(qos->name, S_IRUGO, d, (void *)qos,
+			    &pm_qos_debug_fops);
 
 	return misc_register(&qos->pm_qos_power_miscdev);
 }
@@ -685,8 +683,6 @@ static int __init pm_qos_power_init(void)
 	BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
 
 	d = debugfs_create_dir("pm_qos", NULL);
-	if (IS_ERR_OR_NULL(d))
-		d = NULL;
 
 	for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
 		ret = register_pm_qos_misc(pm_qos_array[i], d);