author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2019-09-17 03:44:29 -0400
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2019-09-17 03:44:29 -0400
commit     ca61a72ac371b5a12fafd36248b93431c6694c3c (patch)
tree       efcaab34d3bffc19acc3dcf4d427d2b987e782b0
parent     2cdd5cc7032636d5f17822c6ba30ac08bfd2cb6d (diff)
parent     1c5c1b5d8efe21efa74b7a21e8c078711b984ae4 (diff)
Merge branch 'pm-cpufreq'
* pm-cpufreq: (36 commits)
  cpufreq: Add qcs404 to cpufreq-dt-platdev blacklist
  cpufreq: qcom: Add support for qcs404 on nvmem driver
  cpufreq: qcom: Refactor the driver to make it easier to extend
  cpufreq: qcom: Re-organise kryo cpufreq to use it for other nvmem based qcom socs
  dt-bindings: opp: Add qcom-opp bindings with properties needed for CPR
  dt-bindings: opp: qcom-nvmem: Support pstates provided by a power domain
  Documentation: cpufreq: Update policy notifier documentation
  cpufreq: Remove CPUFREQ_ADJUST and CPUFREQ_NOTIFY policy notifier events
  sched/cpufreq: Align trace event behavior of fast switching
  ACPI: cpufreq: Switch to QoS requests instead of cpufreq notifier
  video: pxafb: Remove cpufreq policy notifier
  video: sa1100fb: Remove cpufreq policy notifier
  arch_topology: Use CPUFREQ_CREATE_POLICY instead of CPUFREQ_NOTIFY
  cpufreq: powerpc_cbe: Switch to QoS requests for freq limits
  cpufreq: powerpc: macintosh: Switch to QoS requests for freq limits
  cpufreq: Print driver name if cpufreq_suspend() fails
  cpufreq: mediatek: Add support for mt8183
  cpufreq: mediatek: change to regulator_get_optional
  cpufreq: imx-cpufreq-dt: Add i.MX8MN support
  cpufreq: Use imx-cpufreq-dt for i.MX8MN's speed grading
  ...
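
A recurring theme in the commits above is replacing CPUFREQ_ADJUST-based policy clamping with device PM QoS frequency requests. As orientation for the diffs that follow, here is a minimal sketch of that pattern; the function and identifiers (my_cap_cpu, my_req, my_max_khz) are hypothetical and not taken from this merge:

/*
 * Hypothetical sketch only: cap a CPU's maximum frequency with a device
 * PM QoS request instead of clamping the policy from a CPUFREQ_ADJUST
 * notifier.  my_req and my_max_khz are illustrative names.
 */
#include <linux/cpu.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request my_req;

static int my_cap_cpu(int cpu, s32 my_max_khz)
{
	int ret;

	/* Register the constraint once, e.g. at driver probe time. */
	ret = dev_pm_qos_add_request(get_cpu_device(cpu), &my_req,
				     DEV_PM_QOS_MAX_FREQUENCY, my_max_khz);
	if (ret < 0)
		return ret;

	/* Tighten or relax the cap later as conditions change. */
	ret = dev_pm_qos_update_request(&my_req, my_max_khz);

	return ret < 0 ? ret : 0;
}
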
-rw-r--r--  Documentation/cpu-freq/core.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/opp/qcom-nvmem-cpufreq.txt (renamed from Documentation/devicetree/bindings/opp/kryo-cpufreq.txt) | 127
-rw-r--r--  Documentation/devicetree/bindings/opp/qcom-opp.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt | 167
-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  drivers/acpi/processor_driver.c | 39
-rw-r--r--  drivers/acpi/processor_perflib.c | 100
-rw-r--r--  drivers/acpi/processor_thermal.c | 84
-rw-r--r--  drivers/base/arch_topology.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 16
-rw-r--r--  drivers/cpufreq/Makefile | 3
-rw-r--r--  drivers/cpufreq/armada-8k-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 5
-rw-r--r--  drivers/cpufreq/cpufreq.c | 57
-rw-r--r--  drivers/cpufreq/imx-cpufreq-dt.c | 8
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 120
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.c | 19
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.h | 8
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 96
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 23
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-kryo.c | 249
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c | 352
-rw-r--r--  drivers/cpufreq/sun50i-cpufreq-nvmem.c | 226
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c | 1
-rw-r--r--  drivers/macintosh/windfarm_cpufreq_clamp.c | 77
-rw-r--r--  drivers/opp/core.c | 68
-rw-r--r--  drivers/thermal/cpu_cooling.c | 110
-rw-r--r--  drivers/video/fbdev/pxafb.c | 21
-rw-r--r--  drivers/video/fbdev/pxafb.h | 1
-rw-r--r--  drivers/video/fbdev/sa1100fb.c | 27
-rw-r--r--  drivers/video/fbdev/sa1100fb.h | 1
-rw-r--r--  include/acpi/processor.h | 26
-rw-r--r--  include/linux/cpufreq.h | 4
-rw-r--r--  include/linux/pm_opp.h | 12
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 7
36 files changed, 1458 insertions(+), 650 deletions(-)
diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt
index 55193e680250..ed577d9c154b 100644
--- a/Documentation/cpu-freq/core.txt
+++ b/Documentation/cpu-freq/core.txt
@@ -57,19 +57,11 @@ transition notifiers.
 2.1 CPUFreq policy notifiers
 ----------------------------
 
-These are notified when a new policy is intended to be set. Each
-CPUFreq policy notifier is called twice for a policy transition:
+These are notified when a new policy is created or removed.
 
-1.) During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if
-    they see a need for this - may it be thermal considerations or
-    hardware limitations.
-
-2.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy
-    - if two hardware drivers failed to agree on a new policy before this
-    stage, the incompatible hardware shall be shut down, and the user
-    informed of this.
-
-The phase is specified in the second argument to the notifier.
+The phase is specified in the second argument to the notifier. The phase is
+CPUFREQ_CREATE_POLICY when the policy is first created and it is
+CPUFREQ_REMOVE_POLICY when the policy is removed.
 
 The third argument, a void *pointer, points to a struct cpufreq_policy
 consisting of several values, including min, max (the lower and upper
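
To make the reworded documentation above concrete, a minimal, hypothetical policy notifier under the new semantics could look like the sketch below; my_policy_notifier and my_nb are illustrative names, not code from this merge:

/*
 * Hypothetical sketch only: a policy notifier under the new semantics,
 * where CPUFREQ_CREATE_POLICY and CPUFREQ_REMOVE_POLICY are the only
 * events delivered to CPUFREQ_POLICY_NOTIFIER callbacks.
 */
#include <linux/cpufreq.h>
#include <linux/printk.h>

static int my_policy_notifier(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_CREATE_POLICY)
		pr_info("cpufreq policy created for CPU%u\n", policy->cpu);
	else if (event == CPUFREQ_REMOVE_POLICY)
		pr_info("cpufreq policy removed for CPU%u\n", policy->cpu);

	return 0;
}

static struct notifier_block my_nb = {
	.notifier_call = my_policy_notifier,
};

/* Registration, e.g. from an __init path:
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_POLICY_NOTIFIER);
 */
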
diff --git a/Documentation/devicetree/bindings/opp/kryo-cpufreq.txt b/Documentation/devicetree/bindings/opp/qcom-nvmem-cpufreq.txt
index c2127b96805a..4751029b9b74 100644
--- a/Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
+++ b/Documentation/devicetree/bindings/opp/qcom-nvmem-cpufreq.txt
@@ -1,25 +1,38 @@
-Qualcomm Technologies, Inc. KRYO CPUFreq and OPP bindings
+Qualcomm Technologies, Inc. NVMEM CPUFreq and OPP bindings
 ===================================
 
-In Certain Qualcomm Technologies, Inc. SoCs like apq8096 and msm8996
-that have KRYO processors, the CPU ferequencies subset and voltage value
-of each OPP varies based on the silicon variant in use.
+In Certain Qualcomm Technologies, Inc. SoCs like apq8096 and msm8996,
+the CPU frequencies subset and voltage value of each OPP varies based on
+the silicon variant in use.
 Qualcomm Technologies, Inc. Process Voltage Scaling Tables
 defines the voltage and frequency value based on the msm-id in SMEM
 and speedbin blown in the efuse combination.
-The qcom-cpufreq-kryo driver reads the msm-id and efuse value from the SoC
+The qcom-cpufreq-nvmem driver reads the msm-id and efuse value from the SoC
 to provide the OPP framework with required information (existing HW bitmap).
 This is used to determine the voltage and frequency value for each OPP of
 operating-points-v2 table when it is parsed by the OPP framework.
 
 Required properties:
 --------------------
-In 'cpus' nodes:
+In 'cpu' nodes:
 - operating-points-v2: Phandle to the operating-points-v2 table to use.
 
 In 'operating-points-v2' table:
 - compatible: Should be
 	- 'operating-points-v2-kryo-cpu' for apq8096 and msm8996.
+
+Optional properties:
+--------------------
+In 'cpu' nodes:
+- power-domains: A phandle pointing to the PM domain specifier which provides
+		the performance states available for active state management.
+		Please refer to the power-domains bindings
+		Documentation/devicetree/bindings/power/power_domain.txt
+		and also examples below.
+- power-domain-names: Should be
+		- 'cpr' for qcs404.
+
+In 'operating-points-v2' table:
 - nvmem-cells: A phandle pointing to a nvmem-cells node representing the
 		efuse registers that has information about the
 		speedbin that is used to select the right frequency/voltage
@@ -678,3 +691,105 @@ soc {
678 }; 691 };
679 }; 692 };
680}; 693};
694
695Example 2:
696---------
697
698 cpus {
699 #address-cells = <1>;
700 #size-cells = <0>;
701
702 CPU0: cpu@100 {
703 device_type = "cpu";
704 compatible = "arm,cortex-a53";
705 reg = <0x100>;
706 ....
707 clocks = <&apcs_glb>;
708 operating-points-v2 = <&cpu_opp_table>;
709 power-domains = <&cpr>;
710 power-domain-names = "cpr";
711 };
712
713 CPU1: cpu@101 {
714 device_type = "cpu";
715 compatible = "arm,cortex-a53";
716 reg = <0x101>;
717 ....
718 clocks = <&apcs_glb>;
719 operating-points-v2 = <&cpu_opp_table>;
720 power-domains = <&cpr>;
721 power-domain-names = "cpr";
722 };
723
724 CPU2: cpu@102 {
725 device_type = "cpu";
726 compatible = "arm,cortex-a53";
727 reg = <0x102>;
728 ....
729 clocks = <&apcs_glb>;
730 operating-points-v2 = <&cpu_opp_table>;
731 power-domains = <&cpr>;
732 power-domain-names = "cpr";
733 };
734
735 CPU3: cpu@103 {
736 device_type = "cpu";
737 compatible = "arm,cortex-a53";
738 reg = <0x103>;
739 ....
740 clocks = <&apcs_glb>;
741 operating-points-v2 = <&cpu_opp_table>;
742 power-domains = <&cpr>;
743 power-domain-names = "cpr";
744 };
745 };
746
747 cpu_opp_table: cpu-opp-table {
748 compatible = "operating-points-v2-kryo-cpu";
749 opp-shared;
750
751 opp-1094400000 {
752 opp-hz = /bits/ 64 <1094400000>;
753 required-opps = <&cpr_opp1>;
754 };
755 opp-1248000000 {
756 opp-hz = /bits/ 64 <1248000000>;
757 required-opps = <&cpr_opp2>;
758 };
759 opp-1401600000 {
760 opp-hz = /bits/ 64 <1401600000>;
761 required-opps = <&cpr_opp3>;
762 };
763 };
764
765 cpr_opp_table: cpr-opp-table {
766 compatible = "operating-points-v2-qcom-level";
767
768 cpr_opp1: opp1 {
769 opp-level = <1>;
770 qcom,opp-fuse-level = <1>;
771 };
772 cpr_opp2: opp2 {
773 opp-level = <2>;
774 qcom,opp-fuse-level = <2>;
775 };
776 cpr_opp3: opp3 {
777 opp-level = <3>;
778 qcom,opp-fuse-level = <3>;
779 };
780 };
781
782....
783
784soc {
785....
786 cpr: power-controller@b018000 {
787 compatible = "qcom,qcs404-cpr", "qcom,cpr";
788 reg = <0x0b018000 0x1000>;
789 ....
790 vdd-apc-supply = <&pms405_s3>;
791 #power-domain-cells = <0>;
792 operating-points-v2 = <&cpr_opp_table>;
793 ....
794 };
795};
diff --git a/Documentation/devicetree/bindings/opp/qcom-opp.txt b/Documentation/devicetree/bindings/opp/qcom-opp.txt
new file mode 100644
index 000000000000..32eb0793c7e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/qcom-opp.txt
@@ -0,0 +1,19 @@
+Qualcomm OPP bindings to describe OPP nodes
+
+The bindings are based on top of the operating-points-v2 bindings
+described in Documentation/devicetree/bindings/opp/opp.txt
+Additional properties are described below.
+
+* OPP Table Node
+
+Required properties:
+- compatible: Allow OPPs to express their compatibility. It should be:
+  "operating-points-v2-qcom-level"
+
+* OPP Node
+
+Required properties:
+- qcom,opp-fuse-level: A positive value representing the fuse corner/level
+  associated with this OPP node. Sometimes several corners/levels shares
+  a certain fuse corner/level. A fuse corner/level contains e.g. ref uV,
+  min uV, and max uV.
diff --git a/Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt b/Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
new file mode 100644
index 000000000000..7deae57a587b
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
@@ -0,0 +1,167 @@
1Allwinner Technologies, Inc. NVMEM CPUFreq and OPP bindings
2===================================
3
4For some SoCs, the CPU frequency subset and voltage value of each OPP
5varies based on the silicon variant in use. Allwinner Process Voltage
6Scaling Tables defines the voltage and frequency value based on the
7speedbin blown in the efuse combination. The sun50i-cpufreq-nvmem driver
8reads the efuse value from the SoC to provide the OPP framework with
9required information.
10
11Required properties:
12--------------------
13In 'cpus' nodes:
14- operating-points-v2: Phandle to the operating-points-v2 table to use.
15
16In 'operating-points-v2' table:
17- compatible: Should be
18 - 'allwinner,sun50i-h6-operating-points'.
19- nvmem-cells: A phandle pointing to a nvmem-cells node representing the
20 efuse registers that has information about the speedbin
21 that is used to select the right frequency/voltage value
22 pair. Please refer the for nvmem-cells bindings
23 Documentation/devicetree/bindings/nvmem/nvmem.txt and
24 also examples below.
25
26In every OPP node:
27- opp-microvolt-<name>: Voltage in micro Volts.
28 At runtime, the platform can pick a <name> and
29 matching opp-microvolt-<name> property.
30 [See: opp.txt]
31 HW: <name>:
32 sun50i-h6 speed0 speed1 speed2
33
34Example 1:
35---------
36
37 cpus {
38 #address-cells = <1>;
39 #size-cells = <0>;
40
41 cpu0: cpu@0 {
42 compatible = "arm,cortex-a53";
43 device_type = "cpu";
44 reg = <0>;
45 enable-method = "psci";
46 clocks = <&ccu CLK_CPUX>;
47 clock-latency-ns = <244144>; /* 8 32k periods */
48 operating-points-v2 = <&cpu_opp_table>;
49 #cooling-cells = <2>;
50 };
51
52 cpu1: cpu@1 {
53 compatible = "arm,cortex-a53";
54 device_type = "cpu";
55 reg = <1>;
56 enable-method = "psci";
57 clocks = <&ccu CLK_CPUX>;
58 clock-latency-ns = <244144>; /* 8 32k periods */
59 operating-points-v2 = <&cpu_opp_table>;
60 #cooling-cells = <2>;
61 };
62
63 cpu2: cpu@2 {
64 compatible = "arm,cortex-a53";
65 device_type = "cpu";
66 reg = <2>;
67 enable-method = "psci";
68 clocks = <&ccu CLK_CPUX>;
69 clock-latency-ns = <244144>; /* 8 32k periods */
70 operating-points-v2 = <&cpu_opp_table>;
71 #cooling-cells = <2>;
72 };
73
74 cpu3: cpu@3 {
75 compatible = "arm,cortex-a53";
76 device_type = "cpu";
77 reg = <3>;
78 enable-method = "psci";
79 clocks = <&ccu CLK_CPUX>;
80 clock-latency-ns = <244144>; /* 8 32k periods */
81 operating-points-v2 = <&cpu_opp_table>;
82 #cooling-cells = <2>;
83 };
84 };
85
86 cpu_opp_table: opp_table {
87 compatible = "allwinner,sun50i-h6-operating-points";
88 nvmem-cells = <&speedbin_efuse>;
89 opp-shared;
90
91 opp@480000000 {
92 clock-latency-ns = <244144>; /* 8 32k periods */
93 opp-hz = /bits/ 64 <480000000>;
94
95 opp-microvolt-speed0 = <880000>;
96 opp-microvolt-speed1 = <820000>;
97 opp-microvolt-speed2 = <800000>;
98 };
99
100 opp@720000000 {
101 clock-latency-ns = <244144>; /* 8 32k periods */
102 opp-hz = /bits/ 64 <720000000>;
103
104 opp-microvolt-speed0 = <880000>;
105 opp-microvolt-speed1 = <820000>;
106 opp-microvolt-speed2 = <800000>;
107 };
108
109 opp@816000000 {
110 clock-latency-ns = <244144>; /* 8 32k periods */
111 opp-hz = /bits/ 64 <816000000>;
112
113 opp-microvolt-speed0 = <880000>;
114 opp-microvolt-speed1 = <820000>;
115 opp-microvolt-speed2 = <800000>;
116 };
117
118 opp@888000000 {
119 clock-latency-ns = <244144>; /* 8 32k periods */
120 opp-hz = /bits/ 64 <888000000>;
121
122 opp-microvolt-speed0 = <940000>;
123 opp-microvolt-speed1 = <820000>;
124 opp-microvolt-speed2 = <800000>;
125 };
126
127 opp@1080000000 {
128 clock-latency-ns = <244144>; /* 8 32k periods */
129 opp-hz = /bits/ 64 <1080000000>;
130
131 opp-microvolt-speed0 = <1060000>;
132 opp-microvolt-speed1 = <880000>;
133 opp-microvolt-speed2 = <840000>;
134 };
135
136 opp@1320000000 {
137 clock-latency-ns = <244144>; /* 8 32k periods */
138 opp-hz = /bits/ 64 <1320000000>;
139
140 opp-microvolt-speed0 = <1160000>;
141 opp-microvolt-speed1 = <940000>;
142 opp-microvolt-speed2 = <900000>;
143 };
144
145 opp@1488000000 {
146 clock-latency-ns = <244144>; /* 8 32k periods */
147 opp-hz = /bits/ 64 <1488000000>;
148
149 opp-microvolt-speed0 = <1160000>;
150 opp-microvolt-speed1 = <1000000>;
151 opp-microvolt-speed2 = <960000>;
152 };
153 };
154....
155soc {
156....
157 sid: sid@3006000 {
158 compatible = "allwinner,sun50i-h6-sid";
159 reg = <0x03006000 0x400>;
160 #address-cells = <1>;
161 #size-cells = <1>;
162 ....
163 speedbin_efuse: speed@1c {
164 reg = <0x1c 4>;
165 };
166 };
167};
diff --git a/MAINTAINERS b/MAINTAINERS
index e7a47b5210fd..161c4dc9efd9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -676,6 +676,13 @@ L: linux-media@vger.kernel.org
 S:	Maintained
 F:	drivers/staging/media/allegro-dvt/
 
+ALLWINNER CPUFREQ DRIVER
+M:	Yangtao Li <tiny.windzz@gmail.com>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
+F:	drivers/cpufreq/sun50i-cpufreq-nvmem.c
+
 ALLWINNER SECURITY SYSTEM
 M:	Corentin Labbe <clabbe.montjoie@gmail.com>
 L:	linux-crypto@vger.kernel.org
@@ -13308,8 +13315,8 @@ QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
 M:	Ilia Lin <ilia.lin@kernel.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
-F:	Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
-F:	drivers/cpufreq/qcom-cpufreq-kryo.c
+F:	Documentation/devicetree/bindings/opp/qcom-nvmem-cpufreq.txt
+F:	drivers/cpufreq/qcom-cpufreq-nvmem.c
 
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
 M:	Timur Tabi <timur@kernel.org>
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index aea8d674a33d..08da9c29f1e9 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -284,6 +284,29 @@ static int acpi_processor_stop(struct device *dev)
 	return 0;
 }
 
+bool acpi_processor_cpufreq_init;
+
+static int acpi_processor_notifier(struct notifier_block *nb,
+				   unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
+	int cpu = policy->cpu;
+
+	if (event == CPUFREQ_CREATE_POLICY) {
+		acpi_thermal_cpufreq_init(cpu);
+		acpi_processor_ppc_init(cpu);
+	} else if (event == CPUFREQ_REMOVE_POLICY) {
+		acpi_processor_ppc_exit(cpu);
+		acpi_thermal_cpufreq_exit(cpu);
+	}
+
+	return 0;
+}
+
+static struct notifier_block acpi_processor_notifier_block = {
+	.notifier_call = acpi_processor_notifier,
+};
+
 /*
  * We keep the driver loaded even when ACPI is not running.
  * This is needed for the powernow-k8 driver, that works even without
@@ -310,8 +333,12 @@ static int __init acpi_processor_driver_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
 				  NULL, acpi_soft_cpu_dead);
 
-	acpi_thermal_cpufreq_init();
-	acpi_processor_ppc_init();
+	if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
+				       CPUFREQ_POLICY_NOTIFIER)) {
+		acpi_processor_cpufreq_init = true;
+		acpi_processor_ignore_ppc_init();
+	}
+
 	acpi_processor_throttling_init();
 	return 0;
 err:
@@ -324,8 +351,12 @@ static void __exit acpi_processor_driver_exit(void)
 	if (acpi_disabled)
 		return;
 
-	acpi_processor_ppc_exit();
-	acpi_thermal_cpufreq_exit();
+	if (acpi_processor_cpufreq_init) {
+		cpufreq_unregister_notifier(&acpi_processor_notifier_block,
+					    CPUFREQ_POLICY_NOTIFIER);
+		acpi_processor_cpufreq_init = false;
+	}
+
 	cpuhp_remove_state_nocalls(hp_online);
 	cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
 	driver_unregister(&acpi_processor_driver);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index ee87cb6f6e59..2261713d1aec 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -50,57 +50,13 @@ module_param(ignore_ppc, int, 0644);
50MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \ 50MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
51 "limited by BIOS, this should help"); 51 "limited by BIOS, this should help");
52 52
53#define PPC_REGISTERED 1 53static bool acpi_processor_ppc_in_use;
54#define PPC_IN_USE 2
55
56static int acpi_processor_ppc_status;
57
58static int acpi_processor_ppc_notifier(struct notifier_block *nb,
59 unsigned long event, void *data)
60{
61 struct cpufreq_policy *policy = data;
62 struct acpi_processor *pr;
63 unsigned int ppc = 0;
64
65 if (ignore_ppc < 0)
66 ignore_ppc = 0;
67
68 if (ignore_ppc)
69 return 0;
70
71 if (event != CPUFREQ_ADJUST)
72 return 0;
73
74 mutex_lock(&performance_mutex);
75
76 pr = per_cpu(processors, policy->cpu);
77 if (!pr || !pr->performance)
78 goto out;
79
80 ppc = (unsigned int)pr->performance_platform_limit;
81
82 if (ppc >= pr->performance->state_count)
83 goto out;
84
85 cpufreq_verify_within_limits(policy, 0,
86 pr->performance->states[ppc].
87 core_frequency * 1000);
88
89 out:
90 mutex_unlock(&performance_mutex);
91
92 return 0;
93}
94
95static struct notifier_block acpi_ppc_notifier_block = {
96 .notifier_call = acpi_processor_ppc_notifier,
97};
98 54
99static int acpi_processor_get_platform_limit(struct acpi_processor *pr) 55static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
100{ 56{
101 acpi_status status = 0; 57 acpi_status status = 0;
102 unsigned long long ppc = 0; 58 unsigned long long ppc = 0;
103 59 int ret;
104 60
105 if (!pr) 61 if (!pr)
106 return -EINVAL; 62 return -EINVAL;
@@ -112,7 +68,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
112 status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc); 68 status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
113 69
114 if (status != AE_NOT_FOUND) 70 if (status != AE_NOT_FOUND)
115 acpi_processor_ppc_status |= PPC_IN_USE; 71 acpi_processor_ppc_in_use = true;
116 72
117 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 73 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
118 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC")); 74 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
@@ -124,6 +80,17 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
124 80
125 pr->performance_platform_limit = (int)ppc; 81 pr->performance_platform_limit = (int)ppc;
126 82
83 if (ppc >= pr->performance->state_count ||
84 unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
85 return 0;
86
87 ret = dev_pm_qos_update_request(&pr->perflib_req,
88 pr->performance->states[ppc].core_frequency * 1000);
89 if (ret < 0) {
90 pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
91 pr->id, ret);
92 }
93
127 return 0; 94 return 0;
128} 95}
129 96
@@ -184,23 +151,32 @@ int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
184} 151}
185EXPORT_SYMBOL(acpi_processor_get_bios_limit); 152EXPORT_SYMBOL(acpi_processor_get_bios_limit);
186 153
187void acpi_processor_ppc_init(void) 154void acpi_processor_ignore_ppc_init(void)
188{ 155{
189 if (!cpufreq_register_notifier 156 if (ignore_ppc < 0)
190 (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER)) 157 ignore_ppc = 0;
191 acpi_processor_ppc_status |= PPC_REGISTERED; 158}
192 else 159
193 printk(KERN_DEBUG 160void acpi_processor_ppc_init(int cpu)
194 "Warning: Processor Platform Limit not supported.\n"); 161{
162 struct acpi_processor *pr = per_cpu(processors, cpu);
163 int ret;
164
165 ret = dev_pm_qos_add_request(get_cpu_device(cpu),
166 &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
167 INT_MAX);
168 if (ret < 0) {
169 pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
170 ret);
171 return;
172 }
195} 173}
196 174
197void acpi_processor_ppc_exit(void) 175void acpi_processor_ppc_exit(int cpu)
198{ 176{
199 if (acpi_processor_ppc_status & PPC_REGISTERED) 177 struct acpi_processor *pr = per_cpu(processors, cpu);
200 cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
201 CPUFREQ_POLICY_NOTIFIER);
202 178
203 acpi_processor_ppc_status &= ~PPC_REGISTERED; 179 dev_pm_qos_remove_request(&pr->perflib_req);
204} 180}
205 181
206static int acpi_processor_get_performance_control(struct acpi_processor *pr) 182static int acpi_processor_get_performance_control(struct acpi_processor *pr)
@@ -477,7 +453,7 @@ int acpi_processor_notify_smm(struct module *calling_module)
477 static int is_done = 0; 453 static int is_done = 0;
478 int result; 454 int result;
479 455
480 if (!(acpi_processor_ppc_status & PPC_REGISTERED)) 456 if (!acpi_processor_cpufreq_init)
481 return -EBUSY; 457 return -EBUSY;
482 458
483 if (!try_module_get(calling_module)) 459 if (!try_module_get(calling_module))
@@ -513,7 +489,7 @@ int acpi_processor_notify_smm(struct module *calling_module)
513 * we can allow the cpufreq driver to be rmmod'ed. */ 489 * we can allow the cpufreq driver to be rmmod'ed. */
514 is_done = 1; 490 is_done = 1;
515 491
516 if (!(acpi_processor_ppc_status & PPC_IN_USE)) 492 if (!acpi_processor_ppc_in_use)
517 module_put(calling_module); 493 module_put(calling_module);
518 494
519 return 0; 495 return 0;
@@ -742,7 +718,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
742{ 718{
743 struct acpi_processor *pr; 719 struct acpi_processor *pr;
744 720
745 if (!(acpi_processor_ppc_status & PPC_REGISTERED)) 721 if (!acpi_processor_cpufreq_init)
746 return -EINVAL; 722 return -EINVAL;
747 723
748 mutex_lock(&performance_mutex); 724 mutex_lock(&performance_mutex);
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 50fb0107375e..ec2638f1df4f 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -35,7 +35,6 @@ ACPI_MODULE_NAME("processor_thermal");
35#define CPUFREQ_THERMAL_MAX_STEP 3 35#define CPUFREQ_THERMAL_MAX_STEP 3
36 36
37static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg); 37static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
38static unsigned int acpi_thermal_cpufreq_is_init = 0;
39 38
40#define reduction_pctg(cpu) \ 39#define reduction_pctg(cpu) \
41 per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu)) 40 per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
@@ -61,35 +60,11 @@ static int phys_package_first_cpu(int cpu)
61static int cpu_has_cpufreq(unsigned int cpu) 60static int cpu_has_cpufreq(unsigned int cpu)
62{ 61{
63 struct cpufreq_policy policy; 62 struct cpufreq_policy policy;
64 if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu)) 63 if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu))
65 return 0; 64 return 0;
66 return 1; 65 return 1;
67} 66}
68 67
69static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
70 unsigned long event, void *data)
71{
72 struct cpufreq_policy *policy = data;
73 unsigned long max_freq = 0;
74
75 if (event != CPUFREQ_ADJUST)
76 goto out;
77
78 max_freq = (
79 policy->cpuinfo.max_freq *
80 (100 - reduction_pctg(policy->cpu) * 20)
81 ) / 100;
82
83 cpufreq_verify_within_limits(policy, 0, max_freq);
84
85 out:
86 return 0;
87}
88
89static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
90 .notifier_call = acpi_thermal_cpufreq_notifier,
91};
92
93static int cpufreq_get_max_state(unsigned int cpu) 68static int cpufreq_get_max_state(unsigned int cpu)
94{ 69{
95 if (!cpu_has_cpufreq(cpu)) 70 if (!cpu_has_cpufreq(cpu))
@@ -108,7 +83,10 @@ static int cpufreq_get_cur_state(unsigned int cpu)
108 83
109static int cpufreq_set_cur_state(unsigned int cpu, int state) 84static int cpufreq_set_cur_state(unsigned int cpu, int state)
110{ 85{
111 int i; 86 struct cpufreq_policy *policy;
87 struct acpi_processor *pr;
88 unsigned long max_freq;
89 int i, ret;
112 90
113 if (!cpu_has_cpufreq(cpu)) 91 if (!cpu_has_cpufreq(cpu))
114 return 0; 92 return 0;
@@ -121,33 +99,53 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
121 * frequency. 99 * frequency.
122 */ 100 */
123 for_each_online_cpu(i) { 101 for_each_online_cpu(i) {
124 if (topology_physical_package_id(i) == 102 if (topology_physical_package_id(i) !=
125 topology_physical_package_id(cpu)) 103 topology_physical_package_id(cpu))
126 cpufreq_update_policy(i); 104 continue;
105
106 pr = per_cpu(processors, i);
107
108 if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req)))
109 continue;
110
111 policy = cpufreq_cpu_get(i);
112 if (!policy)
113 return -EINVAL;
114
115 max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;
116
117 cpufreq_cpu_put(policy);
118
119 ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq);
120 if (ret < 0) {
121 pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
122 pr->id, ret);
123 }
127 } 124 }
128 return 0; 125 return 0;
129} 126}
130 127
131void acpi_thermal_cpufreq_init(void) 128void acpi_thermal_cpufreq_init(int cpu)
132{ 129{
133 int i; 130 struct acpi_processor *pr = per_cpu(processors, cpu);
134 131 int ret;
135 i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, 132
136 CPUFREQ_POLICY_NOTIFIER); 133 ret = dev_pm_qos_add_request(get_cpu_device(cpu),
137 if (!i) 134 &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
138 acpi_thermal_cpufreq_is_init = 1; 135 INT_MAX);
136 if (ret < 0) {
137 pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
138 ret);
139 return;
140 }
139} 141}
140 142
141void acpi_thermal_cpufreq_exit(void) 143void acpi_thermal_cpufreq_exit(int cpu)
142{ 144{
143 if (acpi_thermal_cpufreq_is_init) 145 struct acpi_processor *pr = per_cpu(processors, cpu);
144 cpufreq_unregister_notifier
145 (&acpi_thermal_cpufreq_notifier_block,
146 CPUFREQ_POLICY_NOTIFIER);
147 146
148 acpi_thermal_cpufreq_is_init = 0; 147 dev_pm_qos_remove_request(&pr->thermal_req);
149} 148}
150
151#else /* ! CONFIG_CPU_FREQ */ 149#else /* ! CONFIG_CPU_FREQ */
152static int cpufreq_get_max_state(unsigned int cpu) 150static int cpufreq_get_max_state(unsigned int cpu)
153{ 151{
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 63c1e76739f1..8cab1f5a8e0c 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -174,7 +174,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 	if (!raw_capacity)
 		return 0;
 
-	if (val != CPUFREQ_NOTIFY)
+	if (val != CPUFREQ_CREATE_POLICY)
 		return 0;
 
 	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 56c31a78c692..a905796f7f85 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -19,6 +19,18 @@ config ACPI_CPPC_CPUFREQ
 
 	  If in doubt, say N.
 
+config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
+	tristate "Allwinner nvmem based SUN50I CPUFreq driver"
+	depends on ARCH_SUNXI
+	depends on NVMEM_SUNXI_SID
+	select PM_OPP
+	help
+	  This adds the nvmem based CPUFreq driver for Allwinner
+	  h6 SoC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sun50i-cpufreq-nvmem.
+
 config ARM_ARMADA_37XX_CPUFREQ
 	tristate "Armada 37xx CPUFreq support"
 	depends on ARCH_MVEBU && CPUFREQ_DT
@@ -120,8 +132,8 @@ config ARM_OMAP2PLUS_CPUFREQ
 	depends on ARCH_OMAP2PLUS
 	default ARCH_OMAP2PLUS
 
-config ARM_QCOM_CPUFREQ_KRYO
-	tristate "Qualcomm Kryo based CPUFreq"
+config ARM_QCOM_CPUFREQ_NVMEM
+	tristate "Qualcomm nvmem based CPUFreq"
 	depends on ARM64
 	depends on QCOM_QFPROM
 	depends on QCOM_SMEM
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 5a6c70d26c98..9a9f5ccd13d9 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -64,7 +64,7 @@ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
 obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)	+= pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)			+= pxa3xx-cpufreq.o
 obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW)	+= qcom-cpufreq-hw.o
-obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRYO)	+= qcom-cpufreq-kryo.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM)	+= qcom-cpufreq-nvmem.o
 obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ)	+= raspberrypi-cpufreq.o
 obj-$(CONFIG_ARM_S3C2410_CPUFREQ)	+= s3c2410-cpufreq.o
 obj-$(CONFIG_ARM_S3C2412_CPUFREQ)	+= s3c2412-cpufreq.o
@@ -80,6 +80,7 @@ obj-$(CONFIG_ARM_SCMI_CPUFREQ) += scmi-cpufreq.o
 obj-$(CONFIG_ARM_SCPI_CPUFREQ)		+= scpi-cpufreq.o
 obj-$(CONFIG_ARM_SPEAR_CPUFREQ)		+= spear-cpufreq.o
 obj-$(CONFIG_ARM_STI_CPUFREQ)		+= sti-cpufreq.o
+obj-$(CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM) += sun50i-cpufreq-nvmem.o
 obj-$(CONFIG_ARM_TANGO_CPUFREQ)		+= tango-cpufreq.o
 obj-$(CONFIG_ARM_TEGRA20_CPUFREQ)	+= tegra20-cpufreq.o
 obj-$(CONFIG_ARM_TEGRA124_CPUFREQ)	+= tegra124-cpufreq.o
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 988ebc326bdb..39e34f5066d3 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -136,6 +136,8 @@ static int __init armada_8k_cpufreq_init(void)
 
 	nb_cpus = num_possible_cpus();
 	freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
+	if (!freq_tables)
+		return -ENOMEM;
 	cpumask_copy(&cpus, cpu_possible_mask);
 
 	/*
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 03dc4244ab00..bca8d1f47fd2 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -101,12 +101,15 @@ static const struct of_device_id whitelist[] __initconst = {
  * platforms using "operating-points-v2" property.
  */
 static const struct of_device_id blacklist[] __initconst = {
+	{ .compatible = "allwinner,sun50i-h6", },
+
 	{ .compatible = "calxeda,highbank", },
 	{ .compatible = "calxeda,ecx-2000", },
 
 	{ .compatible = "fsl,imx7d", },
 	{ .compatible = "fsl,imx8mq", },
 	{ .compatible = "fsl,imx8mm", },
+	{ .compatible = "fsl,imx8mn", },
 
 	{ .compatible = "marvell,armadaxp", },
 
@@ -117,12 +120,14 @@ static const struct of_device_id blacklist[] __initconst = {
 	{ .compatible = "mediatek,mt817x", },
 	{ .compatible = "mediatek,mt8173", },
 	{ .compatible = "mediatek,mt8176", },
+	{ .compatible = "mediatek,mt8183", },
 
 	{ .compatible = "nvidia,tegra124", },
 	{ .compatible = "nvidia,tegra210", },
 
 	{ .compatible = "qcom,apq8096", },
 	{ .compatible = "qcom,msm8996", },
+	{ .compatible = "qcom,qcs404", },
 
 	{ .compatible = "st,stih407", },
 	{ .compatible = "st,stih410", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c28ebf2810f1..c52d6fa32aac 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1266,7 +1266,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 					     DEV_PM_QOS_MAX_FREQUENCY);
 	dev_pm_qos_remove_notifier(dev, &policy->nb_min,
 				   DEV_PM_QOS_MIN_FREQUENCY);
-	dev_pm_qos_remove_request(policy->max_freq_req);
+
+	if (policy->max_freq_req) {
+		/*
+		 * CPUFREQ_CREATE_POLICY notification is sent only after
+		 * successfully adding max_freq_req request.
+		 */
+		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+					     CPUFREQ_REMOVE_POLICY, policy);
+		dev_pm_qos_remove_request(policy->max_freq_req);
+	}
+
 	dev_pm_qos_remove_request(policy->min_freq_req);
 	kfree(policy->min_freq_req);
 
@@ -1391,6 +1401,9 @@ static int cpufreq_online(unsigned int cpu)
 				ret);
 			goto out_destroy_policy;
 		}
+
+		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+				CPUFREQ_CREATE_POLICY, policy);
 	}
 
 	if (cpufreq_driver->get && has_target()) {
@@ -1807,8 +1820,8 @@ void cpufreq_suspend(void)
 		}
 
 		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
-			pr_err("%s: Failed to suspend driver: %p\n", __func__,
-			       policy);
+			pr_err("%s: Failed to suspend driver: %s\n", __func__,
+			       cpufreq_driver->name);
 	}
 
 suspend:
@@ -2140,7 +2153,7 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
 {
-	int ret = -EINVAL;
+	int ret;
 
 	down_write(&policy->rwsem);
 
@@ -2347,15 +2360,13 @@ EXPORT_SYMBOL(cpufreq_get_policy);
  * @policy: Policy object to modify.
  * @new_policy: New policy data.
  *
- * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
- * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
- * the driver's ->verify() callback again and run the notifiers for it again
- * with the CPUFREQ_NOTIFY value. Next, copy the min and max parameters
- * of @new_policy to @policy and either invoke the driver's ->setpolicy()
- * callback (if present) or carry out a governor update for @policy. That is,
- * run the current governor's ->limits() callback (if the governor field in
- * @new_policy points to the same object as the one in @policy) or replace the
- * governor for @policy with the new one stored in @new_policy.
+ * Pass @new_policy to the cpufreq driver's ->verify() callback. Next, copy the
+ * min and max parameters of @new_policy to @policy and either invoke the
+ * driver's ->setpolicy() callback (if present) or carry out a governor update
+ * for @policy. That is, run the current governor's ->limits() callback (if the
+ * governor field in @new_policy points to the same object as the one in
+ * @policy) or replace the governor for @policy with the new one stored in
+ * @new_policy.
  *
  * The cpuinfo part of @policy is not updated by this function.
  */
@@ -2383,26 +2394,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
 	if (ret)
 		return ret;
 
-	/*
-	 * The notifier-chain shall be removed once all the users of
-	 * CPUFREQ_ADJUST are moved to use the QoS framework.
-	 */
-	/* adjust if necessary - all reasons */
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-			CPUFREQ_ADJUST, new_policy);
-
-	/*
-	 * verify the cpu speed can be set within this limit, which might be
-	 * different to the first one
-	 */
-	ret = cpufreq_driver->verify(new_policy);
-	if (ret)
-		return ret;
-
-	/* notification of the new policy */
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-			CPUFREQ_NOTIFY, new_policy);
-
 	policy->min = new_policy->min;
 	policy->max = new_policy->max;
 	trace_cpu_frequency_limits(policy);
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 4f85f3112784..35db14cf3102 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -16,6 +16,7 @@
 
 #define OCOTP_CFG3_SPEED_GRADE_SHIFT	8
 #define OCOTP_CFG3_SPEED_GRADE_MASK	(0x3 << 8)
+#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK	(0xf << 8)
 #define OCOTP_CFG3_MKT_SEGMENT_SHIFT	6
 #define OCOTP_CFG3_MKT_SEGMENT_MASK	(0x3 << 6)
 
@@ -34,7 +35,12 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK) >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
+	if (of_machine_is_compatible("fsl,imx8mn"))
+		speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
+			>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
+	else
+		speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK)
+			>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
 	mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
 
 	/*
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cc27d4c59dca..32f27563613b 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -24,6 +24,7 @@
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/acpi.h> 25#include <linux/acpi.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/pm_qos.h>
27#include <trace/events/power.h> 28#include <trace/events/power.h>
28 29
29#include <asm/div64.h> 30#include <asm/div64.h>
@@ -1085,6 +1086,47 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
1085 return count; 1086 return count;
1086} 1087}
1087 1088
1089static struct cpufreq_driver intel_pstate;
1090
1091static void update_qos_request(enum dev_pm_qos_req_type type)
1092{
1093 int max_state, turbo_max, freq, i, perf_pct;
1094 struct dev_pm_qos_request *req;
1095 struct cpufreq_policy *policy;
1096
1097 for_each_possible_cpu(i) {
1098 struct cpudata *cpu = all_cpu_data[i];
1099
1100 policy = cpufreq_cpu_get(i);
1101 if (!policy)
1102 continue;
1103
1104 req = policy->driver_data;
1105 cpufreq_cpu_put(policy);
1106
1107 if (!req)
1108 continue;
1109
1110 if (hwp_active)
1111 intel_pstate_get_hwp_max(i, &turbo_max, &max_state);
1112 else
1113 turbo_max = cpu->pstate.turbo_pstate;
1114
1115 if (type == DEV_PM_QOS_MIN_FREQUENCY) {
1116 perf_pct = global.min_perf_pct;
1117 } else {
1118 req++;
1119 perf_pct = global.max_perf_pct;
1120 }
1121
1122 freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
1123 freq *= cpu->pstate.scaling;
1124
1125 if (dev_pm_qos_update_request(req, freq) < 0)
1126 pr_warn("Failed to update freq constraint: CPU%d\n", i);
1127 }
1128}
1129
1088static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, 1130static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
1089 const char *buf, size_t count) 1131 const char *buf, size_t count)
1090{ 1132{
@@ -1108,7 +1150,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
1108 1150
1109 mutex_unlock(&intel_pstate_limits_lock); 1151 mutex_unlock(&intel_pstate_limits_lock);
1110 1152
1111 intel_pstate_update_policies(); 1153 if (intel_pstate_driver == &intel_pstate)
1154 intel_pstate_update_policies();
1155 else
1156 update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
1112 1157
1113 mutex_unlock(&intel_pstate_driver_lock); 1158 mutex_unlock(&intel_pstate_driver_lock);
1114 1159
@@ -1139,7 +1184,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
1139 1184
1140 mutex_unlock(&intel_pstate_limits_lock); 1185 mutex_unlock(&intel_pstate_limits_lock);
1141 1186
1142 intel_pstate_update_policies(); 1187 if (intel_pstate_driver == &intel_pstate)
1188 intel_pstate_update_policies();
1189 else
1190 update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
1143 1191
1144 mutex_unlock(&intel_pstate_driver_lock); 1192 mutex_unlock(&intel_pstate_driver_lock);
1145 1193
@@ -2332,8 +2380,16 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
2332 2380
2333static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) 2381static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
2334{ 2382{
2335 int ret = __intel_pstate_cpu_init(policy); 2383 int max_state, turbo_max, min_freq, max_freq, ret;
2384 struct dev_pm_qos_request *req;
2385 struct cpudata *cpu;
2386 struct device *dev;
2387
2388 dev = get_cpu_device(policy->cpu);
2389 if (!dev)
2390 return -ENODEV;
2336 2391
2392 ret = __intel_pstate_cpu_init(policy);
2337 if (ret) 2393 if (ret)
2338 return ret; 2394 return ret;
2339 2395
@@ -2342,7 +2398,63 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
2342 /* This reflects the intel_pstate_get_cpu_pstates() setting. */ 2398 /* This reflects the intel_pstate_get_cpu_pstates() setting. */
2343 policy->cur = policy->cpuinfo.min_freq; 2399 policy->cur = policy->cpuinfo.min_freq;
2344 2400
2401 req = kcalloc(2, sizeof(*req), GFP_KERNEL);
2402 if (!req) {
2403 ret = -ENOMEM;
2404 goto pstate_exit;
2405 }
2406
2407 cpu = all_cpu_data[policy->cpu];
2408
2409 if (hwp_active)
2410 intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
2411 else
2412 turbo_max = cpu->pstate.turbo_pstate;
2413
2414 min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
2415 min_freq *= cpu->pstate.scaling;
2416 max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
2417 max_freq *= cpu->pstate.scaling;
2418
2419 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
2420 min_freq);
2421 if (ret < 0) {
2422 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
2423 goto free_req;
2424 }
2425
2426 ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
2427 max_freq);
2428 if (ret < 0) {
2429 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
2430 goto remove_min_req;
2431 }
2432
2433 policy->driver_data = req;
2434
2345 return 0; 2435 return 0;
2436
2437remove_min_req:
2438 dev_pm_qos_remove_request(req);
2439free_req:
2440 kfree(req);
2441pstate_exit:
2442 intel_pstate_exit_perf_limits(policy);
2443
2444 return ret;
2445}
2446
2447static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
2448{
2449 struct dev_pm_qos_request *req;
2450
2451 req = policy->driver_data;
2452
2453 dev_pm_qos_remove_request(req + 1);
2454 dev_pm_qos_remove_request(req);
2455 kfree(req);
2456
2457 return intel_pstate_cpu_exit(policy);
2346} 2458}
2347 2459
2348static struct cpufreq_driver intel_cpufreq = { 2460static struct cpufreq_driver intel_cpufreq = {
@@ -2351,7 +2463,7 @@ static struct cpufreq_driver intel_cpufreq = {
2351 .target = intel_cpufreq_target, 2463 .target = intel_cpufreq_target,
2352 .fast_switch = intel_cpufreq_fast_switch, 2464 .fast_switch = intel_cpufreq_fast_switch,
2353 .init = intel_cpufreq_cpu_init, 2465 .init = intel_cpufreq_cpu_init,
2354 .exit = intel_pstate_cpu_exit, 2466 .exit = intel_cpufreq_cpu_exit,
2355 .stop_cpu = intel_cpufreq_stop_cpu, 2467 .stop_cpu = intel_cpufreq_stop_cpu,
2356 .update_limits = intel_pstate_update_limits, 2468 .update_limits = intel_pstate_update_limits,
2357 .name = "intel_cpufreq", 2469 .name = "intel_cpufreq",
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f14f3a85f2f7..0c98dd08273d 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -338,7 +338,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
 		goto out_free_resources;
 	}
 
-	proc_reg = regulator_get_exclusive(cpu_dev, "proc");
+	proc_reg = regulator_get_optional(cpu_dev, "proc");
 	if (IS_ERR(proc_reg)) {
 		if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
 			pr_warn("proc regulator for cpu%d not ready, retry.\n",
@@ -535,6 +535,8 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
 	{ .compatible = "mediatek,mt817x", },
 	{ .compatible = "mediatek,mt8173", },
 	{ .compatible = "mediatek,mt8176", },
+	{ .compatible = "mediatek,mt8183", },
+	{ .compatible = "mediatek,mt8516", },
 
 	{ }
 };
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index b83f36febf03..c58abb4cca3a 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -110,6 +110,13 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
 #endif
 
 	policy->freq_table = cbe_freqs;
+	cbe_cpufreq_pmi_policy_init(policy);
+	return 0;
+}
+
+static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	cbe_cpufreq_pmi_policy_exit(policy);
 	return 0;
 }
 
@@ -129,6 +136,7 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
 	.verify		= cpufreq_generic_frequency_table_verify,
 	.target_index	= cbe_cpufreq_target,
 	.init		= cbe_cpufreq_cpu_init,
+	.exit		= cbe_cpufreq_cpu_exit,
 	.name		= "cbe-cpufreq",
 	.flags		= CPUFREQ_CONST_LOOPS,
 };
@@ -139,15 +147,24 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
 
 static int __init cbe_cpufreq_init(void)
 {
+	int ret;
+
 	if (!machine_is(cell))
 		return -ENODEV;
 
-	return cpufreq_register_driver(&cbe_cpufreq_driver);
+	cbe_cpufreq_pmi_init();
+
+	ret = cpufreq_register_driver(&cbe_cpufreq_driver);
+	if (ret)
+		cbe_cpufreq_pmi_exit();
+
+	return ret;
 }
 
 static void __exit cbe_cpufreq_exit(void)
 {
 	cpufreq_unregister_driver(&cbe_cpufreq_driver);
+	cbe_cpufreq_pmi_exit();
 }
 
 module_init(cbe_cpufreq_init);
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
index 9d973519d669..00cd8633b0d9 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -20,6 +20,14 @@ int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
 
 #if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
 extern bool cbe_cpufreq_has_pmi;
+void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy);
+void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy);
+void cbe_cpufreq_pmi_init(void);
+void cbe_cpufreq_pmi_exit(void);
 #else
 #define cbe_cpufreq_has_pmi (0)
+static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {}
+static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {}
+static inline void cbe_cpufreq_pmi_init(void) {}
+static inline void cbe_cpufreq_pmi_exit(void) {}
 #endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index 97c8ee4614b7..bc9dd30395c4 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -12,6 +12,7 @@
12#include <linux/timer.h> 12#include <linux/timer.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/of_platform.h> 14#include <linux/of_platform.h>
15#include <linux/pm_qos.h>
15 16
16#include <asm/processor.h> 17#include <asm/processor.h>
17#include <asm/prom.h> 18#include <asm/prom.h>
@@ -24,8 +25,6 @@
24 25
25#include "ppc_cbe_cpufreq.h" 26#include "ppc_cbe_cpufreq.h"
26 27
27static u8 pmi_slow_mode_limit[MAX_CBE];
28
29bool cbe_cpufreq_has_pmi = false; 28bool cbe_cpufreq_has_pmi = false;
30EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi); 29EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
31 30
@@ -65,64 +64,89 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
65 64
66static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) 65static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
67{ 66{
67 struct cpufreq_policy *policy;
68 struct dev_pm_qos_request *req;
68 u8 node, slow_mode; 69 u8 node, slow_mode;
70 int cpu, ret;
69 71
70 BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE); 72 BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
71 73
72 node = pmi_msg.data1; 74 node = pmi_msg.data1;
73 slow_mode = pmi_msg.data2; 75 slow_mode = pmi_msg.data2;
74 76
75 pmi_slow_mode_limit[node] = slow_mode; 77 cpu = cbe_node_to_cpu(node);
76 78
77 pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); 79 pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
78}
79
80static int pmi_notifier(struct notifier_block *nb,
81 unsigned long event, void *data)
82{
83 struct cpufreq_policy *policy = data;
84 struct cpufreq_frequency_table *cbe_freqs = policy->freq_table;
85 u8 node;
86
87 /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
88 * policy events?)
89 */
90 node = cbe_cpu_to_node(policy->cpu);
91
92 pr_debug("got notified, event=%lu, node=%u\n", event, node);
93 80
94 if (pmi_slow_mode_limit[node] != 0) { 81 policy = cpufreq_cpu_get(cpu);
95 pr_debug("limiting node %d to slow mode %d\n", 82 if (!policy) {
96 node, pmi_slow_mode_limit[node]); 83 pr_warn("cpufreq policy not found cpu%d\n", cpu);
84 return;
85 }
97 86
98 cpufreq_verify_within_limits(policy, 0, 87 req = policy->driver_data;
99 88
100 cbe_freqs[pmi_slow_mode_limit[node]].frequency); 89 ret = dev_pm_qos_update_request(req,
101 } 90 policy->freq_table[slow_mode].frequency);
91 if (ret < 0)
92 pr_warn("Failed to update freq constraint: %d\n", ret);
93 else
94 pr_debug("limiting node %d to slow mode %d\n", node, slow_mode);
102 95
103 return 0; 96 cpufreq_cpu_put(policy);
104} 97}
105 98
106static struct notifier_block pmi_notifier_block = {
107 .notifier_call = pmi_notifier,
108};
109
110static struct pmi_handler cbe_pmi_handler = { 99static struct pmi_handler cbe_pmi_handler = {
111 .type = PMI_TYPE_FREQ_CHANGE, 100 .type = PMI_TYPE_FREQ_CHANGE,
112 .handle_pmi_message = cbe_cpufreq_handle_pmi, 101 .handle_pmi_message = cbe_cpufreq_handle_pmi,
113}; 102};
114 103
104void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
105{
106 struct dev_pm_qos_request *req;
107 int ret;
108
109 if (!cbe_cpufreq_has_pmi)
110 return;
111
112 req = kzalloc(sizeof(*req), GFP_KERNEL);
113 if (!req)
114 return;
115
116 ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
117 DEV_PM_QOS_MAX_FREQUENCY,
118 policy->freq_table[0].frequency);
119 if (ret < 0) {
120 pr_err("Failed to add freq constraint (%d)\n", ret);
121 kfree(req);
122 return;
123 }
115 124
125 policy->driver_data = req;
126}
127EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
116 128
117static int __init cbe_cpufreq_pmi_init(void) 129void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
118{ 130{
119 cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0; 131 struct dev_pm_qos_request *req = policy->driver_data;
120 132
121 if (!cbe_cpufreq_has_pmi) 133 if (cbe_cpufreq_has_pmi) {
122 return -ENODEV; 134 dev_pm_qos_remove_request(req);
135 kfree(req);
136 }
137}
138EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit);
123 139
124 cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); 140void cbe_cpufreq_pmi_init(void)
141{
142 if (!pmi_register_handler(&cbe_pmi_handler))
143 cbe_cpufreq_has_pmi = true;
144}
145EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init);
125 146
126 return 0; 147void cbe_cpufreq_pmi_exit(void)
148{
149 pmi_unregister_handler(&cbe_pmi_handler);
150 cbe_cpufreq_has_pmi = false;
127} 151}
128device_initcall(cbe_cpufreq_pmi_init); 152EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit);
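
For reference, a minimal sketch of the dev_pm_qos pattern this conversion adopts: add a DEV_PM_QOS_MAX_FREQUENCY request when the policy is created, update it when the platform reports a new limit, and drop it on teardown. The example_* helpers below are illustrative only, not part of the patch.

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>

static int example_policy_init(struct cpufreq_policy *policy)
{
	struct dev_pm_qos_request *req;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Start unconstrained: cap at the fastest table entry. */
	ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
				     DEV_PM_QOS_MAX_FREQUENCY,
				     policy->freq_table[0].frequency);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	policy->driver_data = req;
	return 0;
}

static void example_platform_limit(struct cpufreq_policy *policy,
				   unsigned int index)
{
	/* Move the cap; the cpufreq core recomputes policy->max itself. */
	dev_pm_qos_update_request(policy->driver_data,
				  policy->freq_table[index].frequency);
}
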
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 4b0b50403901..a9ae2f84a4ef 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -20,6 +20,7 @@
20#define LUT_VOLT GENMASK(11, 0) 20#define LUT_VOLT GENMASK(11, 0)
21#define LUT_ROW_SIZE 32 21#define LUT_ROW_SIZE 32
22#define CLK_HW_DIV 2 22#define CLK_HW_DIV 2
23#define LUT_TURBO_IND 1
23 24
24/* Register offsets */ 25/* Register offsets */
25#define REG_ENABLE 0x0 26#define REG_ENABLE 0x0
@@ -34,9 +35,12 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
34 unsigned int index) 35 unsigned int index)
35{ 36{
36 void __iomem *perf_state_reg = policy->driver_data; 37 void __iomem *perf_state_reg = policy->driver_data;
38 unsigned long freq = policy->freq_table[index].frequency;
37 39
38 writel_relaxed(index, perf_state_reg); 40 writel_relaxed(index, perf_state_reg);
39 41
42 arch_set_freq_scale(policy->related_cpus, freq,
43 policy->cpuinfo.max_freq);
40 return 0; 44 return 0;
41} 45}
42 46
@@ -63,6 +67,7 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
63{ 67{
64 void __iomem *perf_state_reg = policy->driver_data; 68 void __iomem *perf_state_reg = policy->driver_data;
65 int index; 69 int index;
70 unsigned long freq;
66 71
67 index = policy->cached_resolved_idx; 72 index = policy->cached_resolved_idx;
68 if (index < 0) 73 if (index < 0)
@@ -70,16 +75,19 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
70 75
71 writel_relaxed(index, perf_state_reg); 76 writel_relaxed(index, perf_state_reg);
72 77
73 return policy->freq_table[index].frequency; 78 freq = policy->freq_table[index].frequency;
79 arch_set_freq_scale(policy->related_cpus, freq,
80 policy->cpuinfo.max_freq);
81
82 return freq;
74} 83}
75 84
76static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, 85static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
77 struct cpufreq_policy *policy, 86 struct cpufreq_policy *policy,
78 void __iomem *base) 87 void __iomem *base)
79{ 88{
80 u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq; 89 u32 data, src, lval, i, core_count, prev_freq = 0, freq;
81 u32 volt; 90 u32 volt;
82 unsigned int max_cores = cpumask_weight(policy->cpus);
83 struct cpufreq_frequency_table *table; 91 struct cpufreq_frequency_table *table;
84 92
85 table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL); 93 table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
@@ -102,12 +110,12 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
102 else 110 else
103 freq = cpu_hw_rate / 1000; 111 freq = cpu_hw_rate / 1000;
104 112
105 if (freq != prev_freq && core_count == max_cores) { 113 if (freq != prev_freq && core_count != LUT_TURBO_IND) {
106 table[i].frequency = freq; 114 table[i].frequency = freq;
107 dev_pm_opp_add(cpu_dev, freq * 1000, volt); 115 dev_pm_opp_add(cpu_dev, freq * 1000, volt);
108 dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i, 116 dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
109 freq, core_count); 117 freq, core_count);
110 } else { 118 } else if (core_count == LUT_TURBO_IND) {
111 table[i].frequency = CPUFREQ_ENTRY_INVALID; 119 table[i].frequency = CPUFREQ_ENTRY_INVALID;
112 } 120 }
113 121
@@ -115,14 +123,14 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
115 * Two of the same frequencies with the same core counts means 123 * Two of the same frequencies with the same core counts means
116 * end of table 124 * end of table
117 */ 125 */
118 if (i > 0 && prev_freq == freq && prev_cc == core_count) { 126 if (i > 0 && prev_freq == freq) {
119 struct cpufreq_frequency_table *prev = &table[i - 1]; 127 struct cpufreq_frequency_table *prev = &table[i - 1];
120 128
121 /* 129 /*
122 * Only treat the last frequency that might be a boost 130 * Only treat the last frequency that might be a boost
123 * as the boost frequency 131 * as the boost frequency
124 */ 132 */
125 if (prev_cc != max_cores) { 133 if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
126 prev->frequency = prev_freq; 134 prev->frequency = prev_freq;
127 prev->flags = CPUFREQ_BOOST_FREQ; 135 prev->flags = CPUFREQ_BOOST_FREQ;
128 dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt); 136 dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt);
@@ -131,7 +139,6 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
131 break; 139 break;
132 } 140 }
133 141
134 prev_cc = core_count;
135 prev_freq = freq; 142 prev_freq = freq;
136 } 143 }
137 144
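
The arch_set_freq_scale() calls added above feed the scheduler's frequency-invariant load tracking. Below is a conceptual sketch, assuming behaviour like the generic helper in drivers/base/arch_topology.c; the example_* names are illustrative.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/sched/topology.h>

static DEFINE_PER_CPU(unsigned long, example_freq_scale) = SCHED_CAPACITY_SCALE;

/* Store cur/max as a SCHED_CAPACITY_SCALE fixed-point ratio per CPU. */
static void example_set_freq_scale(const struct cpumask *cpus,
				   unsigned long cur_freq,
				   unsigned long max_freq)
{
	unsigned long scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
	int cpu;

	for_each_cpu(cpu, cpus)
		per_cpu(example_freq_scale, cpu) = scale;
}
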
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
deleted file mode 100644
index dd64dcf89c74..000000000000
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ /dev/null
@@ -1,249 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
4 */
5
6/*
7 * In Certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors,
8 * the CPU frequency subset and voltage value of each OPP varies
9 * based on the silicon variant in use. Qualcomm Process Voltage Scaling Tables
10 * defines the voltage and frequency value based on the msm-id in SMEM
11 * and speedbin blown in the efuse combination.
12 * The qcom-cpufreq-kryo driver reads the msm-id and efuse value from the SoC
13 * to provide the OPP framework with required information.
14 * This is used to determine the voltage and frequency value for each OPP of
15 * operating-points-v2 table when it is parsed by the OPP framework.
16 */
17
18#include <linux/cpu.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/nvmem-consumer.h>
24#include <linux/of.h>
25#include <linux/platform_device.h>
26#include <linux/pm_opp.h>
27#include <linux/slab.h>
28#include <linux/soc/qcom/smem.h>
29
30#define MSM_ID_SMEM 137
31
32enum _msm_id {
33 MSM8996V3 = 0xF6ul,
34 APQ8096V3 = 0x123ul,
35 MSM8996SG = 0x131ul,
36 APQ8096SG = 0x138ul,
37};
38
39enum _msm8996_version {
40 MSM8996_V3,
41 MSM8996_SG,
42 NUM_OF_MSM8996_VERSIONS,
43};
44
45static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
46
47static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
48{
49 size_t len;
50 u32 *msm_id;
51 enum _msm8996_version version;
52
53 msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len);
54 if (IS_ERR(msm_id))
55 return NUM_OF_MSM8996_VERSIONS;
56
57 /* The first 4 bytes are format, next to them is the actual msm-id */
58 msm_id++;
59
60 switch ((enum _msm_id)*msm_id) {
61 case MSM8996V3:
62 case APQ8096V3:
63 version = MSM8996_V3;
64 break;
65 case MSM8996SG:
66 case APQ8096SG:
67 version = MSM8996_SG;
68 break;
69 default:
70 version = NUM_OF_MSM8996_VERSIONS;
71 }
72
73 return version;
74}
75
76static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
77{
78 struct opp_table **opp_tables;
79 enum _msm8996_version msm8996_version;
80 struct nvmem_cell *speedbin_nvmem;
81 struct device_node *np;
82 struct device *cpu_dev;
83 unsigned cpu;
84 u8 *speedbin;
85 u32 versions;
86 size_t len;
87 int ret;
88
89 cpu_dev = get_cpu_device(0);
90 if (!cpu_dev)
91 return -ENODEV;
92
93 msm8996_version = qcom_cpufreq_kryo_get_msm_id();
94 if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
95 dev_err(cpu_dev, "Not Snapdragon 820/821!");
96 return -ENODEV;
97 }
98
99 np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
100 if (!np)
101 return -ENOENT;
102
103 ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
104 if (!ret) {
105 of_node_put(np);
106 return -ENOENT;
107 }
108
109 speedbin_nvmem = of_nvmem_cell_get(np, NULL);
110 of_node_put(np);
111 if (IS_ERR(speedbin_nvmem)) {
112 if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
113 dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
114 PTR_ERR(speedbin_nvmem));
115 return PTR_ERR(speedbin_nvmem);
116 }
117
118 speedbin = nvmem_cell_read(speedbin_nvmem, &len);
119 nvmem_cell_put(speedbin_nvmem);
120 if (IS_ERR(speedbin))
121 return PTR_ERR(speedbin);
122
123 switch (msm8996_version) {
124 case MSM8996_V3:
125 versions = 1 << (unsigned int)(*speedbin);
126 break;
127 case MSM8996_SG:
128 versions = 1 << ((unsigned int)(*speedbin) + 4);
129 break;
130 default:
131 BUG();
132 break;
133 }
134 kfree(speedbin);
135
136 opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL);
137 if (!opp_tables)
138 return -ENOMEM;
139
140 for_each_possible_cpu(cpu) {
141 cpu_dev = get_cpu_device(cpu);
142 if (NULL == cpu_dev) {
143 ret = -ENODEV;
144 goto free_opp;
145 }
146
147 opp_tables[cpu] = dev_pm_opp_set_supported_hw(cpu_dev,
148 &versions, 1);
149 if (IS_ERR(opp_tables[cpu])) {
150 ret = PTR_ERR(opp_tables[cpu]);
151 dev_err(cpu_dev, "Failed to set supported hardware\n");
152 goto free_opp;
153 }
154 }
155
156 cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
157 NULL, 0);
158 if (!IS_ERR(cpufreq_dt_pdev)) {
159 platform_set_drvdata(pdev, opp_tables);
160 return 0;
161 }
162
163 ret = PTR_ERR(cpufreq_dt_pdev);
164 dev_err(cpu_dev, "Failed to register platform device\n");
165
166free_opp:
167 for_each_possible_cpu(cpu) {
168 if (IS_ERR_OR_NULL(opp_tables[cpu]))
169 break;
170 dev_pm_opp_put_supported_hw(opp_tables[cpu]);
171 }
172 kfree(opp_tables);
173
174 return ret;
175}
176
177static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
178{
179 struct opp_table **opp_tables = platform_get_drvdata(pdev);
180 unsigned int cpu;
181
182 platform_device_unregister(cpufreq_dt_pdev);
183
184 for_each_possible_cpu(cpu)
185 dev_pm_opp_put_supported_hw(opp_tables[cpu]);
186
187 kfree(opp_tables);
188
189 return 0;
190}
191
192static struct platform_driver qcom_cpufreq_kryo_driver = {
193 .probe = qcom_cpufreq_kryo_probe,
194 .remove = qcom_cpufreq_kryo_remove,
195 .driver = {
196 .name = "qcom-cpufreq-kryo",
197 },
198};
199
200static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
201 { .compatible = "qcom,apq8096", },
202 { .compatible = "qcom,msm8996", },
203 {}
204};
205
206/*
207 * Since the driver depends on smem and nvmem drivers, which may
208 * return EPROBE_DEFER, all the real activity is done in the probe,
209 * which may be defered as well. The init here is only registering
210 * the driver and the platform device.
211 */
212static int __init qcom_cpufreq_kryo_init(void)
213{
214 struct device_node *np = of_find_node_by_path("/");
215 const struct of_device_id *match;
216 int ret;
217
218 if (!np)
219 return -ENODEV;
220
221 match = of_match_node(qcom_cpufreq_kryo_match_list, np);
222 of_node_put(np);
223 if (!match)
224 return -ENODEV;
225
226 ret = platform_driver_register(&qcom_cpufreq_kryo_driver);
227 if (unlikely(ret < 0))
228 return ret;
229
230 kryo_cpufreq_pdev = platform_device_register_simple(
231 "qcom-cpufreq-kryo", -1, NULL, 0);
232 ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
233 if (0 == ret)
234 return 0;
235
236 platform_driver_unregister(&qcom_cpufreq_kryo_driver);
237 return ret;
238}
239module_init(qcom_cpufreq_kryo_init);
240
241static void __exit qcom_cpufreq_kryo_exit(void)
242{
243 platform_device_unregister(kryo_cpufreq_pdev);
244 platform_driver_unregister(&qcom_cpufreq_kryo_driver);
245}
246module_exit(qcom_cpufreq_kryo_exit);
247
248MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
249MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
new file mode 100644
index 000000000000..f0d2d5035413
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -0,0 +1,352 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
4 */
5
6/*
7 * In Certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors,
8 * the CPU frequency subset and voltage value of each OPP varies
9 * based on the silicon variant in use. Qualcomm Process Voltage Scaling Tables
10 * defines the voltage and frequency value based on the msm-id in SMEM
11 * and speedbin blown in the efuse combination.
12 * The qcom-cpufreq-nvmem driver reads the msm-id and efuse value from the SoC
13 * to provide the OPP framework with required information.
14 * This is used to determine the voltage and frequency value for each OPP of
15 * operating-points-v2 table when it is parsed by the OPP framework.
16 */
17
18#include <linux/cpu.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/nvmem-consumer.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/platform_device.h>
27#include <linux/pm_domain.h>
28#include <linux/pm_opp.h>
29#include <linux/slab.h>
30#include <linux/soc/qcom/smem.h>
31
32#define MSM_ID_SMEM 137
33
34enum _msm_id {
35 MSM8996V3 = 0xF6ul,
36 APQ8096V3 = 0x123ul,
37 MSM8996SG = 0x131ul,
38 APQ8096SG = 0x138ul,
39};
40
41enum _msm8996_version {
42 MSM8996_V3,
43 MSM8996_SG,
44 NUM_OF_MSM8996_VERSIONS,
45};
46
47struct qcom_cpufreq_drv;
48
49struct qcom_cpufreq_match_data {
50 int (*get_version)(struct device *cpu_dev,
51 struct nvmem_cell *speedbin_nvmem,
52 struct qcom_cpufreq_drv *drv);
53 const char **genpd_names;
54};
55
56struct qcom_cpufreq_drv {
57 struct opp_table **opp_tables;
58 struct opp_table **genpd_opp_tables;
59 u32 versions;
60 const struct qcom_cpufreq_match_data *data;
61};
62
63static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
64
65static enum _msm8996_version qcom_cpufreq_get_msm_id(void)
66{
67 size_t len;
68 u32 *msm_id;
69 enum _msm8996_version version;
70
71 msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len);
72 if (IS_ERR(msm_id))
73 return NUM_OF_MSM8996_VERSIONS;
74
75 /* The first 4 bytes are format, next to them is the actual msm-id */
76 msm_id++;
77
78 switch ((enum _msm_id)*msm_id) {
79 case MSM8996V3:
80 case APQ8096V3:
81 version = MSM8996_V3;
82 break;
83 case MSM8996SG:
84 case APQ8096SG:
85 version = MSM8996_SG;
86 break;
87 default:
88 version = NUM_OF_MSM8996_VERSIONS;
89 }
90
91 return version;
92}
93
94static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
95 struct nvmem_cell *speedbin_nvmem,
96 struct qcom_cpufreq_drv *drv)
97{
98 size_t len;
99 u8 *speedbin;
100 enum _msm8996_version msm8996_version;
101
102 msm8996_version = qcom_cpufreq_get_msm_id();
103 if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
104 dev_err(cpu_dev, "Not Snapdragon 820/821!");
105 return -ENODEV;
106 }
107
108 speedbin = nvmem_cell_read(speedbin_nvmem, &len);
109 if (IS_ERR(speedbin))
110 return PTR_ERR(speedbin);
111
112 switch (msm8996_version) {
113 case MSM8996_V3:
114 drv->versions = 1 << (unsigned int)(*speedbin);
115 break;
116 case MSM8996_SG:
117 drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
118 break;
119 default:
120 BUG();
121 break;
122 }
123
124 kfree(speedbin);
125 return 0;
126}
127
128static const struct qcom_cpufreq_match_data match_data_kryo = {
129 .get_version = qcom_cpufreq_kryo_name_version,
130};
131
132static const char *qcs404_genpd_names[] = { "cpr", NULL };
133
134static const struct qcom_cpufreq_match_data match_data_qcs404 = {
135 .genpd_names = qcs404_genpd_names,
136};
137
138static int qcom_cpufreq_probe(struct platform_device *pdev)
139{
140 struct qcom_cpufreq_drv *drv;
141 struct nvmem_cell *speedbin_nvmem;
142 struct device_node *np;
143 struct device *cpu_dev;
144 unsigned cpu;
145 const struct of_device_id *match;
146 int ret;
147
148 cpu_dev = get_cpu_device(0);
149 if (!cpu_dev)
150 return -ENODEV;
151
152 np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
153 if (!np)
154 return -ENOENT;
155
156 ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
157 if (!ret) {
158 of_node_put(np);
159 return -ENOENT;
160 }
161
162 drv = kzalloc(sizeof(*drv), GFP_KERNEL);
163 if (!drv)
164 return -ENOMEM;
165
166 match = pdev->dev.platform_data;
167 drv->data = match->data;
168 if (!drv->data) {
169 ret = -ENODEV;
170 goto free_drv;
171 }
172
173 if (drv->data->get_version) {
174 speedbin_nvmem = of_nvmem_cell_get(np, NULL);
175 if (IS_ERR(speedbin_nvmem)) {
176 if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
177 dev_err(cpu_dev,
178 "Could not get nvmem cell: %ld\n",
179 PTR_ERR(speedbin_nvmem));
180 ret = PTR_ERR(speedbin_nvmem);
181 goto free_drv;
182 }
183
184 ret = drv->data->get_version(cpu_dev, speedbin_nvmem, drv);
185 if (ret) {
186 nvmem_cell_put(speedbin_nvmem);
187 goto free_drv;
188 }
189 nvmem_cell_put(speedbin_nvmem);
190 }
191 of_node_put(np);
192
193 drv->opp_tables = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tables),
194 GFP_KERNEL);
195 if (!drv->opp_tables) {
196 ret = -ENOMEM;
197 goto free_drv;
198 }
199
200 drv->genpd_opp_tables = kcalloc(num_possible_cpus(),
201 sizeof(*drv->genpd_opp_tables),
202 GFP_KERNEL);
203 if (!drv->genpd_opp_tables) {
204 ret = -ENOMEM;
205 goto free_opp;
206 }
207
208 for_each_possible_cpu(cpu) {
209 cpu_dev = get_cpu_device(cpu);
210 if (NULL == cpu_dev) {
211 ret = -ENODEV;
212 goto free_genpd_opp;
213 }
214
215 if (drv->data->get_version) {
216 drv->opp_tables[cpu] =
217 dev_pm_opp_set_supported_hw(cpu_dev,
218 &drv->versions, 1);
219 if (IS_ERR(drv->opp_tables[cpu])) {
220 ret = PTR_ERR(drv->opp_tables[cpu]);
221 dev_err(cpu_dev,
222 "Failed to set supported hardware\n");
223 goto free_genpd_opp;
224 }
225 }
226
227 if (drv->data->genpd_names) {
228 drv->genpd_opp_tables[cpu] =
229 dev_pm_opp_attach_genpd(cpu_dev,
230 drv->data->genpd_names,
231 NULL);
232 if (IS_ERR(drv->genpd_opp_tables[cpu])) {
233 ret = PTR_ERR(drv->genpd_opp_tables[cpu]);
234 if (ret != -EPROBE_DEFER)
235 dev_err(cpu_dev,
236 "Could not attach to pm_domain: %d\n",
237 ret);
238 goto free_genpd_opp;
239 }
240 }
241 }
242
243 cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
244 NULL, 0);
245 if (!IS_ERR(cpufreq_dt_pdev)) {
246 platform_set_drvdata(pdev, drv);
247 return 0;
248 }
249
250 ret = PTR_ERR(cpufreq_dt_pdev);
251 dev_err(cpu_dev, "Failed to register platform device\n");
252
253free_genpd_opp:
254 for_each_possible_cpu(cpu) {
255 if (IS_ERR_OR_NULL(drv->genpd_opp_tables[cpu]))
256 break;
257 dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
258 }
259 kfree(drv->genpd_opp_tables);
260free_opp:
261 for_each_possible_cpu(cpu) {
262 if (IS_ERR_OR_NULL(drv->opp_tables[cpu]))
263 break;
264 dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
265 }
266 kfree(drv->opp_tables);
267free_drv:
268 kfree(drv);
269
270 return ret;
271}
272
273static int qcom_cpufreq_remove(struct platform_device *pdev)
274{
275 struct qcom_cpufreq_drv *drv = platform_get_drvdata(pdev);
276 unsigned int cpu;
277
278 platform_device_unregister(cpufreq_dt_pdev);
279
280 for_each_possible_cpu(cpu) {
281 if (drv->opp_tables[cpu])
282 dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
283 if (drv->genpd_opp_tables[cpu])
284 dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
285 }
286
287 kfree(drv->opp_tables);
288 kfree(drv->genpd_opp_tables);
289 kfree(drv);
290
291 return 0;
292}
293
294static struct platform_driver qcom_cpufreq_driver = {
295 .probe = qcom_cpufreq_probe,
296 .remove = qcom_cpufreq_remove,
297 .driver = {
298 .name = "qcom-cpufreq-nvmem",
299 },
300};
301
302static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
303 { .compatible = "qcom,apq8096", .data = &match_data_kryo },
304 { .compatible = "qcom,msm8996", .data = &match_data_kryo },
305 { .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
306 {},
307};
308
309/*
310 * Since the driver depends on smem and nvmem drivers, which may
311 * return EPROBE_DEFER, all the real activity is done in the probe,
 312 * which may be deferred as well. The init here is only registering
313 * the driver and the platform device.
314 */
315static int __init qcom_cpufreq_init(void)
316{
317 struct device_node *np = of_find_node_by_path("/");
318 const struct of_device_id *match;
319 int ret;
320
321 if (!np)
322 return -ENODEV;
323
324 match = of_match_node(qcom_cpufreq_match_list, np);
325 of_node_put(np);
326 if (!match)
327 return -ENODEV;
328
329 ret = platform_driver_register(&qcom_cpufreq_driver);
330 if (unlikely(ret < 0))
331 return ret;
332
333 cpufreq_pdev = platform_device_register_data(NULL, "qcom-cpufreq-nvmem",
334 -1, match, sizeof(*match));
335 ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
336 if (0 == ret)
337 return 0;
338
339 platform_driver_unregister(&qcom_cpufreq_driver);
340 return ret;
341}
342module_init(qcom_cpufreq_init);
343
344static void __exit qcom_cpufreq_exit(void)
345{
346 platform_device_unregister(cpufreq_pdev);
347 platform_driver_unregister(&qcom_cpufreq_driver);
348}
349module_exit(qcom_cpufreq_exit);
350
351MODULE_DESCRIPTION("Qualcomm Technologies, Inc. CPUfreq driver");
352MODULE_LICENSE("GPL v2");
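
A usage sketch of the supported-hw mechanism the driver relies on: the versions bitmask derived from the fuse is handed to the OPP core, which then skips every operating-points-v2 entry whose opp-supported-hw value shares no bit with it. The example_* helper is illustrative, not part of the patch.

#include <linux/bits.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/pm_opp.h>

static struct opp_table *example_apply_speedbin(unsigned int cpu, u8 speedbin)
{
	struct device *cpu_dev = get_cpu_device(cpu);
	u32 versions = BIT(speedbin);	/* one bit per fused speed grade */

	if (!cpu_dev)
		return ERR_PTR(-ENODEV);

	/*
	 * The returned table must be released with
	 * dev_pm_opp_put_supported_hw() when the policy goes away.
	 */
	return dev_pm_opp_set_supported_hw(cpu_dev, &versions, 1);
}
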
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
new file mode 100644
index 000000000000..eca32e443716
--- /dev/null
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -0,0 +1,226 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Allwinner CPUFreq nvmem based driver
4 *
5 * The sun50i-cpufreq-nvmem driver reads the efuse value from the SoC to
6 * provide the OPP framework with required information.
7 *
8 * Copyright (C) 2019 Yangtao Li <tiny.windzz@gmail.com>
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/module.h>
14#include <linux/nvmem-consumer.h>
15#include <linux/of_device.h>
16#include <linux/platform_device.h>
17#include <linux/pm_opp.h>
18#include <linux/slab.h>
19
20#define MAX_NAME_LEN 7
21
22#define NVMEM_MASK 0x7
23#define NVMEM_SHIFT 5
24
25static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
26
27/**
28 * sun50i_cpufreq_get_efuse() - Parse and return efuse value present on SoC
29 * @versions: Set to the value parsed from efuse
30 *
31 * Returns 0 if success.
32 */
33static int sun50i_cpufreq_get_efuse(u32 *versions)
34{
35 struct nvmem_cell *speedbin_nvmem;
36 struct device_node *np;
37 struct device *cpu_dev;
38 u32 *speedbin, efuse_value;
39 size_t len;
40 int ret;
41
42 cpu_dev = get_cpu_device(0);
43 if (!cpu_dev)
44 return -ENODEV;
45
46 np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
47 if (!np)
48 return -ENOENT;
49
50 ret = of_device_is_compatible(np,
51 "allwinner,sun50i-h6-operating-points");
52 if (!ret) {
53 of_node_put(np);
54 return -ENOENT;
55 }
56
57 speedbin_nvmem = of_nvmem_cell_get(np, NULL);
58 of_node_put(np);
59 if (IS_ERR(speedbin_nvmem)) {
60 if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
61 pr_err("Could not get nvmem cell: %ld\n",
62 PTR_ERR(speedbin_nvmem));
63 return PTR_ERR(speedbin_nvmem);
64 }
65
66 speedbin = nvmem_cell_read(speedbin_nvmem, &len);
67 nvmem_cell_put(speedbin_nvmem);
68 if (IS_ERR(speedbin))
69 return PTR_ERR(speedbin);
70
71 efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
72 switch (efuse_value) {
73 case 0b0001:
74 *versions = 1;
75 break;
76 case 0b0011:
77 *versions = 2;
78 break;
79 default:
80 /*
81 * For other situations, we treat it as bin0.
82 * This vf table can be run for any good cpu.
83 */
84 *versions = 0;
85 break;
86 }
87
88 kfree(speedbin);
89 return 0;
90};
91
92static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
93{
94 struct opp_table **opp_tables;
95 char name[MAX_NAME_LEN];
96 unsigned int cpu;
97 u32 speed = 0;
98 int ret;
99
100 opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables),
101 GFP_KERNEL);
102 if (!opp_tables)
103 return -ENOMEM;
104
105 ret = sun50i_cpufreq_get_efuse(&speed);
106 if (ret)
107 return ret;
108
109 snprintf(name, MAX_NAME_LEN, "speed%d", speed);
110
111 for_each_possible_cpu(cpu) {
112 struct device *cpu_dev = get_cpu_device(cpu);
113
114 if (!cpu_dev) {
115 ret = -ENODEV;
116 goto free_opp;
117 }
118
119 opp_tables[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
120 if (IS_ERR(opp_tables[cpu])) {
121 ret = PTR_ERR(opp_tables[cpu]);
122 pr_err("Failed to set prop name\n");
123 goto free_opp;
124 }
125 }
126
127 cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
128 NULL, 0);
129 if (!IS_ERR(cpufreq_dt_pdev)) {
130 platform_set_drvdata(pdev, opp_tables);
131 return 0;
132 }
133
134 ret = PTR_ERR(cpufreq_dt_pdev);
135 pr_err("Failed to register platform device\n");
136
137free_opp:
138 for_each_possible_cpu(cpu) {
139 if (IS_ERR_OR_NULL(opp_tables[cpu]))
140 break;
141 dev_pm_opp_put_prop_name(opp_tables[cpu]);
142 }
143 kfree(opp_tables);
144
145 return ret;
146}
147
148static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
149{
150 struct opp_table **opp_tables = platform_get_drvdata(pdev);
151 unsigned int cpu;
152
153 platform_device_unregister(cpufreq_dt_pdev);
154
155 for_each_possible_cpu(cpu)
156 dev_pm_opp_put_prop_name(opp_tables[cpu]);
157
158 kfree(opp_tables);
159
160 return 0;
161}
162
163static struct platform_driver sun50i_cpufreq_driver = {
164 .probe = sun50i_cpufreq_nvmem_probe,
165 .remove = sun50i_cpufreq_nvmem_remove,
166 .driver = {
167 .name = "sun50i-cpufreq-nvmem",
168 },
169};
170
171static const struct of_device_id sun50i_cpufreq_match_list[] = {
172 { .compatible = "allwinner,sun50i-h6" },
173 {}
174};
175
176static const struct of_device_id *sun50i_cpufreq_match_node(void)
177{
178 const struct of_device_id *match;
179 struct device_node *np;
180
181 np = of_find_node_by_path("/");
182 match = of_match_node(sun50i_cpufreq_match_list, np);
183 of_node_put(np);
184
185 return match;
186}
187
188/*
189 * Since the driver depends on nvmem drivers, which may return EPROBE_DEFER,
 190 * all the real activity is done in the probe, which may be deferred as well.
191 * The init here is only registering the driver and the platform device.
192 */
193static int __init sun50i_cpufreq_init(void)
194{
195 const struct of_device_id *match;
196 int ret;
197
198 match = sun50i_cpufreq_match_node();
199 if (!match)
200 return -ENODEV;
201
202 ret = platform_driver_register(&sun50i_cpufreq_driver);
203 if (unlikely(ret < 0))
204 return ret;
205
206 sun50i_cpufreq_pdev =
207 platform_device_register_simple("sun50i-cpufreq-nvmem",
208 -1, NULL, 0);
209 ret = PTR_ERR_OR_ZERO(sun50i_cpufreq_pdev);
210 if (ret == 0)
211 return 0;
212
213 platform_driver_unregister(&sun50i_cpufreq_driver);
214 return ret;
215}
216module_init(sun50i_cpufreq_init);
217
218static void __exit sun50i_cpufreq_exit(void)
219{
220 platform_device_unregister(sun50i_cpufreq_pdev);
221 platform_driver_unregister(&sun50i_cpufreq_driver);
222}
223module_exit(sun50i_cpufreq_exit);
224
225MODULE_DESCRIPTION("Sun50i-h6 cpufreq driver");
226MODULE_LICENSE("GPL v2");
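
For context, a short sketch of the prop-name mechanism used here: selecting "speed1" makes the OPP core prefer properties suffixed "-speed1" (for instance opp-microvolt-speed1) over the unsuffixed ones while parsing the table. The example_* helper is illustrative.

#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

static struct opp_table *example_select_bin(unsigned int cpu, u32 bin)
{
	struct device *cpu_dev = get_cpu_device(cpu);
	char name[8];

	if (!cpu_dev)
		return ERR_PTR(-ENODEV);

	snprintf(name, sizeof(name), "speed%u", bin);
	/* Pair with dev_pm_opp_put_prop_name() on removal. */
	return dev_pm_opp_set_prop_name(cpu_dev, name);
}
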
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 2ad1ae17932d..aeaa883a8c9d 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -77,6 +77,7 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
77 case DRA7_EFUSE_HAS_ALL_MPU_OPP: 77 case DRA7_EFUSE_HAS_ALL_MPU_OPP:
78 case DRA7_EFUSE_HAS_HIGH_MPU_OPP: 78 case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
79 calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP; 79 calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
80 /* Fall through */
80 case DRA7_EFUSE_HAS_OD_MPU_OPP: 81 case DRA7_EFUSE_HAS_OD_MPU_OPP:
81 calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP; 82 calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP;
82 } 83 }
diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c
index 52fd5fca89a0..705c6200814b 100644
--- a/drivers/macintosh/windfarm_cpufreq_clamp.c
+++ b/drivers/macintosh/windfarm_cpufreq_clamp.c
@@ -3,9 +3,11 @@
3#include <linux/errno.h> 3#include <linux/errno.h>
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/delay.h> 5#include <linux/delay.h>
6#include <linux/pm_qos.h>
6#include <linux/slab.h> 7#include <linux/slab.h>
7#include <linux/init.h> 8#include <linux/init.h>
8#include <linux/wait.h> 9#include <linux/wait.h>
10#include <linux/cpu.h>
9#include <linux/cpufreq.h> 11#include <linux/cpufreq.h>
10 12
11#include <asm/prom.h> 13#include <asm/prom.h>
@@ -16,36 +18,24 @@
16 18
17static int clamped; 19static int clamped;
18static struct wf_control *clamp_control; 20static struct wf_control *clamp_control;
19 21static struct dev_pm_qos_request qos_req;
20static int clamp_notifier_call(struct notifier_block *self, 22static unsigned int min_freq, max_freq;
21 unsigned long event, void *data)
22{
23 struct cpufreq_policy *p = data;
24 unsigned long max_freq;
25
26 if (event != CPUFREQ_ADJUST)
27 return 0;
28
29 max_freq = clamped ? (p->cpuinfo.min_freq) : (p->cpuinfo.max_freq);
30 cpufreq_verify_within_limits(p, 0, max_freq);
31
32 return 0;
33}
34
35static struct notifier_block clamp_notifier = {
36 .notifier_call = clamp_notifier_call,
37};
38 23
39static int clamp_set(struct wf_control *ct, s32 value) 24static int clamp_set(struct wf_control *ct, s32 value)
40{ 25{
41 if (value) 26 unsigned int freq;
27
28 if (value) {
29 freq = min_freq;
42 printk(KERN_INFO "windfarm: Clamping CPU frequency to " 30 printk(KERN_INFO "windfarm: Clamping CPU frequency to "
43 "minimum !\n"); 31 "minimum !\n");
44 else 32 } else {
33 freq = max_freq;
45 printk(KERN_INFO "windfarm: CPU frequency unclamped !\n"); 34 printk(KERN_INFO "windfarm: CPU frequency unclamped !\n");
35 }
46 clamped = value; 36 clamped = value;
47 cpufreq_update_policy(0); 37
48 return 0; 38 return dev_pm_qos_update_request(&qos_req, freq);
49} 39}
50 40
51static int clamp_get(struct wf_control *ct, s32 *value) 41static int clamp_get(struct wf_control *ct, s32 *value)
@@ -74,27 +64,60 @@ static const struct wf_control_ops clamp_ops = {
74 64
75static int __init wf_cpufreq_clamp_init(void) 65static int __init wf_cpufreq_clamp_init(void)
76{ 66{
67 struct cpufreq_policy *policy;
77 struct wf_control *clamp; 68 struct wf_control *clamp;
69 struct device *dev;
70 int ret;
71
72 policy = cpufreq_cpu_get(0);
73 if (!policy) {
74 pr_warn("%s: cpufreq policy not found cpu0\n", __func__);
75 return -EPROBE_DEFER;
76 }
77
78 min_freq = policy->cpuinfo.min_freq;
79 max_freq = policy->cpuinfo.max_freq;
80 cpufreq_cpu_put(policy);
81
82 dev = get_cpu_device(0);
83 if (unlikely(!dev)) {
84 pr_warn("%s: No cpu device for cpu0\n", __func__);
85 return -ENODEV;
86 }
78 87
79 clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL); 88 clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
80 if (clamp == NULL) 89 if (clamp == NULL)
81 return -ENOMEM; 90 return -ENOMEM;
82 cpufreq_register_notifier(&clamp_notifier, CPUFREQ_POLICY_NOTIFIER); 91
92 ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY,
93 max_freq);
94 if (ret < 0) {
95 pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
96 ret);
97 goto free;
98 }
99
83 clamp->ops = &clamp_ops; 100 clamp->ops = &clamp_ops;
84 clamp->name = "cpufreq-clamp"; 101 clamp->name = "cpufreq-clamp";
85 if (wf_register_control(clamp)) 102 ret = wf_register_control(clamp);
103 if (ret)
86 goto fail; 104 goto fail;
87 clamp_control = clamp; 105 clamp_control = clamp;
88 return 0; 106 return 0;
89 fail: 107 fail:
108 dev_pm_qos_remove_request(&qos_req);
109
110 free:
90 kfree(clamp); 111 kfree(clamp);
91 return -ENODEV; 112 return ret;
92} 113}
93 114
94static void __exit wf_cpufreq_clamp_exit(void) 115static void __exit wf_cpufreq_clamp_exit(void)
95{ 116{
96 if (clamp_control) 117 if (clamp_control) {
97 wf_unregister_control(clamp_control); 118 wf_unregister_control(clamp_control);
119 dev_pm_qos_remove_request(&qos_req);
120 }
98} 121}
99 122
100 123
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index c094d5d20fd7..0ee8c0133d3e 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -401,6 +401,54 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
401} 401}
402EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); 402EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
403 403
404/**
405 * dev_pm_opp_find_level_exact() - search for an exact level
406 * @dev: device for which we do this operation
407 * @level: level to search for
408 *
409 * Return: Searches for exact match in the opp table and returns pointer to the
410 * matching opp if found, else returns ERR_PTR in case of error and should
411 * be handled using IS_ERR. Error return values can be:
412 * EINVAL: for bad pointer
413 * ERANGE: no match found for search
414 * ENODEV: if device not found in list of registered devices
415 *
416 * The callers are required to call dev_pm_opp_put() for the returned OPP after
417 * use.
418 */
419struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
420 unsigned int level)
421{
422 struct opp_table *opp_table;
423 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
424
425 opp_table = _find_opp_table(dev);
426 if (IS_ERR(opp_table)) {
427 int r = PTR_ERR(opp_table);
428
429 dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
430 return ERR_PTR(r);
431 }
432
433 mutex_lock(&opp_table->lock);
434
435 list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
436 if (temp_opp->level == level) {
437 opp = temp_opp;
438
439 /* Increment the reference count of OPP */
440 dev_pm_opp_get(opp);
441 break;
442 }
443 }
444
445 mutex_unlock(&opp_table->lock);
446 dev_pm_opp_put_opp_table(opp_table);
447
448 return opp;
449}
450EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
451
404static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, 452static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
405 unsigned long *freq) 453 unsigned long *freq)
406{ 454{
@@ -1771,6 +1819,7 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
1771 * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer 1819 * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
1772 * @dev: Consumer device for which the genpd is getting attached. 1820 * @dev: Consumer device for which the genpd is getting attached.
1773 * @names: Null terminated array of pointers containing names of genpd to attach. 1821 * @names: Null terminated array of pointers containing names of genpd to attach.
1822 * @virt_devs: Pointer to return the array of virtual devices.
1774 * 1823 *
1775 * Multiple generic power domains for a device are supported with the help of 1824 * Multiple generic power domains for a device are supported with the help of
1776 * virtual genpd devices, which are created for each consumer device - genpd 1825 * virtual genpd devices, which are created for each consumer device - genpd
@@ -1784,12 +1833,16 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
1784 * 1833 *
1785 * This helper needs to be called once with a list of all genpd to attach. 1834 * This helper needs to be called once with a list of all genpd to attach.
1786 * Otherwise the original device structure will be used instead by the OPP core. 1835 * Otherwise the original device structure will be used instead by the OPP core.
1836 *
1837 * The order of entries in the names array must match the order in which
1838 * "required-opps" are added in DT.
1787 */ 1839 */
1788struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names) 1840struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
1841 const char **names, struct device ***virt_devs)
1789{ 1842{
1790 struct opp_table *opp_table; 1843 struct opp_table *opp_table;
1791 struct device *virt_dev; 1844 struct device *virt_dev;
1792 int index, ret = -EINVAL; 1845 int index = 0, ret = -EINVAL;
1793 const char **name = names; 1846 const char **name = names;
1794 1847
1795 opp_table = dev_pm_opp_get_opp_table(dev); 1848 opp_table = dev_pm_opp_get_opp_table(dev);
@@ -1815,14 +1868,6 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names
1815 goto unlock; 1868 goto unlock;
1816 1869
1817 while (*name) { 1870 while (*name) {
1818 index = of_property_match_string(dev->of_node,
1819 "power-domain-names", *name);
1820 if (index < 0) {
1821 dev_err(dev, "Failed to find power domain: %s (%d)\n",
1822 *name, index);
1823 goto err;
1824 }
1825
1826 if (index >= opp_table->required_opp_count) { 1871 if (index >= opp_table->required_opp_count) {
1827 dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n", 1872 dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
1828 *name, opp_table->required_opp_count, index); 1873 *name, opp_table->required_opp_count, index);
@@ -1843,9 +1888,12 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names
1843 } 1888 }
1844 1889
1845 opp_table->genpd_virt_devs[index] = virt_dev; 1890 opp_table->genpd_virt_devs[index] = virt_dev;
1891 index++;
1846 name++; 1892 name++;
1847 } 1893 }
1848 1894
1895 if (virt_devs)
1896 *virt_devs = opp_table->genpd_virt_devs;
1849 mutex_unlock(&opp_table->genpd_virt_dev_lock); 1897 mutex_unlock(&opp_table->genpd_virt_dev_lock);
1850 1898
1851 return opp_table; 1899 return opp_table;
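
A usage sketch for the two API changes above, dev_pm_opp_find_level_exact() and the new virt_devs argument of dev_pm_opp_attach_genpd(); the example_* function, the level value and the "cpr" domain list are illustrative assumptions.

#include <linux/err.h>
#include <linux/pm_opp.h>

static int example_use_new_apis(struct device *dev)
{
	const char *genpd_names[] = { "cpr", NULL };
	struct device **virt_devs;
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	/* Exact-match lookup on an OPP's "opp-level" value. */
	opp = dev_pm_opp_find_level_exact(dev, 1);
	if (!IS_ERR(opp))
		dev_pm_opp_put(opp);

	/*
	 * On success virt_devs points at one virtual device per attached
	 * domain, in the same order as the names array and the DT
	 * "required-opps" entries.
	 */
	opp_table = dev_pm_opp_attach_genpd(dev, genpd_names, &virt_devs);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	dev_pm_opp_detach_genpd(opp_table);
	return 0;
}
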
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 4c5db59a619b..391f39776c6a 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/idr.h> 17#include <linux/idr.h>
18#include <linux/pm_opp.h> 18#include <linux/pm_opp.h>
19#include <linux/pm_qos.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/cpu.h> 21#include <linux/cpu.h>
21#include <linux/cpu_cooling.h> 22#include <linux/cpu_cooling.h>
@@ -66,8 +67,6 @@ struct time_in_idle {
66 * @last_load: load measured by the latest call to cpufreq_get_requested_power() 67 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
67 * @cpufreq_state: integer value representing the current state of cpufreq 68 * @cpufreq_state: integer value representing the current state of cpufreq
68 * cooling devices. 69 * cooling devices.
69 * @clipped_freq: integer value representing the absolute value of the clipped
70 * frequency.
71 * @max_level: maximum cooling level. One less than total number of valid 70 * @max_level: maximum cooling level. One less than total number of valid
72 * cpufreq frequencies. 71 * cpufreq frequencies.
73 * @freq_table: Freq table in descending order of frequencies 72 * @freq_table: Freq table in descending order of frequencies
@@ -84,12 +83,12 @@ struct cpufreq_cooling_device {
84 int id; 83 int id;
85 u32 last_load; 84 u32 last_load;
86 unsigned int cpufreq_state; 85 unsigned int cpufreq_state;
87 unsigned int clipped_freq;
88 unsigned int max_level; 86 unsigned int max_level;
89 struct freq_table *freq_table; /* In descending order */ 87 struct freq_table *freq_table; /* In descending order */
90 struct cpufreq_policy *policy; 88 struct cpufreq_policy *policy;
91 struct list_head node; 89 struct list_head node;
92 struct time_in_idle *idle_time; 90 struct time_in_idle *idle_time;
91 struct dev_pm_qos_request qos_req;
93}; 92};
94 93
95static DEFINE_IDA(cpufreq_ida); 94static DEFINE_IDA(cpufreq_ida);
@@ -119,59 +118,6 @@ static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
119} 118}
120 119
121/** 120/**
122 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
123 * @nb: struct notifier_block * with callback info.
124 * @event: value showing cpufreq event for which this function invoked.
125 * @data: callback-specific data
126 *
127 * Callback to hijack the notification on cpufreq policy transition.
128 * Every time there is a change in policy, we will intercept and
129 * update the cpufreq policy with thermal constraints.
130 *
131 * Return: 0 (success)
132 */
133static int cpufreq_thermal_notifier(struct notifier_block *nb,
134 unsigned long event, void *data)
135{
136 struct cpufreq_policy *policy = data;
137 unsigned long clipped_freq;
138 struct cpufreq_cooling_device *cpufreq_cdev;
139
140 if (event != CPUFREQ_ADJUST)
141 return NOTIFY_DONE;
142
143 mutex_lock(&cooling_list_lock);
144 list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
145 /*
146 * A new copy of the policy is sent to the notifier and can't
147 * compare that directly.
148 */
149 if (policy->cpu != cpufreq_cdev->policy->cpu)
150 continue;
151
152 /*
153 * policy->max is the maximum allowed frequency defined by user
154 * and clipped_freq is the maximum that thermal constraints
155 * allow.
156 *
157 * If clipped_freq is lower than policy->max, then we need to
158 * readjust policy->max.
159 *
160 * But, if clipped_freq is greater than policy->max, we don't
161 * need to do anything.
162 */
163 clipped_freq = cpufreq_cdev->clipped_freq;
164
165 if (policy->max > clipped_freq)
166 cpufreq_verify_within_limits(policy, 0, clipped_freq);
167 break;
168 }
169 mutex_unlock(&cooling_list_lock);
170
171 return NOTIFY_OK;
172}
173
174/**
175 * update_freq_table() - Update the freq table with power numbers 121 * update_freq_table() - Update the freq table with power numbers
176 * @cpufreq_cdev: the cpufreq cooling device in which to update the table 122 * @cpufreq_cdev: the cpufreq cooling device in which to update the table
177 * @capacitance: dynamic power coefficient for these cpus 123 * @capacitance: dynamic power coefficient for these cpus
@@ -374,7 +320,6 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
374 unsigned long state) 320 unsigned long state)
375{ 321{
376 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; 322 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
377 unsigned int clip_freq;
378 323
379 /* Request state should be less than max_level */ 324 /* Request state should be less than max_level */
380 if (WARN_ON(state > cpufreq_cdev->max_level)) 325 if (WARN_ON(state > cpufreq_cdev->max_level))
@@ -384,13 +329,10 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
384 if (cpufreq_cdev->cpufreq_state == state) 329 if (cpufreq_cdev->cpufreq_state == state)
385 return 0; 330 return 0;
386 331
387 clip_freq = cpufreq_cdev->freq_table[state].frequency;
388 cpufreq_cdev->cpufreq_state = state; 332 cpufreq_cdev->cpufreq_state = state;
389 cpufreq_cdev->clipped_freq = clip_freq;
390
391 cpufreq_update_policy(cpufreq_cdev->policy->cpu);
392 333
393 return 0; 334 return dev_pm_qos_update_request(&cpufreq_cdev->qos_req,
335 cpufreq_cdev->freq_table[state].frequency);
394} 336}
395 337
396/** 338/**
@@ -554,11 +496,6 @@ static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
554 .power2state = cpufreq_power2state, 496 .power2state = cpufreq_power2state,
555}; 497};
556 498
557/* Notifier for cpufreq policy change */
558static struct notifier_block thermal_cpufreq_notifier_block = {
559 .notifier_call = cpufreq_thermal_notifier,
560};
561
562static unsigned int find_next_max(struct cpufreq_frequency_table *table, 499static unsigned int find_next_max(struct cpufreq_frequency_table *table,
563 unsigned int prev_max) 500 unsigned int prev_max)
564{ 501{
@@ -596,9 +533,16 @@ __cpufreq_cooling_register(struct device_node *np,
596 struct cpufreq_cooling_device *cpufreq_cdev; 533 struct cpufreq_cooling_device *cpufreq_cdev;
597 char dev_name[THERMAL_NAME_LENGTH]; 534 char dev_name[THERMAL_NAME_LENGTH];
598 unsigned int freq, i, num_cpus; 535 unsigned int freq, i, num_cpus;
536 struct device *dev;
599 int ret; 537 int ret;
600 struct thermal_cooling_device_ops *cooling_ops; 538 struct thermal_cooling_device_ops *cooling_ops;
601 bool first; 539
540 dev = get_cpu_device(policy->cpu);
541 if (unlikely(!dev)) {
542 pr_warn("No cpu device for cpu %d\n", policy->cpu);
543 return ERR_PTR(-ENODEV);
544 }
545
602 546
603 if (IS_ERR_OR_NULL(policy)) { 547 if (IS_ERR_OR_NULL(policy)) {
604 pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy); 548 pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
@@ -671,25 +615,29 @@ __cpufreq_cooling_register(struct device_node *np,
671 cooling_ops = &cpufreq_cooling_ops; 615 cooling_ops = &cpufreq_cooling_ops;
672 } 616 }
673 617
618 ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req,
619 DEV_PM_QOS_MAX_FREQUENCY,
620 cpufreq_cdev->freq_table[0].frequency);
621 if (ret < 0) {
622 pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
623 ret);
624 cdev = ERR_PTR(ret);
625 goto remove_ida;
626 }
627
674 cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev, 628 cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
675 cooling_ops); 629 cooling_ops);
676 if (IS_ERR(cdev)) 630 if (IS_ERR(cdev))
677 goto remove_ida; 631 goto remove_qos_req;
678
679 cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
680 632
681 mutex_lock(&cooling_list_lock); 633 mutex_lock(&cooling_list_lock);
682 /* Register the notifier for first cpufreq cooling device */
683 first = list_empty(&cpufreq_cdev_list);
684 list_add(&cpufreq_cdev->node, &cpufreq_cdev_list); 634 list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
685 mutex_unlock(&cooling_list_lock); 635 mutex_unlock(&cooling_list_lock);
686 636
687 if (first)
688 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
689 CPUFREQ_POLICY_NOTIFIER);
690
691 return cdev; 637 return cdev;
692 638
639remove_qos_req:
640 dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
693remove_ida: 641remove_ida:
694 ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); 642 ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
695free_table: 643free_table:
@@ -777,7 +725,6 @@ EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
777void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 725void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
778{ 726{
779 struct cpufreq_cooling_device *cpufreq_cdev; 727 struct cpufreq_cooling_device *cpufreq_cdev;
780 bool last;
781 728
782 if (!cdev) 729 if (!cdev)
783 return; 730 return;
@@ -786,15 +733,10 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
786 733
787 mutex_lock(&cooling_list_lock); 734 mutex_lock(&cooling_list_lock);
788 list_del(&cpufreq_cdev->node); 735 list_del(&cpufreq_cdev->node);
789 /* Unregister the notifier for the last cpufreq cooling device */
790 last = list_empty(&cpufreq_cdev_list);
791 mutex_unlock(&cooling_list_lock); 736 mutex_unlock(&cooling_list_lock);
792 737
793 if (last)
794 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
795 CPUFREQ_POLICY_NOTIFIER);
796
797 thermal_cooling_device_unregister(cdev); 738 thermal_cooling_device_unregister(cdev);
739 dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
798 ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); 740 ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
799 kfree(cpufreq_cdev->idle_time); 741 kfree(cpufreq_cdev->idle_time);
800 kfree(cpufreq_cdev->freq_table); 742 kfree(cpufreq_cdev->freq_table);
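
In short, the cooling device now expresses a thermal state as a frequency cap through its QoS request: the table is kept in descending order, so state 0 means no throttling and each higher state lowers the cap. A minimal sketch, with the example_* name and table type chosen for illustration only:

#include <linux/cpufreq.h>
#include <linux/pm_qos.h>

static int example_set_cooling_state(struct dev_pm_qos_request *req,
				     const struct cpufreq_frequency_table *table,
				     unsigned long state)
{
	/* table[0] is the fastest entry; each higher state caps lower. */
	return dev_pm_qos_update_request(req, table[state].frequency);
}
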
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 4282cb117b92..f70c9f79622e 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1678,24 +1678,6 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
1678 } 1678 }
1679 return 0; 1679 return 0;
1680} 1680}
1681
1682static int
1683pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
1684{
1685 struct pxafb_info *fbi = TO_INF(nb, freq_policy);
1686 struct fb_var_screeninfo *var = &fbi->fb.var;
1687 struct cpufreq_policy *policy = data;
1688
1689 switch (val) {
1690 case CPUFREQ_ADJUST:
1691 pr_debug("min dma period: %d ps, "
1692 "new clock %d kHz\n", pxafb_display_dma_period(var),
1693 policy->max);
1694 /* TODO: fill in min/max values */
1695 break;
1696 }
1697 return 0;
1698}
1699#endif 1681#endif
1700 1682
1701#ifdef CONFIG_PM 1683#ifdef CONFIG_PM
@@ -2400,11 +2382,8 @@ static int pxafb_probe(struct platform_device *dev)
2400 2382
2401#ifdef CONFIG_CPU_FREQ 2383#ifdef CONFIG_CPU_FREQ
2402 fbi->freq_transition.notifier_call = pxafb_freq_transition; 2384 fbi->freq_transition.notifier_call = pxafb_freq_transition;
2403 fbi->freq_policy.notifier_call = pxafb_freq_policy;
2404 cpufreq_register_notifier(&fbi->freq_transition, 2385 cpufreq_register_notifier(&fbi->freq_transition,
2405 CPUFREQ_TRANSITION_NOTIFIER); 2386 CPUFREQ_TRANSITION_NOTIFIER);
2406 cpufreq_register_notifier(&fbi->freq_policy,
2407 CPUFREQ_POLICY_NOTIFIER);
2408#endif 2387#endif
2409 2388
2410 /* 2389 /*
diff --git a/drivers/video/fbdev/pxafb.h b/drivers/video/fbdev/pxafb.h
index b641289c8a99..86b1e9ab1a38 100644
--- a/drivers/video/fbdev/pxafb.h
+++ b/drivers/video/fbdev/pxafb.h
@@ -162,7 +162,6 @@ struct pxafb_info {
162 162
163#ifdef CONFIG_CPU_FREQ 163#ifdef CONFIG_CPU_FREQ
164 struct notifier_block freq_transition; 164 struct notifier_block freq_transition;
165 struct notifier_block freq_policy;
166#endif 165#endif
167 166
168 struct regulator *lcd_supply; 167 struct regulator *lcd_supply;
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index f7f8dee044b1..ae2bcfee338a 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1005,31 +1005,6 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val,
1005 } 1005 }
1006 return 0; 1006 return 0;
1007} 1007}
1008
1009static int
1010sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val,
1011 void *data)
1012{
1013 struct sa1100fb_info *fbi = TO_INF(nb, freq_policy);
1014 struct cpufreq_policy *policy = data;
1015
1016 switch (val) {
1017 case CPUFREQ_ADJUST:
1018 dev_dbg(fbi->dev, "min dma period: %d ps, "
1019 "new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
1020 policy->max);
1021 /* todo: fill in min/max values */
1022 break;
1023 case CPUFREQ_NOTIFY:
1024 do {} while(0);
1025 /* todo: panic if min/max values aren't fulfilled
1026 * [can't really happen unless there's a bug in the
1027 * CPU policy verififcation process *
1028 */
1029 break;
1030 }
1031 return 0;
1032}
1033#endif 1008#endif
1034 1009
1035#ifdef CONFIG_PM 1010#ifdef CONFIG_PM
@@ -1242,9 +1217,7 @@ static int sa1100fb_probe(struct platform_device *pdev)
1242 1217
1243#ifdef CONFIG_CPU_FREQ 1218#ifdef CONFIG_CPU_FREQ
1244 fbi->freq_transition.notifier_call = sa1100fb_freq_transition; 1219 fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
1245 fbi->freq_policy.notifier_call = sa1100fb_freq_policy;
1246 cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); 1220 cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER);
1247 cpufreq_register_notifier(&fbi->freq_policy, CPUFREQ_POLICY_NOTIFIER);
1248#endif 1221#endif
1249 1222
1250 /* This driver cannot be unloaded at the moment */ 1223 /* This driver cannot be unloaded at the moment */
diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h
index 7a1a9ca33cec..d0aa33b0b88a 100644
--- a/drivers/video/fbdev/sa1100fb.h
+++ b/drivers/video/fbdev/sa1100fb.h
@@ -64,7 +64,6 @@ struct sa1100fb_info {
64 64
65#ifdef CONFIG_CPU_FREQ 65#ifdef CONFIG_CPU_FREQ
66 struct notifier_block freq_transition; 66 struct notifier_block freq_transition;
67 struct notifier_block freq_policy;
68#endif 67#endif
69 68
70 const struct sa1100fb_mach_info *inf; 69 const struct sa1100fb_mach_info *inf;
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 1194a4c78d55..f936033cb9e6 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -4,6 +4,8 @@
4 4
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/cpu.h> 6#include <linux/cpu.h>
7#include <linux/cpufreq.h>
8#include <linux/pm_qos.h>
7#include <linux/thermal.h> 9#include <linux/thermal.h>
8#include <asm/acpi.h> 10#include <asm/acpi.h>
9 11
@@ -230,6 +232,8 @@ struct acpi_processor {
230 struct acpi_processor_limit limit; 232 struct acpi_processor_limit limit;
231 struct thermal_cooling_device *cdev; 233 struct thermal_cooling_device *cdev;
232 struct device *dev; /* Processor device. */ 234 struct device *dev; /* Processor device. */
235 struct dev_pm_qos_request perflib_req;
236 struct dev_pm_qos_request thermal_req;
233}; 237};
234 238
235struct acpi_processor_errata { 239struct acpi_processor_errata {
@@ -296,16 +300,22 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
296/* in processor_perflib.c */ 300/* in processor_perflib.c */
297 301
298#ifdef CONFIG_CPU_FREQ 302#ifdef CONFIG_CPU_FREQ
299void acpi_processor_ppc_init(void); 303extern bool acpi_processor_cpufreq_init;
300void acpi_processor_ppc_exit(void); 304void acpi_processor_ignore_ppc_init(void);
305void acpi_processor_ppc_init(int cpu);
306void acpi_processor_ppc_exit(int cpu);
301void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag); 307void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
302extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit); 308extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
303#else 309#else
304static inline void acpi_processor_ppc_init(void) 310static inline void acpi_processor_ignore_ppc_init(void)
305{ 311{
306 return; 312 return;
307} 313}
308static inline void acpi_processor_ppc_exit(void) 314static inline void acpi_processor_ppc_init(int cpu)
315{
316 return;
317}
318static inline void acpi_processor_ppc_exit(int cpu)
309{ 319{
310 return; 320 return;
311} 321}
@@ -421,14 +431,14 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
421int acpi_processor_get_limit_info(struct acpi_processor *pr); 431int acpi_processor_get_limit_info(struct acpi_processor *pr);
422extern const struct thermal_cooling_device_ops processor_cooling_ops; 432extern const struct thermal_cooling_device_ops processor_cooling_ops;
423#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ) 433#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
424void acpi_thermal_cpufreq_init(void); 434void acpi_thermal_cpufreq_init(int cpu);
425void acpi_thermal_cpufreq_exit(void); 435void acpi_thermal_cpufreq_exit(int cpu);
426#else 436#else
427static inline void acpi_thermal_cpufreq_init(void) 437static inline void acpi_thermal_cpufreq_init(int cpu)
428{ 438{
429 return; 439 return;
430} 440}
431static inline void acpi_thermal_cpufreq_exit(void) 441static inline void acpi_thermal_cpufreq_exit(int cpu)
432{ 442{
433 return; 443 return;
434} 444}
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 536a049d7ecc..c57e88e85c41 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -456,8 +456,8 @@ static inline void cpufreq_resume(void) {}
456#define CPUFREQ_POSTCHANGE (1) 456#define CPUFREQ_POSTCHANGE (1)
457 457
458/* Policy Notifiers */ 458/* Policy Notifiers */
459#define CPUFREQ_ADJUST (0) 459#define CPUFREQ_CREATE_POLICY (0)
460#define CPUFREQ_NOTIFY (1) 460#define CPUFREQ_REMOVE_POLICY (1)
461 461
462#ifdef CONFIG_CPU_FREQ 462#ifdef CONFIG_CPU_FREQ
463int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 463int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
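
With CPUFREQ_ADJUST and CPUFREQ_NOTIFY gone, a policy notifier can only observe creation and removal; anything that previously clamped frequencies from the notifier has to use dev_pm_qos requests instead. A sketch of a notifier under the new event set (example_* names and the messages are illustrative):

#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_policy_notifier(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_CREATE_POLICY)
		pr_info("policy created for cpu%u\n", policy->cpu);
	else if (event == CPUFREQ_REMOVE_POLICY)
		pr_info("policy removed for cpu%u\n", policy->cpu);

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_policy_notifier,
};

static int __init example_init(void)
{
	return cpufreq_register_notifier(&example_nb, CPUFREQ_POLICY_NOTIFIER);
}
module_init(example_init);
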
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index af5021f27cb7..b8197ab014f2 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -96,6 +96,8 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
96struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 96struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
97 unsigned long freq, 97 unsigned long freq,
98 bool available); 98 bool available);
99struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
100 unsigned int level);
99 101
100struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, 102struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
101 unsigned long *freq); 103 unsigned long *freq);
@@ -128,7 +130,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
128void dev_pm_opp_put_clkname(struct opp_table *opp_table); 130void dev_pm_opp_put_clkname(struct opp_table *opp_table);
129struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); 131struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
130void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); 132void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
131struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names); 133struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
132void dev_pm_opp_detach_genpd(struct opp_table *opp_table); 134void dev_pm_opp_detach_genpd(struct opp_table *opp_table);
133int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); 135int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
134int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); 136int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
@@ -200,6 +202,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
200 return ERR_PTR(-ENOTSUPP); 202 return ERR_PTR(-ENOTSUPP);
201} 203}
202 204
205static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
206 unsigned int level)
207{
208 return ERR_PTR(-ENOTSUPP);
209}
210
203static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, 211static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
204 unsigned long *freq) 212 unsigned long *freq)
205{ 213{
@@ -292,7 +300,7 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const
292 300
293static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} 301static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
294 302
295static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names) 303static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs)
296{ 304{
297 return ERR_PTR(-ENOTSUPP); 305 return ERR_PTR(-ENOTSUPP);
298} 306}
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 867b4bb6d4be..b03ca2f73713 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -117,6 +117,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
117 unsigned int next_freq) 117 unsigned int next_freq)
118{ 118{
119 struct cpufreq_policy *policy = sg_policy->policy; 119 struct cpufreq_policy *policy = sg_policy->policy;
120 int cpu;
120 121
121 if (!sugov_update_next_freq(sg_policy, time, next_freq)) 122 if (!sugov_update_next_freq(sg_policy, time, next_freq))
122 return; 123 return;
@@ -126,7 +127,11 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
126 return; 127 return;
127 128
128 policy->cur = next_freq; 129 policy->cur = next_freq;
129 trace_cpu_frequency(next_freq, smp_processor_id()); 130
131 if (trace_cpu_frequency_enabled()) {
132 for_each_cpu(cpu, policy->cpus)
133 trace_cpu_frequency(next_freq, cpu);
134 }
130} 135}
131 136
132static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time, 137static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,