author		Linus Torvalds <torvalds@linux-foundation.org>	2016-03-25 01:59:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-25 01:59:58 -0400
commit		3d66c6ba3f978fa88d62b83ad35e9adc31c8ea9e (patch)
tree		880f111338fee64205d84931cd0f4df59da14ba2
parent		8407ef4685895759f111190d091394ef974f52fb (diff)
parent		ee0140dc8ffc89bdc7b74a858089d5a75a654b4a (diff)
Merge tag 'pm+acpi-4.6-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull more power management and ACPI updates from Rafael Wysocki:
 "The second batch of power management and ACPI updates for v4.6.

  Included are fixups on top of the previous PM/ACPI pull request and
  other material that didn't make it into that one but should still go
  into 4.6.

  Among other things, there's a fix for an intel_pstate driver issue
  uncovered by recent cpufreq changes, a workaround for a boot hang on
  Skylake-H related to the handling of deep C-states by the platform,
  and a PCI/ACPI fix for the handling of IO port resources on non-x86
  architectures, plus some new device IDs and similar.

  Specifics:

   - Fix for an intel_pstate driver issue related to the handling of
     MSR updates uncovered by the recent cpufreq rework (Rafael
     Wysocki).

   - cpufreq core cleanups related to starting governors and frequency
     synchronization during resume from system suspend, and a locking
     fix for cpufreq_quick_get() (Rafael Wysocki, Richard Cochran).

   - acpi-cpufreq and powernv cpufreq driver updates (Jisheng Zhang,
     Michael Neuling, Richard Cochran, Shilpasri Bhat).

   - intel_idle driver update preventing some Skylake-H systems from
     hanging during initialization by disabling deep C-states
     mishandled by the platform in the problematic configurations (Len
     Brown).

   - Intel Xeon Phi Processor x200 support for intel_idle
     (Dasaratharaman Chandramouli).

   - cpuidle menu governor updates to make it always honor PM QoS
     latency constraints (and prevent C1 from being used as the
     fallback C-state on x86 when they are set below its exit latency),
     and to restore the previous behavior of falling back to C1 if the
     next timer event is far enough in the future, which was changed in
     4.4 and led to an energy consumption regression (Rik van Riel,
     Rafael Wysocki).

   - New device ID for a future AMD UART controller in the ACPI driver
     for AMD SoCs (Wang Hongcheng).

   - Rockchip rk3399 support for the rockchip-io-domain adaptive
     voltage scaling (AVS) driver (David Wu).

   - ACPI PCI resources management fix for the handling of IO space
     resources on architectures where the IO space is memory mapped
     (IA64 and ARM64), broken by the introduction of common ACPI
     resources parsing for PCI host bridges in 4.4 (Lorenzo Pieralisi).

   - Fix for the ACPI backend of the generic device properties API to
     make it parse non-device (data node only) children of an ACPI
     device correctly (Irina Tirdea).

   - Fixes for the handling of global suspend flags (introduced in 4.4)
     during hibernation and resume from it (Lukas Wunner).

   - Support for obtaining configuration information from Device Trees
     in the PM clocks framework (Jon Hunter).
   - ACPI _DSM helper code and devfreq framework cleanups (Colin Ian
     King, Geert Uytterhoeven)"

* tag 'pm+acpi-4.6-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (23 commits)
  PM / AVS: rockchip-io: add io selectors and supplies for rk3399
  intel_idle: Support for Intel Xeon Phi Processor x200 Product Family
  intel_idle: prevent SKL-H boot failure when C8+C9+C10 enabled
  ACPI / PM: Runtime resume devices when waking from hibernate
  PM / sleep: Clear pm_suspend_global_flags upon hibernate
  cpufreq: governor: Always schedule work on the CPU running update
  cpufreq: Always update current frequency before startig governor
  cpufreq: Introduce cpufreq_update_current_freq()
  cpufreq: Introduce cpufreq_start_governor()
  cpufreq: powernv: Add sysfs attributes to show throttle stats
  cpufreq: acpi-cpufreq: make Intel/AMD MSR access, io port access static
  PCI: ACPI: IA64: fix IO port generic range check
  ACPI / util: cast data to u64 before shifting to fix sign extension
  cpufreq: powernv: Define per_cpu chip pointer to optimize hot-path
  cpuidle: menu: Fall back to polling if next timer event is near
  cpufreq: acpi-cpufreq: Clean up hot plug notifier callback
  intel_pstate: Do not call wrmsrl_on_cpu() with disabled interrupts
  cpufreq: Make cpufreq_quick_get() safe to call
  ACPI / property: fix data node parsing in acpi_get_next_subnode()
  ACPI / APD: Add device HID for future AMD UART controller
  ...
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu             |  69
-rw-r--r--  Documentation/devicetree/bindings/power/rockchip-io-domain.txt |  11
-rw-r--r--  drivers/acpi/acpi_apd.c                                        |   1
-rw-r--r--  drivers/acpi/property.c                                        |   1
-rw-r--r--  drivers/acpi/resource.c                                        |  14
-rw-r--r--  drivers/acpi/sleep.c                                           |   1
-rw-r--r--  drivers/acpi/utils.c                                           |   2
-rw-r--r--  drivers/base/power/clock_ops.c                                 |  89
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c                                 |  18
-rw-r--r--  drivers/cpufreq/cpufreq.c                                      |  98
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c                             |   2
-rw-r--r--  drivers/cpufreq/intel_pstate.c                                 |  73
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c                              | 124
-rw-r--r--  drivers/cpuidle/governors/menu.c                               |  50
-rw-r--r--  drivers/devfreq/Kconfig                                        |   2
-rw-r--r--  drivers/idle/intel_idle.c                                      | 133
-rwxr-xr-x [-rw-r--r--]  drivers/power/avs/rockchip-io-domain.c            |  58
-rw-r--r--  include/linux/pm_clock.h                                       |   9
-rw-r--r--  kernel/power/hibernate.c                                       |   1
19 files changed, 593 insertions, 163 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index b683e8ee69ec..16501334b99f 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -271,3 +271,72 @@ Description: Parameters for the CPU cache attributes
 		- WriteBack: data is written only to the cache line and
 			     the modified cache line is written to main
 			     memory only when it is replaced
+
+What:		/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats
+		/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/turbo_stat
+		/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/sub_turbo_stat
+		/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/unthrottle
+		/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/powercap
+		/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/overtemp
+		/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/supply_fault
+		/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/overcurrent
+		/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/occ_reset
+Date:		March 2016
+Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+		Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description:	POWERNV CPUFreq driver's frequency throttle stats directory and
+		attributes
+
+		'cpuX/cpufreq/throttle_stats' directory contains the CPU frequency
+		throttle stat attributes for the chip. The throttle stats of a cpu
+		is common across all the cpus belonging to a chip. Below are the
+		throttle attributes exported in the 'throttle_stats' directory:
+
+		- turbo_stat : This file gives the total number of times the max
+		frequency is throttled to lower frequency in turbo (at and above
+		nominal frequency) range of frequencies.
+
+		- sub_turbo_stat : This file gives the total number of times the
+		max frequency is throttled to lower frequency in sub-turbo(below
+		nominal frequency) range of frequencies.
+
+		- unthrottle : This file gives the total number of times the max
+		frequency is unthrottled after being throttled.
+
+		- powercap : This file gives the total number of times the max
+		frequency is throttled due to 'Power Capping'.
+
+		- overtemp : This file gives the total number of times the max
+		frequency is throttled due to 'CPU Over Temperature'.
+
+		- supply_fault : This file gives the total number of times the
+		max frequency is throttled due to 'Power Supply Failure'.
+
+		- overcurrent : This file gives the total number of times the
+		max frequency is throttled due to 'Overcurrent'.
+
+		- occ_reset : This file gives the total number of times the max
+		frequency is throttled due to 'OCC Reset'.
+
+		The sysfs attributes representing different throttle reasons like
+		powercap, overtemp, supply_fault, overcurrent and occ_reset map to
+		the reasons provided by OCC firmware for throttling the frequency.
+
+What:		/sys/devices/system/cpu/cpufreq/policyX/throttle_stats
+		/sys/devices/system/cpu/cpufreq/policyX/throttle_stats/turbo_stat
+		/sys/devices/system/cpu/cpufreq/policyX/throttle_stats/sub_turbo_stat
+		/sys/devices/system/cpu/cpufreq/policyX/throttle_stats/unthrottle
+		/sys/devices/system/cpu/cpufreq/policyX/throttle_stats/powercap
+		/sys/devices/system/cpu/cpufreq/policyX/throttle_stats/overtemp
+		/sys/devices/system/cpu/cpufreq/policyX/throttle_stats/supply_fault
+		/sys/devices/system/cpu/cpufreq/policyX/throttle_stats/overcurrent
+		/sys/devices/system/cpu/cpufreq/policyX/throttle_stats/occ_reset
+Date:		March 2016
+Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+		Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description:	POWERNV CPUFreq driver's frequency throttle stats directory and
+		attributes
+
+		'policyX/throttle_stats' directory and all the attributes are same as
+		the /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats directory and
+		attributes which give the frequency throttle information of the chip.
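
These counters are plain decimal values, so no special tooling is needed to read them. As a quick illustration (not part of the patch; the path and CPU number are hypothetical), a minimal userspace reader in C might look like:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical path; any cpuX or policyX throttle_stats file reads the same way */
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/throttle_stats/turbo_stat", "r");
		unsigned int count;

		if (!f)
			return 1;
		if (fscanf(f, "%u", &count) == 1)
			printf("turbo throttle events: %u\n", count);
		fclose(f);
		return 0;
	}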
diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
index b8627e763dba..c84fb47265eb 100644
--- a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
+++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
@@ -35,6 +35,8 @@ Required properties:
35 - "rockchip,rk3288-io-voltage-domain" for rk3288 35 - "rockchip,rk3288-io-voltage-domain" for rk3288
36 - "rockchip,rk3368-io-voltage-domain" for rk3368 36 - "rockchip,rk3368-io-voltage-domain" for rk3368
37 - "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains 37 - "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains
38 - "rockchip,rk3399-io-voltage-domain" for rk3399
39 - "rockchip,rk3399-pmu-io-voltage-domain" for rk3399 pmu-domains
38- rockchip,grf: phandle to the syscon managing the "general register files" 40- rockchip,grf: phandle to the syscon managing the "general register files"
39 41
40 42
@@ -79,6 +81,15 @@ Possible supplies for rk3368 pmu-domains:
 - pmu-supply:  The supply connected to PMUIO_VDD.
 - vop-supply:  The supply connected to LCDC_VDD.
 
+Possible supplies for rk3399:
+- bt656-supply:  The supply connected to APIO2_VDD.
+- audio-supply:  The supply connected to APIO5_VDD.
+- sdmmc-supply:  The supply connected to SDMMC0_VDD.
+- gpio1830       The supply connected to APIO4_VDD.
+
+Possible supplies for rk3399 pmu-domains:
+- pmu1830-supply:The supply connected to PMUIO2_VDD.
+
 Example:
 
 	io-domains {
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d0aad06b3872..f245bf35bedb 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -145,6 +145,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
145 { "AMD0010", APD_ADDR(cz_i2c_desc) }, 145 { "AMD0010", APD_ADDR(cz_i2c_desc) },
146 { "AMDI0010", APD_ADDR(cz_i2c_desc) }, 146 { "AMDI0010", APD_ADDR(cz_i2c_desc) },
147 { "AMD0020", APD_ADDR(cz_uart_desc) }, 147 { "AMD0020", APD_ADDR(cz_uart_desc) },
148 { "AMDI0020", APD_ADDR(cz_uart_desc) },
148 { "AMD0030", }, 149 { "AMD0030", },
149#endif 150#endif
150#ifdef CONFIG_ARM64 151#ifdef CONFIG_ARM64
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 2aee41655ce9..f2fd3fee588a 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -816,6 +816,7 @@ struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
 		next = adev->node.next;
 		if (next == head) {
 			child = NULL;
+			adev = ACPI_COMPANION(dev);
 			goto nondev;
 		}
 		adev = list_entry(next, struct acpi_device, node);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index d02fd53042a5..56241eb341f4 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -27,8 +27,20 @@
 
 #ifdef CONFIG_X86
 #define valid_IRQ(i) (((i) != 0) && ((i) != 2))
+static inline bool acpi_iospace_resource_valid(struct resource *res)
+{
+	/* On X86 IO space is limited to the [0 - 64K] IO port range */
+	return res->end < 0x10003;
+}
 #else
 #define valid_IRQ(i) (true)
+/*
+ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
+ * addresses mapping IO space in CPU physical address space, IO space
+ * resources can be placed anywhere in the 64-bit physical address space.
+ */
+static inline bool
+acpi_iospace_resource_valid(struct resource *res) { return true; }
 #endif
 
 static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
@@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
 	if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
 		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
 
-	if (res->end >= 0x10003)
+	if (!acpi_iospace_resource_valid(res))
 		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
 
 	if (io_decode == ACPI_DECODE_16)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fbfcce3b5227..2a8b59644297 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -748,6 +748,7 @@ static int acpi_hibernation_enter(void)
 
 static void acpi_hibernation_leave(void)
 {
+	pm_set_resume_via_firmware();
 	/*
 	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
 	 * enable it here.
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index f12a72428aac..050673f0c0b3 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -692,7 +692,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
 		mask = obj->integer.value;
 	else if (obj->type == ACPI_TYPE_BUFFER)
 		for (i = 0; i < obj->buffer.length && i < 8; i++)
-			mask |= (((u8)obj->buffer.pointer[i]) << (i * 8));
+			mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
 	ACPI_FREE(obj);
 
 	/*
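
Why the one-character cast matters: C's integer promotions turn the u8 into a (signed) int before the shift, so for i = 3 the shifted byte can land in the sign bit and then sign-extend when the result is widened into the u64 mask, and for i >= 4 the shift width exceeds a 32-bit int entirely. A standalone sketch (plain C, not kernel code) of the effect:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t byte = 0x80;
		uint64_t wrong = 0, right = 0;

		wrong |= ((uint8_t)byte) << (3 * 8);  /* promoted to int; sign-extends on widening */
		right |= ((uint64_t)byte) << (3 * 8); /* widened first; stays 0x80000000 */

		printf("wrong = %llx, right = %llx\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		/* typically prints: wrong = ffffffff80000000, right = 80000000 */
		return 0;
	}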
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 272a52ebafc0..0e64a1b5e62a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -137,6 +137,62 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
 	return __pm_clk_add(dev, NULL, clk);
 }
 
+
+/**
+ * of_pm_clk_add_clks - Start using device clock(s) for power management.
+ * @dev: Device whose clock(s) is going to be used for power management.
+ *
+ * Add a series of clocks described in the 'clocks' device-tree node for
+ * a device to the list of clocks used for the power management of @dev.
+ * On success, returns the number of clocks added. Returns a negative
+ * error code if there are no clocks in the device node for the device
+ * or if adding a clock fails.
+ */
+int of_pm_clk_add_clks(struct device *dev)
+{
+	struct clk **clks;
+	unsigned int i, count;
+	int ret;
+
+	if (!dev || !dev->of_node)
+		return -EINVAL;
+
+	count = of_count_phandle_with_args(dev->of_node, "clocks",
+					   "#clock-cells");
+	if (count == 0)
+		return -ENODEV;
+
+	clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
+	if (!clks)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		clks[i] = of_clk_get(dev->of_node, i);
+		if (IS_ERR(clks[i])) {
+			ret = PTR_ERR(clks[i]);
+			goto error;
+		}
+
+		ret = pm_clk_add_clk(dev, clks[i]);
+		if (ret) {
+			clk_put(clks[i]);
+			goto error;
+		}
+	}
+
+	kfree(clks);
+
+	return i;
+
+error:
+	while (i--)
+		pm_clk_remove_clk(dev, clks[i]);
+
+	kfree(clks);
+
+	return ret;
+}
+
 /**
  * __pm_clk_remove - Destroy PM clock entry.
  * @ce: PM clock entry to destroy.
@@ -198,6 +254,39 @@ void pm_clk_remove(struct device *dev, const char *con_id)
 }
 
 /**
+ * pm_clk_remove_clk - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
+ * @clk: Clock pointer
+ *
+ * Remove the clock pointed to by @clk from the list of clocks used for
+ * the power management of @dev.
+ */
+void pm_clk_remove_clk(struct device *dev, struct clk *clk)
+{
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!psd || !clk)
+		return;
+
+	spin_lock_irq(&psd->lock);
+
+	list_for_each_entry(ce, &psd->clock_list, node) {
+		if (clk == ce->clk)
+			goto remove;
+	}
+
+	spin_unlock_irq(&psd->lock);
+	return;
+
+ remove:
+	list_del(&ce->node);
+	spin_unlock_irq(&psd->lock);
+
+	__pm_clk_remove(ce);
+}
+
+/**
  * pm_clk_init - Initialize a device's list of power management clocks.
  * @dev: Device to initialize the list of PM clocks for.
  *
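
A typical consumer of the new of_pm_clk_add_clks() helper would be a platform driver that hands all of its DT-described clocks to the PM clock framework in one call. A hypothetical probe sketch (the driver name and surrounding structure are illustrative, not from this patch):

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		ret = pm_clk_create(&pdev->dev);
		if (ret)
			return ret;

		/* pull every "clocks" phandle into the device's PM clock list */
		ret = of_pm_clk_add_clks(&pdev->dev);
		if (ret < 0) {
			pm_clk_destroy(&pdev->dev);
			return ret;
		}

		pm_runtime_enable(&pdev->dev);
		return 0;
	}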
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 59a7b380fbe2..fb5712141040 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -245,7 +245,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
 	}
 }
 
-u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
+static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
 {
 	u32 val, dummy;
 
@@ -253,7 +253,7 @@ u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
 	return val;
 }
 
-void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
+static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
 {
 	u32 lo, hi;
 
@@ -262,7 +262,7 @@ void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
 	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
 }
 
-u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
+static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
 {
 	u32 val, dummy;
 
@@ -270,12 +270,12 @@ u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
 	return val;
 }
 
-void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
+static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
 {
 	wrmsr(MSR_AMD_PERF_CTL, val, 0);
 }
 
-u32 cpu_freq_read_io(struct acpi_pct_register *reg)
+static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
 {
 	u32 val;
 
@@ -283,7 +283,7 @@ u32 cpu_freq_read_io(struct acpi_pct_register *reg)
 	return val;
 }
 
-void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
+static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
 {
 	acpi_os_write_port(reg->address, val, reg->bit_width);
 }
@@ -514,8 +514,10 @@ static int boost_notify(struct notifier_block *nb, unsigned long action,
 	 */
 
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
 		break;
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 4c7825856eab..b87596b591b3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -76,6 +76,7 @@ static inline bool has_target(void)
 /* internal prototypes */
 static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
+static int cpufreq_start_governor(struct cpufreq_policy *policy);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -964,10 +965,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
 	cpumask_set_cpu(cpu, policy->cpus);
 
 	if (has_target()) {
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
-		if (!ret)
-			ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-
+		ret = cpufreq_start_governor(policy);
 		if (ret)
 			pr_err("%s: Failed to start governor\n", __func__);
 	}
@@ -1308,10 +1306,7 @@ static void cpufreq_offline(unsigned int cpu)
 	/* Start governor again for active policy */
 	if (!policy_is_inactive(policy)) {
 		if (has_target()) {
-			ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
-			if (!ret)
-				ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-
+			ret = cpufreq_start_governor(policy);
 			if (ret)
 				pr_err("%s: Failed to start governor\n", __func__);
 		}
@@ -1401,9 +1396,17 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy;
 	unsigned int ret_freq = 0;
+	unsigned long flags;
 
-	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
-		return cpufreq_driver->get(cpu);
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
+
+	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
+		ret_freq = cpufreq_driver->get(cpu);
+		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+		return ret_freq;
+	}
+
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	policy = cpufreq_cpu_get(cpu);
 	if (policy) {
@@ -1484,6 +1487,24 @@ unsigned int cpufreq_get(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_get);
 
+static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
+{
+	unsigned int new_freq;
+
+	new_freq = cpufreq_driver->get(policy->cpu);
+	if (!new_freq)
+		return 0;
+
+	if (!policy->cur) {
+		pr_debug("cpufreq: Driver did not initialize current freq\n");
+		policy->cur = new_freq;
+	} else if (policy->cur != new_freq && has_target()) {
+		cpufreq_out_of_sync(policy, new_freq);
+	}
+
+	return new_freq;
+}
+
 static struct subsys_interface cpufreq_interface = {
 	.name		= "cpufreq",
 	.subsys		= &cpu_subsys,
@@ -1583,9 +1604,7 @@ void cpufreq_resume(void)
 				       policy);
 		} else {
 			down_write(&policy->rwsem);
-			ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
-			if (!ret)
-				cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+			ret = cpufreq_start_governor(policy);
 			up_write(&policy->rwsem);
 
 			if (ret)
@@ -1593,17 +1612,6 @@ void cpufreq_resume(void)
 				__func__, policy);
 		}
 	}
-
-	/*
-	 * schedule call cpufreq_update_policy() for first-online CPU, as that
-	 * wouldn't be hotplugged-out on suspend. It will verify that the
-	 * current freq is in sync with what we believe it to be.
-	 */
-	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
-	if (WARN_ON(!policy))
-		return;
-
-	schedule_work(&policy->update);
 }
 
 /**
@@ -1927,6 +1935,17 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 	return ret;
 }
 
+static int cpufreq_start_governor(struct cpufreq_policy *policy)
+{
+	int ret;
+
+	if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
+		cpufreq_update_current_freq(policy);
+
+	ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
+	return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+}
+
 int cpufreq_register_governor(struct cpufreq_governor *governor)
 {
 	int err;
@@ -2063,8 +2082,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 		return cpufreq_driver->setpolicy(new_policy);
 	}
 
-	if (new_policy->governor == policy->governor)
-		goto out;
+	if (new_policy->governor == policy->governor) {
+		pr_debug("cpufreq: governor limits update\n");
+		return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+	}
 
 	pr_debug("governor switch\n");
 
@@ -2092,10 +2113,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	policy->governor = new_policy->governor;
 	ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
 	if (!ret) {
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
-		if (!ret)
-			goto out;
-
+		ret = cpufreq_start_governor(policy);
+		if (!ret) {
+			pr_debug("cpufreq: governor change\n");
+			return 0;
+		}
 		cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 	}
 
@@ -2106,14 +2128,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 		if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
 			policy->governor = NULL;
 		else
-			cpufreq_governor(policy, CPUFREQ_GOV_START);
+			cpufreq_start_governor(policy);
 	}
 
 	return ret;
-
- out:
-	pr_debug("governor: change or update limits\n");
-	return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 
 /**
@@ -2144,19 +2162,11 @@ int cpufreq_update_policy(unsigned int cpu)
 	 * -> ask driver for current freq and notify governors about a change
 	 */
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
-		new_policy.cur = cpufreq_driver->get(cpu);
+		new_policy.cur = cpufreq_update_current_freq(policy);
 		if (WARN_ON(!new_policy.cur)) {
 			ret = -EIO;
 			goto unlock;
 		}
-
-		if (!policy->cur) {
-			pr_debug("Driver did not initialize current freq\n");
-			policy->cur = new_policy.cur;
-		} else {
-			if (policy->cur != new_policy.cur && has_target())
-				cpufreq_out_of_sync(policy, new_policy.cur);
-		}
 	}
 
 	ret = cpufreq_set_policy(policy, &new_policy);
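
The cpufreq_quick_get() change above is purely a lifetime fix: holding cpufreq_driver_lock for reading ensures the driver cannot be unregistered between the NULL check and the ->get() call. Callers are unchanged; a hedged sketch of typical use (illustrative, not from this patch):

	unsigned int khz = cpufreq_quick_get(0);

	if (khz)
		pr_info("cpu0 is running at %u kHz\n", khz);
	else
		pr_info("no driver callback, or cpu0 frequency unknown\n");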
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 1c25ef405616..10a5cfeae8c5 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -329,7 +329,7 @@ static void dbs_irq_work(struct irq_work *irq_work)
 	struct policy_dbs_info *policy_dbs;
 
 	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
-	schedule_work(&policy_dbs->work);
+	schedule_work_on(smp_processor_id(), &policy_dbs->work);
 }
 
 static void dbs_update_util_handler(struct update_util_data *data, u64 time,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cb5607495816..4b644526fd59 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -134,7 +134,7 @@ struct pstate_funcs {
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	int (*get_scaling)(void);
-	void (*set)(struct cpudata*, int pstate);
+	u64 (*get_val)(struct cpudata*, int pstate);
 	void (*get_vid)(struct cpudata *);
 	int32_t (*get_target_pstate)(struct cpudata *);
 };
@@ -565,7 +565,7 @@ static int atom_get_turbo_pstate(void)
 	return value & 0x7F;
 }
 
-static void atom_set_pstate(struct cpudata *cpudata, int pstate)
+static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 {
 	u64 val;
 	int32_t vid_fp;
@@ -585,9 +585,7 @@ static void atom_set_pstate(struct cpudata *cpudata, int pstate)
 	if (pstate > cpudata->pstate.max_pstate)
 		vid = cpudata->vid.turbo;
 
-	val |= vid;
-
-	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+	return val | vid;
 }
 
 static int silvermont_get_scaling(void)
@@ -711,7 +709,7 @@ static inline int core_get_scaling(void)
 	return 100000;
 }
 
-static void core_set_pstate(struct cpudata *cpudata, int pstate)
+static u64 core_get_val(struct cpudata *cpudata, int pstate)
 {
 	u64 val;
 
@@ -719,7 +717,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 	if (limits->no_turbo && !limits->turbo_disabled)
 		val |= (u64)1 << 32;
 
-	wrmsrl(MSR_IA32_PERF_CTL, val);
+	return val;
 }
 
 static int knl_get_turbo_pstate(void)
@@ -750,7 +748,7 @@ static struct cpu_defaults core_params = {
 		.get_min = core_get_min_pstate,
 		.get_turbo = core_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
-		.set = core_set_pstate,
+		.get_val = core_get_val,
 		.get_target_pstate = get_target_pstate_use_performance,
 	},
 };
@@ -769,7 +767,7 @@ static struct cpu_defaults silvermont_params = {
 		.get_max_physical = atom_get_max_pstate,
 		.get_min = atom_get_min_pstate,
 		.get_turbo = atom_get_turbo_pstate,
-		.set = atom_set_pstate,
+		.get_val = atom_get_val,
 		.get_scaling = silvermont_get_scaling,
 		.get_vid = atom_get_vid,
 		.get_target_pstate = get_target_pstate_use_cpu_load,
@@ -790,7 +788,7 @@ static struct cpu_defaults airmont_params = {
 		.get_max_physical = atom_get_max_pstate,
 		.get_min = atom_get_min_pstate,
 		.get_turbo = atom_get_turbo_pstate,
-		.set = atom_set_pstate,
+		.get_val = atom_get_val,
 		.get_scaling = airmont_get_scaling,
 		.get_vid = atom_get_vid,
 		.get_target_pstate = get_target_pstate_use_cpu_load,
@@ -812,7 +810,7 @@ static struct cpu_defaults knl_params = {
 		.get_min = core_get_min_pstate,
 		.get_turbo = knl_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
-		.set = core_set_pstate,
+		.get_val = core_get_val,
 		.get_target_pstate = get_target_pstate_use_performance,
 	},
 };
@@ -839,25 +837,24 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
-static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
+static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
 {
-	int max_perf, min_perf;
-
-	if (force) {
-		update_turbo_state();
-
-		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
-
-		pstate = clamp_t(int, pstate, min_perf, max_perf);
-
-		if (pstate == cpu->pstate.current_pstate)
-			return;
-	}
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
-
 	cpu->pstate.current_pstate = pstate;
+}
 
-	pstate_funcs.set(cpu, pstate);
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+	int pstate = cpu->pstate.min_pstate;
+
+	intel_pstate_record_pstate(cpu, pstate);
+	/*
+	 * Generally, there is no guarantee that this code will always run on
+	 * the CPU being updated, so force the register update to run on the
+	 * right CPU.
+	 */
+	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+		      pstate_funcs.get_val(cpu, pstate));
 }
 
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
@@ -870,7 +867,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
-	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
+
+	intel_pstate_set_min_pstate(cpu);
 }
 
 static inline void intel_pstate_calc_busy(struct cpudata *cpu)
@@ -997,6 +995,21 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
 }
 
+static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+{
+	int max_perf, min_perf;
+
+	update_turbo_state();
+
+	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+	pstate = clamp_t(int, pstate, min_perf, max_perf);
+	if (pstate == cpu->pstate.current_pstate)
+		return;
+
+	intel_pstate_record_pstate(cpu, pstate);
+	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+}
+
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
 	int from, target_pstate;
@@ -1006,7 +1019,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 
 	target_pstate = pstate_funcs.get_target_pstate(cpu);
 
-	intel_pstate_set_pstate(cpu, target_pstate, true);
+	intel_pstate_update_pstate(cpu, target_pstate);
 
 	sample = &cpu->sample;
 	trace_pstate_sample(fp_toint(sample->core_pct_busy),
@@ -1180,7 +1193,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 	if (hwp_active)
 		return;
 
-	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
+	intel_pstate_set_min_pstate(cpu);
 }
 
 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -1255,7 +1268,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 	pstate_funcs.get_min   = funcs->get_min;
 	pstate_funcs.get_turbo = funcs->get_turbo;
 	pstate_funcs.get_scaling = funcs->get_scaling;
-	pstate_funcs.set       = funcs->set;
+	pstate_funcs.get_val   = funcs->get_val;
 	pstate_funcs.get_vid   = funcs->get_vid;
 	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
 
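
The shape of the intel_pstate rework is easy to miss across the hunks above: the old ->set() callbacks both computed the PERF_CTL value and wrote the MSR, while the new ->get_val() callbacks only compute the value, leaving each call site to pick the write primitive that is safe in its context. Schematically (both lines appear in the diff above):

	/* hot path (intel_pstate_update_pstate): already runs on the target CPU */
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));

	/* init/stop path (intel_pstate_set_min_pstate): may run on any CPU,
	 * so route the register write to the right one */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));

This split is what lets the hot path avoid calling wrmsrl_on_cpu() with interrupts disabled, the problem named in the merge text.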
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 50bf12033bbc..39ac78c94be0 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -44,7 +44,6 @@
 
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
 static bool rebooting, throttled, occ_reset;
-static unsigned int *core_to_chip_map;
 
 static const char * const throttle_reason[] = {
 	"No throttling",
@@ -55,6 +54,16 @@ static const char * const throttle_reason[] = {
55 "OCC Reset" 54 "OCC Reset"
56}; 55};
57 56
57enum throttle_reason_type {
58 NO_THROTTLE = 0,
59 POWERCAP,
60 CPU_OVERTEMP,
61 POWER_SUPPLY_FAILURE,
62 OVERCURRENT,
63 OCC_RESET_THROTTLE,
64 OCC_MAX_REASON
65};
66
58static struct chip { 67static struct chip {
59 unsigned int id; 68 unsigned int id;
60 bool throttled; 69 bool throttled;
@@ -62,9 +71,13 @@ static struct chip {
 	u8 throttle_reason;
 	cpumask_t mask;
 	struct work_struct throttle;
+	int throttle_turbo;
+	int throttle_sub_turbo;
+	int reason[OCC_MAX_REASON];
 } *chips;
 
 static int nr_chips;
+static DEFINE_PER_CPU(struct chip *, chip_info);
 
 /*
  * Note: The set of pstates consists of contiguous integers, the
@@ -196,6 +209,42 @@ static struct freq_attr *powernv_cpu_freq_attr[] = {
 	NULL,
 };
 
+#define throttle_attr(name, member)					\
+static ssize_t name##_show(struct cpufreq_policy *policy, char *buf)	\
+{									\
+	struct chip *chip = per_cpu(chip_info, policy->cpu);		\
+									\
+	return sprintf(buf, "%u\n", chip->member);			\
+}									\
+									\
+static struct freq_attr throttle_attr_##name = __ATTR_RO(name)		\
+
+throttle_attr(unthrottle, reason[NO_THROTTLE]);
+throttle_attr(powercap, reason[POWERCAP]);
+throttle_attr(overtemp, reason[CPU_OVERTEMP]);
+throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
+throttle_attr(overcurrent, reason[OVERCURRENT]);
+throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
+throttle_attr(turbo_stat, throttle_turbo);
+throttle_attr(sub_turbo_stat, throttle_sub_turbo);
+
+static struct attribute *throttle_attrs[] = {
+	&throttle_attr_unthrottle.attr,
+	&throttle_attr_powercap.attr,
+	&throttle_attr_overtemp.attr,
+	&throttle_attr_supply_fault.attr,
+	&throttle_attr_overcurrent.attr,
+	&throttle_attr_occ_reset.attr,
+	&throttle_attr_turbo_stat.attr,
+	&throttle_attr_sub_turbo_stat.attr,
+	NULL,
+};
+
+static const struct attribute_group throttle_attr_grp = {
+	.name	= "throttle_stats",
+	.attrs	= throttle_attrs,
+};
+
 /* Helper routines */
 
 /* Access helpers to power mgt SPR */
@@ -324,34 +373,35 @@ static inline unsigned int get_nominal_index(void)
 
 static void powernv_cpufreq_throttle_check(void *data)
 {
+	struct chip *chip;
 	unsigned int cpu = smp_processor_id();
-	unsigned int chip_id = core_to_chip_map[cpu_core_index_of_thread(cpu)];
 	unsigned long pmsr;
-	int pmsr_pmax, i;
+	int pmsr_pmax;
 
 	pmsr = get_pmspr(SPRN_PMSR);
-
-	for (i = 0; i < nr_chips; i++)
-		if (chips[i].id == chip_id)
-			break;
+	chip = this_cpu_read(chip_info);
 
 	/* Check for Pmax Capping */
 	pmsr_pmax = (s8)PMSR_MAX(pmsr);
 	if (pmsr_pmax != powernv_pstate_info.max) {
-		if (chips[i].throttled)
+		if (chip->throttled)
 			goto next;
-		chips[i].throttled = true;
-		if (pmsr_pmax < powernv_pstate_info.nominal)
+		chip->throttled = true;
+		if (pmsr_pmax < powernv_pstate_info.nominal) {
 			pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
-				     cpu, chips[i].id, pmsr_pmax,
+				     cpu, chip->id, pmsr_pmax,
 				     powernv_pstate_info.nominal);
-		trace_powernv_throttle(chips[i].id,
-				       throttle_reason[chips[i].throttle_reason],
+			chip->throttle_sub_turbo++;
+		} else {
+			chip->throttle_turbo++;
+		}
+		trace_powernv_throttle(chip->id,
+				       throttle_reason[chip->throttle_reason],
 				       pmsr_pmax);
-	} else if (chips[i].throttled) {
-		chips[i].throttled = false;
-		trace_powernv_throttle(chips[i].id,
-				       throttle_reason[chips[i].throttle_reason],
+	} else if (chip->throttled) {
+		chip->throttled = false;
+		trace_powernv_throttle(chip->id,
+				       throttle_reason[chip->throttle_reason],
 				       pmsr_pmax);
 	}
 
@@ -411,6 +461,21 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	for (i = 0; i < threads_per_core; i++)
 		cpumask_set_cpu(base + i, policy->cpus);
 
+	if (!policy->driver_data) {
+		int ret;
+
+		ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
+		if (ret) {
+			pr_info("Failed to create throttle stats directory for cpu %d\n",
+				policy->cpu);
+			return ret;
+		}
+		/*
+		 * policy->driver_data is used as a flag for one-time
+		 * creation of throttle sysfs files.
+		 */
+		policy->driver_data = policy;
+	}
 	return cpufreq_table_validate_and_show(policy, powernv_freqs);
 }
 
@@ -517,8 +582,10 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
 			break;
 
 		if (omsg.throttle_status >= 0 &&
-		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
+		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
 			chips[i].throttle_reason = omsg.throttle_status;
+			chips[i].reason[omsg.throttle_status]++;
+		}
 
 		if (!omsg.throttle_status)
 			chips[i].restore = true;
@@ -558,47 +625,34 @@ static int init_chip_info(void)
 	unsigned int chip[256];
 	unsigned int cpu, i;
 	unsigned int prev_chip_id = UINT_MAX;
-	cpumask_t cpu_mask;
-	int ret = -ENOMEM;
-
-	core_to_chip_map = kcalloc(cpu_nr_cores(), sizeof(unsigned int),
-				   GFP_KERNEL);
-	if (!core_to_chip_map)
-		goto out;
 
-	cpumask_copy(&cpu_mask, cpu_possible_mask);
-	for_each_cpu(cpu, &cpu_mask) {
+	for_each_possible_cpu(cpu) {
 		unsigned int id = cpu_to_chip_id(cpu);
 
 		if (prev_chip_id != id) {
 			prev_chip_id = id;
 			chip[nr_chips++] = id;
 		}
-		core_to_chip_map[cpu_core_index_of_thread(cpu)] = id;
-		cpumask_andnot(&cpu_mask, &cpu_mask, cpu_sibling_mask(cpu));
 	}
 
 	chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
 	if (!chips)
-		goto free_chip_map;
+		return -ENOMEM;
 
 	for (i = 0; i < nr_chips; i++) {
 		chips[i].id = chip[i];
 		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
 		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+		for_each_cpu(cpu, &chips[i].mask)
+			per_cpu(chip_info, cpu) =  &chips[i];
 	}
 
 	return 0;
-free_chip_map:
-	kfree(core_to_chip_map);
-out:
-	return ret;
 }
 
 static inline void clean_chip_info(void)
 {
 	kfree(chips);
-	kfree(core_to_chip_map);
 }
 
 static inline void unregister_all_notifiers(void)
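
For readers tracing the sysfs plumbing, the throttle_attr() macro added above stamps out one show() routine plus one freq_attr per statistic. Expanded by hand (roughly, whitespace aside), throttle_attr(powercap, reason[POWERCAP]) becomes:

	static ssize_t powercap_show(struct cpufreq_policy *policy, char *buf)
	{
		struct chip *chip = per_cpu(chip_info, policy->cpu);

		return sprintf(buf, "%u\n", chip->reason[POWERCAP]);
	}

	static struct freq_attr throttle_attr_powercap = __ATTR_RO(powercap);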
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 27fc733cb5b9..03d38c291de6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -196,7 +196,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
  * of points is below a threshold. If it is... then use the
  * average of these 8 points as the estimated value.
  */
-static void get_typical_interval(struct menu_device *data)
+static unsigned int get_typical_interval(struct menu_device *data)
 {
 	int i, divisor;
 	unsigned int max, thresh, avg;
@@ -253,9 +253,7 @@ again:
 	if (likely(variance <= U64_MAX/36)) {
 		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
 							|| variance <= 400) {
-			if (data->next_timer_us > avg)
-				data->predicted_us = avg;
-			return;
+			return avg;
 		}
 	}
 
@@ -269,7 +267,7 @@ again:
 	 * with sporadic activity with a bunch of short pauses.
 	 */
 	if ((divisor * 4) <= INTERVALS * 3)
-		return;
+		return UINT_MAX;
 
 	thresh = max - 1;
 	goto again;
@@ -286,6 +284,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
 	int i;
 	unsigned int interactivity_req;
+	unsigned int expected_interval;
 	unsigned long nr_iowaiters, cpu_load;
 
 	if (data->needs_update) {
@@ -312,32 +311,43 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 				    data->correction_factor[data->bucket],
 				    RESOLUTION * DECAY);
 
-	get_typical_interval(data);
-
-	/*
-	 * Performance multiplier defines a minimum predicted idle
-	 * duration / latency ratio. Adjust the latency limit if
-	 * necessary.
-	 */
-	interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
-	if (latency_req > interactivity_req)
-		latency_req = interactivity_req;
+	expected_interval = get_typical_interval(data);
+	expected_interval = min(expected_interval, data->next_timer_us);
 
 	if (CPUIDLE_DRIVER_STATE_START > 0) {
-		data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
+		struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START];
+		unsigned int polling_threshold;
+
 		/*
 		 * We want to default to C1 (hlt), not to busy polling
-		 * unless the timer is happening really really soon.
+		 * unless the timer is happening really really soon, or
+		 * C1's exit latency exceeds the user configured limit.
 		 */
-		if (interactivity_req > 20 &&
-		    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
-			dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
+		polling_threshold = max_t(unsigned int, 20, s->target_residency);
+		if (data->next_timer_us > polling_threshold &&
+		    latency_req > s->exit_latency && !s->disabled &&
+		    !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
 			data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+		else
+			data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
 	} else {
 		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 	}
 
 	/*
+	 * Use the lowest expected idle interval to pick the idle state.
+	 */
+	data->predicted_us = min(data->predicted_us, expected_interval);
+
+	/*
+	 * Use the performance multiplier and the user-configurable
+	 * latency_req to determine the maximum exit latency.
+	 */
+	interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
+	if (latency_req > interactivity_req)
+		latency_req = interactivity_req;
+
+	/*
 	 * Find the idle state with the lowest power while satisfying
 	 * our constraints.
 	 */
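
A worked example of the new menu-governor arithmetic (the numbers are hypothetical): if get_typical_interval() sees no repeating pattern and returns UINT_MAX, the next timer fires in 30 us, and the correction factor had predicted 150 us, then expected_interval = min(UINT_MAX, 30) = 30 and predicted_us = min(150, 30) = 30, so the governor stops picking states deeper than the timer horizon justifies. A sketch of that clamping (plain C, not kernel code):

	#include <limits.h>

	static unsigned int clamp_prediction(unsigned int typical_us,
					     unsigned int next_timer_us,
					     unsigned int predicted_us)
	{
		unsigned int expected = typical_us < next_timer_us ?
					typical_us : next_timer_us;

		/* e.g. typical = UINT_MAX, next timer = 30, predicted = 150 -> 30 */
		return predicted_us < expected ? predicted_us : expected;
	}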
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 64281bb2f650..4de78c552251 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -61,7 +61,7 @@ config DEVFREQ_GOV_USERSPACE
 	  Sets the frequency at the user specified one.
 	  This governor returns the user configured frequency if there
 	  has been an input to /sys/devices/.../power/devfreq_set_freq.
-	  Otherwise, the governor does not change the frequnecy
+	  Otherwise, the governor does not change the frequency
 	  given at the initialization.
 
 comment "DEVFREQ Drivers"
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cd4510a63375..ba947df5a8c7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -65,7 +65,7 @@
 #include <asm/mwait.h>
 #include <asm/msr.h>
 
-#define INTEL_IDLE_VERSION "0.4"
+#define INTEL_IDLE_VERSION "0.4.1"
 #define PREFIX	"intel_idle: "
 
 static struct cpuidle_driver intel_idle_driver = {
@@ -716,6 +716,26 @@ static struct cpuidle_state avn_cstates[] = {
 	{
 		.enter = NULL }
 };
+static struct cpuidle_state knl_cstates[] = {
+	{
+		.name = "C1-KNL",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00),
+		.exit_latency = 1,
+		.target_residency = 2,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze },
+	{
+		.name = "C6-KNL",
+		.desc = "MWAIT 0x10",
+		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 120,
+		.target_residency = 500,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze },
+	{
+		.enter = NULL }
+};
 
 /**
  * intel_idle
@@ -890,6 +910,10 @@ static const struct idle_cpu idle_cpu_avn = {
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_knl = {
+	.state_table = knl_cstates,
+};
+
 #define ICPU(model, cpu) \
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
@@ -921,6 +945,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	ICPU(0x56, idle_cpu_bdw),
 	ICPU(0x4e, idle_cpu_skl),
 	ICPU(0x5e, idle_cpu_skl),
+	ICPU(0x57, idle_cpu_knl),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -994,36 +1019,92 @@ static void intel_idle_cpuidle_devices_uninit(void)
 }
 
 /*
- * intel_idle_state_table_update()
+ * ivt_idle_state_table_update(void)
  *
- * Update the default state_table for this CPU-id
- *
- * Currently used to access tuned IVT multi-socket targets
+ * Tune IVT multi-socket targets
  * Assumption: num_sockets == (max_package_num + 1)
  */
-void intel_idle_state_table_update(void)
+static void ivt_idle_state_table_update(void)
 {
 	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
-	if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
-		int cpu, package_num, num_sockets = 1;
-
-		for_each_online_cpu(cpu) {
-			package_num = topology_physical_package_id(cpu);
-			if (package_num + 1 > num_sockets) {
-				num_sockets = package_num + 1;
-
-				if (num_sockets > 4) {
-					cpuidle_state_table = ivt_cstates_8s;
-					return;
-				}
+	int cpu, package_num, num_sockets = 1;
+
+	for_each_online_cpu(cpu) {
+		package_num = topology_physical_package_id(cpu);
+		if (package_num + 1 > num_sockets) {
+			num_sockets = package_num + 1;
+
+			if (num_sockets > 4) {
+				cpuidle_state_table = ivt_cstates_8s;
+				return;
 			}
 		}
+	}
+
+	if (num_sockets > 2)
+		cpuidle_state_table = ivt_cstates_4s;
 
-		if (num_sockets > 2)
-			cpuidle_state_table = ivt_cstates_4s;
-		/* else, 1 and 2 socket systems use default ivt_cstates */
+	/* else, 1 and 2 socket systems use default ivt_cstates */
+}
+/*
+ * sklh_idle_state_table_update(void)
+ *
+ * On SKL-H (model 0x5e) disable C8 and C9 if:
+ * C10 is enabled and SGX disabled
+ */
+static void sklh_idle_state_table_update(void)
+{
+	unsigned long long msr;
+	unsigned int eax, ebx, ecx, edx;
+
+
+	/* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
+	if (max_cstate <= 7)
+		return;
+
+	/* if PC10 not present in CPUID.MWAIT.EDX */
+	if ((mwait_substates & (0xF << 28)) == 0)
+		return;
+
+	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
+
+	/* PC10 is not enabled in PKG C-state limit */
+	if ((msr & 0xF) != 8)
+		return;
+
+	ecx = 0;
+	cpuid(7, &eax, &ebx, &ecx, &edx);
+
+	/* if SGX is present */
+	if (ebx & (1 << 2)) {
+
+		rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+
+		/* if SGX is enabled */
+		if (msr & (1 << 18))
+			return;
+	}
+
+	skl_cstates[5].disabled = 1;	/* C8-SKL */
+	skl_cstates[6].disabled = 1;	/* C9-SKL */
+}
+/*
+ * intel_idle_state_table_update()
+ *
+ * Update the default state_table for this CPU-id
+ */
+
+static void intel_idle_state_table_update(void)
+{
+	switch (boot_cpu_data.x86_model) {
+
+	case 0x3e: /* IVT */
+		ivt_idle_state_table_update();
+		break;
+	case 0x5e: /* SKL-H */
+		sklh_idle_state_table_update();
+		break;
 	}
-	return;
 }
 
 /*
@@ -1063,6 +1144,14 @@ static int __init intel_idle_cpuidle_driver_init(void)
 		if (num_substates == 0)
 			continue;
 
+		/* if state marked as disabled, skip it */
+		if (cpuidle_state_table[cstate].disabled != 0) {
+			pr_debug(PREFIX "state %s is disabled",
+				cpuidle_state_table[cstate].name);
+			continue;
+		}
+
+
 		if (((mwait_cstate + 1) > 2) &&
 			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 			mark_tsc_unstable("TSC halts in idle"
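
The sklh_idle_state_table_update() hunk above gates the C8/C9 disabling on firmware-visible state: PC10 must be advertised in CPUID.MWAIT.EDX, permitted by the package C-state limit field of MSR_NHM_SNB_PKG_CST_CFG_CTL (0xe2), and SGX must be absent or disabled. A minimal user-space sketch for inspecting the same limit field, assuming the msr driver is loaded and using the decode from the patch (error handling trimmed for brevity):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t msr;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &msr, sizeof(msr), 0xe2) != sizeof(msr))
		return 1;

	/* the driver treats limit == 8 as "PC10 enabled" on SKL-H */
	printf("PKG C-state limit: %llu\n", (unsigned long long)(msr & 0xF));
	close(fd);
	return 0;
}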
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
old mode 100644
new mode 100755
index 80994566a1c8..8986382718dd
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -47,6 +47,10 @@
 #define RK3368_SOC_CON15_FLASH0		BIT(14)
 #define RK3368_SOC_FLASH_SUPPLY_NUM	2
 
+#define RK3399_PMUGRF_CON0		0x180
+#define RK3399_PMUGRF_CON0_VSEL		BIT(8)
+#define RK3399_PMUGRF_VSEL_SUPPLY_NUM	9
+
 struct rockchip_iodomain;
 
 /**
@@ -181,6 +185,25 @@ static void rk3368_iodomain_init(struct rockchip_iodomain *iod)
 		dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
 }
 
+static void rk3399_pmu_iodomain_init(struct rockchip_iodomain *iod)
+{
+	int ret;
+	u32 val;
+
+	/* if no pmu io supply we should leave things alone */
+	if (!iod->supplies[RK3399_PMUGRF_VSEL_SUPPLY_NUM].reg)
+		return;
+
+	/*
+	 * set pmu io iodomain to also use this framework
+	 * instead of a special gpio.
+	 */
+	val = RK3399_PMUGRF_CON0_VSEL | (RK3399_PMUGRF_CON0_VSEL << 16);
+	ret = regmap_write(iod->grf, RK3399_PMUGRF_CON0, val);
+	if (ret < 0)
+		dev_warn(iod->dev, "couldn't update pmu io iodomain ctrl\n");
+}
+
 /*
  * On the rk3188 the io-domains are handled by a shared register with the
  * lower 8 bits being still being continuing drive-strength settings.
@@ -252,6 +275,33 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = {
 	},
 };
 
+static const struct rockchip_iodomain_soc_data soc_data_rk3399 = {
+	.grf_offset = 0xe640,
+	.supply_names = {
+		"bt656",	/* APIO2_VDD */
+		"audio",	/* APIO5_VDD */
+		"sdmmc",	/* SDMMC0_VDD */
+		"gpio1830",	/* APIO4_VDD */
+	},
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = {
+	.grf_offset = 0x180,
+	.supply_names = {
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+		"pmu1830",	/* PMUIO2_VDD */
+	},
+	.init = rk3399_pmu_iodomain_init,
+};
+
 static const struct of_device_id rockchip_iodomain_match[] = {
 	{
 		.compatible = "rockchip,rk3188-io-voltage-domain",
@@ -269,6 +319,14 @@ static const struct of_device_id rockchip_iodomain_match[] = {
 		.compatible = "rockchip,rk3368-pmu-io-voltage-domain",
 		.data = (void *)&soc_data_rk3368_pmu
 	},
+	{
+		.compatible = "rockchip,rk3399-io-voltage-domain",
+		.data = (void *)&soc_data_rk3399
+	},
+	{
+		.compatible = "rockchip,rk3399-pmu-io-voltage-domain",
+		.data = (void *)&soc_data_rk3399_pmu
+	},
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, rockchip_iodomain_match);
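
rk3399_pmu_iodomain_init() writes RK3399_PMUGRF_CON0_VSEL both in the low half of the register and shifted into the high half, following the usual Rockchip GRF convention: bits 31:16 act as a write-enable mask for bits 15:0, so individual fields can be updated without a read-modify-write cycle. A hypothetical pair of helpers (not part of the patch) makes the idiom explicit:

/* set bits: value in the low half plus matching write-enable bits */
static inline u32 grf_bits_set(u32 bits)
{
	return bits | (bits << 16);
}

/* clear bits: write-enable high bits set, value bits left at zero */
static inline u32 grf_bits_clear(u32 bits)
{
	return bits << 16;
}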
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 25266c600021..308d6044f153 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -42,7 +42,9 @@ extern int pm_clk_create(struct device *dev);
 extern void pm_clk_destroy(struct device *dev);
 extern int pm_clk_add(struct device *dev, const char *con_id);
 extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
+extern int of_pm_clk_add_clks(struct device *dev);
 extern void pm_clk_remove(struct device *dev, const char *con_id);
+extern void pm_clk_remove_clk(struct device *dev, struct clk *clk);
 extern int pm_clk_suspend(struct device *dev);
 extern int pm_clk_resume(struct device *dev);
 #else
@@ -69,11 +71,18 @@ static inline int pm_clk_add_clk(struct device *dev, struct clk *clk)
 {
 	return -EINVAL;
 }
+static inline int of_pm_clk_add_clks(struct device *dev)
+{
+	return -EINVAL;
+}
 static inline void pm_clk_remove(struct device *dev, const char *con_id)
 {
 }
 #define pm_clk_suspend	NULL
 #define pm_clk_resume	NULL
+static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk)
+{
+}
 #endif
 
 #ifdef CONFIG_HAVE_CLK
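
A sketch of how a platform driver might consume the new DT-aware helper; foo_probe() and its driver are hypothetical, and of_pm_clk_add_clks() is assumed to return the number of clocks picked up from the device's "clocks" property or a negative errno:

#include <linux/platform_device.h>
#include <linux/pm_clock.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	/* populate the PM clock list from the DT "clocks" property */
	ret = of_pm_clk_add_clks(&pdev->dev);
	if (ret < 0) {
		pm_clk_destroy(&pdev->dev);
		return ret;
	}

	return 0;
}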
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index aa0f26b58426..fca9254280ee 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -339,6 +339,7 @@ int hibernation_snapshot(int platform_mode)
 	pm_message_t msg;
 	int error;
 
+	pm_suspend_clear_flags();
 	error = platform_begin(platform_mode);
 	if (error)
 		goto Close;
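
pm_suspend_clear_flags() is the existing helper from include/linux/suspend.h; shown here for reference, it resets the global flags word so that flags recorded during an earlier suspend-to-RAM cycle (such as PM_SUSPEND_FLAG_FW_SUSPEND) cannot leak into the hibernation path:

/* include/linux/suspend.h (existing definition, for reference) */
static inline void pm_suspend_clear_flags(void)
{
	pm_suspend_global_flags = 0;
}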