about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2017-11-12 19:34:14 -0500
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2017-11-12 19:34:14 -0500
commit622ade3a2ff0fc4c026f116ea6018eddaeb49925 (patch)
tree44d62d3929632a0f1a802decc6b1f1f680ce658f
parent4762573b934cced83b91950f0e7a9f160e3983e3 (diff)
parenta4c447533a18ee86e07232d6344ba12b1f9c5077 (diff)
Merge branch 'pm-cpuidle'
* pm-cpuidle:
  intel_idle: Graceful probe failure when MWAIT is disabled
  cpuidle: Avoid assignment in if () argument
  cpuidle: Clean up cpuidle_enable_device() error handling a bit
  cpuidle: ladder: Add per CPU PM QoS resume latency support
  ARM: cpuidle: Refactor rollback operations if init fails
  ARM: cpuidle: Correct driver unregistration if init fails
  intel_idle: replace conditionals with static_cpu_has(X86_FEATURE_ARAT)
  cpuidle: fix broadcast control when broadcast can not be entered

Conflicts:
	drivers/idle/intel_idle.c
-rw-r--r--drivers/cpuidle/cpuidle-arm.c153
-rw-r--r--drivers/cpuidle/cpuidle.c14
-rw-r--r--drivers/cpuidle/governors/ladder.c7
-rw-r--r--drivers/idle/intel_idle.c23
4 files changed, 122 insertions, 75 deletions
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 52a75053ee03..ddee1b601b89 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -72,12 +72,94 @@ static const struct of_device_id arm_idle_state_match[] __initconst = {
72}; 72};
73 73
74/* 74/*
75 * arm_idle_init 75 * arm_idle_init_cpu
76 * 76 *
77 * Registers the arm specific cpuidle driver with the cpuidle 77 * Registers the arm specific cpuidle driver with the cpuidle
78 * framework. It relies on core code to parse the idle states 78 * framework. It relies on core code to parse the idle states
79 * and initialize them using driver data structures accordingly. 79 * and initialize them using driver data structures accordingly.
80 */ 80 */
81static int __init arm_idle_init_cpu(int cpu)
82{
83 int ret;
84 struct cpuidle_driver *drv;
85 struct cpuidle_device *dev;
86
87 drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
88 if (!drv)
89 return -ENOMEM;
90
91 drv->cpumask = (struct cpumask *)cpumask_of(cpu);
92
93 /*
94 * Initialize idle states data, starting at index 1. This
95 * driver is DT only, if no DT idle states are detected (ret
96 * == 0) let the driver initialization fail accordingly since
97 * there is no reason to initialize the idle driver if only
98 * wfi is supported.
99 */
100 ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
101 if (ret <= 0) {
102 ret = ret ? : -ENODEV;
103 goto out_kfree_drv;
104 }
105
106 ret = cpuidle_register_driver(drv);
107 if (ret) {
108 pr_err("Failed to register cpuidle driver\n");
109 goto out_kfree_drv;
110 }
111
112 /*
113 * Call arch CPU operations in order to initialize
114 * idle states suspend back-end specific data
115 */
116 ret = arm_cpuidle_init(cpu);
117
118 /*
119 * Skip the cpuidle device initialization if the reported
120 * failure is a HW misconfiguration/breakage (-ENXIO).
121 */
122 if (ret == -ENXIO)
123 return 0;
124
125 if (ret) {
126 pr_err("CPU %d failed to init idle CPU ops\n", cpu);
127 goto out_unregister_drv;
128 }
129
130 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
131 if (!dev) {
132 pr_err("Failed to allocate cpuidle device\n");
133 ret = -ENOMEM;
134 goto out_unregister_drv;
135 }
136 dev->cpu = cpu;
137
138 ret = cpuidle_register_device(dev);
139 if (ret) {
140 pr_err("Failed to register cpuidle device for CPU %d\n",
141 cpu);
142 goto out_kfree_dev;
143 }
144
145 return 0;
146
147out_kfree_dev:
148 kfree(dev);
149out_unregister_drv:
150 cpuidle_unregister_driver(drv);
151out_kfree_drv:
152 kfree(drv);
153 return ret;
154}
155
156/*
157 * arm_idle_init - Initializes arm cpuidle driver
158 *
159 * Initializes the arm cpuidle driver for all CPUs; if any CPU fails
160 * to register the cpuidle driver, roll back and cancel every CPU's
161 * registration.
162 */
81static int __init arm_idle_init(void) 163static int __init arm_idle_init(void)
82{ 164{
83 int cpu, ret; 165 int cpu, ret;
@@ -85,79 +167,20 @@ static int __init arm_idle_init(void)
85 struct cpuidle_device *dev; 167 struct cpuidle_device *dev;
86 168
87 for_each_possible_cpu(cpu) { 169 for_each_possible_cpu(cpu) {
88 170 ret = arm_idle_init_cpu(cpu);
89 drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL); 171 if (ret)
90 if (!drv) {
91 ret = -ENOMEM;
92 goto out_fail;
93 }
94
95 drv->cpumask = (struct cpumask *)cpumask_of(cpu);
96
97 /*
98 * Initialize idle states data, starting at index 1. This
99 * driver is DT only, if no DT idle states are detected (ret
100 * == 0) let the driver initialization fail accordingly since
101 * there is no reason to initialize the idle driver if only
102 * wfi is supported.
103 */
104 ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
105 if (ret <= 0) {
106 ret = ret ? : -ENODEV;
107 goto init_fail;
108 }
109
110 ret = cpuidle_register_driver(drv);
111 if (ret) {
112 pr_err("Failed to register cpuidle driver\n");
113 goto init_fail;
114 }
115
116 /*
117 * Call arch CPU operations in order to initialize
118 * idle states suspend back-end specific data
119 */
120 ret = arm_cpuidle_init(cpu);
121
122 /*
123 * Skip the cpuidle device initialization if the reported
124 * failure is a HW misconfiguration/breakage (-ENXIO).
125 */
126 if (ret == -ENXIO)
127 continue;
128
129 if (ret) {
130 pr_err("CPU %d failed to init idle CPU ops\n", cpu);
131 goto out_fail;
132 }
133
134 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
135 if (!dev) {
136 pr_err("Failed to allocate cpuidle device\n");
137 ret = -ENOMEM;
138 goto out_fail; 172 goto out_fail;
139 }
140 dev->cpu = cpu;
141
142 ret = cpuidle_register_device(dev);
143 if (ret) {
144 pr_err("Failed to register cpuidle device for CPU %d\n",
145 cpu);
146 kfree(dev);
147 goto out_fail;
148 }
149 } 173 }
150 174
151 return 0; 175 return 0;
152init_fail: 176
153 kfree(drv);
154out_fail: 177out_fail:
155 while (--cpu >= 0) { 178 while (--cpu >= 0) {
156 dev = per_cpu(cpuidle_devices, cpu); 179 dev = per_cpu(cpuidle_devices, cpu);
180 drv = cpuidle_get_cpu_driver(dev);
157 cpuidle_unregister_device(dev); 181 cpuidle_unregister_device(dev);
158 kfree(dev);
159 drv = cpuidle_get_driver();
160 cpuidle_unregister_driver(drv); 182 cpuidle_unregister_driver(drv);
183 kfree(dev);
161 kfree(drv); 184 kfree(drv);
162 } 185 }
163 186
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 484cc8909d5c..68a16827f45f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -208,6 +208,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
208 return -EBUSY; 208 return -EBUSY;
209 } 209 }
210 target_state = &drv->states[index]; 210 target_state = &drv->states[index];
211 broadcast = false;
211 } 212 }
212 213
213 /* Take note of the planned idle state. */ 214 /* Take note of the planned idle state. */
@@ -387,9 +388,12 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
387 if (dev->enabled) 388 if (dev->enabled)
388 return 0; 389 return 0;
389 390
391 if (!cpuidle_curr_governor)
392 return -EIO;
393
390 drv = cpuidle_get_cpu_driver(dev); 394 drv = cpuidle_get_cpu_driver(dev);
391 395
392 if (!drv || !cpuidle_curr_governor) 396 if (!drv)
393 return -EIO; 397 return -EIO;
394 398
395 if (!dev->registered) 399 if (!dev->registered)
@@ -399,9 +403,11 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
399 if (ret) 403 if (ret)
400 return ret; 404 return ret;
401 405
402 if (cpuidle_curr_governor->enable && 406 if (cpuidle_curr_governor->enable) {
403 (ret = cpuidle_curr_governor->enable(drv, dev))) 407 ret = cpuidle_curr_governor->enable(drv, dev);
404 goto fail_sysfs; 408 if (ret)
409 goto fail_sysfs;
410 }
405 411
406 smp_wmb(); 412 smp_wmb();
407 413
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ce1a2ffffb2a..1ad8745fd6d6 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -17,6 +17,7 @@
17#include <linux/pm_qos.h> 17#include <linux/pm_qos.h>
18#include <linux/jiffies.h> 18#include <linux/jiffies.h>
19#include <linux/tick.h> 19#include <linux/tick.h>
20#include <linux/cpu.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
22#include <linux/uaccess.h> 23#include <linux/uaccess.h>
@@ -67,10 +68,16 @@ static int ladder_select_state(struct cpuidle_driver *drv,
67 struct cpuidle_device *dev) 68 struct cpuidle_device *dev)
68{ 69{
69 struct ladder_device *ldev = this_cpu_ptr(&ladder_devices); 70 struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
71 struct device *device = get_cpu_device(dev->cpu);
70 struct ladder_device_state *last_state; 72 struct ladder_device_state *last_state;
71 int last_residency, last_idx = ldev->last_state_idx; 73 int last_residency, last_idx = ldev->last_state_idx;
72 int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0; 74 int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
73 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); 75 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
76 int resume_latency = dev_pm_qos_raw_read_value(device);
77
78 if (resume_latency < latency_req &&
79 resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
80 latency_req = resume_latency;
74 81
75 /* Special case when user has set very strict latency requirement */ 82 /* Special case when user has set very strict latency requirement */
76 if (unlikely(latency_req == 0)) { 83 if (unlikely(latency_req == 0)) {
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f0b06b14e782..b2ccce5fb071 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -913,10 +913,9 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
913 struct cpuidle_state *state = &drv->states[index]; 913 struct cpuidle_state *state = &drv->states[index];
914 unsigned long eax = flg2MWAIT(state->flags); 914 unsigned long eax = flg2MWAIT(state->flags);
915 unsigned int cstate; 915 unsigned int cstate;
916 bool uninitialized_var(tick);
916 int cpu = smp_processor_id(); 917 int cpu = smp_processor_id();
917 918
918 cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
919
920 /* 919 /*
921 * leave_mm() to avoid costly and often unnecessary wakeups 920 * leave_mm() to avoid costly and often unnecessary wakeups
922 * for flushing the user TLB's associated with the active mm. 921 * for flushing the user TLB's associated with the active mm.
@@ -924,12 +923,19 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
924 if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) 923 if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
925 leave_mm(cpu); 924 leave_mm(cpu);
926 925
927 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 926 if (!static_cpu_has(X86_FEATURE_ARAT)) {
928 tick_broadcast_enter(); 927 cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) &
928 MWAIT_CSTATE_MASK) + 1;
929 tick = false;
930 if (!(lapic_timer_reliable_states & (1 << (cstate)))) {
931 tick = true;
932 tick_broadcast_enter();
933 }
934 }
929 935
930 mwait_idle_with_hints(eax, ecx); 936 mwait_idle_with_hints(eax, ecx);
931 937
932 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 938 if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
933 tick_broadcast_exit(); 939 tick_broadcast_exit();
934 940
935 return index; 941 return index;
@@ -1061,7 +1067,7 @@ static const struct idle_cpu idle_cpu_dnv = {
1061}; 1067};
1062 1068
1063#define ICPU(model, cpu) \ 1069#define ICPU(model, cpu) \
1064 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } 1070 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
1065 1071
1066static const struct x86_cpu_id intel_idle_ids[] __initconst = { 1072static const struct x86_cpu_id intel_idle_ids[] __initconst = {
1067 ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem), 1073 ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
@@ -1125,6 +1131,11 @@ static int __init intel_idle_probe(void)
1125 return -ENODEV; 1131 return -ENODEV;
1126 } 1132 }
1127 1133
1134 if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
1135 pr_debug("Please enable MWAIT in BIOS SETUP\n");
1136 return -ENODEV;
1137 }
1138
1128 if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) 1139 if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
1129 return -ENODEV; 1140 return -ENODEV;
1130 1141