author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-06-18 19:17:44 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-06-18 19:17:44 -0400
commit	4a3004e5e651baf15f0452da9200dd5c3bda5907 (patch)
tree	43c7b1026dee3659def0e70ddaa214587bd8d4b6
parent	e193cd15ae98817ad82cc8bad61a200ac561e98c (diff)
parent	7d51d97925e6cbfa2f7f14e3e3aa363b35ee5c24 (diff)
Merge branch 'pm-cpuidle'
* pm-cpuidle:
  cpuidle: Do not use CPUIDLE_DRIVER_STATE_START in cpuidle.c
  cpuidle: Select a different state on tick_broadcast_enter() failures
  sched / idle: Call default_idle_call() from cpuidle_enter_state()
  sched / idle: Call idle_set_state() from cpuidle_enter_state()
  cpuidle: Fix the kerneldoc comment for cpuidle_enter_state()
  sched / idle: Eliminate the "reflect" check from cpuidle_idle_call()
  cpuidle: Check the sign of index in cpuidle_reflect()
  sched / idle: Move the default idle call code to a separate function
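The headline change in this series is the tick_broadcast_enter() failure path: instead of bailing out with -EBUSY, cpuidle_enter_state() now asks find_deepest_state() for a replacement state that is no slower than the one originally chosen and does not carry CPUIDLE_FLAG_TIMER_STOP. The following standalone C sketch models that selection loop; it is not kernel code, and struct state, FLAG_TIMER_STOP, and find_deepest are simplified stand-ins.

/*
 * Standalone sketch of the fallback selection added to
 * cpuidle_enter_state(): a model of the find_deepest_state()
 * filtering with simplified stand-in types, not kernel code.
 */
#include <stdio.h>

#define FLAG_TIMER_STOP 0x1	/* stand-in for CPUIDLE_FLAG_TIMER_STOP */

struct state {
	unsigned int exit_latency;
	unsigned int flags;
};

/* Deepest usable state: latency capped, forbidden flags filtered out. */
static int find_deepest(const struct state *states, int count,
			unsigned int max_latency, unsigned int forbidden_flags)
{
	unsigned int latency_req = 0;
	int i, ret = -1;	/* the kernel returns -ENXIO here */

	for (i = 0; i < count; i++) {
		const struct state *s = &states[i];

		if (s->exit_latency <= latency_req
		    || s->exit_latency > max_latency
		    || (s->flags & forbidden_flags))
			continue;

		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}

int main(void)
{
	struct state states[] = {
		{ .exit_latency = 1,   .flags = 0 },
		{ .exit_latency = 50,  .flags = 0 },
		{ .exit_latency = 200, .flags = FLAG_TIMER_STOP },
	};

	/*
	 * The governor picked state 2, but the broadcast timer is not
	 * available, so retry with TIMER_STOP states forbidden.
	 */
	int fallback = find_deepest(states, 3, states[2].exit_latency,
				    FLAG_TIMER_STOP);

	printf("fallback state: %d\n", fallback);	/* prints 1 */
	return 0;
}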
-rw-r--r--	drivers/cpuidle/cpuidle.c	38
-rw-r--r--	drivers/cpuidle/governors/menu.c	4
-rw-r--r--	include/linux/cpuidle.h	4
-rw-r--r--	kernel/sched/idle.c	114
4 files changed, 93 insertions, 67 deletions
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 61c417b9e53f..7f1b8f507a56 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -65,7 +65,7 @@ int cpuidle_play_dead(void)
 		return -ENODEV;
 
 	/* Find lowest-power state that supports long-term idle */
-	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
+	for (i = drv->state_count - 1; i >= 0; i--)
 		if (drv->states[i].enter_dead)
 			return drv->states[i].enter_dead(dev, i);
 
@@ -73,16 +73,21 @@ int cpuidle_play_dead(void)
 }
 
 static int find_deepest_state(struct cpuidle_driver *drv,
-			      struct cpuidle_device *dev, bool freeze)
+			      struct cpuidle_device *dev,
+			      unsigned int max_latency,
+			      unsigned int forbidden_flags,
+			      bool freeze)
 {
 	unsigned int latency_req = 0;
-	int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
+	int i, ret = -ENXIO;
 
-	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+	for (i = 0; i < drv->state_count; i++) {
 		struct cpuidle_state *s = &drv->states[i];
 		struct cpuidle_state_usage *su = &dev->states_usage[i];
 
 		if (s->disabled || su->disable || s->exit_latency <= latency_req
+		    || s->exit_latency > max_latency
+		    || (s->flags & forbidden_flags)
 		    || (freeze && !s->enter_freeze))
 			continue;
 
@@ -100,7 +105,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
 int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 			       struct cpuidle_device *dev)
 {
-	return find_deepest_state(drv, dev, false);
+	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
 }
 
 static void enter_freeze_proper(struct cpuidle_driver *drv,
@@ -139,7 +144,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * that interrupts won't be enabled when it exits and allows the tick to
 	 * be frozen safely.
 	 */
-	index = find_deepest_state(drv, dev, true);
+	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
 	if (index >= 0)
 		enter_freeze_proper(drv, dev, index);
 
@@ -150,7 +155,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
  * cpuidle_enter_state - enter the state and update stats
  * @dev: cpuidle device for this cpu
  * @drv: cpuidle driver for this cpu
- * @next_state: index into drv->states of the state to enter
+ * @index: index into the states table in @drv of the state to enter
  */
 int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 			int index)
@@ -167,8 +172,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	 * local timer will be shut down. If a local timer is used from another
 	 * CPU as a broadcast timer, this call may fail if it is not available.
 	 */
-	if (broadcast && tick_broadcast_enter())
-		return -EBUSY;
+	if (broadcast && tick_broadcast_enter()) {
+		index = find_deepest_state(drv, dev, target_state->exit_latency,
+					   CPUIDLE_FLAG_TIMER_STOP, false);
+		if (index < 0) {
+			default_idle_call();
+			return -EBUSY;
+		}
+		target_state = &drv->states[index];
+	}
+
+	/* Take note of the planned idle state. */
+	sched_idle_set_state(target_state);
 
 	trace_cpu_idle_rcuidle(index, dev->cpu);
 	time_start = ktime_get();
@@ -178,6 +193,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	time_end = ktime_get();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
+	/* The cpu is no longer idle or about to enter idle. */
+	sched_idle_set_state(NULL);
+
 	if (broadcast) {
 		if (WARN_ON_ONCE(!irqs_disabled()))
 			local_irq_disable();
@@ -249,7 +267,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
  */
 void cpuidle_reflect(struct cpuidle_device *dev, int index)
 {
-	if (cpuidle_curr_governor->reflect)
+	if (cpuidle_curr_governor->reflect && index >= 0)
 		cpuidle_curr_governor->reflect(dev, index);
 }
 
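One subtlety in the hunk above: with the "reflect" check gone from cpuidle_idle_call(), cpuidle_reflect() can now be handed a negative error code (e.g. -EBUSY from call_cpuidle(), added in kernel/sched/idle.c below), so the sign check keeps error codes away from the governors. A minimal standalone model of why that guard matters, with hypothetical names rather than the real governor API:

/*
 * Standalone model (hypothetical names, not kernel code) of the sign
 * check added to cpuidle_reflect(): a negative "entered state" is an
 * error code such as -EBUSY, and governors use the index to address
 * per-state data, so it must never reach the reflect callback.
 */
#include <stdio.h>

struct governor {
	void (*reflect)(int index);
};

static void menu_style_reflect(int index)
{
	/* The real menu governor records index for a later update pass. */
	printf("governor saw state %d\n", index);
}

static struct governor curr = { .reflect = menu_style_reflect };

static void reflect(int index)
{
	if (curr.reflect && index >= 0)	/* the added sign check */
		curr.reflect(index);
}

int main(void)
{
	reflect(1);	/* normal exit: the governor hears about it */
	reflect(-16);	/* -EBUSY-style failure: silently dropped */
	return 0;
}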
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b8a5fa15ca24..22e4463d1787 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -367,9 +367,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 static void menu_reflect(struct cpuidle_device *dev, int index)
 {
 	struct menu_device *data = this_cpu_ptr(&menu_devices);
+
 	data->last_state_idx = index;
-	if (index >= 0)
-		data->needs_update = 1;
+	data->needs_update = 1;
 }
 
 /**
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 9c5e89254796..c7a63643658e 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -200,6 +200,10 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 	struct cpuidle_device *dev) {return NULL; }
 #endif
 
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void default_idle_call(void);
+
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
 #else
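The two declarations just added pair up as set/clear bookends around the actual idle entry, as the cpuidle.c hunks above show. A standalone sketch of that pairing (not kernel code; a plain global stands in for the per-runqueue field, and the names are hypothetical):

/*
 * Standalone sketch of how the cpuidle core uses
 * sched_idle_set_state(): publish the planned state before idling,
 * clear it on wakeup. A global stands in for the per-runqueue
 * idle_state field the kernel actually uses.
 */
#include <stdio.h>

struct cpuidle_state { const char *name; };

static const struct cpuidle_state *rq_idle_state;	/* per-rq in reality */

static void sched_idle_set_state(const struct cpuidle_state *idle_state)
{
	rq_idle_state = idle_state;	/* kernel: idle_set_state(this_rq(), ...) */
}

int main(void)
{
	struct cpuidle_state c3 = { .name = "C3" };

	sched_idle_set_state(&c3);	/* take note of the planned idle state */
	printf("idle state: %s\n", rq_idle_state ? rq_idle_state->name : "none");

	sched_idle_set_state(NULL);	/* no longer idle or about to idle */
	printf("idle state: %s\n", rq_idle_state ? rq_idle_state->name : "none");
	return 0;
}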
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index fefcb1fa5160..594275ed2620 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -15,6 +15,15 @@
 
 #include "sched.h"
 
+/**
+ * sched_idle_set_state - Record idle state for the current CPU.
+ * @idle_state: State to record.
+ */
+void sched_idle_set_state(struct cpuidle_state *idle_state)
+{
+	idle_set_state(this_rq(), idle_state);
+}
+
 static int __read_mostly cpu_idle_force_poll;
 
 void cpu_idle_poll_ctrl(bool enable)
@@ -68,6 +77,46 @@ void __weak arch_cpu_idle(void)
 }
 
 /**
+ * default_idle_call - Default CPU idle routine.
+ *
+ * To use when the cpuidle framework cannot be used.
+ */
+void default_idle_call(void)
+{
+	if (current_clr_polling_and_test())
+		local_irq_enable();
+	else
+		arch_cpu_idle();
+}
+
+static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+			int next_state)
+{
+	/* Fall back to the default arch idle method on errors. */
+	if (next_state < 0) {
+		default_idle_call();
+		return next_state;
+	}
+
+	/*
+	 * The idle task must be scheduled, it is pointless to go to idle, just
+	 * update no idle residency and return.
+	 */
+	if (current_clr_polling_and_test()) {
+		dev->last_residency = 0;
+		local_irq_enable();
+		return -EBUSY;
+	}
+
+	/*
+	 * Enter the idle state previously returned by the governor decision.
+	 * This function will block until an interrupt occurs and will take
+	 * care of re-enabling the local interrupts
+	 */
+	return cpuidle_enter(drv, dev, next_state);
+}
+
+/**
  * cpuidle_idle_call - the main idle function
  *
  * NOTE: no locks or semaphores should be used here
@@ -81,7 +130,6 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
-	bool reflect;
 
 	/*
 	 * Check if the idle task must be rescheduled. If it is the
@@ -105,8 +153,10 @@ static void cpuidle_idle_call(void)
 	 */
 	rcu_idle_enter();
 
-	if (cpuidle_not_available(drv, dev))
-		goto use_default;
+	if (cpuidle_not_available(drv, dev)) {
+		default_idle_call();
+		goto exit_idle;
+	}
 
 	/*
 	 * Suspend-to-idle ("freeze") is a system state in which all user space
@@ -124,52 +174,19 @@ static void cpuidle_idle_call(void)
 			goto exit_idle;
 		}
 
-		reflect = false;
 		next_state = cpuidle_find_deepest_state(drv, dev);
+		call_cpuidle(drv, dev, next_state);
 	} else {
-		reflect = true;
 		/*
 		 * Ask the cpuidle framework to choose a convenient idle state.
 		 */
 		next_state = cpuidle_select(drv, dev);
-	}
-	/* Fall back to the default arch idle method on errors. */
-	if (next_state < 0)
-		goto use_default;
-
-	/*
-	 * The idle task must be scheduled, it is pointless to
-	 * go to idle, just update no idle residency and get
-	 * out of this function
-	 */
-	if (current_clr_polling_and_test()) {
-		dev->last_residency = 0;
-		entered_state = next_state;
-		local_irq_enable();
-		goto exit_idle;
-	}
-
-	/* Take note of the planned idle state. */
-	idle_set_state(this_rq(), &drv->states[next_state]);
-
-	/*
-	 * Enter the idle state previously returned by the governor decision.
-	 * This function will block until an interrupt occurs and will take
-	 * care of re-enabling the local interrupts
-	 */
-	entered_state = cpuidle_enter(drv, dev, next_state);
-
-	/* The cpu is no longer idle or about to enter idle. */
-	idle_set_state(this_rq(), NULL);
-
-	if (entered_state == -EBUSY)
-		goto use_default;
-
-	/*
-	 * Give the governor an opportunity to reflect on the outcome
-	 */
-	if (reflect)
-		cpuidle_reflect(dev, entered_state);
+		entered_state = call_cpuidle(drv, dev, next_state);
+		/*
+		 * Give the governor an opportunity to reflect on the outcome
+		 */
+		cpuidle_reflect(dev, entered_state);
+	}
 
 exit_idle:
 	__current_set_polling();
@@ -182,19 +199,6 @@ exit_idle:
 
 	rcu_idle_exit();
 	start_critical_timings();
-	return;
-
-use_default:
-	/*
-	 * We can't use the cpuidle framework, let's use the default
-	 * idle routine.
-	 */
-	if (current_clr_polling_and_test())
-		local_irq_enable();
-	else
-		arch_cpu_idle();
-
-	goto exit_idle;
 }
 
 DEFINE_PER_CPU(bool, cpu_dead_idle);
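Taken together, the idle.c changes funnel both idle paths through call_cpuidle(), which now owns the error fallback that the removed use_default label used to provide. A standalone model of the resulting control flow (not kernel code; the helpers are print-only stubs and the names mirror the kernel's for readability only):

/*
 * Standalone model of cpuidle_idle_call() after this series: both the
 * find-deepest and governor paths go through call_cpuidle(), and a
 * negative state index falls back to the default arch idle routine
 * instead of a goto.
 */
#include <stdio.h>

static void default_idle_call(void)
{
	printf("  arch_cpu_idle()\n");	/* default arch idle method */
}

static int cpuidle_enter_stub(int state)
{
	printf("  enter state %d\n", state);
	return state;	/* the real call returns the state actually entered */
}

static int call_cpuidle(int next_state)
{
	/* Fall back to the default arch idle method on errors. */
	if (next_state < 0) {
		default_idle_call();
		return next_state;
	}
	return cpuidle_enter_stub(next_state);
}

int main(void)
{
	int entered;

	printf("governor path:\n");
	entered = call_cpuidle(2);
	printf("  cpuidle_reflect(%d)\n", entered);

	printf("error path:\n");
	call_cpuidle(-6);	/* -ENXIO-style failure from state selection */
	return 0;
}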