author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-05-04 16:53:35 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-05-04 16:53:35 -0400
commit		bcf6ad8a4a3d002e8bc8f6639cdc119168f4e87b (patch)
tree		6536efb9c4c8d199aca4118af959d79192bab716
parent		a802ea96454570f3c526dd9d7ad8c706e570444d (diff)
sched / idle: Eliminate the "reflect" check from cpuidle_idle_call()
Since cpuidle_reflect() should only be called if the idle state to enter
was selected by cpuidle_select(), there is the "reflect" variable in
cpuidle_idle_call() whose value is used to determine whether or not that
is the case.

However, if the entire code run between the conditional setting "reflect"
and the call to cpuidle_reflect() is moved to a separate function, it will
be possible to call that new function in both branches of the conditional.
In that case cpuidle_reflect() will only need to be called from one of
those branches, and the "reflect" variable won't be necessary any more.

This eliminates one check made by cpuidle_idle_call() on the majority of
its invocations, so change the code as described.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
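The pattern generalizes beyond cpuidle: when a boolean only records which
branch of a conditional ran so that a later step can be skipped, moving the
shared tail into a helper lets the branch that needs the extra step call it
directly. Below is a minimal userspace sketch of that control-flow change,
not the kernel code itself; pick_deepest(), pick_by_governor(),
enter_state() and reflect() are hypothetical stand-ins for
cpuidle_find_deepest_state(), cpuidle_select(), the idle-entry sequence and
cpuidle_reflect().

#include <stdio.h>

/* Hypothetical stand-ins; these only model the control flow. */
static int pick_deepest(void) { return 3; }
static int pick_by_governor(void) { return 1; }
static int enter_state(int state) { printf("entered %d\n", state); return state; }
static void reflect(int state) { printf("reflected on %d\n", state); }

/* Before: a flag is set in one branch and tested after the shared tail. */
static void idle_before(int forced)
{
	int do_reflect = 0;
	int next_state, entered_state;

	if (forced) {
		next_state = pick_deepest();
	} else {
		do_reflect = 1;
		next_state = pick_by_governor();
	}
	entered_state = enter_state(next_state);	/* shared tail code */
	if (do_reflect)					/* checked on every call */
		reflect(entered_state);
}

/* After: the shared tail becomes a helper invoked from both branches, so
 * the reflect step hangs directly off one branch and the flag disappears. */
static int call_idle(int next_state)
{
	return enter_state(next_state);
}

static void idle_after(int forced)
{
	if (forced)
		call_idle(pick_deepest());
	else
		reflect(call_idle(pick_by_governor()));
}

int main(void)
{
	idle_before(0);	/* governor path, with the flag */
	idle_after(0);	/* same behavior, no flag */
	return 0;
}

As in the patch, one branch ignores the helper's return value while the
other feeds it straight to the reflect step, so no per-call flag check
remains.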
-rw-r--r--	kernel/sched/idle.c	90
1 file changed, 46 insertions(+), 44 deletions(-)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index ae7c0be90d16..9c919b42f846 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -79,6 +79,46 @@ static void default_idle_call(void)
 	arch_cpu_idle();
 }
 
+static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+		      int next_state)
+{
+	int entered_state;
+
+	/* Fall back to the default arch idle method on errors. */
+	if (next_state < 0) {
+		default_idle_call();
+		return next_state;
+	}
+
+	/*
+	 * The idle task must be scheduled, it is pointless to go to idle, just
+	 * update no idle residency and return.
+	 */
+	if (current_clr_polling_and_test()) {
+		dev->last_residency = 0;
+		local_irq_enable();
+		return -EBUSY;
+	}
+
+	/* Take note of the planned idle state. */
+	idle_set_state(this_rq(), &drv->states[next_state]);
+
+	/*
+	 * Enter the idle state previously returned by the governor decision.
+	 * This function will block until an interrupt occurs and will take
+	 * care of re-enabling the local interrupts
+	 */
+	entered_state = cpuidle_enter(drv, dev, next_state);
+
+	/* The cpu is no longer idle or about to enter idle. */
+	idle_set_state(this_rq(), NULL);
+
+	if (entered_state == -EBUSY)
+		default_idle_call();
+
+	return entered_state;
+}
+
 /**
  * cpuidle_idle_call - the main idle function
  *
@@ -93,7 +133,6 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
-	bool reflect;
 
 	/*
 	 * Check if the idle task must be rescheduled. If it is the
@@ -138,56 +177,19 @@ static void cpuidle_idle_call(void)
 			goto exit_idle;
 		}
 
-		reflect = false;
 		next_state = cpuidle_find_deepest_state(drv, dev);
+		call_cpuidle(drv, dev, next_state);
 	} else {
-		reflect = true;
 		/*
 		 * Ask the cpuidle framework to choose a convenient idle state.
 		 */
 		next_state = cpuidle_select(drv, dev);
-	}
-	/* Fall back to the default arch idle method on errors. */
-	if (next_state < 0) {
-		default_idle_call();
-		goto exit_idle;
-	}
-
-	/*
-	 * The idle task must be scheduled, it is pointless to
-	 * go to idle, just update no idle residency and get
-	 * out of this function
-	 */
-	if (current_clr_polling_and_test()) {
-		dev->last_residency = 0;
-		entered_state = next_state;
-		local_irq_enable();
-		goto exit_idle;
-	}
-
-	/* Take note of the planned idle state. */
-	idle_set_state(this_rq(), &drv->states[next_state]);
-
-	/*
-	 * Enter the idle state previously returned by the governor decision.
-	 * This function will block until an interrupt occurs and will take
-	 * care of re-enabling the local interrupts
-	 */
-	entered_state = cpuidle_enter(drv, dev, next_state);
-
-	/* The cpu is no longer idle or about to enter idle. */
-	idle_set_state(this_rq(), NULL);
-
-	if (entered_state == -EBUSY) {
-		default_idle_call();
-		goto exit_idle;
-	}
-
-	/*
-	 * Give the governor an opportunity to reflect on the outcome
-	 */
-	if (reflect)
+		entered_state = call_cpuidle(drv, dev, next_state);
+		/*
+		 * Give the governor an opportunity to reflect on the outcome
+		 */
 		cpuidle_reflect(dev, entered_state);
+	}
 
 exit_idle:
 	__current_set_polling();