about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2015-05-09 19:18:03 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2015-05-14 15:35:10 -0400
commitfaad38492814112e3e7ce94d90123bbe301fff33 (patch)
tree36f4bf459d2dfdda68a891228af94aebbf951c95
parent7312280bd2ad9df1bcca236c5614091a0bd1504c (diff)
sched / idle: Call idle_set_state() from cpuidle_enter_state()
Introduce a wrapper function around idle_set_state() called sched_idle_set_state() that will pass this_rq() to it as the first argument and make cpuidle_enter_state() call the new function before and after entering the target state. At the same time, remove direct invocations of idle_set_state() from call_cpuidle(). This will allow the invocation of default_idle_call() to be moved from call_cpuidle() to cpuidle_enter_state() safely and call_cpuidle() to be simplified a bit as a result. Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com> Tested-by: Preeti U Murthy <preeti@linux.vnet.ibm.com> Tested-by: Sudeep Holla <sudeep.holla@arm.com> Acked-by: Kevin Hilman <khilman@linaro.org>
-rw-r--r--drivers/cpuidle/cpuidle.c6
-rw-r--r--include/linux/cpuidle.h3
-rw-r--r--kernel/sched/idle.c15
3 files changed, 18 insertions, 6 deletions
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 597f88443bdc..9306dd5f460e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -170,6 +170,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	if (broadcast && tick_broadcast_enter())
 		return -EBUSY;
 
+	/* Take note of the planned idle state. */
+	sched_idle_set_state(target_state);
+
 	trace_cpu_idle_rcuidle(index, dev->cpu);
 	time_start = ktime_get();
 
@@ -178,6 +181,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	time_end = ktime_get();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
+	/* The cpu is no longer idle or about to enter idle. */
+	sched_idle_set_state(NULL);
+
 	if (broadcast) {
 		if (WARN_ON_ONCE(!irqs_disabled()))
 			local_irq_disable();
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 9c5e89254796..301eaaab40e3 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -200,6 +200,9 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 				struct cpuidle_device *dev) {return NULL; }
 #endif
 
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
 #else
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 9c919b42f846..5d9f549fffa8 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -15,6 +15,15 @@
 
 #include "sched.h"
 
+/**
+ * sched_idle_set_state - Record idle state for the current CPU.
+ * @idle_state: State to record.
+ */
+void sched_idle_set_state(struct cpuidle_state *idle_state)
+{
+	idle_set_state(this_rq(), idle_state);
+}
+
 static int __read_mostly cpu_idle_force_poll;
 
 void cpu_idle_poll_ctrl(bool enable)
@@ -100,9 +109,6 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		return -EBUSY;
 	}
 
-	/* Take note of the planned idle state. */
-	idle_set_state(this_rq(), &drv->states[next_state]);
-
 	/*
 	 * Enter the idle state previously returned by the governor decision.
 	 * This function will block until an interrupt occurs and will take
@@ -110,9 +116,6 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	 */
 	entered_state = cpuidle_enter(drv, dev, next_state);
 
-	/* The cpu is no longer idle or about to enter idle. */
-	idle_set_state(this_rq(), NULL);
-
 	if (entered_state == -EBUSY)
 		default_idle_call();
 