author    Linus Torvalds <torvalds@linux-foundation.org>  2014-04-02 19:22:27 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-02 19:22:27 -0400
commit    05bf58ca4b8f0be7d7af830f943f6d6b2c9ccee1
tree      9c87f7ffb9bbd2d54dd1e3bbfd8ef4eaeec8063b /kernel
parent    d23082257d83e4bc89727d5aedee197e907999d2
parent    a1d028bd6d2b7789d15eddfd07c5bea2aaf36040
Merge branch 'sched-idle-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull sched/idle changes from Ingo Molnar:
 "More idle code reorganization, to prepare for more integration.

  (Sent separately because it depended on pending timer work, which is
  now upstream)"

* 'sched-idle-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/idle: Add more comments to the code
  sched/idle: Move idle conditions in cpuidle_idle main function
  sched/idle: Reorganize the idle loop
  cpuidle/idle: Move the cpuidle_idle_call function to idle.c
  idle/cpuidle: Split cpuidle_idle_call main function into smaller functions
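For orientation before reading the diff: after this merge, the generic idle loop delegates to a single cpuidle_idle_call() that owns the select/enter/reflect sequence and falls back to the architecture's default idle routine when cpuidle is unavailable. The standalone C mock below only illustrates that control-flow shape; every *_stub and *_mock name is a hypothetical stand-in for a kernel primitive, not real kernel code.

/*
 * Illustrative mock of the reorganized idle path (not kernel code).
 * All *_stub functions are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

static bool cpuidle_ready = true;      /* models cpuidle_enabled() returning 0 */
static bool resched_pending = false;   /* models current_clr_polling_and_test() */

static int  cpuidle_select_stub(void)   { return 1; }  /* governor picks a state */
static int  cpuidle_enter_stub(int s)   { printf("enter state C%d\n", s); return s; }
static void cpuidle_reflect_stub(int s) { printf("reflect on C%d\n", s); }
static void arch_cpu_idle_stub(void)    { puts("default arch idle"); }

/* Shape of the new cpuidle_idle_call(): bail out if work is pending,
 * otherwise select -> enter -> reflect, with a default-idle fallback. */
static int cpuidle_idle_call_mock(void)
{
	if (resched_pending)
		return 0;                       /* pointless to go idle */

	if (cpuidle_ready) {
		int next = cpuidle_select_stub();
		int entered = cpuidle_enter_stub(next);
		cpuidle_reflect_stub(entered);  /* feed the outcome back to the governor */
	} else {
		arch_cpu_idle_stub();           /* cpuidle framework not usable */
	}
	return 0;
}

int main(void)
{
	cpuidle_idle_call_mock();               /* one pass of the idle loop body */
	return 0;
}

The design point of the reorganization is visible in the mock: the polling-flag, RCU, and critical-timings bookkeeping now lives in one function instead of being split between cpu_idle_loop() and the cpuidle core.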
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/idle.c  150
1 file changed, 134 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index b7976a127178..8f4390a079c7 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -63,6 +63,136 @@ void __weak arch_cpu_idle(void)
 	local_irq_enable();
 }
 
+/**
+ * cpuidle_idle_call - the main idle function
+ *
+ * NOTE: no locks or semaphores should be used here
+ * return non-zero on failure
+ */
+static int cpuidle_idle_call(void)
+{
+	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+	int next_state, entered_state, ret;
+	bool broadcast;
+
+	/*
+	 * Check if the idle task must be rescheduled. If it is the
+	 * case, exit the function after re-enabling the local irq and
+	 * set again the polling flag
+	 */
+	if (current_clr_polling_and_test()) {
+		local_irq_enable();
+		__current_set_polling();
+		return 0;
+	}
+
+	/*
+	 * During the idle period, stop measuring the disabled irqs
+	 * critical sections latencies
+	 */
+	stop_critical_timings();
+
+	/*
+	 * Tell the RCU framework we are entering an idle section,
+	 * so no more rcu read side critical sections and one more
+	 * step to the grace period
+	 */
+	rcu_idle_enter();
+
+	/*
+	 * Check if the cpuidle framework is ready, otherwise fallback
+	 * to the default arch specific idle method
+	 */
+	ret = cpuidle_enabled(drv, dev);
+
+	if (!ret) {
+		/*
+		 * Ask the governor to choose an idle state it thinks
+		 * it is convenient to go to. There is *always* a
+		 * convenient idle state
+		 */
+		next_state = cpuidle_select(drv, dev);
+
+		/*
+		 * The idle task must be scheduled, it is pointless to
+		 * go to idle, just update no idle residency and get
+		 * out of this function
+		 */
+		if (current_clr_polling_and_test()) {
+			dev->last_residency = 0;
+			entered_state = next_state;
+			local_irq_enable();
+		} else {
+			broadcast = !!(drv->states[next_state].flags &
+				       CPUIDLE_FLAG_TIMER_STOP);
+
+			if (broadcast)
+				/*
+				 * Tell the time framework to switch
+				 * to a broadcast timer because our
+				 * local timer will be shutdown. If a
+				 * local timer is used from another
+				 * cpu as a broadcast timer, this call
+				 * may fail if it is not available
+				 */
+				ret = clockevents_notify(
+					CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+					&dev->cpu);
+
+			if (!ret) {
+				trace_cpu_idle_rcuidle(next_state, dev->cpu);
+
+				/*
+				 * Enter the idle state previously
+				 * returned by the governor
+				 * decision. This function will block
+				 * until an interrupt occurs and will
+				 * take care of re-enabling the local
+				 * interrupts
+				 */
+				entered_state = cpuidle_enter(drv, dev,
+							      next_state);
+
+				trace_cpu_idle_rcuidle(PWR_EVENT_EXIT,
+						       dev->cpu);
+
+				if (broadcast)
+					clockevents_notify(
+						CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+						&dev->cpu);
+
+				/*
+				 * Give the governor an opportunity to reflect on the
+				 * outcome
+				 */
+				cpuidle_reflect(dev, entered_state);
+			}
+		}
+	}
+
+	/*
+	 * We can't use the cpuidle framework, let's use the default
+	 * idle routine
+	 */
+	if (ret)
+		arch_cpu_idle();
+
+	__current_set_polling();
+
+	/*
+	 * It is up to the idle functions to enable back the local
+	 * interrupt
+	 */
+	if (WARN_ON_ONCE(irqs_disabled()))
+		local_irq_enable();
+
+	rcu_idle_exit();
+	start_critical_timings();
+
+	return 0;
+}
+
 /*
  * Generic idle loop implementation
  */
@@ -90,23 +220,11 @@ static void cpu_idle_loop(void)
 			 * know that the IPI is going to arrive right
 			 * away
 			 */
-			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
+			if (cpu_idle_force_poll || tick_check_broadcast_expired())
 				cpu_idle_poll();
-			} else {
-				if (!current_clr_polling_and_test()) {
-					stop_critical_timings();
-					rcu_idle_enter();
-					if (cpuidle_idle_call())
-						arch_cpu_idle();
-					if (WARN_ON_ONCE(irqs_disabled()))
-						local_irq_enable();
-					rcu_idle_exit();
-					start_critical_timings();
-				} else {
-					local_irq_enable();
-				}
-				__current_set_polling();
-			}
+			else
+				cpuidle_idle_call();
+
 			arch_cpu_idle_exit();
 		}
 
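A second detail worth noting from the first hunk: states flagged CPUIDLE_FLAG_TIMER_STOP shut down the CPU's local timer, so cpuidle_enter() is bracketed by CLOCK_EVT_NOTIFY_BROADCAST_ENTER/EXIT notifications, and the deep state is skipped (falling back to the default idle routine) if the enter notification fails. A minimal sketch of that bracket pattern, again with hypothetical stubs rather than the real clockevents API:

#include <stdio.h>

/* Hypothetical stand-ins for clockevents_notify() and cpuidle_enter(). */
static int  broadcast_enter_stub(void) { return 0; }   /* 0 = broadcast timer armed */
static void broadcast_exit_stub(void)  { puts("back to local timer"); }
static void deep_idle_stub(void)       { puts("deep idle, local timer stopped"); }

/* Enter a timer-stopping state only if a broadcast wakeup is guaranteed;
 * a non-zero return tells the caller to use the default idle fallback. */
static int enter_timer_stop_state(void)
{
	int ret = broadcast_enter_stub();       /* may fail: no broadcast device */
	if (ret)
		return ret;

	deep_idle_stub();                       /* wakeup arrives via broadcast */
	broadcast_exit_stub();                  /* restore local timer handling */
	return 0;
}

int main(void)
{
	return enter_timer_stop_state();
}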