author		Daniel Lezcano <daniel.lezcano@linaro.org>	2014-03-03 02:48:54 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-03-11 06:52:49 -0400
commit		a1d028bd6d2b7789d15eddfd07c5bea2aaf36040 (patch)
tree		5755907de09333e837ba3a225d7426964899488c /kernel
parent		8ca3c6424f4988fc19ed1067b121fbaf2e884d77 (diff)
sched/idle: Add more comments to the code
The idle main function is a complex and critical function. Add more
comments to the code.

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: tglx@linutronix.de
Cc: rjw@rjwysocki.net
Cc: preeti@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1393832934-11625-5-git-send-email-daniel.lezcano@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/idle.c	59
1 file changed, 57 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index cc7a6f3801ff..8f4390a079c7 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -76,21 +76,49 @@ static int cpuidle_idle_call(void)
 	int next_state, entered_state, ret;
 	bool broadcast;
 
+	/*
+	 * Check if the idle task must be rescheduled. If it is the
+	 * case, exit the function after re-enabling the local irq and
+	 * set again the polling flag
+	 */
 	if (current_clr_polling_and_test()) {
 		local_irq_enable();
 		__current_set_polling();
 		return 0;
 	}
 
+	/*
+	 * During the idle period, stop measuring the disabled irqs
+	 * critical sections latencies
+	 */
 	stop_critical_timings();
+
+	/*
+	 * Tell the RCU framework we are entering an idle section,
+	 * so no more rcu read side critical sections and one more
+	 * step to the grace period
+	 */
 	rcu_idle_enter();
 
+	/*
+	 * Check if the cpuidle framework is ready, otherwise fallback
+	 * to the default arch specific idle method
+	 */
 	ret = cpuidle_enabled(drv, dev);
 
 	if (!ret) {
-		/* ask the governor for the next state */
+		/*
+		 * Ask the governor to choose an idle state it thinks
+		 * it is convenient to go to. There is *always* a
+		 * convenient idle state
+		 */
 		next_state = cpuidle_select(drv, dev);
 
+		/*
+		 * The idle task must be scheduled, it is pointless to
+		 * go to idle, just update no idle residency and get
+		 * out of this function
+		 */
 		if (current_clr_polling_and_test()) {
 			dev->last_residency = 0;
 			entered_state = next_state;
@@ -100,6 +128,14 @@ static int cpuidle_idle_call(void)
 				       CPUIDLE_FLAG_TIMER_STOP);
 
 			if (broadcast)
+				/*
+				 * Tell the time framework to switch
+				 * to a broadcast timer because our
+				 * local timer will be shutdown. If a
+				 * local timer is used from another
+				 * cpu as a broadcast timer, this call
+				 * may fail if it is not available
+				 */
 				ret = clockevents_notify(
 					CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
 					&dev->cpu);
@@ -107,6 +143,14 @@ static int cpuidle_idle_call(void)
 			if (!ret) {
 				trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
+				/*
+				 * Enter the idle state previously
+				 * returned by the governor
+				 * decision. This function will block
+				 * until an interrupt occurs and will
+				 * take care of re-enabling the local
+				 * interrupts
+				 */
 				entered_state = cpuidle_enter(drv, dev,
 							      next_state);
 
@@ -118,17 +162,28 @@ static int cpuidle_idle_call(void)
 						CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
 						&dev->cpu);
 
-			/* give the governor an opportunity to reflect on the outcome */
+				/*
+				 * Give the governor an opportunity to reflect on the
+				 * outcome
+				 */
 				cpuidle_reflect(dev, entered_state);
 			}
 		}
 	}
 
+	/*
+	 * We can't use the cpuidle framework, let's use the default
+	 * idle routine
+	 */
 	if (ret)
 		arch_cpu_idle();
 
 	__current_set_polling();
 
+	/*
+	 * It is up to the idle functions to enable back the local
+	 * interrupt
+	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();
 
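Read as a whole, the patch documents a single decision ladder inside the idle
loop. The condensed sketch below stitches the four hunks back into one function
so that flow is visible at a glance, with the new block comments compressed to
one-liners. It is an illustrative reconstruction, not the patch itself: the
dev/drv initialisers, the local_irq_enable() in the polling branch, the else
line, and the closing return fall outside the diff context and are inferred
from the surrounding function, as marked in the comments.

/* Illustrative reconstruction of cpuidle_idle_call() after this patch. */
static int cpuidle_idle_call(void)
{
	/* assumed from the surrounding file, outside the diff context: */
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	int next_state, entered_state, ret;
	bool broadcast;

	/* Reschedule pending? Re-enable irqs, restore the polling flag, bail. */
	if (current_clr_polling_and_test()) {
		local_irq_enable();
		__current_set_polling();
		return 0;
	}

	stop_critical_timings();	/* stop measuring irqs-off latencies */
	rcu_idle_enter();		/* no RCU read-side sections past here */

	ret = cpuidle_enabled(drv, dev);	/* is cpuidle usable at all? */

	if (!ret) {
		/* The governor always returns a convenient idle state. */
		next_state = cpuidle_select(drv, dev);

		/* Rescheduling needed after all: zero residency, get out. */
		if (current_clr_polling_and_test()) {
			dev->last_residency = 0;
			entered_state = next_state;
			local_irq_enable();	/* outside the diff context */
		} else {			/* outside the diff context */
			broadcast = !!(drv->states[next_state].flags &
				       CPUIDLE_FLAG_TIMER_STOP);

			/*
			 * The local timer shuts down in this state: switch to
			 * a broadcast timer; may fail if none is available.
			 */
			if (broadcast)
				ret = clockevents_notify(
					CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
					&dev->cpu);

			if (!ret) {
				trace_cpu_idle_rcuidle(next_state, dev->cpu);

				/* Blocks until an interrupt; re-enables irqs. */
				entered_state = cpuidle_enter(drv, dev,
							      next_state);

				if (broadcast)
					clockevents_notify(
						CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
						&dev->cpu);

				/* Let the governor learn from the outcome. */
				cpuidle_reflect(dev, entered_state);
			}
		}
	}

	/* cpuidle unusable, or the broadcast switch failed: fall back. */
	if (ret)
		arch_cpu_idle();

	__current_set_polling();

	/* The idle routines themselves re-enable local interrupts. */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	/* remainder of the function lies outside the diff context */
	return 0;	/* assumed closing return */
}

Note how ret does double duty in this flow: a non-zero value from either
cpuidle_enabled() or the CLOCK_EVT_NOTIFY_BROADCAST_ENTER notification skips
the cpuidle_enter() path and lands on the same arch_cpu_idle() fallback at the
bottom of the function.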