diff options
author | Peter Zijlstra <peterz@infradead.org> | 2014-04-11 07:55:48 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2014-05-08 03:16:59 -0400 |
commit | 37352273ad48f2d177ed1b06ced32d5536b773fb (patch) | |
tree | 245d7410b60c5ab32f1b1103d40bcf56d9f941bd /kernel/sched | |
parent | c444117f0f39d59733ec23da67c44424df529230 (diff) |
sched/idle: Reflow cpuidle_idle_call()
Apply goto to reduce lines and nesting levels.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-cc6vb0snt3sr7op6rlbfeqfh@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/idle.c | 131 |
1 file changed, 58 insertions, 73 deletions
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index ed67f0cd2906..88a6bc43738b 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c | |||
@@ -73,7 +73,7 @@ static int cpuidle_idle_call(void) | |||
73 | { | 73 | { |
74 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 74 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
75 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | 75 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
76 | int next_state, entered_state, ret; | 76 | int next_state, entered_state; |
77 | bool broadcast; | 77 | bool broadcast; |
78 | 78 | ||
79 | /* | 79 | /* |
@@ -102,90 +102,75 @@ static int cpuidle_idle_call(void) | |||
102 | * Check if the cpuidle framework is ready, otherwise fallback | 102 | * Check if the cpuidle framework is ready, otherwise fallback |
103 | * to the default arch specific idle method | 103 | * to the default arch specific idle method |
104 | */ | 104 | */ |
105 | ret = cpuidle_enabled(drv, dev); | 105 | if (cpuidle_enabled(drv, dev)) { |
106 | 106 | use_default: | |
107 | if (!ret) { | ||
108 | /* | ||
109 | * Ask the governor to choose an idle state it thinks | ||
110 | * it is convenient to go to. There is *always* a | ||
111 | * convenient idle state | ||
112 | */ | ||
113 | next_state = cpuidle_select(drv, dev); | ||
114 | |||
115 | /* | 107 | /* |
116 | * The idle task must be scheduled, it is pointless to | 108 | * We can't use the cpuidle framework, let's use the default |
117 | * go to idle, just update no idle residency and get | 109 | * idle routine. |
118 | * out of this function | ||
119 | */ | 110 | */ |
120 | if (current_clr_polling_and_test()) { | 111 | if (current_clr_polling_and_test()) |
121 | dev->last_residency = 0; | ||
122 | entered_state = next_state; | ||
123 | local_irq_enable(); | 112 | local_irq_enable(); |
124 | } else { | 113 | else |
125 | broadcast = !!(drv->states[next_state].flags & | 114 | arch_cpu_idle(); |
126 | CPUIDLE_FLAG_TIMER_STOP); | 115 | |
127 | 116 | goto exit_idle; | |
128 | if (broadcast) { | ||
129 | /* | ||
130 | * Tell the time framework to switch | ||
131 | * to a broadcast timer because our | ||
132 | * local timer will be shutdown. If a | ||
133 | * local timer is used from another | ||
134 | * cpu as a broadcast timer, this call | ||
135 | * may fail if it is not available | ||
136 | */ | ||
137 | ret = clockevents_notify( | ||
138 | CLOCK_EVT_NOTIFY_BROADCAST_ENTER, | ||
139 | &dev->cpu); | ||
140 | } | ||
141 | |||
142 | if (!ret) { | ||
143 | trace_cpu_idle_rcuidle(next_state, dev->cpu); | ||
144 | |||
145 | /* | ||
146 | * Enter the idle state previously | ||
147 | * returned by the governor | ||
148 | * decision. This function will block | ||
149 | * until an interrupt occurs and will | ||
150 | * take care of re-enabling the local | ||
151 | * interrupts | ||
152 | */ | ||
153 | entered_state = cpuidle_enter(drv, dev, | ||
154 | next_state); | ||
155 | |||
156 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, | ||
157 | dev->cpu); | ||
158 | |||
159 | if (broadcast) | ||
160 | clockevents_notify( | ||
161 | CLOCK_EVT_NOTIFY_BROADCAST_EXIT, | ||
162 | &dev->cpu); | ||
163 | |||
164 | /* | ||
165 | * Give the governor an opportunity to reflect on the | ||
166 | * outcome | ||
167 | */ | ||
168 | cpuidle_reflect(dev, entered_state); | ||
169 | } | ||
170 | } | ||
171 | } | 117 | } |
172 | 118 | ||
173 | /* | 119 | /* |
174 | * We can't use the cpuidle framework, let's use the default | 120 | * Ask the governor to choose an idle state it thinks |
175 | * idle routine | 121 | * it is convenient to go to. There is *always* a |
122 | * convenient idle state | ||
176 | */ | 123 | */ |
177 | if (ret) { | 124 | next_state = cpuidle_select(drv, dev); |
178 | if (!current_clr_polling_and_test()) | 125 | |
179 | arch_cpu_idle(); | 126 | /* |
180 | else | 127 | * The idle task must be scheduled, it is pointless to |
181 | local_irq_enable(); | 128 | * go to idle, just update no idle residency and get |
129 | * out of this function | ||
130 | */ | ||
131 | if (current_clr_polling_and_test()) { | ||
132 | dev->last_residency = 0; | ||
133 | entered_state = next_state; | ||
134 | local_irq_enable(); | ||
135 | goto exit_idle; | ||
182 | } | 136 | } |
183 | 137 | ||
138 | broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP); | ||
139 | |||
140 | /* | ||
141 | * Tell the time framework to switch to a broadcast timer | ||
142 | * because our local timer will be shutdown. If a local timer | ||
143 | * is used from another cpu as a broadcast timer, this call may | ||
144 | * fail if it is not available | ||
145 | */ | ||
146 | if (broadcast && | ||
147 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu)) | ||
148 | goto use_default; | ||
149 | |||
150 | trace_cpu_idle_rcuidle(next_state, dev->cpu); | ||
151 | |||
152 | /* | ||
153 | * Enter the idle state previously returned by the governor decision. | ||
154 | * This function will block until an interrupt occurs and will take | ||
155 | * care of re-enabling the local interrupts | ||
156 | */ | ||
157 | entered_state = cpuidle_enter(drv, dev, next_state); | ||
158 | |||
159 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); | ||
160 | |||
161 | if (broadcast) | ||
162 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); | ||
163 | |||
164 | /* | ||
165 | * Give the governor an opportunity to reflect on the outcome | ||
166 | */ | ||
167 | cpuidle_reflect(dev, entered_state); | ||
168 | |||
169 | exit_idle: | ||
184 | __current_set_polling(); | 170 | __current_set_polling(); |
185 | 171 | ||
186 | /* | 172 | /* |
187 | * It is up to the idle functions to enable back the local | 173 | * It is up to the idle functions to reenable local interrupts |
188 | * interrupt | ||
189 | */ | 174 | */ |
190 | if (WARN_ON_ONCE(irqs_disabled())) | 175 | if (WARN_ON_ONCE(irqs_disabled())) |
191 | local_irq_enable(); | 176 | local_irq_enable(); |