Diffstat (limited to 'kernel/sched/idle.c')
 -rw-r--r--  kernel/sched/idle.c  140
 1 file changed, 60 insertions(+), 80 deletions(-)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8f4390a079c7..25b9423abce9 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -67,24 +67,21 @@ void __weak arch_cpu_idle(void)
  * cpuidle_idle_call - the main idle function
  *
  * NOTE: no locks or semaphores should be used here
- * return non-zero on failure
  */
-static int cpuidle_idle_call(void)
+static void cpuidle_idle_call(void)
 {
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
-	int next_state, entered_state, ret;
+	int next_state, entered_state;
 	bool broadcast;
 
 	/*
 	 * Check if the idle task must be rescheduled. If it is the
-	 * case, exit the function after re-enabling the local irq and
-	 * set again the polling flag
+	 * case, exit the function after re-enabling the local irq.
 	 */
-	if (current_clr_polling_and_test()) {
+	if (need_resched()) {
 		local_irq_enable();
-		__current_set_polling();
-		return 0;
+		return;
 	}
 
 	/*
@@ -101,96 +98,79 @@ static int cpuidle_idle_call(void)
 	rcu_idle_enter();
 
 	/*
-	 * Check if the cpuidle framework is ready, otherwise fallback
-	 * to the default arch specific idle method
+	 * Ask the cpuidle framework to choose a convenient idle state.
+	 * Fall back to the default arch idle method on errors.
 	 */
-	ret = cpuidle_enabled(drv, dev);
-
-	if (!ret) {
+	next_state = cpuidle_select(drv, dev);
+	if (next_state < 0) {
+use_default:
 		/*
-		 * Ask the governor to choose an idle state it thinks
-		 * it is convenient to go to. There is *always* a
-		 * convenient idle state
+		 * We can't use the cpuidle framework, let's use the default
+		 * idle routine.
 		 */
-		next_state = cpuidle_select(drv, dev);
-
-		/*
-		 * The idle task must be scheduled, it is pointless to
-		 * go to idle, just update no idle residency and get
-		 * out of this function
-		 */
-		if (current_clr_polling_and_test()) {
-			dev->last_residency = 0;
-			entered_state = next_state;
+		if (current_clr_polling_and_test())
 			local_irq_enable();
-		} else {
-			broadcast = !!(drv->states[next_state].flags &
-				CPUIDLE_FLAG_TIMER_STOP);
-
-			if (broadcast)
-				/*
-				 * Tell the time framework to switch
-				 * to a broadcast timer because our
-				 * local timer will be shutdown. If a
-				 * local timer is used from another
-				 * cpu as a broadcast timer, this call
-				 * may fail if it is not available
-				 */
-				ret = clockevents_notify(
-					CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
-					&dev->cpu);
-
-			if (!ret) {
-				trace_cpu_idle_rcuidle(next_state, dev->cpu);
-
-				/*
-				 * Enter the idle state previously
-				 * returned by the governor
-				 * decision. This function will block
-				 * until an interrupt occurs and will
-				 * take care of re-enabling the local
-				 * interrupts
-				 */
-				entered_state = cpuidle_enter(drv, dev,
-						next_state);
-
-				trace_cpu_idle_rcuidle(PWR_EVENT_EXIT,
-						dev->cpu);
-
-				if (broadcast)
-					clockevents_notify(
-						CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
-						&dev->cpu);
-
-				/*
-				 * Give the governor an opportunity to reflect on the
-				 * outcome
-				 */
-				cpuidle_reflect(dev, entered_state);
-			}
-		}
+		else
+			arch_cpu_idle();
+
+		goto exit_idle;
 	}
 
+
 	/*
-	 * We can't use the cpuidle framework, let's use the default
-	 * idle routine
+	 * The idle task must be scheduled, it is pointless to
+	 * go to idle, just update no idle residency and get
+	 * out of this function
 	 */
-	if (ret)
-		arch_cpu_idle();
+	if (current_clr_polling_and_test()) {
+		dev->last_residency = 0;
+		entered_state = next_state;
+		local_irq_enable();
+		goto exit_idle;
+	}
+
+	broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
 
+	/*
+	 * Tell the time framework to switch to a broadcast timer
+	 * because our local timer will be shutdown. If a local timer
+	 * is used from another cpu as a broadcast timer, this call may
+	 * fail if it is not available
+	 */
+	if (broadcast &&
+	    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
+		goto use_default;
+
+	trace_cpu_idle_rcuidle(next_state, dev->cpu);
+
+	/*
+	 * Enter the idle state previously returned by the governor decision.
+	 * This function will block until an interrupt occurs and will take
+	 * care of re-enabling the local interrupts
+	 */
+	entered_state = cpuidle_enter(drv, dev, next_state);
+
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+
+	if (broadcast)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+
+	/*
+	 * Give the governor an opportunity to reflect on the outcome
+	 */
+	cpuidle_reflect(dev, entered_state);
+
+exit_idle:
+	__current_set_polling();
 
 	/*
-	 * It is up to the idle functions to enable back the local
-	 * interrupt
+	 * It is up to the idle functions to reenable local interrupts
 	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();
 
 	rcu_idle_exit();
 	start_critical_timings();
-
-	return 0;
 }
 
 /*
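
The reflow is easier to read outside diff form. The patch replaces the old nested error handling (an outer "if (!ret)" from cpuidle_enabled() and an inner "if (!ret)" from clockevents_notify()) with straight-line code and two labels: use_default for the arch fallback and exit_idle for the shared epilogue, where a failed broadcast-timer switch jumps backward into use_default instead of duplicating the fallback. The deleted "return non-zero on failure" comment and the trailing "return 0" show the return value carried no information, which is why the function can become void. Below is a minimal, self-contained C sketch of that control-flow shape; every mock_* helper is an invented stand-in for the real cpuidle/clockevents calls, not a kernel API.

/*
 * Minimal, self-contained sketch of the control flow this patch creates.
 * The mock_* helpers are invented stubs; only the label-based flow
 * mirrors the reflowed cpuidle_idle_call().
 */
#include <stdbool.h>
#include <stdio.h>

static int mock_cpuidle_select(void)   { return 2; }      /* governor picks state 2 */
static bool mock_broadcast_enter(void) { return false; }  /* pretend the switch fails */
static void mock_arch_cpu_idle(void)   { puts("default arch idle"); }
static int mock_cpuidle_enter(int s)   { printf("enter state %d\n", s); return s; }

static void idle_call_sketch(void)
{
	int next_state, entered_state;

	/* A governor error falls into the default idle path... */
	next_state = mock_cpuidle_select();
	if (next_state < 0) {
use_default:
		mock_arch_cpu_idle();
		goto exit_idle;
	}

	/*
	 * ...and so does a failed switch to the broadcast timer: the
	 * backward goto reuses the fallback instead of duplicating it.
	 */
	if (!mock_broadcast_enter())
		goto use_default;

	entered_state = mock_cpuidle_enter(next_state);
	printf("governor reflects on state %d\n", entered_state);

exit_idle:
	puts("shared epilogue: re-enable polling, sanity checks");
}

int main(void)
{
	idle_call_sketch();	/* with these stubs, exercises the fallback path */
	return 0;
}

With these stubs the fallback path runs; making mock_broadcast_enter() return true exercises the normal path instead. The backward goto into the if body is legal C, since labels have function scope, and it is what lets both failure cases share a single copy of the fallback code.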
