Diffstat (limited to 'kernel')

-rw-r--r--  kernel/hrtimer.c           |   9
-rw-r--r--  kernel/sched.c             | 155
-rw-r--r--  kernel/time/clocksource.c  |   8
-rw-r--r--  kernel/timer.c             |   8

4 files changed, 14 insertions(+), 166 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 476cb0c0b4a4..de93a8176ca6 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1355,17 +1355,16 @@ static void migrate_hrtimers(int cpu)
 	tick_cancel_sched_timer(cpu);
 
 	local_irq_disable();
-
-	spin_lock(&new_base->lock);
-	spin_lock(&old_base->lock);
+	double_spin_lock(&new_base->lock, &old_base->lock,
+			 smp_processor_id() < cpu);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		migrate_hrtimer_list(&old_base->clock_base[i],
 				     &new_base->clock_base[i]);
 	}
-	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
 
+	double_spin_unlock(&new_base->lock, &old_base->lock,
+			   smp_processor_id() < cpu);
 	local_irq_enable();
 	put_cpu_var(hrtimer_bases);
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 5f102e6c7a4c..a4ca632c477c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3006,23 +3006,6 @@ static inline void idle_balance(int cpu, struct rq *rq)
 }
 #endif
 
-static inline void wake_priority_sleeper(struct rq *rq)
-{
-#ifdef CONFIG_SCHED_SMT
-	if (!rq->nr_running)
-		return;
-
-	spin_lock(&rq->lock);
-	/*
-	 * If an SMT sibling task has been put to sleep for priority
-	 * reasons reschedule the idle task to see if it can now run.
-	 */
-	if (rq->nr_running)
-		resched_task(rq->idle);
-	spin_unlock(&rq->lock);
-#endif
-}
-
 DEFINE_PER_CPU(struct kernel_stat, kstat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
@@ -3239,10 +3222,7 @@ void scheduler_tick(void)
 
 	update_cpu_clock(p, rq, now);
 
-	if (p == rq->idle)
-		/* Task on the idle queue */
-		wake_priority_sleeper(rq);
-	else
+	if (p != rq->idle)
 		task_running_tick(rq, p);
 #ifdef CONFIG_SMP
 	update_load(rq);
@@ -3251,136 +3231,6 @@ void scheduler_tick(void)
 #endif
 }
 
-#ifdef CONFIG_SCHED_SMT
-static inline void wakeup_busy_runqueue(struct rq *rq)
-{
-	/* If an SMT runqueue is sleeping due to priority reasons wake it up */
-	if (rq->curr == rq->idle && rq->nr_running)
-		resched_task(rq->idle);
-}
-
-/*
- * Called with interrupt disabled and this_rq's runqueue locked.
- */
-static void wake_sleeping_dependent(int this_cpu)
-{
-	struct sched_domain *tmp, *sd = NULL;
-	int i;
-
-	for_each_domain(this_cpu, tmp) {
-		if (tmp->flags & SD_SHARE_CPUPOWER) {
-			sd = tmp;
-			break;
-		}
-	}
-
-	if (!sd)
-		return;
-
-	for_each_cpu_mask(i, sd->span) {
-		struct rq *smt_rq = cpu_rq(i);
-
-		if (i == this_cpu)
-			continue;
-		if (unlikely(!spin_trylock(&smt_rq->lock)))
-			continue;
-
-		wakeup_busy_runqueue(smt_rq);
-		spin_unlock(&smt_rq->lock);
-	}
-}
-
-/*
- * number of 'lost' timeslices this task wont be able to fully
- * utilize, if another task runs on a sibling. This models the
- * slowdown effect of other tasks running on siblings:
- */
-static inline unsigned long
-smt_slice(struct task_struct *p, struct sched_domain *sd)
-{
-	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
-}
-
-/*
- * To minimise lock contention and not have to drop this_rq's runlock we only
- * trylock the sibling runqueues and bypass those runqueues if we fail to
- * acquire their lock. As we only trylock the normal locking order does not
- * need to be obeyed.
- */
-static int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
-	struct sched_domain *tmp, *sd = NULL;
-	int ret = 0, i;
-
-	/* kernel/rt threads do not participate in dependent sleeping */
-	if (!p->mm || rt_task(p))
-		return 0;
-
-	for_each_domain(this_cpu, tmp) {
-		if (tmp->flags & SD_SHARE_CPUPOWER) {
-			sd = tmp;
-			break;
-		}
-	}
-
-	if (!sd)
-		return 0;
-
-	for_each_cpu_mask(i, sd->span) {
-		struct task_struct *smt_curr;
-		struct rq *smt_rq;
-
-		if (i == this_cpu)
-			continue;
-
-		smt_rq = cpu_rq(i);
-		if (unlikely(!spin_trylock(&smt_rq->lock)))
-			continue;
-
-		smt_curr = smt_rq->curr;
-
-		if (!smt_curr->mm)
-			goto unlock;
-
-		/*
-		 * If a user task with lower static priority than the
-		 * running task on the SMT sibling is trying to schedule,
-		 * delay it till there is proportionately less timeslice
-		 * left of the sibling task to prevent a lower priority
-		 * task from using an unfair proportion of the
-		 * physical cpu's resources. -ck
-		 */
-		if (rt_task(smt_curr)) {
-			/*
-			 * With real time tasks we run non-rt tasks only
-			 * per_cpu_gain% of the time.
-			 */
-			if ((jiffies % DEF_TIMESLICE) >
-				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
-					ret = 1;
-		} else {
-			if (smt_curr->static_prio < p->static_prio &&
-				!TASK_PREEMPTS_CURR(p, smt_rq) &&
-				smt_slice(smt_curr, sd) > task_timeslice(p))
-					ret = 1;
-		}
-unlock:
-		spin_unlock(&smt_rq->lock);
-	}
-	return ret;
-}
-#else
-static inline void wake_sleeping_dependent(int this_cpu)
-{
-}
-static inline int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
-	return 0;
-}
-#endif
-
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
 void fastcall add_preempt_count(int val)
@@ -3507,7 +3357,6 @@ need_resched_nonpreemptible:
 		if (!rq->nr_running) {
 			next = rq->idle;
 			rq->expired_timestamp = 0;
-			wake_sleeping_dependent(cpu);
 			goto switch_tasks;
 		}
 	}
@@ -3547,8 +3396,6 @@ need_resched_nonpreemptible:
 		}
 	}
 	next->sleep_type = SLEEP_NORMAL;
-	if (rq->nr_running == 1 && dependent_sleeper(cpu, rq, next))
-		next = rq->idle;
 switch_tasks:
 	if (next == rq->idle)
 		schedstat_inc(rq, sched_goidle);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 193a0793af95..5b0e46b56fd0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -55,16 +55,18 @@ static DEFINE_SPINLOCK(clocksource_lock);
 static char override_name[32];
 static int finished_booting;
 
-/* clocksource_done_booting - Called near the end of bootup
+/* clocksource_done_booting - Called near the end of core bootup
  *
- * Hack to avoid lots of clocksource churn at boot time
+ * Hack to avoid lots of clocksource churn at boot time.
+ * We use fs_initcall because we want this to start before
+ * device_initcall but after subsys_initcall.
  */
 static int __init clocksource_done_booting(void)
 {
 	finished_booting = 1;
 	return 0;
 }
-late_initcall(clocksource_done_booting);
+fs_initcall(clocksource_done_booting);
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 static LIST_HEAD(watchdog_list);
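
The new comment in this hunk leans on the relative ordering of the initcall levels. As an illustration only (the ordering below reflects include/linux/init.h of this era; the example function is hypothetical and not part of the patch), a device-level initcall now runs after clocksource_done_booting() has set finished_booting:

/* Illustrative sketch, not part of the patch. The initcall levels run in
 * roughly this order:
 *
 *   core_initcall -> postcore_initcall -> arch_initcall ->
 *   subsys_initcall -> fs_initcall -> device_initcall -> late_initcall
 *
 * so an fs_initcall fires before any device_initcall but after every
 * subsys_initcall.
 */
#include <linux/init.h>

static int __init example_driver_init(void)
{
	/* hypothetical driver init: by this point finished_booting is set */
	return 0;
}
device_initcall(example_driver_init);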
diff --git a/kernel/timer.c b/kernel/timer.c
index 6663a87f7304..8ad384253ef2 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1651,8 +1651,8 @@ static void __devinit migrate_timers(int cpu)
 	new_base = get_cpu_var(tvec_bases);
 
 	local_irq_disable();
-	spin_lock(&new_base->lock);
-	spin_lock(&old_base->lock);
+	double_spin_lock(&new_base->lock, &old_base->lock,
+			 smp_processor_id() < cpu);
 
 	BUG_ON(old_base->running_timer);
 
@@ -1665,8 +1665,8 @@ static void __devinit migrate_timers(int cpu)
 		migrate_timer_list(new_base, old_base->tv5.vec + i);
 	}
 
-	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
+	double_spin_unlock(&new_base->lock, &old_base->lock,
+			   smp_processor_id() < cpu);
 	local_irq_enable();
 	put_cpu_var(tvec_bases);
 }
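
Both migration paths above replace an open-coded pair of spin_lock()/spin_unlock() calls with double_spin_lock()/double_spin_unlock(). The helper itself is not part of this diff; the following is a rough sketch of the idea (assuming the usual definition in the spinlock headers), not the kernel's definition: the boolean argument, here smp_processor_id() < cpu, gives every caller the same order for any given pair of base locks (lower-numbered CPU's base first), which is the standard way to rule out ABBA deadlock when two locks must be held at once.

/* Sketch only -- not the kernel's definition.  Taking the two locks in a
 * globally agreed order means no two paths can each hold one lock while
 * waiting for the other.
 */
static inline void
double_spin_lock(spinlock_t *l1, spinlock_t *l2, bool l1_first)
{
	if (l1_first) {
		spin_lock(l1);
		spin_lock(l2);
	} else {
		spin_lock(l2);
		spin_lock(l1);
	}
}

static inline void
double_spin_unlock(spinlock_t *l1, spinlock_t *l2, bool l1_taken_first)
{
	/* release in the reverse of the acquisition order */
	if (l1_taken_first) {
		spin_unlock(l2);
		spin_unlock(l1);
	} else {
		spin_unlock(l1);
		spin_unlock(l2);
	}
}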
