Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 155
1 file changed, 1 insertion, 154 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 5f102e6c7a4c..a4ca632c477c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3006,23 +3006,6 @@ static inline void idle_balance(int cpu, struct rq *rq)
 }
 #endif
 
-static inline void wake_priority_sleeper(struct rq *rq)
-{
-#ifdef CONFIG_SCHED_SMT
-        if (!rq->nr_running)
-                return;
-
-        spin_lock(&rq->lock);
-        /*
-         * If an SMT sibling task has been put to sleep for priority
-         * reasons reschedule the idle task to see if it can now run.
-         */
-        if (rq->nr_running)
-                resched_task(rq->idle);
-        spin_unlock(&rq->lock);
-#endif
-}
-
 DEFINE_PER_CPU(struct kernel_stat, kstat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
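The function removed above follows a common pattern: a cheap unlocked read of rq->nr_running skips the lock entirely in the usual idle case, and the check is repeated under rq->lock before the idle task is actually rescheduled. As a rough illustration only, the userspace sketch below mimics that shape with a pthread mutex; the fake_rq type, its field names and the idle_rescheds counter are stand-ins invented for this example, not kernel APIs.

/* Illustration only -- a userspace analogue, not kernel code. */
#include <pthread.h>
#include <stdio.h>

struct fake_rq {                        /* invented stand-in for struct rq */
        pthread_mutex_t lock;
        int nr_running;                 /* read without the lock on the fast path */
        int idle_rescheds;              /* stands in for resched_task(rq->idle) */
};

static void wake_priority_sleeper_sketch(struct fake_rq *rq)
{
        /* Fast path: nothing queued, so do not touch the lock at all. */
        if (!rq->nr_running)
                return;

        pthread_mutex_lock(&rq->lock);
        /* Re-check under the lock; the unlocked read above may be stale. */
        if (rq->nr_running)
                rq->idle_rescheds++;
        pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
        struct fake_rq rq = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

        wake_priority_sleeper_sketch(&rq);
        printf("idle rescheds: %d\n", rq.idle_rescheds);        /* prints 1 */
        return 0;
}

The unlocked read may race with concurrent updates, which is why only the re-check taken under the lock decides whether anything is done.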
@@ -3239,10 +3222,7 @@ void scheduler_tick(void)
 
         update_cpu_clock(p, rq, now);
 
-        if (p == rq->idle)
-                /* Task on the idle queue */
-                wake_priority_sleeper(rq);
-        else
+        if (p != rq->idle)
                 task_running_tick(rq, p);
 #ifdef CONFIG_SMP
         update_load(rq);
@@ -3251,136 +3231,6 @@ void scheduler_tick(void)
 #endif
 }
 
-#ifdef CONFIG_SCHED_SMT
-static inline void wakeup_busy_runqueue(struct rq *rq)
-{
-        /* If an SMT runqueue is sleeping due to priority reasons wake it up */
-        if (rq->curr == rq->idle && rq->nr_running)
-                resched_task(rq->idle);
-}
-
-/*
- * Called with interrupt disabled and this_rq's runqueue locked.
- */
-static void wake_sleeping_dependent(int this_cpu)
-{
-        struct sched_domain *tmp, *sd = NULL;
-        int i;
-
-        for_each_domain(this_cpu, tmp) {
-                if (tmp->flags & SD_SHARE_CPUPOWER) {
-                        sd = tmp;
-                        break;
-                }
-        }
-
-        if (!sd)
-                return;
-
-        for_each_cpu_mask(i, sd->span) {
-                struct rq *smt_rq = cpu_rq(i);
-
-                if (i == this_cpu)
-                        continue;
-                if (unlikely(!spin_trylock(&smt_rq->lock)))
-                        continue;
-
-                wakeup_busy_runqueue(smt_rq);
-                spin_unlock(&smt_rq->lock);
-        }
-}
-
-/*
- * number of 'lost' timeslices this task wont be able to fully
- * utilize, if another task runs on a sibling. This models the
- * slowdown effect of other tasks running on siblings:
- */
-static inline unsigned long
-smt_slice(struct task_struct *p, struct sched_domain *sd)
-{
-        return p->time_slice * (100 - sd->per_cpu_gain) / 100;
-}
-
-/*
- * To minimise lock contention and not have to drop this_rq's runlock we only
- * trylock the sibling runqueues and bypass those runqueues if we fail to
- * acquire their lock. As we only trylock the normal locking order does not
- * need to be obeyed.
- */
-static int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
-        struct sched_domain *tmp, *sd = NULL;
-        int ret = 0, i;
-
-        /* kernel/rt threads do not participate in dependent sleeping */
-        if (!p->mm || rt_task(p))
-                return 0;
-
-        for_each_domain(this_cpu, tmp) {
-                if (tmp->flags & SD_SHARE_CPUPOWER) {
-                        sd = tmp;
-                        break;
-                }
-        }
-
-        if (!sd)
-                return 0;
-
-        for_each_cpu_mask(i, sd->span) {
-                struct task_struct *smt_curr;
-                struct rq *smt_rq;
-
-                if (i == this_cpu)
-                        continue;
-
-                smt_rq = cpu_rq(i);
-                if (unlikely(!spin_trylock(&smt_rq->lock)))
-                        continue;
-
-                smt_curr = smt_rq->curr;
-
-                if (!smt_curr->mm)
-                        goto unlock;
-
-                /*
-                 * If a user task with lower static priority than the
-                 * running task on the SMT sibling is trying to schedule,
-                 * delay it till there is proportionately less timeslice
-                 * left of the sibling task to prevent a lower priority
-                 * task from using an unfair proportion of the
-                 * physical cpu's resources. -ck
-                 */
-                if (rt_task(smt_curr)) {
-                        /*
-                         * With real time tasks we run non-rt tasks only
-                         * per_cpu_gain% of the time.
-                         */
-                        if ((jiffies % DEF_TIMESLICE) >
-                                (sd->per_cpu_gain * DEF_TIMESLICE / 100))
-                                        ret = 1;
-                } else {
-                        if (smt_curr->static_prio < p->static_prio &&
-                                !TASK_PREEMPTS_CURR(p, smt_rq) &&
-                                smt_slice(smt_curr, sd) > task_timeslice(p))
-                                        ret = 1;
-                }
-unlock:
-                spin_unlock(&smt_rq->lock);
-        }
-        return ret;
-}
-#else
-static inline void wake_sleeping_dependent(int this_cpu)
-{
-}
-static inline int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
-        return 0;
-}
-#endif
-
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
 void fastcall add_preempt_count(int val)
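Most of the logic removed in the hunk above is plain percentage arithmetic driven by the sched_domain's per_cpu_gain. As a worked illustration only, the standalone program below redoes that arithmetic with assumed numbers (a per_cpu_gain of 25% and a 100-tick window standing in for DEF_TIMESLICE; the real values come from the domain setup and HZ): a sibling with 100 ticks of timeslice left is treated as effectively having 75, and with an RT task on the sibling, non-RT work is admitted only while the position inside the window is at or below 25 ticks.

/* Illustration only -- assumed example values, not taken from the patch. */
#include <stdio.h>

#define EXAMPLE_WINDOW  100     /* ticks; stand-in for DEF_TIMESLICE */

/* Same formula as the removed smt_slice() helper. */
static unsigned long smt_slice_example(unsigned long time_slice,
                                       unsigned int per_cpu_gain)
{
        return time_slice * (100 - per_cpu_gain) / 100;
}

int main(void)
{
        unsigned int per_cpu_gain = 25;   /* assumed gain for the SMT domain */
        unsigned long time_slice = 100;   /* sibling's remaining timeslice, ticks */
        unsigned long jiffies;

        /* A sibling with 100 ticks left is treated as effectively having 75. */
        printf("smt_slice(%lu, %u%%) = %lu ticks\n", time_slice, per_cpu_gain,
               smt_slice_example(time_slice, per_cpu_gain));

        /*
         * RT case from dependent_sleeper(): force idle whenever the position
         * inside the window exceeds per_cpu_gain% of the window, i.e. non-RT
         * work is only admitted during the first quarter of each window here.
         */
        for (jiffies = 0; jiffies < EXAMPLE_WINDOW; jiffies += 20) {
                int force_idle = (jiffies % EXAMPLE_WINDOW) >
                                 (per_cpu_gain * EXAMPLE_WINDOW / 100);
                printf("tick %3lu in window -> %s\n",
                       jiffies % EXAMPLE_WINDOW,
                       force_idle ? "force idle" : "may run");
        }
        return 0;
}

With those assumed values, smt_slice() comes out at 75 ticks and the loop prints "may run" for the first quarter of the window and "force idle" for the rest.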
@@ -3507,7 +3357,6 @@ need_resched_nonpreemptible:
                 if (!rq->nr_running) {
                         next = rq->idle;
                         rq->expired_timestamp = 0;
-                        wake_sleeping_dependent(cpu);
                         goto switch_tasks;
                 }
         }
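The wake_sleeping_dependent() call removed in this hunk ran with this runqueue's lock already held; as its (also removed) comment notes, it therefore only trylocked the sibling runqueues and skipped any it could not get, so no lock-ordering rule was needed to avoid deadlock. The userspace sketch below, offered as an illustration only, shows the same skip-on-contention idea with pthread_mutex_trylock(); the sibling_lock array and the poke_siblings() helper are invented for this example.

/* Illustration only -- a userspace analogue, not kernel code. */
#include <pthread.h>
#include <stdio.h>

#define NR_SIBLINGS 2                   /* invented two-sibling setup */

static pthread_mutex_t sibling_lock[NR_SIBLINGS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

static void poke_siblings(int this_cpu)
{
        int i;

        for (i = 0; i < NR_SIBLINGS; i++) {
                if (i == this_cpu)
                        continue;
                /* Contended sibling lock?  Skip it rather than block on it. */
                if (pthread_mutex_trylock(&sibling_lock[i]) != 0) {
                        printf("cpu %d: sibling %d busy, skipped\n", this_cpu, i);
                        continue;
                }
                printf("cpu %d: poked sibling %d\n", this_cpu, i);
                pthread_mutex_unlock(&sibling_lock[i]);
        }
}

int main(void)
{
        poke_siblings(0);       /* single-threaded demo: sibling 1 is free */
        return 0;
}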
@@ -3547,8 +3396,6 @@ need_resched_nonpreemptible:
                 }
         }
         next->sleep_type = SLEEP_NORMAL;
-        if (rq->nr_running == 1 && dependent_sleeper(cpu, rq, next))
-                next = rq->idle;
 switch_tasks:
         if (next == rq->idle)
                 schedstat_inc(rq, sched_goidle);