Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	74
1 file changed, 55 insertions(+), 19 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index bfb8ad8ed171..94ead43eda62 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -312,12 +312,15 @@ static DEFINE_SPINLOCK(task_group_lock);
 #endif
 
 /*
- * A weight of 0, 1 or ULONG_MAX can cause arithmetics problems.
+ * A weight of 0 or 1 can cause arithmetics problems.
+ * A weight of a cfs_rq is the sum of weights of which entities
+ * are queued on this cfs_rq, so a weight of a entity should not be
+ * too large, so as the shares value of a task group.
  * (The default weight is 1024 - so there's no practical
  *  limitation from this.)
  */
 #define MIN_SHARES	2
-#define MAX_SHARES	(ULONG_MAX - 1)
+#define MAX_SHARES	(1UL << 18)
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 #endif
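A task group's shares value feeds the load weights of its scheduling entities, so capping it at 2^18 instead of ULONG_MAX - 1 keeps every weight far below the 2^32 fixed-point scale used by the weight arithmetic further down. A minimal userspace sketch of bounding a requested shares value to the new range (clamp_shares() is a hypothetical helper for illustration, not the kernel function):

#include <stdio.h>

#define MIN_SHARES	2
#define MAX_SHARES	(1UL << 18)

/* bound a requested group shares value to the range the scheduler accepts */
static unsigned long clamp_shares(unsigned long shares)
{
	if (shares < MIN_SHARES)
		return MIN_SHARES;
	if (shares > MAX_SHARES)
		return MAX_SHARES;
	return shares;
}

int main(void)
{
	printf("%lu\n", clamp_shares(0));	/* -> 2 */
	printf("%lu\n", clamp_shares(1024));	/* default weight, unchanged */
	printf("%lu\n", clamp_shares(~0UL));	/* -> 262144 */
	return 0;
}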
@@ -1124,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -1179,6 +1183,7 @@ static void init_hrtick(void)
 {
 	hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -1337,8 +1342,13 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 {
 	u64 tmp;
 
-	if (!lw->inv_weight)
-		lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)/(lw->weight+1);
+	if (!lw->inv_weight) {
+		if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
+			lw->inv_weight = 1;
+		else
+			lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
+				/ (lw->weight+1);
+	}
 
 	tmp = (u64)delta_exec * weight;
 	/*
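calc_delta_mine() scales delta_exec by weight/lw->weight using a precomputed fixed-point inverse, roughly inv_weight = 2^32 / lw->weight, and the new branch keeps a 64-bit weight at or above WMULT_CONST from producing a nonsensical inverse. A standalone sketch of that arithmetic, with illustrative numbers and without the 64-bit overflow splitting the real function also performs:

#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST	(1ULL << 32)	/* 64-bit value; 32-bit kernels use ~0UL */
#define WMULT_SHIFT	32

/* weighted share of delta_exec for an entity of `weight` on a queue whose
 * total load is `lw_weight`, mirroring the inv_weight computation above */
static uint64_t scale_delta(uint64_t delta_exec, unsigned long weight,
			    unsigned long lw_weight)
{
	uint64_t inv_weight;

	if (lw_weight >= WMULT_CONST)	/* the new guard: huge weights get inverse 1 */
		inv_weight = 1;
	else
		inv_weight = 1 + (WMULT_CONST - lw_weight / 2) / (lw_weight + 1);

	return (delta_exec * weight * inv_weight) >> WMULT_SHIFT;
}

int main(void)
{
	/* a default-weight task (1024) on a queue with total weight 3072 is
	 * credited roughly a third of the elapsed 3,000,000 ns */
	printf("%llu\n", (unsigned long long)scale_delta(3000000, 1024, 3072));
	return 0;
}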
@@ -4159,12 +4169,10 @@ need_resched_nonpreemptible:
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				signal_pending(prev))) {
+		if (unlikely(signal_pending_state(prev->state, prev)))
 			prev->state = TASK_RUNNING;
-		} else {
+		else
 			deactivate_task(rq, prev, 1);
-		}
 		switch_count = &prev->nvcsw;
 	}
 
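signal_pending_state(), added to include/linux/sched.h in the same series, folds the old "TASK_INTERRUPTIBLE and a signal is pending" test together with the TASK_KILLABLE case, where only a fatal signal may abort the sleep. A userspace model of the decision it makes (flag values assumed to match the kernel's; the pending-signal checks are passed in as booleans purely for illustration):

#include <stdbool.h>
#include <stdio.h>

#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_WAKEKILL		128
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)

/* should a pending signal keep the task runnable instead of deactivating it? */
static bool signal_breaks_sleep(long state, bool sig_pending, bool fatal_sig_pending)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return false;		/* plain TASK_UNINTERRUPTIBLE sleeps through */
	if (!sig_pending)
		return false;
	return (state & TASK_INTERRUPTIBLE) || fatal_sig_pending;
}

int main(void)
{
	printf("%d\n", signal_breaks_sleep(TASK_INTERRUPTIBLE, true, false));	/* 1: any signal */
	printf("%d\n", signal_breaks_sleep(TASK_KILLABLE, true, false));	/* 0: non-fatal ignored */
	printf("%d\n", signal_breaks_sleep(TASK_KILLABLE, true, true));		/* 1: fatal breaks it */
	return 0;
}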
@@ -4390,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 			     signal_pending(current)) ||
 			    (state == TASK_KILLABLE &&
 			     fatal_signal_pending(current))) {
-				__remove_wait_queue(&x->wait, &wait);
-				return -ERESTARTSYS;
+				timeout = -ERESTARTSYS;
+				break;
 			}
 			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				return timeout;
-			}
-		} while (!x->done);
+		} while (!x->done && timeout);
 		__remove_wait_queue(&x->wait, &wait);
+		if (!x->done)
+			return timeout;
 	}
 	x->done--;
-	return timeout;
+	return timeout ?: 1;
 }
 
 static long __sched
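The rework funnels every exit through the single __remove_wait_queue() after the loop and tightens the return convention: an interrupt or an expired timeout now breaks out, and a wait that does complete returns timeout ?: 1, i.e. at least 1, so a caller can no longer mistake "completed just as the timeout expired" for "timed out". A hedged caller sketch of how the timeout variants read under that convention (request_done is an illustrative completion, not something from this patch):

	/* hypothetical driver snippet built on wait_for_completion_timeout(),
	 * which wraps do_wait_for_common() */
	unsigned long left;

	left = wait_for_completion_timeout(&request_done, msecs_to_jiffies(100));
	if (!left)
		printk(KERN_WARNING "request timed out\n");	/* completion never fired */
	else
		printk(KERN_DEBUG "done, %lu jiffies to spare\n", left);	/* >= 1 even on a photo finish */

The interruptible and killable timeout variants additionally pass -ERESTARTSYS through, so their signed return value carries three cases: negative, zero, or the remaining jiffies.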
@@ -5881,6 +5887,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 		next = pick_next_task(rq, rq->curr);
 		if (!next)
 			break;
+		next->sched_class->put_prev_task(rq, next);
 		migrate_dead(dead_cpu, next);
 
 	}
@@ -6871,7 +6878,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
@@ -7230,6 +7242,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7376,6 +7400,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7461,6 +7486,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7479,8 +7505,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
@@ -7620,7 +7654,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
@@ -8342,7 +8375,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {
@@ -8469,6 +8502,9 @@ int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
 	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
 	rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+	if (rt_period == 0)
+		return -EINVAL;
+
 	return tg_set_bandwidth(tg, rt_period, rt_runtime);
 }
 
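The period ends up as the divisor when the runtime/period pair is converted to a fixed-point ratio for the RT schedulability check, so a zero rt_period would divide by zero; rejecting it with -EINVAL closes that hole. A userspace sketch of that conversion (the 2^16 scaling is an assumption about to_ratio(), not quoted from this patch):

#include <stdint.h>
#include <stdio.h>

/* runtime/period as a fraction scaled by 2^16; period must be non-zero */
static unsigned long to_ratio(uint64_t period, uint64_t runtime)
{
	return (unsigned long)((runtime << 16) / period);
}

int main(void)
{
	/* 950 ms of RT runtime per 1 s period, both in nanoseconds: ~0.95 * 65536 */
	printf("%lu\n", to_ratio(1000000000ULL, 950000000ULL));
	return 0;
}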