Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	108
1 file changed, 45 insertions(+), 63 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c13c75e9f9f7..85cf246cfdf5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2802,7 +2802,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 
 	rq = task_rq_lock(p, &flags);
 	if (!cpu_isset(dest_cpu, p->cpus_allowed)
-	    || unlikely(cpu_is_offline(dest_cpu)))
+	    || unlikely(!cpu_active(dest_cpu)))
 		goto out;
 
 	/* force the process onto the specified CPU */
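For context: cpu_active() is not defined in this file; it is introduced elsewhere in this series, together with a new cpu_active_map that is cleared early in hot-unplug and set only once a CPU is fully online. A minimal sketch of the two predicates being swapped here, assuming the cpumask API of this kernel generation (the exact definitions live in include/linux/cpumask.h and include/linux/cpu.h, which are not part of this file's diff):

	/* sketch, not part of this diff */
	extern cpumask_t cpu_active_map;	/* cleared before a CPU goes down */
	#define cpu_active(cpu)		cpu_isset((cpu), cpu_active_map)
	#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

The practical difference: !cpu_active(cpu) becomes true as soon as hot-unplug begins, while cpu_is_offline(cpu) only becomes true once the CPU has actually left cpu_online_map, so migration code stops targeting a dying CPU earlier.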
@@ -3770,7 +3770,7 @@ int select_nohz_load_balancer(int stop_tick)
 	/*
 	 * If we are going offline and still the leader, give up!
 	 */
-	if (cpu_is_offline(cpu) &&
+	if (!cpu_active(cpu) &&
 	    atomic_read(&nohz.load_balancer) == cpu) {
 		if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
 			BUG();
@@ -5794,7 +5794,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	struct rq *rq_dest, *rq_src;
 	int ret = 0, on_rq;
 
-	if (unlikely(cpu_is_offline(dest_cpu)))
+	if (unlikely(!cpu_active(dest_cpu)))
 		return ret;
 
 	rq_src = cpu_rq(src_cpu);
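All three conversions so far sit on migration paths (sched_migrate_task(), the nohz load-balancer handoff, and __migrate_task()); with the active-map check, a CPU that has started going down is rejected as a destination immediately, instead of racing with the final stages of hot-unplug.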
@@ -7472,18 +7472,6 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
- * Free current domain masks.
- * Called after all cpus are attached to NULL domain.
- */
-static void free_sched_domains(void)
-{
-	ndoms_cur = 0;
-	if (doms_cur != &fallback_doms)
-		kfree(doms_cur);
-	doms_cur = &fallback_doms;
-}
-
-/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7561,7 +7549,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * ownership of it and will kfree it when done with it. If the caller
  * failed the kmalloc call, then it can pass in doms_new == NULL,
  * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms'.
+ * 'fallback_doms', it also forces the domains to be rebuilt.
  *
  * Call with hotplug lock held
  */
@@ -7575,12 +7563,8 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 	/* always unregister in case we don't destroy any domains */
 	unregister_sched_domain_sysctl();
 
-	if (doms_new == NULL) {
-		ndoms_new = 1;
-		doms_new = &fallback_doms;
-		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
-		dattr_new = NULL;
-	}
+	if (doms_new == NULL)
+		ndoms_new = 0;
 
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
@@ -7595,6 +7579,14 @@ match1:
 		;
 	}
 
+	if (doms_new == NULL) {
+		ndoms_cur = 0;
+		ndoms_new = 1;
+		doms_new = &fallback_doms;
+		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
+		dattr_new = NULL;
+	}
+
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
 		for (j = 0; j < ndoms_cur; j++) {
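The reordering above is what makes the NULL fallback "force the domains to be rebuilt": ndoms_new is first zeroed, so the destroy loop matches nothing and tears down every current domain, and only afterwards is the fallback partition installed and built fresh. A caller that wants a full rebuild (as the reworked hotplug notifier below does) therefore just passes:

	/* destroy all current domains, then rebuild the default
	 * (online minus isolated) single partition from scratch */
	partition_sched_domains(0, NULL, NULL);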
@@ -7625,17 +7617,10 @@ match2:
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
 int arch_reinit_sched_domains(void)
 {
-	int err;
-
 	get_online_cpus();
-	mutex_lock(&sched_domains_mutex);
-	detach_destroy_domains(&cpu_online_map);
-	free_sched_domains();
-	err = arch_init_sched_domains(&cpu_online_map);
-	mutex_unlock(&sched_domains_mutex);
+	rebuild_sched_domains();
 	put_online_cpus();
-
-	return err;
+	return 0;
 }
 
 static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
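rebuild_sched_domains() is not defined in this file. With CONFIG_CPUSETS it is the cpuset code's entry point for regenerating partitions from the cpuset hierarchy; without cpusets this series supplies a trivial fallback. A sketch of that fallback, assuming it lives in include/linux/cpuset.h (the signature is illustrative; arch_reinit_sched_domains() above ignores any return value and reports 0 regardless):

	/* sketch of the assumed !CONFIG_CPUSETS fallback */
	static inline void rebuild_sched_domains(void)
	{
		partition_sched_domains(0, NULL, NULL);
	}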
@@ -7701,59 +7686,49 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
+#ifndef CONFIG_CPUSETS
 /*
- * Force a reinitialization of the sched domains hierarchy. The domains
- * and groups cannot be updated in place without racing with the balancing
- * code, so we temporarily attach all running cpus to the NULL domain
- * which will prevent rebalancing while the sched domains are recalculated.
+ * Add online and remove offline CPUs from the scheduler domains.
+ * When cpusets are enabled they take over this function.
  */
 static int update_sched_domains(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
 {
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		partition_sched_domains(0, NULL, NULL);
+		return NOTIFY_OK;
+
+	default:
+		return NOTIFY_DONE;
+	}
+}
+#endif
+
+static int update_runtime(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
 	int cpu = (int)(long)hcpu;
 
 	switch (action) {
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		disable_runtime(cpu_rq(cpu));
-		/* fall-through */
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		detach_destroy_domains(&cpu_online_map);
-		free_sched_domains();
 		return NOTIFY_OK;
 
-
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		enable_runtime(cpu_rq(cpu));
-		/* fall-through */
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/*
-		 * Fall through and re-initialise the domains.
-		 */
-		break;
+		return NOTIFY_OK;
+
 	default:
 		return NOTIFY_DONE;
 	}
-
-#ifndef CONFIG_CPUSETS
-	/*
-	 * Create default domain partitioning if cpusets are disabled.
-	 * Otherwise we let cpusets rebuild the domains based on the
-	 * current setup.
-	 */
-
-	/* The hotplug lock is already held by cpu_up/cpu_down */
-	arch_init_sched_domains(&cpu_online_map);
-#endif
-
-	return NOTIFY_OK;
 }
 
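The old notifier has thus been split in two. update_sched_domains() keeps only the domain rebuild, runs on CPU_ONLINE/CPU_DEAD (and their _FROZEN variants), and is compiled out entirely when cpusets own domain management; update_runtime() keeps the RT runtime disable/enable bookkeeping, which is needed in every configuration. The CPU_UP_PREPARE/CPU_UP_CANCELED cases disappear altogether: one rebuild after the CPU is fully online or dead suffices now that partition_sched_domains(0, NULL, NULL) performs the complete destroy-and-rebuild itself.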
7759 | void __init sched_init_smp(void) | 7734 | void __init sched_init_smp(void) |
@@ -7773,8 +7748,15 @@ void __init sched_init_smp(void) | |||
7773 | cpu_set(smp_processor_id(), non_isolated_cpus); | 7748 | cpu_set(smp_processor_id(), non_isolated_cpus); |
7774 | mutex_unlock(&sched_domains_mutex); | 7749 | mutex_unlock(&sched_domains_mutex); |
7775 | put_online_cpus(); | 7750 | put_online_cpus(); |
7751 | |||
7752 | #ifndef CONFIG_CPUSETS | ||
7776 | /* XXX: Theoretical race here - CPU may be hotplugged now */ | 7753 | /* XXX: Theoretical race here - CPU may be hotplugged now */ |
7777 | hotcpu_notifier(update_sched_domains, 0); | 7754 | hotcpu_notifier(update_sched_domains, 0); |
7755 | #endif | ||
7756 | |||
7757 | /* RT runtime code needs to handle some hotplug events */ | ||
7758 | hotcpu_notifier(update_runtime, 0); | ||
7759 | |||
7778 | init_hrtick(); | 7760 | init_hrtick(); |
7779 | 7761 | ||
7780 | /* Move init over to a non-isolated CPU */ | 7762 | /* Move init over to a non-isolated CPU */ |
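Both notifiers are registered through hotcpu_notifier(). As a rough sketch of what that helper does (based on the include/linux/cpu.h of this era; the exact expansion is an assumption, not shown in this diff), it wraps the callback in a static notifier_block and registers it on the CPU notifier chain:

	/* approximate sketch of the helper used above */
	#define hotcpu_notifier(fn, pri) {				\
		static struct notifier_block fn##_nb =			\
			{ .notifier_call = fn, .priority = pri };	\
		register_cpu_notifier(&fn##_nb);			\
	}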