Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--	kernel/cpuset.c	| 50
1 file changed, 31 insertions(+), 19 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index eab7bd6628e0..96c0ba13b8cd 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -36,6 +36,7 @@
 #include <linux/list.h>
 #include <linux/mempolicy.h>
 #include <linux/mm.h>
+#include <linux/memory.h>
 #include <linux/module.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
@@ -584,10 +585,9 @@ static int generate_sched_domains(cpumask_t **domains,
 	int i, j, k;		/* indices for partition finding loops */
 	cpumask_t *doms;	/* resulting partition; i.e. sched domains */
 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
-	int ndoms;		/* number of sched domains in result */
+	int ndoms = 0;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
-	ndoms = 0;
 	doms = NULL;
 	dattr = NULL;
 	csa = NULL;
@@ -674,10 +674,8 @@ restart:
 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 	 */
 	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
-	if (!doms) {
-		ndoms = 0;
+	if (!doms)
 		goto done;
-	}
 
 	/*
 	 * The rest of the code, including the scheduler, can deal with
@@ -732,6 +730,13 @@ restart:
 done:
 	kfree(csa);
 
+	/*
+	 * Fallback to the default domain if kmalloc() failed.
+	 * See comments in partition_sched_domains().
+	 */
+	if (doms == NULL)
+		ndoms = 1;
+
 	*domains = doms;
 	*attributes = dattr;
 	return ndoms;
@@ -1172,7 +1177,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 {
 	struct cpuset trialcs;
 	int err;
-	int cpus_nonempty, balance_flag_changed;
+	int balance_flag_changed;
 
 	trialcs = *cs;
 	if (turning_on)
@@ -1184,7 +1189,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	if (err < 0)
 		return err;
 
-	cpus_nonempty = !cpus_empty(trialcs.cpus_allowed);
 	balance_flag_changed = (is_sched_load_balance(cs) !=
 					is_sched_load_balance(&trialcs));
 
@@ -1192,7 +1196,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	cs->flags = trialcs.flags;
 	mutex_unlock(&callback_mutex);
 
-	if (cpus_nonempty && balance_flag_changed)
+	if (!cpus_empty(trialcs.cpus_allowed) && balance_flag_changed)
 		async_rebuild_sched_domains();
 
 	return 0;
@@ -2012,12 +2016,23 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
  * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
  * See also the previous routine cpuset_track_online_cpus().
  */
-void cpuset_track_online_nodes(void)
+static int cpuset_track_online_nodes(struct notifier_block *self,
+				unsigned long action, void *arg)
 {
 	cgroup_lock();
-	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
-	scan_for_empty_cpusets(&top_cpuset);
+	switch (action) {
+	case MEM_ONLINE:
+		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+		break;
+	case MEM_OFFLINE:
+		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+		scan_for_empty_cpusets(&top_cpuset);
+		break;
+	default:
+		break;
+	}
 	cgroup_unlock();
+	return NOTIFY_OK;
 }
 #endif
 
@@ -2033,6 +2048,7 @@ void __init cpuset_init_smp(void)
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
 	hotcpu_notifier(cpuset_track_online_cpus, 0);
+	hotplug_memory_notifier(cpuset_track_online_nodes, 10);
 }
 
 /**
@@ -2437,19 +2453,15 @@ const struct file_operations proc_cpuset_operations = {
 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
 {
 	seq_printf(m, "Cpus_allowed:\t");
-	m->count += cpumask_scnprintf(m->buf + m->count, m->size - m->count,
-					task->cpus_allowed);
+	seq_cpumask(m, &task->cpus_allowed);
 	seq_printf(m, "\n");
 	seq_printf(m, "Cpus_allowed_list:\t");
-	m->count += cpulist_scnprintf(m->buf + m->count, m->size - m->count,
-					task->cpus_allowed);
+	seq_cpumask_list(m, &task->cpus_allowed);
 	seq_printf(m, "\n");
 	seq_printf(m, "Mems_allowed:\t");
-	m->count += nodemask_scnprintf(m->buf + m->count, m->size - m->count,
-					task->mems_allowed);
+	seq_nodemask(m, &task->mems_allowed);
 	seq_printf(m, "\n");
 	seq_printf(m, "Mems_allowed_list:\t");
-	m->count += nodelist_scnprintf(m->buf + m->count, m->size - m->count,
-					task->mems_allowed);
+	seq_nodemask_list(m, &task->mems_allowed);
 	seq_printf(m, "\n");
 }
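
For reference, below is a minimal sketch (not part of the patch) of how a memory hotplug notifier in the style of the cpuset_track_online_nodes() conversion above is written and registered. The names example_mem_notify() and example_init() are hypothetical; hotplug_memory_notifier(), MEM_ONLINE, MEM_OFFLINE and NOTIFY_OK are taken from the diff and assume a kernel of this vintage with memory hotplug support enabled.

/*
 * Illustrative memory hotplug notifier; callback and init function
 * names are placeholders, not kernel APIs.
 */
#include <linux/init.h>
#include <linux/memory.h>
#include <linux/notifier.h>

static int example_mem_notify(struct notifier_block *self,
				unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:	/* a memory block came online */
	case MEM_OFFLINE:	/* a memory block went offline */
		/* react to the updated node_states[N_HIGH_MEMORY] here */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init example_init(void)
{
	/* priority 10, matching the registration in cpuset_init_smp() above */
	hotplug_memory_notifier(example_mem_notify, 10);
	return 0;
}
core_initcall(example_init);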