diff options
author | Frederic Weisbecker <frederic@kernel.org> | 2017-10-26 22:42:37 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2017-10-27 03:55:30 -0400 |
commit | edb9382175c3ebdced8ffdb3e0f20052ad9fdbe9 (patch) | |
tree | 5257baafe1ada153e8eb0bfe41e02c8f0545e6fa /kernel/cgroup | |
parent | 6f1982fedd59856bcc42a9b521be4c3ffd2f60a7 (diff) |
sched/isolation: Move isolcpus= handling to the housekeeping code
We want to centralize the isolation features so that they are handled by the
housekeeping subsystem, and scheduler domain isolation is a significant part of that.
There is no intended behaviour change; we just reuse the housekeeping cpumask
and core code.
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Link: http://lkml.kernel.org/r/1509072159-31808-11-git-send-email-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/cgroup')
-rw-r--r-- | kernel/cgroup/cpuset.c | 15 |
1 file changed, 5 insertions, 10 deletions
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 4657e2924ecb..f7efa7b4d825 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c | |||
@@ -57,7 +57,7 @@ | |||
57 | #include <linux/backing-dev.h> | 57 | #include <linux/backing-dev.h> |
58 | #include <linux/sort.h> | 58 | #include <linux/sort.h> |
59 | #include <linux/oom.h> | 59 | #include <linux/oom.h> |
60 | 60 | #include <linux/sched/isolation.h> | |
61 | #include <linux/uaccess.h> | 61 | #include <linux/uaccess.h> |
62 | #include <linux/atomic.h> | 62 | #include <linux/atomic.h> |
63 | #include <linux/mutex.h> | 63 | #include <linux/mutex.h> |
@@ -656,7 +656,6 @@ static int generate_sched_domains(cpumask_var_t **domains, | |||
656 | int csn; /* how many cpuset ptrs in csa so far */ | 656 | int csn; /* how many cpuset ptrs in csa so far */ |
657 | int i, j, k; /* indices for partition finding loops */ | 657 | int i, j, k; /* indices for partition finding loops */ |
658 | cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ | 658 | cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ |
659 | cpumask_var_t non_isolated_cpus; /* load balanced CPUs */ | ||
660 | struct sched_domain_attr *dattr; /* attributes for custom domains */ | 659 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
661 | int ndoms = 0; /* number of sched domains in result */ | 660 | int ndoms = 0; /* number of sched domains in result */ |
662 | int nslot; /* next empty doms[] struct cpumask slot */ | 661 | int nslot; /* next empty doms[] struct cpumask slot */ |
@@ -666,10 +665,6 @@ static int generate_sched_domains(cpumask_var_t **domains, | |||
666 | dattr = NULL; | 665 | dattr = NULL; |
667 | csa = NULL; | 666 | csa = NULL; |
668 | 667 | ||
669 | if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL)) | ||
670 | goto done; | ||
671 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); | ||
672 | |||
673 | /* Special case for the 99% of systems with one, full, sched domain */ | 668 | /* Special case for the 99% of systems with one, full, sched domain */ |
674 | if (is_sched_load_balance(&top_cpuset)) { | 669 | if (is_sched_load_balance(&top_cpuset)) { |
675 | ndoms = 1; | 670 | ndoms = 1; |
@@ -683,7 +678,7 @@ static int generate_sched_domains(cpumask_var_t **domains, | |||
683 | update_domain_attr_tree(dattr, &top_cpuset); | 678 | update_domain_attr_tree(dattr, &top_cpuset); |
684 | } | 679 | } |
685 | cpumask_and(doms[0], top_cpuset.effective_cpus, | 680 | cpumask_and(doms[0], top_cpuset.effective_cpus, |
686 | non_isolated_cpus); | 681 | housekeeping_cpumask(HK_FLAG_DOMAIN)); |
687 | 682 | ||
688 | goto done; | 683 | goto done; |
689 | } | 684 | } |
@@ -707,7 +702,8 @@ static int generate_sched_domains(cpumask_var_t **domains, | |||
707 | */ | 702 | */ |
708 | if (!cpumask_empty(cp->cpus_allowed) && | 703 | if (!cpumask_empty(cp->cpus_allowed) && |
709 | !(is_sched_load_balance(cp) && | 704 | !(is_sched_load_balance(cp) && |
710 | cpumask_intersects(cp->cpus_allowed, non_isolated_cpus))) | 705 | cpumask_intersects(cp->cpus_allowed, |
706 | housekeeping_cpumask(HK_FLAG_DOMAIN)))) | ||
711 | continue; | 707 | continue; |
712 | 708 | ||
713 | if (is_sched_load_balance(cp)) | 709 | if (is_sched_load_balance(cp)) |
@@ -789,7 +785,7 @@ restart: | |||
789 | 785 | ||
790 | if (apn == b->pn) { | 786 | if (apn == b->pn) { |
791 | cpumask_or(dp, dp, b->effective_cpus); | 787 | cpumask_or(dp, dp, b->effective_cpus); |
792 | cpumask_and(dp, dp, non_isolated_cpus); | 788 | cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN)); |
793 | if (dattr) | 789 | if (dattr) |
794 | update_domain_attr_tree(dattr + nslot, b); | 790 | update_domain_attr_tree(dattr + nslot, b); |
795 | 791 | ||
@@ -802,7 +798,6 @@ restart: | |||
802 | BUG_ON(nslot != ndoms); | 798 | BUG_ON(nslot != ndoms); |
803 | 799 | ||
804 | done: | 800 | done: |
805 | free_cpumask_var(non_isolated_cpus); | ||
806 | kfree(csa); | 801 | kfree(csa); |
807 | 802 | ||
808 | /* | 803 | /* |