author	Rik van Riel <riel@redhat.com>	2015-03-09 12:12:08 -0400
committer	Tejun Heo <tj@kernel.org>	2015-03-19 14:28:19 -0400
commit	47b8ea7186aae7f474ec4c98f43eaa8da719cd83 (patch)
tree	5c6d1dc3e08e9816fd6ab746c25e0edd01271fdf /kernel
parent	3fa0818b3c85e9bb55e3ac96c9523b87e44eab9e (diff)
cpusets, isolcpus: exclude isolcpus from load balancing in cpusets
Ensure that cpus specified with the isolcpus= boot commandline option
stay outside of the load balancing in the kernel scheduler.

Operations like load balancing can introduce unwanted latencies, which
is exactly what the isolcpus= commandline is there to prevent.

Previously, simply creating a new cpuset, without even touching the
cpuset.cpus field inside the new cpuset, would undo the effects of
isolcpus=, by creating a scheduler domain spanning the whole system,
and setting up load balancing inside that domain. The cpuset root
cpuset.cpus file is read-only, so there was not even a way to undo
that effect.

This does not impact the majority of cpusets users, since isolcpus=
is a fairly specialized feature used for realtime purposes.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <williams@redhat.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: cgroups@vger.kernel.org
Signed-off-by: Rik van Riel <riel@redhat.com>
Tested-by: David Rientjes <rientjes@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
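[Editor's note] The sketch below illustrates the masking this patch performs,
for readers who don't have the kernel's cpumask helpers in mind. It is a
userspace illustration only: plain uint64_t bitmasks stand in for struct
cpumask, the mask values are invented, and the kernel's cpumask_andnot() /
cpumask_and() calls are mirrored by ordinary bitwise operations.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_possible_mask  = 0xff; /* CPUs 0-7 exist */
	uint64_t cpu_isolated_map   = 0x0c; /* booted with isolcpus=2,3 */
	uint64_t top_effective_cpus = 0xff; /* root cpuset spans everything */

	/* cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); */
	uint64_t non_isolated_cpus = cpu_possible_mask & ~cpu_isolated_map;

	/* cpumask_and(doms[0], top_cpuset.effective_cpus, non_isolated_cpus); */
	uint64_t dom0 = top_effective_cpus & non_isolated_cpus;

	/* Prints 0xf3: CPUs 2 and 3 are kept out of the sched domain. */
	printf("domain 0 spans: 0x%" PRIx64 "\n", dom0);
	return 0;
}

The structure of the patch follows this shape: the isolated map is subtracted
once, up front, and the resulting non_isolated_cpus mask is then AND-ed into
every domain span generate_sched_domains() emits, so neither the single-domain
fast path nor the multi-domain partitioning loop can hand an isolated CPU to
the load balancer.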
Diffstat (limited to 'kernel')
 kernel/cpuset.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index fc7f4748d34a..c68f0721df10 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -622,6 +622,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
 	int csn;		/* how many cpuset ptrs in csa so far */
 	int i, j, k;		/* indices for partition finding loops */
 	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
+	cpumask_var_t non_isolated_cpus;  /* load balanced CPUs */
 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 	int ndoms = 0;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] struct cpumask slot */
@@ -631,6 +632,10 @@ static int generate_sched_domains(cpumask_var_t **domains,
 	dattr = NULL;
 	csa = NULL;
 
+	if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL))
+		goto done;
+	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
+
 	/* Special case for the 99% of systems with one, full, sched domain */
 	if (is_sched_load_balance(&top_cpuset)) {
 		ndoms = 1;
@@ -643,7 +648,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
 			*dattr = SD_ATTR_INIT;
 			update_domain_attr_tree(dattr, &top_cpuset);
 		}
-		cpumask_copy(doms[0], top_cpuset.effective_cpus);
+		cpumask_and(doms[0], top_cpuset.effective_cpus,
+			    non_isolated_cpus);
 
 		goto done;
 	}
@@ -666,7 +672,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
 		 * the corresponding sched domain.
 		 */
 		if (!cpumask_empty(cp->cpus_allowed) &&
-		    !is_sched_load_balance(cp))
+		    !(is_sched_load_balance(cp) &&
+		      cpumask_intersects(cp->cpus_allowed, non_isolated_cpus)))
 			continue;
 
 		if (is_sched_load_balance(cp))
@@ -748,6 +755,7 @@ restart:
 
 		if (apn == b->pn) {
 			cpumask_or(dp, dp, b->effective_cpus);
+			cpumask_and(dp, dp, non_isolated_cpus);
 			if (dattr)
 				update_domain_attr_tree(dattr + nslot, b);
 
@@ -760,6 +768,7 @@ restart:
 	BUG_ON(nslot != ndoms);
 
 done:
+	free_cpumask_var(non_isolated_cpus);
 	kfree(csa);
 
 	/*
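[Editor's note] One subtlety in the middle hunk is worth spelling out: a
load-balancing cpuset whose cpus_allowed contains only isolated CPUs must now
be skipped entirely, or it would contribute a domain with nothing left to
balance. A sketch of that cpumask_intersects() guard, under the same made-up
bitmask conventions as the earlier example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t non_isolated_cpus = 0xf3; /* CPUs 2,3 isolated, as before */
	uint64_t cpus_allowed      = 0x0c; /* a cpuset pinned to CPUs 2,3 */

	/* cpumask_intersects(cp->cpus_allowed, non_isolated_cpus) */
	bool intersects = (cpus_allowed & non_isolated_cpus) != 0;

	/* With the patch, this cpuset no longer produces a sched domain:
	 * it has CPUs and load balances, but they are all isolated. */
	printf("generates a domain: %s\n", intersects ? "yes" : "no");
	return 0;
}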