Diffstat (limited to 'kernel/cpuset.c')
 kernel/cpuset.c | 84 ++-------------------------------------------------------
 1 file changed, 2 insertions(+), 82 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e196510aa40f..0864f4097930 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -755,68 +755,13 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 }
 
 /*
- * For a given cpuset cur, partition the system as follows
- * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
- *    exclusive child cpusets
- * b. All cpus in the current cpuset's cpus_allowed that are not part of any
- *    exclusive child cpusets
- * Build these two partitions by calling partition_sched_domains
- *
- * Call with manage_mutex held.  May nest a call to the
- * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
- * Must not be called holding callback_mutex, because we must
- * not call lock_cpu_hotplug() while holding callback_mutex.
- */
-
-static void update_cpu_domains(struct cpuset *cur)
-{
-        struct cpuset *c, *par = cur->parent;
-        cpumask_t pspan, cspan;
-
-        if (par == NULL || cpus_empty(cur->cpus_allowed))
-                return;
-
-        /*
-         * Get all cpus from parent's cpus_allowed not part of exclusive
-         * children
-         */
-        pspan = par->cpus_allowed;
-        list_for_each_entry(c, &par->children, sibling) {
-                if (is_cpu_exclusive(c))
-                        cpus_andnot(pspan, pspan, c->cpus_allowed);
-        }
-        if (!is_cpu_exclusive(cur)) {
-                cpus_or(pspan, pspan, cur->cpus_allowed);
-                if (cpus_equal(pspan, cur->cpus_allowed))
-                        return;
-                cspan = CPU_MASK_NONE;
-        } else {
-                if (cpus_empty(pspan))
-                        return;
-                cspan = cur->cpus_allowed;
-                /*
-                 * Get all cpus from current cpuset's cpus_allowed not part
-                 * of exclusive children
-                 */
-                list_for_each_entry(c, &cur->children, sibling) {
-                        if (is_cpu_exclusive(c))
-                                cpus_andnot(cspan, cspan, c->cpus_allowed);
-                }
-        }
-
-        lock_cpu_hotplug();
-        partition_sched_domains(&pspan, &cspan);
-        unlock_cpu_hotplug();
-}
-
-/*
  * Call with manage_mutex held.  May take callback_mutex during call.
  */
 
 static int update_cpumask(struct cpuset *cs, char *buf)
 {
         struct cpuset trialcs;
-        int retval, cpus_unchanged;
+        int retval;
 
         /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
         if (cs == &top_cpuset)
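Note: the partitioning rule stated in the deleted comment above (parent's cpus minus exclusive children, and the current cpuset's cpus minus its exclusive children) is easiest to see with concrete masks. The sketch below is a minimal userspace model of that pspan/cspan arithmetic, using unsigned long bitmasks in place of cpumask_t and an invented example hierarchy; all values and names are hypothetical, chosen only to illustrate the andnot/or steps.

/* Userspace model of the removed update_cpu_domains() mask arithmetic.
 * Bits stand for CPUs; the hierarchy below is an invented example. */
#include <stdio.h>

int main(void)
{
        unsigned long parent_cpus = 0xff; /* parent cpuset: cpus 0-7 */
        unsigned long cur_cpus    = 0x0f; /* current cpuset: cpus 0-3, cpu_exclusive */
        unsigned long excl_sib    = 0x30; /* an exclusive sibling: cpus 4-5 */
        unsigned long excl_child  = 0x03; /* an exclusive child of cur: cpus 0-1 */

        /* pspan: parent's cpus minus every exclusive child of the parent
         * (cur itself is on the parent's children list, so it is subtracted
         * too when it is cpu_exclusive) */
        unsigned long pspan = parent_cpus & ~excl_sib & ~cur_cpus;

        /* cspan: cur's cpus minus its own exclusive children */
        unsigned long cspan = cur_cpus & ~excl_child;

        printf("pspan = 0x%lx\n", pspan); /* 0xc0: cpus 6-7 */
        printf("cspan = 0x%lx\n", cspan); /* 0x0c: cpus 2-3 */
        return 0;
}

In the removed kernel code, these two masks were then handed to partition_sched_domains() under lock_cpu_hotplug().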
@@ -843,12 +788,9 @@ static int update_cpumask(struct cpuset *cs, char *buf)
         retval = validate_change(cs, &trialcs);
         if (retval < 0)
                 return retval;
-        cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
         mutex_lock(&callback_mutex);
         cs->cpus_allowed = trialcs.cpus_allowed;
         mutex_unlock(&callback_mutex);
-        if (is_cpu_exclusive(cs) && !cpus_unchanged)
-                update_cpu_domains(cs);
         return 0;
 }
 
@@ -1085,7 +1027,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
 {
         int turning_on;
         struct cpuset trialcs;
-        int err, cpu_exclusive_changed;
+        int err;
 
         turning_on = (simple_strtoul(buf, NULL, 10) != 0);
 
@@ -1098,14 +1040,10 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
         err = validate_change(cs, &trialcs);
         if (err < 0)
                 return err;
-        cpu_exclusive_changed =
-                (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
         mutex_lock(&callback_mutex);
         cs->flags = trialcs.flags;
         mutex_unlock(&callback_mutex);
 
-        if (cpu_exclusive_changed)
-                update_cpu_domains(cs);
         return 0;
 }
 
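Note: what remains in update_cpumask() and update_flag() after this patch is the same trial-and-commit shape: build a trial copy, validate it with only manage_mutex held, then publish the result under a brief callback_mutex critical section. A minimal sketch of that shape, with pthread mutexes standing in for the kernel mutexes and the struct, field, and validate() helper all invented for illustration:

/* Sketch of the trial/validate/commit pattern used by update_cpumask()
 * and update_flag(). pthread mutexes stand in for manage_mutex and
 * callback_mutex; cpuset_model and validate() are invented names. */
#include <pthread.h>

struct cpuset_model {
        unsigned long cpus_allowed;
};

static pthread_mutex_t manage_mutex   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t callback_mutex = PTHREAD_MUTEX_INITIALIZER;

static int validate(const struct cpuset_model *cur,
                    const struct cpuset_model *trial)
{
        (void)cur;
        return trial->cpus_allowed ? 0 : -1; /* stand-in sanity check */
}

/* Caller holds manage_mutex, mirroring the kernel convention. */
static int update_cpus(struct cpuset_model *cs, unsigned long new_mask)
{
        struct cpuset_model trial = *cs;     /* work on a trial copy */
        int err;

        trial.cpus_allowed = new_mask;
        err = validate(cs, &trial);
        if (err < 0)
                return err;                  /* nothing published on failure */

        pthread_mutex_lock(&callback_mutex); /* short critical section */
        cs->cpus_allowed = trial.cpus_allowed;
        pthread_mutex_unlock(&callback_mutex);
        return 0;
}

int main(void)
{
        struct cpuset_model cs = { .cpus_allowed = 0xf };
        int err;

        pthread_mutex_lock(&manage_mutex);   /* caller's convention */
        err = update_cpus(&cs, 0x3);
        pthread_mutex_unlock(&manage_mutex);
        return err ? 1 : 0;
}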
@@ -1965,17 +1903,6 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
         return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
 }
 
-/*
- * Locking note on the strange update_flag() call below:
- *
- * If the cpuset being removed is marked cpu_exclusive, then simulate
- * turning cpu_exclusive off, which will call update_cpu_domains().
- * The lock_cpu_hotplug() call in update_cpu_domains() must not be
- * made while holding callback_mutex.  Elsewhere the kernel nests
- * callback_mutex inside lock_cpu_hotplug() calls.  So the reverse
- * nesting would risk an ABBA deadlock.
- */
-
 static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 {
         struct cpuset *cs = dentry->d_fsdata;
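Note: the comment deleted above is the classic ABBA lock-ordering argument: the rest of the kernel takes callback_mutex inside lock_cpu_hotplug(), so taking the two locks in the opposite order can deadlock. A minimal two-thread illustration of that failure shape, with pthread mutexes standing in for the two kernel locks (names invented):

/* ABBA deadlock shape the deleted comment warned about.
 * lock_a stands in for lock_cpu_hotplug(), lock_b for callback_mutex. */
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void *kernel_path(void *arg)   /* usual order: A then B */
{
        (void)arg;
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
        return NULL;
}

static void *reversed_path(void *arg) /* reverse order: B then A */
{
        (void)arg;
        pthread_mutex_lock(&lock_b);
        pthread_mutex_lock(&lock_a);  /* may block forever against kernel_path */
        pthread_mutex_unlock(&lock_a);
        pthread_mutex_unlock(&lock_b);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;
        pthread_create(&t1, NULL, kernel_path, NULL);
        pthread_create(&t2, NULL, reversed_path, NULL);
        pthread_join(t1, NULL);       /* with unlucky timing, never returns */
        pthread_join(t2, NULL);
        return 0;
}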
@@ -1995,13 +1922,6 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
                 mutex_unlock(&manage_mutex);
                 return -EBUSY;
         }
-        if (is_cpu_exclusive(cs)) {
-                int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
-                if (retval < 0) {
-                        mutex_unlock(&manage_mutex);
-                        return retval;
-                }
-        }
         parent = cs->parent;
         mutex_lock(&callback_mutex);
         set_bit(CS_REMOVED, &cs->flags);