Diffstat (limited to 'kernel/cpuset.c')
 kernel/cpuset.c | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fceb97e989c..d5738910c34c 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -564,7 +564,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
  * partition_sched_domains().
  */
 
-static void rebuild_sched_domains(void)
+void rebuild_sched_domains(void)
 {
 	struct kfifo *q;	/* queue of cpusets to be scanned */
 	struct cpuset *cp;	/* scans q */
@@ -679,7 +679,9 @@ restart:
 			if (apn == b->pn) {
 				cpus_or(*dp, *dp, b->cpus_allowed);
 				b->pn = -1;
-				update_domain_attr(dattr, b);
+				if (dattr)
+					update_domain_attr(dattr
+								+ nslot, b);
 			}
 		}
 		nslot++;
@@ -1194,6 +1196,15 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
 
 	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
+	if (tsk->flags & PF_THREAD_BOUND) {
+		cpumask_t mask;
+
+		mutex_lock(&callback_mutex);
+		mask = cs->cpus_allowed;
+		mutex_unlock(&callback_mutex);
+		if (!cpus_equal(tsk->cpus_allowed, mask))
+			return -EINVAL;
+	}
 
 	return security_task_setscheduler(tsk, 0, NULL);
 }
@@ -1207,11 +1218,14 @@ static void cpuset_attach(struct cgroup_subsys *ss,
 	struct mm_struct *mm;
 	struct cpuset *cs = cgroup_cs(cont);
 	struct cpuset *oldcs = cgroup_cs(oldcont);
+	int err;
 
 	mutex_lock(&callback_mutex);
 	guarantee_online_cpus(cs, &cpus);
-	set_cpus_allowed_ptr(tsk, &cpus);
+	err = set_cpus_allowed_ptr(tsk, &cpus);
 	mutex_unlock(&callback_mutex);
+	if (err)
+		return;
 
 	from = oldcs->mems_allowed;
 	to = cs->mems_allowed;
@@ -1882,7 +1896,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
  * in order to minimize text size.
  */
 
-static void common_cpu_mem_hotplug_unplug(void)
+static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
 {
 	cgroup_lock();
 
@@ -1894,7 +1908,8 @@ static void common_cpu_mem_hotplug_unplug(void)
 	 * Scheduler destroys domains on hotplug events.
 	 * Rebuild them based on the current settings.
 	 */
-	rebuild_sched_domains();
+	if (rebuild_sd)
+		rebuild_sched_domains();
 
 	cgroup_unlock();
 }
@@ -1912,11 +1927,22 @@ static void common_cpu_mem_hotplug_unplug(void)
 static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
 				unsigned long phase, void *unused_cpu)
 {
-	if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
+	switch (phase) {
+	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		common_cpu_mem_hotplug_unplug(1);
+		break;
+	default:
 		return NOTIFY_DONE;
+	}
 
-	common_cpu_mem_hotplug_unplug();
-	return 0;
+	return NOTIFY_OK;
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -1929,7 +1955,7 @@ static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
 
 void cpuset_track_online_nodes(void)
 {
-	common_cpu_mem_hotplug_unplug();
+	common_cpu_mem_hotplug_unplug(0);
 }
 #endif
 
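For readability, here is a condensed sketch of how the two hotplug entry points fit together after this change. It is not a verbatim copy of the file: the unchanged middle of common_cpu_mem_hotplug_unplug() and the notifier registration are elided, since neither appears in the hunks above; only what the hunks show is assumed.

/* Sketch only: code not visible in the hunks above is elided. */
static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
{
	cgroup_lock();
	/* ... existing hotplug/unplug handling, unchanged by this patch ... */
	if (rebuild_sd)				/* scheduler destroys domains on hotplug; */
		rebuild_sched_domains();	/* rebuild them from current settings   */
	cgroup_unlock();
}

/*
 * CPU hotplug: only completed or failed transitions reach the common
 * handler (and rebuild sched domains); preparatory phases such as
 * CPU_UP_PREPARE or CPU_DYING now fall through to the default case.
 */
static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
				unsigned long phase, void *unused_cpu)
{
	switch (phase) {
	case CPU_UP_CANCELED:	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_FAILED:	case CPU_DOWN_FAILED_FROZEN:
	case CPU_ONLINE:	case CPU_ONLINE_FROZEN:
	case CPU_DEAD:		case CPU_DEAD_FROZEN:
		common_cpu_mem_hotplug_unplug(1);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

/* Memory hotplug: no sched-domain rebuild is requested for node changes. */
void cpuset_track_online_nodes(void)
{
	common_cpu_mem_hotplug_unplug(0);
}

In short, both CPU and memory hotplug funnel through the same helper, but only the CPU path asks for the sched-domain rebuild.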