Diffstat (limited to 'kernel/cpuset.c')
 -rw-r--r--   kernel/cpuset.c | 87
 1 file changed, 31 insertions(+), 56 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4349935c2ad8..33eee16addb8 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1015,17 +1015,12 @@ static void cpuset_change_nodemask(struct task_struct *p,
 	struct cpuset *cs;
 	int migrate;
 	const nodemask_t *oldmem = scan->data;
-	NODEMASK_ALLOC(nodemask_t, newmems, GFP_KERNEL);
-
-	if (!newmems)
-		return;
+	static nodemask_t newmems;	/* protected by cgroup_mutex */
 
 	cs = cgroup_cs(scan->cg);
-	guarantee_online_mems(cs, newmems);
-
-	cpuset_change_task_nodemask(p, newmems);
+	guarantee_online_mems(cs, &newmems);
 
-	NODEMASK_FREE(newmems);
+	cpuset_change_task_nodemask(p, &newmems);
 
 	mm = get_task_mm(p);
 	if (!mm)
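The hunk above is the pattern this patch applies throughout the file: a nodemask_t temporary that used to be heap-allocated with NODEMASK_ALLOC() (and whose allocation could fail) becomes a function-scope static variable, relying on the fact that all callers are already serialized by cgroup_mutex, as the new /* protected by cgroup_mutex */ comments note. A minimal userspace sketch of the same idea, in which big_mask, outer_lock and consume() are illustrative stand-ins rather than kernel symbols:

#include <pthread.h>
#include <string.h>

struct big_mask { unsigned long bits[64]; };	/* stand-in for a large nodemask_t */

/* Plays the role of cgroup_mutex; in the kernel the callers already hold it,
 * here the helper takes it itself so the sketch stays self-contained. */
static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;

static void consume(const struct big_mask *m) { (void)m; }	/* stand-in for the mask users */

void rebind(const struct big_mask *src)
{
	static struct big_mask tmp;	/* one shared buffer; callers are serialized by outer_lock */

	pthread_mutex_lock(&outer_lock);
	memcpy(&tmp, src, sizeof(tmp));	/* previously: allocate, check for NULL, copy, free */
	consume(&tmp);
	pthread_mutex_unlock(&outer_lock);
}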
@@ -1438,44 +1433,35 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 	struct mm_struct *mm;
 	struct cpuset *cs = cgroup_cs(cont);
 	struct cpuset *oldcs = cgroup_cs(oldcont);
-	NODEMASK_ALLOC(nodemask_t, from, GFP_KERNEL);
-	NODEMASK_ALLOC(nodemask_t, to, GFP_KERNEL);
-
-	if (from == NULL || to == NULL)
-		goto alloc_fail;
+	static nodemask_t to;	/* protected by cgroup_mutex */
 
 	if (cs == &top_cpuset) {
 		cpumask_copy(cpus_attach, cpu_possible_mask);
 	} else {
 		guarantee_online_cpus(cs, cpus_attach);
 	}
-	guarantee_online_mems(cs, to);
+	guarantee_online_mems(cs, &to);
 
 	/* do per-task migration stuff possibly for each in the threadgroup */
-	cpuset_attach_task(tsk, to, cs);
+	cpuset_attach_task(tsk, &to, cs);
 	if (threadgroup) {
 		struct task_struct *c;
 		rcu_read_lock();
 		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-			cpuset_attach_task(c, to, cs);
+			cpuset_attach_task(c, &to, cs);
 		}
 		rcu_read_unlock();
 	}
 
 	/* change mm; only needs to be done once even if threadgroup */
-	*from = oldcs->mems_allowed;
-	*to = cs->mems_allowed;
+	to = cs->mems_allowed;
 	mm = get_task_mm(tsk);
 	if (mm) {
-		mpol_rebind_mm(mm, to);
+		mpol_rebind_mm(mm, &to);
 		if (is_memory_migrate(cs))
-			cpuset_migrate_mm(mm, from, to);
+			cpuset_migrate_mm(mm, &oldcs->mems_allowed, &to);
 		mmput(mm);
 	}
-
-alloc_fail:
-	NODEMASK_FREE(from);
-	NODEMASK_FREE(to);
 }
 
 /* The various types of files and directories in a cpuset file system */
@@ -1575,8 +1561,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
 		return -ENODEV;
 
 	trialcs = alloc_trial_cpuset(cs);
-	if (!trialcs)
-		return -ENOMEM;
+	if (!trialcs) {
+		retval = -ENOMEM;
+		goto out;
+	}
 
 	switch (cft->private) {
 	case FILE_CPULIST:
@@ -1591,6 +1579,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
 	}
 
 	free_trial_cpuset(trialcs);
+out:
 	cgroup_unlock();
 	return retval;
 }
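The two cpuset_write_resmask() hunks rework the allocation-failure path: instead of returning -ENOMEM directly, the failure now sets retval and jumps to a new out: label placed just before cgroup_unlock(), so the unlock taken earlier in the function is no longer skipped on that path. The shape of that error path as a standalone sketch, with acquire()/release() as made-up stand-ins for the lock and unlock calls:

#include <stdlib.h>

static void acquire(void) { }	/* stand-in for taking the outer lock */
static void release(void) { }	/* stand-in for dropping it */

int write_resmask(void)
{
	int retval = 0;
	void *trial;

	acquire();
	trial = malloc(128);
	if (!trial) {
		retval = -1;	/* an early "return" here would bypass release() */
		goto out;
	}
	/* ... validate and apply the new mask ... */
	free(trial);
out:
	release();
	return retval;
}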
@@ -1607,34 +1596,26 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
  * across a page fault.
  */
 
-static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
+static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
 {
-	int ret;
+	size_t count;
 
 	mutex_lock(&callback_mutex);
-	ret = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
+	count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
 	mutex_unlock(&callback_mutex);
 
-	return ret;
+	return count;
 }
 
-static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
+static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
 {
-	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);
-	int retval;
-
-	if (mask == NULL)
-		return -ENOMEM;
+	size_t count;
 
 	mutex_lock(&callback_mutex);
-	*mask = cs->mems_allowed;
+	count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
 	mutex_unlock(&callback_mutex);
 
-	retval = nodelist_scnprintf(page, PAGE_SIZE, *mask);
-
-	NODEMASK_FREE(mask);
-
-	return retval;
+	return count;
 }
 
 static ssize_t cpuset_common_file_read(struct cgroup *cont,
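In the cpuset_sprintf_*() hunk the temporary copy of mems_allowed disappears: nodelist_scnprintf() now formats the mask directly while callback_mutex is held, and with no allocation left to fail the helpers return a plain size_t length rather than an int that doubled as an error code. A compact sketch of formatting shared state directly under a lock, with cb_lock and mems_mask as illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of callback_mutex */
static unsigned long mems_mask;					/* shared state being printed */

/* Format the shared mask under the lock; no temporary copy, no error path. */
size_t sprintf_memlist(char *page, size_t len)
{
	size_t count;

	pthread_mutex_lock(&cb_lock);
	count = (size_t)snprintf(page, len, "%lx", mems_mask);
	pthread_mutex_unlock(&cb_lock);

	return count;
}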
@@ -1859,8 +1840,10 @@ static void cpuset_post_clone(struct cgroup_subsys *ss,
 	cs = cgroup_cs(cgroup);
 	parent_cs = cgroup_cs(parent);
 
+	mutex_lock(&callback_mutex);
 	cs->mems_allowed = parent_cs->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
+	mutex_unlock(&callback_mutex);
 	return;
 }
 
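The cpuset_post_clone() hunk brackets the copy of the parent's masks with callback_mutex, the same lock the other hunks take around writes to mems_allowed (see scan_for_empty_cpusets() and cpuset_track_online_nodes() below), so that a reader holding only callback_mutex does not observe a half-updated cpus/mems pair. A compact sketch of that writer-side convention, with cb_lock, cpus_mask and mems_mask as illustrative names:

#include <pthread.h>

static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of callback_mutex */
static unsigned long cpus_mask, mems_mask;			/* the shared pair of masks */

/* Writer: update both masks under the lock so a reader never sees one old and one new. */
void copy_from_parent(unsigned long parent_cpus, unsigned long parent_mems)
{
	pthread_mutex_lock(&cb_lock);
	cpus_mask = parent_cpus;
	mems_mask = parent_mems;
	pthread_mutex_unlock(&cb_lock);
}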
@@ -2063,10 +2046,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 	struct cpuset *cp;	/* scans cpusets being updated */
 	struct cpuset *child;	/* scans child cpusets of cp */
 	struct cgroup *cont;
-	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
-
-	if (oldmems == NULL)
-		return;
+	static nodemask_t oldmems;	/* protected by cgroup_mutex */
 
 	list_add_tail((struct list_head *)&root->stack_list, &queue);
 
@@ -2083,7 +2063,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
 			continue;
 
-		*oldmems = cp->mems_allowed;
+		oldmems = cp->mems_allowed;
 
 		/* Remove offline cpus and mems from this cpuset. */
 		mutex_lock(&callback_mutex);
@@ -2099,10 +2079,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 			remove_tasks_in_empty_cpuset(cp);
 		else {
 			update_tasks_cpumask(cp, NULL);
-			update_tasks_nodemask(cp, oldmems, NULL);
+			update_tasks_nodemask(cp, &oldmems, NULL);
 		}
 	}
-	NODEMASK_FREE(oldmems);
 }
 
 /*
@@ -2144,19 +2123,16 @@ void cpuset_update_active_cpus(void)
 static int cpuset_track_online_nodes(struct notifier_block *self,
 				unsigned long action, void *arg)
 {
-	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
-
-	if (oldmems == NULL)
-		return NOTIFY_DONE;
+	static nodemask_t oldmems;	/* protected by cgroup_mutex */
 
 	cgroup_lock();
 	switch (action) {
 	case MEM_ONLINE:
-		*oldmems = top_cpuset.mems_allowed;
+		oldmems = top_cpuset.mems_allowed;
 		mutex_lock(&callback_mutex);
 		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 		mutex_unlock(&callback_mutex);
-		update_tasks_nodemask(&top_cpuset, oldmems, NULL);
+		update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
 		break;
 	case MEM_OFFLINE:
 		/*
@@ -2170,7 +2146,6 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
 	}
 	cgroup_unlock();
 
-	NODEMASK_FREE(oldmems);
 	return NOTIFY_OK;
 }
 #endif
