author		Mike Travis <travis@sgi.com>		2008-04-04 21:11:07 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-04-19 13:44:58 -0400
commit		f9a86fcbbb1e5542eabf45c9144ac4b6330861a4 (patch)
tree		0a3f8d57969b2dc8d2663e05d6ee36f9b50ba26a /kernel
parent		f70316dace2bb99730800d47044acb818c6735f6 (diff)
cpuset: modify cpuset_cpus_allowed to use cpumask pointer
* Modify cpuset_cpus_allowed to return the currently allowed cpumask
via a pointer argument instead of as the function return value.
* Use the new set_cpus_allowed_ptr function.
* Clean up the CPU_MASK_ALL and NODE_MASK_ALL uses.
Depends on:
[sched-devel]: sched: add new set_cpus_allowed_ptr function
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
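
The shape of the change, for readers skimming the diff below: the allowed
mask is now written through a caller-supplied pointer instead of being
returned (and copied) by value. A minimal sketch of a hypothetical caller,
not code from this patch; p stands for some struct task_struct pointer,
and with NR_CPUS=4096 a cpumask_t is 512 bytes, which is what makes the
by-value copies worth removing:

	cpumask_t cpus_allowed;

	/* Old interface: the full cpumask_t travels through the return
	 * value, and again through the by-value argument. */
	cpus_allowed = cpuset_cpus_allowed(p);
	set_cpus_allowed(p, cpus_allowed);

	/* New interface: the caller provides the storage; only pointers
	 * cross the call boundary. */
	cpuset_cpus_allowed(p, &cpus_allowed);
	set_cpus_allowed_ptr(p, &cpus_allowed);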
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpuset.c	31
-rw-r--r--	kernel/sched.c	8
2 files changed, 17 insertions(+), 22 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a1b61f414228..6b9ac296a05c 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -729,7 +729,7 @@ int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
  */
 void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
 {
-	set_cpus_allowed(tsk, (cgroup_cs(scan->cg))->cpus_allowed);
+	set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
 }
 
 /**
@@ -1178,7 +1178,7 @@ static void cpuset_attach(struct cgroup_subsys *ss,
 
 	mutex_lock(&callback_mutex);
 	guarantee_online_cpus(cs, &cpus);
-	set_cpus_allowed(tsk, cpus);
+	set_cpus_allowed_ptr(tsk, &cpus);
 	mutex_unlock(&callback_mutex);
 
 	from = oldcs->mems_allowed;
@@ -1555,8 +1555,8 @@ static struct cgroup_subsys_state *cpuset_create(
 	if (is_spread_slab(parent))
 		set_bit(CS_SPREAD_SLAB, &cs->flags);
 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
-	cs->cpus_allowed = CPU_MASK_NONE;
-	cs->mems_allowed = NODE_MASK_NONE;
+	cpus_clear(cs->cpus_allowed);
+	nodes_clear(cs->mems_allowed);
 	cs->mems_generation = cpuset_mems_generation++;
 	fmeter_init(&cs->fmeter);
 
@@ -1625,8 +1625,8 @@ int __init cpuset_init(void)
 {
 	int err = 0;
 
-	top_cpuset.cpus_allowed = CPU_MASK_ALL;
-	top_cpuset.mems_allowed = NODE_MASK_ALL;
+	cpus_setall(top_cpuset.cpus_allowed);
+	nodes_setall(top_cpuset.mems_allowed);
 
 	fmeter_init(&top_cpuset.fmeter);
 	top_cpuset.mems_generation = cpuset_mems_generation++;
@@ -1844,6 +1844,7 @@ void __init cpuset_init_smp(void)
 
  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
+ * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
  *
  * Description: Returns the cpumask_t cpus_allowed of the cpuset
  * attached to the specified @tsk. Guaranteed to return some non-empty
@@ -1851,35 +1852,27 @@ void __init cpuset_init_smp(void)
  * tasks cpuset.
  **/
 
-cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
+void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
 {
-	cpumask_t mask;
-
 	mutex_lock(&callback_mutex);
-	mask = cpuset_cpus_allowed_locked(tsk);
+	cpuset_cpus_allowed_locked(tsk, pmask);
 	mutex_unlock(&callback_mutex);
-
-	return mask;
 }
 
 /**
  * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
  * Must be called with callback_mutex held.
  **/
-cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk)
+void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask)
 {
-	cpumask_t mask;
-
 	task_lock(tsk);
-	guarantee_online_cpus(task_cs(tsk), &mask);
+	guarantee_online_cpus(task_cs(tsk), pmask);
 	task_unlock(tsk);
-
-	return mask;
 }
 
 void cpuset_init_current_mems_allowed(void)
 {
-	current->mems_allowed = NODE_MASK_ALL;
+	nodes_setall(current->mems_allowed);
 }
 
 /**
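
Before the sched.c half: the dependency listed above adds
set_cpus_allowed_ptr(), and the old entry point is expected to remain as a
thin compatibility wrapper so unconverted callers keep building. Roughly
along these lines; a sketch for orientation, not part of this diff:

	extern int set_cpus_allowed_ptr(struct task_struct *p,
					const cpumask_t *new_mask);

	/* Legacy interface: the full cpumask_t still lands on the stack
	 * of every caller that uses it. */
	static inline int set_cpus_allowed(struct task_struct *p,
					   cpumask_t new_mask)
	{
		return set_cpus_allowed_ptr(p, &new_mask);
	}

This is why sched_setaffinity() below can keep calling set_cpus_allowed()
unchanged while its cpuset_cpus_allowed() calls are converted.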
diff --git a/kernel/sched.c b/kernel/sched.c
index ef3f28b334ea..ccc23a9cd264 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4941,13 +4941,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	if (retval)
 		goto out_unlock;
 
-	cpus_allowed = cpuset_cpus_allowed(p);
+	cpuset_cpus_allowed(p, &cpus_allowed);
 	cpus_and(new_mask, new_mask, cpus_allowed);
 again:
 	retval = set_cpus_allowed(p, new_mask);
 
 	if (!retval) {
-		cpus_allowed = cpuset_cpus_allowed(p);
+		cpuset_cpus_allowed(p, &cpus_allowed);
 		if (!cpus_subset(new_mask, cpus_allowed)) {
 			/*
 			 * We must have raced with a concurrent cpuset
@@ -5661,7 +5661,9 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu >= nr_cpu_ids) {
-		cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
+		cpumask_t cpus_allowed;
+
+		cpuset_cpus_allowed_locked(p, &cpus_allowed);
 		/*
 		 * Try to stay on the same cpuset, where the
 		 * current cpuset may be a subset of all cpus.
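
The sched_setaffinity() hunk doubles as a small pattern worth noting: the
allowed mask is fetched a second time after set_cpus_allowed() succeeds,
because a cpuset update can race in between. A simplified restatement of
the converted code, with locking and error paths elided and "requested" as
a made-up name for the user-supplied mask:

	cpumask_t cpus_allowed, new_mask = requested;

	cpuset_cpus_allowed(p, &cpus_allowed);	/* what the cpuset permits */
	cpus_and(new_mask, new_mask, cpus_allowed);
again:
	retval = set_cpus_allowed(p, new_mask);
	if (!retval) {
		cpuset_cpus_allowed(p, &cpus_allowed);	/* re-read after the set */
		if (!cpus_subset(new_mask, cpus_allowed)) {
			/* Raced with a concurrent cpuset update; fall back
			 * to the cpuset's mask and try again. */
			new_mask = cpus_allowed;
			goto again;
		}
	}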