diff options
-rw-r--r--  include/linux/cpuset.h | 13
-rw-r--r--  kernel/cpuset.c        | 31
-rw-r--r--  kernel/sched.c         |  8
-rw-r--r--  mm/pdflush.c           |  4
4 files changed, 26 insertions, 30 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 0a26be353cb3..726761e24003 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,8 +20,8 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
| 20 | extern int cpuset_init_early(void); | 20 | extern int cpuset_init_early(void); |
| 21 | extern int cpuset_init(void); | 21 | extern int cpuset_init(void); |
| 22 | extern void cpuset_init_smp(void); | 22 | extern void cpuset_init_smp(void); |
| 23 | extern cpumask_t cpuset_cpus_allowed(struct task_struct *p); | 23 | extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask); |
| 24 | extern cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p); | 24 | extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask); |
| 25 | extern nodemask_t cpuset_mems_allowed(struct task_struct *p); | 25 | extern nodemask_t cpuset_mems_allowed(struct task_struct *p); |
| 26 | #define cpuset_current_mems_allowed (current->mems_allowed) | 26 | #define cpuset_current_mems_allowed (current->mems_allowed) |
| 27 | void cpuset_init_current_mems_allowed(void); | 27 | void cpuset_init_current_mems_allowed(void); |
@@ -84,13 +84,14 @@ static inline int cpuset_init_early(void) { return 0; }
| 84 | static inline int cpuset_init(void) { return 0; } | 84 | static inline int cpuset_init(void) { return 0; } |
| 85 | static inline void cpuset_init_smp(void) {} | 85 | static inline void cpuset_init_smp(void) {} |
| 86 | 86 | ||
| 87 | static inline cpumask_t cpuset_cpus_allowed(struct task_struct *p) | 87 | static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask) |
| 88 | { | 88 | { |
| 89 | return cpu_possible_map; | 89 | *mask = cpu_possible_map; |
| 90 | } | 90 | } |
| 91 | static inline cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p) | 91 | static inline void cpuset_cpus_allowed_locked(struct task_struct *p, |
| 92 | cpumask_t *mask) | ||
| 92 | { | 93 | { |
| 93 | return cpu_possible_map; | 94 | *mask = cpu_possible_map; |
| 94 | } | 95 | } |
| 95 | 96 | ||
| 96 | static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) | 97 | static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a1b61f414228..6b9ac296a05c 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -729,7 +729,7 @@ int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
| 729 | */ | 729 | */ |
| 730 | void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) | 730 | void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) |
| 731 | { | 731 | { |
| 732 | set_cpus_allowed(tsk, (cgroup_cs(scan->cg))->cpus_allowed); | 732 | set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed)); |
| 733 | } | 733 | } |
| 734 | 734 | ||
| 735 | /** | 735 | /** |
@@ -1178,7 +1178,7 @@ static void cpuset_attach(struct cgroup_subsys *ss,
| 1178 | 1178 | ||
| 1179 | mutex_lock(&callback_mutex); | 1179 | mutex_lock(&callback_mutex); |
| 1180 | guarantee_online_cpus(cs, &cpus); | 1180 | guarantee_online_cpus(cs, &cpus); |
| 1181 | set_cpus_allowed(tsk, cpus); | 1181 | set_cpus_allowed_ptr(tsk, &cpus); |
| 1182 | mutex_unlock(&callback_mutex); | 1182 | mutex_unlock(&callback_mutex); |
| 1183 | 1183 | ||
| 1184 | from = oldcs->mems_allowed; | 1184 | from = oldcs->mems_allowed; |
@@ -1555,8 +1555,8 @@ static struct cgroup_subsys_state *cpuset_create(
| 1555 | if (is_spread_slab(parent)) | 1555 | if (is_spread_slab(parent)) |
| 1556 | set_bit(CS_SPREAD_SLAB, &cs->flags); | 1556 | set_bit(CS_SPREAD_SLAB, &cs->flags); |
| 1557 | set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); | 1557 | set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
| 1558 | cs->cpus_allowed = CPU_MASK_NONE; | 1558 | cpus_clear(cs->cpus_allowed); |
| 1559 | cs->mems_allowed = NODE_MASK_NONE; | 1559 | nodes_clear(cs->mems_allowed); |
| 1560 | cs->mems_generation = cpuset_mems_generation++; | 1560 | cs->mems_generation = cpuset_mems_generation++; |
| 1561 | fmeter_init(&cs->fmeter); | 1561 | fmeter_init(&cs->fmeter); |
| 1562 | 1562 | ||
@@ -1625,8 +1625,8 @@ int __init cpuset_init(void)
| 1625 | { | 1625 | { |
| 1626 | int err = 0; | 1626 | int err = 0; |
| 1627 | 1627 | ||
| 1628 | top_cpuset.cpus_allowed = CPU_MASK_ALL; | 1628 | cpus_setall(top_cpuset.cpus_allowed); |
| 1629 | top_cpuset.mems_allowed = NODE_MASK_ALL; | 1629 | nodes_setall(top_cpuset.mems_allowed); |
| 1630 | 1630 | ||
| 1631 | fmeter_init(&top_cpuset.fmeter); | 1631 | fmeter_init(&top_cpuset.fmeter); |
| 1632 | top_cpuset.mems_generation = cpuset_mems_generation++; | 1632 | top_cpuset.mems_generation = cpuset_mems_generation++; |
@@ -1844,6 +1844,7 @@ void __init cpuset_init_smp(void)
| 1844 | 1844 | ||
| 1845 | * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. | 1845 | * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. |
| 1846 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. | 1846 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. |
| 1847 | * @pmask: pointer to cpumask_t variable to receive cpus_allowed set. | ||
| 1847 | * | 1848 | * |
| 1848 | * Description: Returns the cpumask_t cpus_allowed of the cpuset | 1849 | * Description: Returns the cpumask_t cpus_allowed of the cpuset |
| 1849 | * attached to the specified @tsk. Guaranteed to return some non-empty | 1850 | * attached to the specified @tsk. Guaranteed to return some non-empty |
@@ -1851,35 +1852,27 @@ void __init cpuset_init_smp(void)
| 1851 | * tasks cpuset. | 1852 | * tasks cpuset. |
| 1852 | **/ | 1853 | **/ |
| 1853 | 1854 | ||
| 1854 | cpumask_t cpuset_cpus_allowed(struct task_struct *tsk) | 1855 | void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask) |
| 1855 | { | 1856 | { |
| 1856 | cpumask_t mask; | ||
| 1857 | |||
| 1858 | mutex_lock(&callback_mutex); | 1857 | mutex_lock(&callback_mutex); |
| 1859 | mask = cpuset_cpus_allowed_locked(tsk); | 1858 | cpuset_cpus_allowed_locked(tsk, pmask); |
| 1860 | mutex_unlock(&callback_mutex); | 1859 | mutex_unlock(&callback_mutex); |
| 1861 | |||
| 1862 | return mask; | ||
| 1863 | } | 1860 | } |
| 1864 | 1861 | ||
| 1865 | /** | 1862 | /** |
| 1866 | * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset. | 1863 | * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset. |
| 1867 | * Must be called with callback_mutex held. | 1864 | * Must be called with callback_mutex held. |
| 1868 | **/ | 1865 | **/ |
| 1869 | cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk) | 1866 | void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask) |
| 1870 | { | 1867 | { |
| 1871 | cpumask_t mask; | ||
| 1872 | |||
| 1873 | task_lock(tsk); | 1868 | task_lock(tsk); |
| 1874 | guarantee_online_cpus(task_cs(tsk), &mask); | 1869 | guarantee_online_cpus(task_cs(tsk), pmask); |
| 1875 | task_unlock(tsk); | 1870 | task_unlock(tsk); |
| 1876 | |||
| 1877 | return mask; | ||
| 1878 | } | 1871 | } |
| 1879 | 1872 | ||
| 1880 | void cpuset_init_current_mems_allowed(void) | 1873 | void cpuset_init_current_mems_allowed(void) |
| 1881 | { | 1874 | { |
| 1882 | current->mems_allowed = NODE_MASK_ALL; | 1875 | nodes_setall(current->mems_allowed); |
| 1883 | } | 1876 | } |
| 1884 | 1877 | ||
| 1885 | /** | 1878 | /** |
diff --git a/kernel/sched.c b/kernel/sched.c
index ef3f28b334ea..ccc23a9cd264 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4941,13 +4941,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
| 4941 | if (retval) | 4941 | if (retval) |
| 4942 | goto out_unlock; | 4942 | goto out_unlock; |
| 4943 | 4943 | ||
| 4944 | cpus_allowed = cpuset_cpus_allowed(p); | 4944 | cpuset_cpus_allowed(p, &cpus_allowed); |
| 4945 | cpus_and(new_mask, new_mask, cpus_allowed); | 4945 | cpus_and(new_mask, new_mask, cpus_allowed); |
| 4946 | again: | 4946 | again: |
| 4947 | retval = set_cpus_allowed(p, new_mask); | 4947 | retval = set_cpus_allowed(p, new_mask); |
| 4948 | 4948 | ||
| 4949 | if (!retval) { | 4949 | if (!retval) { |
| 4950 | cpus_allowed = cpuset_cpus_allowed(p); | 4950 | cpuset_cpus_allowed(p, &cpus_allowed); |
| 4951 | if (!cpus_subset(new_mask, cpus_allowed)) { | 4951 | if (!cpus_subset(new_mask, cpus_allowed)) { |
| 4952 | /* | 4952 | /* |
| 4953 | * We must have raced with a concurrent cpuset | 4953 | * We must have raced with a concurrent cpuset |
@@ -5661,7 +5661,9 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
| 5661 | 5661 | ||
| 5662 | /* No more Mr. Nice Guy. */ | 5662 | /* No more Mr. Nice Guy. */ |
| 5663 | if (dest_cpu >= nr_cpu_ids) { | 5663 | if (dest_cpu >= nr_cpu_ids) { |
| 5664 | cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p); | 5664 | cpumask_t cpus_allowed; |
| 5665 | |||
| 5666 | cpuset_cpus_allowed_locked(p, &cpus_allowed); | ||
| 5665 | /* | 5667 | /* |
| 5666 | * Try to stay on the same cpuset, where the | 5668 | * Try to stay on the same cpuset, where the |
| 5667 | * current cpuset may be a subset of all cpus. | 5669 | * current cpuset may be a subset of all cpus. |
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 8f6ee073c0e3..0ceacff56457 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -187,8 +187,8 @@ static int pdflush(void *dummy)
| 187 | * This is needed as pdflush's are dynamically created and destroyed. | 187 | * This is needed as pdflush's are dynamically created and destroyed. |
| 188 | * The boottime pdflush's are easily placed w/o these 2 lines. | 188 | * The boottime pdflush's are easily placed w/o these 2 lines. |
| 189 | */ | 189 | */ |
| 190 | cpus_allowed = cpuset_cpus_allowed(current); | 190 | cpuset_cpus_allowed(current, &cpus_allowed); |
| 191 | set_cpus_allowed(current, cpus_allowed); | 191 | set_cpus_allowed_ptr(current, &cpus_allowed); |
| 192 | 192 | ||
| 193 | return __pdflush(&my_work); | 193 | return __pdflush(&my_work); |
| 194 | } | 194 | } |
