| author | Paul Jackson <pj@sgi.com> | 2006-03-24 06:16:11 -0500 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-24 10:33:24 -0500 |
| commit | 151a44202d097ae8b1bbaa6d8d2f97df30e3cd1e | |
| tree | 3752bba902e73001443bb75f40495cc3a1d24f54 | |
| parent | 8488bc359d674baf710992e4b641513ea5ebd212 | |
[PATCH] cpuset: don't need to mark cpuset_mems_generation atomic
Drop the atomic_t marking on the cpuset static global
cpuset_mems_generation. Since all access to it is guarded by the global
manage_mutex, there is no need for further serialization of this value.
Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
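The commit message rests on the standard locking rule: a plain integer counter needs no atomic operations if every increment (and every read that matters) happens under the same mutex. As a rough illustration only, here is a user-space sketch of that pattern using pthreads and made-up names (manage_lock, bump_generation); it is not the kernel code touched below:

```c
#include <stdio.h>
#include <pthread.h>

/* Hypothetical stand-ins for manage_mutex and cpuset_mems_generation. */
static pthread_mutex_t manage_lock = PTHREAD_MUTEX_INITIALIZER;
static int mems_generation;	/* plain int: only touched under manage_lock */

/*
 * Every update path takes manage_lock, so the ++ cannot race with another
 * ++ and no atomic read-modify-write (e.g. atomic_inc_return) is needed.
 */
static int bump_generation(void)
{
	int gen;

	pthread_mutex_lock(&manage_lock);
	gen = mems_generation++;
	pthread_mutex_unlock(&manage_lock);
	return gen;
}

int main(void)
{
	printf("generation %d\n", bump_generation());
	printf("generation %d\n", bump_generation());
	return 0;
}
```

Note that whether the assignment sees the pre- or post-increment value does not matter for a generation scheme; all that matters is that each change produces a new number, and the mutex alone guarantees that.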
 kernel/cpuset.c | 19 +++++++++++--------
 1 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index bc4131141230..702928664f42 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -149,7 +149,7 @@ static inline int is_spread_slab(const struct cpuset *cs)
 }
 
 /*
- * Increment this atomic integer everytime any cpuset changes its
+ * Increment this integer everytime any cpuset changes its
  * mems_allowed value. Users of cpusets can track this generation
  * number, and avoid having to lock and reload mems_allowed unless
  * the cpuset they're using changes generation.
@@ -163,8 +163,11 @@ static inline int is_spread_slab(const struct cpuset *cs)
  * on every visit to __alloc_pages(), to efficiently check whether
  * its current->cpuset->mems_allowed has changed, requiring an update
  * of its current->mems_allowed.
+ *
+ * Since cpuset_mems_generation is guarded by manage_mutex,
+ * there is no need to mark it atomic.
  */
-static atomic_t cpuset_mems_generation = ATOMIC_INIT(1);
+static int cpuset_mems_generation;
 
 static struct cpuset top_cpuset = {
 	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
@@ -877,7 +880,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 
 	mutex_lock(&callback_mutex);
 	cs->mems_allowed = trialcs.mems_allowed;
-	cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
 	set_cpuset_being_rebound(cs);	/* causes mpol_copy() rebind */
@@ -1270,11 +1273,11 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
 		break;
 	case FILE_SPREAD_PAGE:
 		retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
-		cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+		cs->mems_generation = cpuset_mems_generation++;
 		break;
 	case FILE_SPREAD_SLAB:
 		retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
-		cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+		cs->mems_generation = cpuset_mems_generation++;
 		break;
 	case FILE_TASKLIST:
 		retval = attach_task(cs, buffer, &pathbuf);
@@ -1823,7 +1826,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
 	atomic_set(&cs->count, 0);
 	INIT_LIST_HEAD(&cs->sibling);
 	INIT_LIST_HEAD(&cs->children);
-	cs->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+	cs->mems_generation = cpuset_mems_generation++;
 	fmeter_init(&cs->fmeter);
 
 	cs->parent = parent;
@@ -1913,7 +1916,7 @@ int __init cpuset_init_early(void)
 	struct task_struct *tsk = current;
 
 	tsk->cpuset = &top_cpuset;
-	tsk->cpuset->mems_generation = atomic_inc_return(&cpuset_mems_generation);
+	tsk->cpuset->mems_generation = cpuset_mems_generation++;
 	return 0;
 }
 
@@ -1932,7 +1935,7 @@ int __init cpuset_init(void)
 	top_cpuset.mems_allowed = NODE_MASK_ALL;
 
 	fmeter_init(&top_cpuset.fmeter);
-	top_cpuset.mems_generation = atomic_inc_return(&cpuset_mems_generation);
+	top_cpuset.mems_generation = cpuset_mems_generation++;
 
 	init_task.cpuset = &top_cpuset;
 
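The comment block touched by the first two hunks describes why this generation counter exists at all: each task remembers the generation it last synchronized with, and only takes the lock to reload mems_allowed when its cpuset's generation has moved on. The sketch below is a loose user-space illustration of that check-then-reload idea, with hypothetical names (fake_cpuset, refresh_mems); it is not the kernel's actual fast path.

```c
#include <pthread.h>
#include <stdio.h>

/*
 * Hypothetical, much-simplified cpuset and task, just to show the
 * generation-number check described in the patched comment.
 */
struct fake_cpuset {
	pthread_mutex_t lock;		/* stands in for callback_mutex */
	int mems_generation;		/* bumped whenever mems_allowed changes */
	unsigned long mems_allowed;
};

struct fake_task {
	struct fake_cpuset *cpuset;
	int mems_generation;		/* generation this task last synced with */
	unsigned long mems_allowed;	/* task-local cached copy */
};

/*
 * Called on every allocation: a cheap unlocked comparison first, and only
 * on a generation mismatch take the lock and refresh the cached value.
 */
static void refresh_mems(struct fake_task *tsk)
{
	struct fake_cpuset *cs = tsk->cpuset;

	if (tsk->mems_generation == cs->mems_generation)
		return;			/* common fast path: nothing changed */

	pthread_mutex_lock(&cs->lock);
	tsk->mems_allowed = cs->mems_allowed;
	tsk->mems_generation = cs->mems_generation;
	pthread_mutex_unlock(&cs->lock);
}

int main(void)
{
	struct fake_cpuset cs = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.mems_generation = 1,
		.mems_allowed = 0x3,
	};
	struct fake_task tsk = { .cpuset = &cs };

	refresh_mems(&tsk);	/* generations differ: reloads under the lock */
	refresh_mems(&tsk);	/* generations match: returns immediately */
	printf("task sees mems_allowed 0x%lx\n", tsk.mems_allowed);
	return 0;
}
```

The payoff of the scheme is that the common case (nothing changed) costs one integer compare per allocation instead of a lock round-trip.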