author		David Rientjes <rientjes@google.com>	2009-04-02 19:57:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-02 22:04:57 -0400
commit		a1bc5a4eee990a1f290735c8694d0aebdad095fa (patch)
tree		f3e5849823444136df9c7f91f7217e1894235682
parent		7f81b1ae18416b457e4d5ff23f0bd598e8a42224 (diff)
cpusets: replace zone allowed functions with node allowed
The cpuset_zone_allowed() variants are actually only a function of the
zone's node.
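
Conceptually the change is mechanical: every zone-based check reduces to a node-based check on zone_to_nid(). A minimal sketch of that equivalence, assuming the post-patch API (the helper name below is hypothetical, not part of this patch):

/*
 * Hypothetical helper, for illustration only: the old zone-based
 * wrapper is equivalent to resolving the zone's node id once and
 * calling the node-based primitive.
 */
static inline int zone_alloc_allowed(struct zone *z, gfp_t gfp_mask)
{
	int nid = zone_to_nid(z);	/* the only zone property consulted */

	/* Same result as cpuset_zone_allowed_softwall(z, gfp_mask). */
	return cpuset_node_allowed_softwall(nid, gfp_mask);
}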
Cc: Paul Menage <menage@google.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/cpuset.h	33
-rw-r--r--	kernel/cpuset.c		59
2 files changed, 52 insertions, 40 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2e0d79678deb..05ea1dd7d681 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -12,6 +12,7 @@
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
 #include <linux/cgroup.h>
+#include <linux/mm.h>
 
 #ifdef CONFIG_CPUSETS
 
@@ -29,19 +30,29 @@ void cpuset_init_current_mems_allowed(void);
 void cpuset_update_task_memory_state(void);
 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
 
-extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
-extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
 
-static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
 	return number_of_cpusets <= 1 ||
-		__cpuset_zone_allowed_softwall(z, gfp_mask);
+		__cpuset_node_allowed_softwall(node, gfp_mask);
 }
 
-static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
 	return number_of_cpusets <= 1 ||
-		__cpuset_zone_allowed_hardwall(z, gfp_mask);
+		__cpuset_node_allowed_hardwall(node, gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+{
+	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+{
+	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
 }
 
 extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -112,6 +123,16 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 	return 1;
 }
 
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+{
+	return 1;
+}
+
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
+{
+	return 1;
+}
+
 static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
 {
 	return 1;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 0619f109d38d..3ff910eb30d3 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2181,26 +2181,24 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
 }
 
 /**
- * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_softwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
- * If we're in interrupt, yes, we can always allocate.  If
- * __GFP_THISNODE is set, yes, we can always allocate.  If zone
- * z's node is in our tasks mems_allowed, yes.  If it's not a
- * __GFP_HARDWALL request and this zone's nodes is in the nearest
- * hardwalled cpuset ancestor to this tasks cpuset, yes.
- * If the task has been OOM killed and has access to memory reserves
- * as specified by the TIF_MEMDIE flag, yes.
+ * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
+ * set, yes, we can always allocate.  If node is in our task's mems_allowed,
+ * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
+ * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
+ * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
+ * flag, yes.
  * Otherwise, no.
  *
- * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
- * reduces to cpuset_zone_allowed_hardwall().  Otherwise,
- * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
- * from an enclosing cpuset.
+ * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
+ * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
+ * might sleep, and might allow a node from an enclosing cpuset.
  *
- * cpuset_zone_allowed_hardwall() only handles the simpler case of
- * hardwall cpusets, and never sleeps.
+ * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
+ * cpusets, and never sleeps.
  *
  * The __GFP_THISNODE placement logic is really handled elsewhere,
  * by forcibly using a zonelist starting at a specified node, and by
@@ -2239,20 +2237,17 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
  * GFP_USER     - only nodes in current tasks mems allowed ok.
  *
  * Rule:
- *    Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
+ *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
  *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
  *    the code that might scan up ancestor cpusets and sleep.
  */
-
-int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
-	int node;			/* node that zone z is on */
 	const struct cpuset *cs;	/* current cpuset ancestors */
 	int allowed;			/* is allocation in zone z allowed? */
 
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = zone_to_nid(z);
 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
 	if (node_isset(node, current->mems_allowed))
 		return 1;
@@ -2281,15 +2276,15 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
 }
 
 /*
- * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
- * If we're in interrupt, yes, we can always allocate.
- * If __GFP_THISNODE is set, yes, we can always allocate.  If zone
- * z's node is in our tasks mems_allowed, yes.   If the task has been
- * OOM killed and has access to memory reserves as specified by the
- * TIF_MEMDIE flag, yes.  Otherwise, no.
+ * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
+ * set, yes, we can always allocate.  If node is in our task's mems_allowed,
+ * yes.  If the task has been OOM killed and has access to memory reserves as
+ * specified by the TIF_MEMDIE flag, yes.
+ * Otherwise, no.
  *
  * The __GFP_THISNODE placement logic is really handled elsewhere,
  * by forcibly using a zonelist starting at a specified node, and by
@@ -2297,20 +2292,16 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
  * any node on the zonelist except the first.  By the time any such
  * calls get to this routine, we should just shut up and say 'yes'.
  *
- * Unlike the cpuset_zone_allowed_softwall() variant, above,
- * this variant requires that the zone be in the current tasks
+ * Unlike the cpuset_node_allowed_softwall() variant, above,
+ * this variant requires that the node be in the current task's
  * mems_allowed or that we're in interrupt.  It does not scan up the
  * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
  * It never sleeps.
  */
-
-int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
-	int node;			/* node that zone z is on */
-
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = zone_to_nid(z);
 	if (node_isset(node, current->mems_allowed))
 		return 1;
 	/*
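
A usage sketch of the resulting API, following the rule documented above (call sites are hypothetical, for illustration only): cpuset_node_allowed_softwall() may scan up ancestor cpusets and sleep unless __GFP_HARDWALL is passed, while the hardwall variant never sleeps.

/* Hypothetical call sites -- not part of this patch. */

/* Atomic context: use the hardwall variant, which never sleeps. */
static int can_alloc_atomic(int nid)
{
	return cpuset_node_allowed_hardwall(nid, GFP_ATOMIC);
}

/*
 * Process context: the softwall variant may scan up ancestor cpusets
 * and sleep, so it is only safe where sleeping is allowed.
 */
static int can_alloc_sleepable(int nid)
{
	return cpuset_node_allowed_softwall(nid, GFP_KERNEL);
}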