author	Lee Schermerhorn <lee.schermerhorn@hp.com>	2012-10-25 08:16:29 -0400
committer	Mel Gorman <mgorman@suse.de>	2012-12-11 09:42:40 -0500
commit	d3a710337b0590f43fd236d5e6518439afc7410a (patch)
tree	19a03d787d1795c8212fc2c9ede5397ba0a32c85 /mm
parent	479e2802d09f1e18a97262c4c6f8f17ae5884bd8 (diff)
mm: mempolicy: Add MPOL_NOOP
This patch augments the MPOL_MF_LAZY feature by adding a "NOOP" policy to mbind(). When the NOOP policy is used with the MPOL_MF_MOVE and MPOL_MF_LAZY flags, mbind() will map the pages PROT_NONE so that they will be migrated on the next touch. This allows an application to prepare for a new phase of operation, where different regions of shared storage will be assigned to worker threads, without changing policy. Note that we could just use "default" policy in this case. However, this also allows an application to request that pages be migrated, only if necessary, to follow any arbitrary policy that might currently apply to a range of pages, without knowing that policy and without issuing multiple mbind()s for ranges with different policies.

[ Bug in early version of mpol_parse_str() reported by Fengguang Wu. ]

Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
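For illustration only (not part of the patch): a minimal user-space sketch of how an application might use the new mode, assuming a kernel built with this series and headers that expose MPOL_NOOP and MPOL_MF_LAZY. The fallback constant values below are assumptions taken from the series' uapi changes, not part of the stock <numaif.h>.

/*
 * Sketch: lazily migrate an already-mapped range to wherever its
 * *existing* policy says it should live, without installing a new policy.
 * Build against libnuma (-lnuma) on a kernel carrying this series.
 */
#include <stdio.h>
#include <numaif.h>		/* mbind(), MPOL_MF_MOVE */

#ifndef MPOL_NOOP
#define MPOL_NOOP	5		/* assumed value from this series */
#endif
#ifndef MPOL_MF_LAZY
#define MPOL_MF_LAZY	(1 << 3)	/* assumed value from this series */
#endif

static void lazy_migrate(void *addr, unsigned long len)
{
	/*
	 * MPOL_NOOP installs no policy (an empty nodemask is required),
	 * while MPOL_MF_MOVE | MPOL_MF_LAZY maps the pages PROT_NONE so
	 * they are migrated on the next touch, following whatever policy
	 * already covers the range.
	 */
	if (mbind(addr, len, MPOL_NOOP, NULL, 0,
		  MPOL_MF_MOVE | MPOL_MF_LAZY) != 0)
		perror("mbind(MPOL_NOOP)");
}

Compared with re-applying the current policy through a regular mbind() call, this does not require the caller to know which policy covers the range.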
Diffstat (limited to 'mm')
-rw-r--r--	mm/mempolicy.c	11
1 file changed, 6 insertions, 5 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 54bd3e5ed776..c21e91477c4f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -251,10 +251,10 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
 
-	if (mode == MPOL_DEFAULT) {
+	if (mode == MPOL_DEFAULT || mode == MPOL_NOOP) {
 		if (nodes && !nodes_empty(*nodes))
 			return ERR_PTR(-EINVAL);
-		return NULL;	/* simply delete any existing policy */
+		return NULL;
 	}
 	VM_BUG_ON(!nodes);
 
@@ -1147,7 +1147,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (start & ~PAGE_MASK)
 		return -EINVAL;
 
-	if (mode == MPOL_DEFAULT)
+	if (mode == MPOL_DEFAULT || mode == MPOL_NOOP)
 		flags &= ~MPOL_MF_STRICT;
 
 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
@@ -2409,7 +2409,8 @@ static const char * const policy_modes[] =
 	[MPOL_PREFERRED]  = "prefer",
 	[MPOL_BIND]       = "bind",
 	[MPOL_INTERLEAVE] = "interleave",
-	[MPOL_LOCAL]      = "local"
+	[MPOL_LOCAL]      = "local",
+	[MPOL_NOOP]       = "noop",	/* should not actually be used */
 };
 
 
@@ -2460,7 +2461,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
 			break;
 		}
 	}
-	if (mode >= MPOL_MAX)
+	if (mode >= MPOL_MAX || mode == MPOL_NOOP)
 		goto out;
 
 	switch (mode) {