diff options
| -rw-r--r-- | include/linux/cpuset.h | 3 | ||||
| -rw-r--r-- | mm/mempolicy.c | 61 |
2 files changed, 36 insertions, 28 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index f8c9a2752f06..0a26be353cb3 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
| @@ -26,8 +26,6 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p); | |||
| 26 | #define cpuset_current_mems_allowed (current->mems_allowed) | 26 | #define cpuset_current_mems_allowed (current->mems_allowed) |
| 27 | void cpuset_init_current_mems_allowed(void); | 27 | void cpuset_init_current_mems_allowed(void); |
| 28 | void cpuset_update_task_memory_state(void); | 28 | void cpuset_update_task_memory_state(void); |
| 29 | #define cpuset_nodes_subset_current_mems_allowed(nodes) \ | ||
| 30 | nodes_subset((nodes), current->mems_allowed) | ||
| 31 | int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); | 29 | int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); |
| 32 | 30 | ||
| 33 | extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask); | 31 | extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask); |
| @@ -103,7 +101,6 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) | |||
| 103 | #define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY]) | 101 | #define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY]) |
| 104 | static inline void cpuset_init_current_mems_allowed(void) {} | 102 | static inline void cpuset_init_current_mems_allowed(void) {} |
| 105 | static inline void cpuset_update_task_memory_state(void) {} | 103 | static inline void cpuset_update_task_memory_state(void) {} |
| 106 | #define cpuset_nodes_subset_current_mems_allowed(nodes) (1) | ||
| 107 | 104 | ||
| 108 | static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) | 105 | static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) |
| 109 | { | 106 | { |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 83c69f8a64c2..8d246c3b340f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -116,22 +116,51 @@ static void mpol_rebind_policy(struct mempolicy *pol, | |||
| 116 | /* Do sanity checking on a policy */ | 116 | /* Do sanity checking on a policy */ |
| 117 | static int mpol_check_policy(int mode, nodemask_t *nodes) | 117 | static int mpol_check_policy(int mode, nodemask_t *nodes) |
| 118 | { | 118 | { |
| 119 | int empty = nodes_empty(*nodes); | 119 | int was_empty, is_empty; |
| 120 | |||
| 121 | if (!nodes) | ||
| 122 | return 0; | ||
| 123 | |||
| 124 | /* | ||
| 125 | * "Contextualize" the in-coming nodemask for cpusets: | ||
| 126 | * Remember whether in-coming nodemask was empty. If not, | ||
| 127 | * restrict the nodes to the allowed nodes in the cpuset. | ||
| 128 | * This is guaranteed to be a subset of nodes with memory. | ||
| 129 | */ | ||
| 130 | cpuset_update_task_memory_state(); | ||
| 131 | is_empty = was_empty = nodes_empty(*nodes); | ||
| 132 | if (!was_empty) { | ||
| 133 | nodes_and(*nodes, *nodes, cpuset_current_mems_allowed); | ||
| 134 | is_empty = nodes_empty(*nodes); /* after "contextualization" */ | ||
| 135 | } | ||
| 120 | 136 | ||
| 121 | switch (mode) { | 137 | switch (mode) { |
| 122 | case MPOL_DEFAULT: | 138 | case MPOL_DEFAULT: |
| 123 | if (!empty) | 139 | /* |
| 140 | * require caller to specify an empty nodemask | ||
| 141 | * before "contextualization" | ||
| 142 | */ | ||
| 143 | if (!was_empty) | ||
| 124 | return -EINVAL; | 144 | return -EINVAL; |
| 125 | break; | 145 | break; |
| 126 | case MPOL_BIND: | 146 | case MPOL_BIND: |
| 127 | case MPOL_INTERLEAVE: | 147 | case MPOL_INTERLEAVE: |
| 128 | /* Preferred will only use the first bit, but allow | 148 | /* |
| 129 | more for now. */ | 149 | * require at least 1 valid node after "contextualization" |
| 130 | if (empty) | 150 | */ |
| 151 | if (is_empty) | ||
| 152 | return -EINVAL; | ||
| 153 | break; | ||
| 154 | case MPOL_PREFERRED: | ||
| 155 | /* | ||
| 156 | * Did caller specify invalid nodes? | ||
| 157 | * Don't silently accept this as "local allocation". | ||
| 158 | */ | ||
| 159 | if (!was_empty && is_empty) | ||
| 131 | return -EINVAL; | 160 | return -EINVAL; |
| 132 | break; | 161 | break; |
| 133 | } | 162 | } |
| 134 | return nodes_subset(*nodes, node_states[N_HIGH_MEMORY]) ? 0 : -EINVAL; | 163 | return 0; |
| 135 | } | 164 | } |
| 136 | 165 | ||
| 137 | /* Generate a custom zonelist for the BIND policy. */ | 166 | /* Generate a custom zonelist for the BIND policy. */ |
| @@ -188,8 +217,6 @@ static struct mempolicy *mpol_new(int mode, nodemask_t *nodes) | |||
| 188 | switch (mode) { | 217 | switch (mode) { |
| 189 | case MPOL_INTERLEAVE: | 218 | case MPOL_INTERLEAVE: |
| 190 | policy->v.nodes = *nodes; | 219 | policy->v.nodes = *nodes; |
| 191 | nodes_and(policy->v.nodes, policy->v.nodes, | ||
| 192 | node_states[N_HIGH_MEMORY]); | ||
| 193 | if (nodes_weight(policy->v.nodes) == 0) { | 220 | if (nodes_weight(policy->v.nodes) == 0) { |
| 194 | kmem_cache_free(policy_cache, policy); | 221 | kmem_cache_free(policy_cache, policy); |
| 195 | return ERR_PTR(-EINVAL); | 222 | return ERR_PTR(-EINVAL); |
| @@ -421,18 +448,6 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start, | |||
| 421 | return err; | 448 | return err; |
| 422 | } | 449 | } |
| 423 | 450 | ||
| 424 | static int contextualize_policy(int mode, nodemask_t *nodes) | ||
| 425 | { | ||
| 426 | if (!nodes) | ||
| 427 | return 0; | ||
| 428 | |||
| 429 | cpuset_update_task_memory_state(); | ||
| 430 | if (!cpuset_nodes_subset_current_mems_allowed(*nodes)) | ||
| 431 | return -EINVAL; | ||
| 432 | return mpol_check_policy(mode, nodes); | ||
| 433 | } | ||
| 434 | |||
| 435 | |||
| 436 | /* | 451 | /* |
| 437 | * Update task->flags PF_MEMPOLICY bit: set iff non-default | 452 | * Update task->flags PF_MEMPOLICY bit: set iff non-default |
| 438 | * mempolicy. Allows more rapid checking of this (combined perhaps | 453 | * mempolicy. Allows more rapid checking of this (combined perhaps |
| @@ -468,7 +483,7 @@ static long do_set_mempolicy(int mode, nodemask_t *nodes) | |||
| 468 | { | 483 | { |
| 469 | struct mempolicy *new; | 484 | struct mempolicy *new; |
| 470 | 485 | ||
| 471 | if (contextualize_policy(mode, nodes)) | 486 | if (mpol_check_policy(mode, nodes)) |
| 472 | return -EINVAL; | 487 | return -EINVAL; |
| 473 | new = mpol_new(mode, nodes); | 488 | new = mpol_new(mode, nodes); |
| 474 | if (IS_ERR(new)) | 489 | if (IS_ERR(new)) |
| @@ -915,10 +930,6 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len, | |||
| 915 | err = get_nodes(&nodes, nmask, maxnode); | 930 | err = get_nodes(&nodes, nmask, maxnode); |
| 916 | if (err) | 931 | if (err) |
| 917 | return err; | 932 | return err; |
| 918 | #ifdef CONFIG_CPUSETS | ||
| 919 | /* Restrict the nodes to the allowed nodes in the cpuset */ | ||
| 920 | nodes_and(nodes, nodes, current->mems_allowed); | ||
| 921 | #endif | ||
| 922 | return do_mbind(start, len, mode, &nodes, flags); | 933 | return do_mbind(start, len, mode, &nodes, flags); |
| 923 | } | 934 | } |
| 924 | 935 | ||
