Diffstat (limited to 'mm/mempolicy.c')
 mm/mempolicy.c | 59
 1 file changed, 32 insertions(+), 27 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e2df1c1fb41f..31d26637b658 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -26,7 +26,7 @@
  * the allocation to memory nodes instead
  *
  * preferred       Try a specific node first before normal fallback.
- *                As a special case node -1 here means do the allocation
+ *                As a special case NUMA_NO_NODE here means do the allocation
  *                on the local CPU. This is normally identical to default,
  *                but useful to set in a VMA when you have a non default
  *                process policy.
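
For context, the special case this comment now names is reachable from userspace: MPOL_PREFERRED with an empty nodemask is how "prefer the node local to the allocating CPU" is requested, and NUMA_NO_NODE is what the kernel stores for it. A minimal sketch, assuming libnuma's <numaif.h> wrapper for the set_mempolicy(2) syscall:

    #include <numaif.h>	/* set_mempolicy(), MPOL_PREFERRED */

    /* Ask for local allocation: MPOL_PREFERRED with a NULL/empty nodemask
     * is the NUMA_NO_NODE case the comment above describes. */
    static int prefer_local_node(void)
    {
            return set_mempolicy(MPOL_PREFERRED, NULL, 0);
    }
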
@@ -127,7 +127,7 @@ static struct mempolicy *get_task_policy(struct task_struct *p)
 
 	if (!pol) {
 		node = numa_node_id();
-		if (node != -1)
+		if (node != NUMA_NO_NODE)
 			pol = &preferred_node_policy[node];
 
 		/* preferred_node_policy is not initialised early in boot */
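
The named constant substituted throughout this patch is the existing one from include/linux/numa.h; the generated code is identical, only the intent is spelled out:

    /* include/linux/numa.h */
    #define NUMA_NO_NODE    (-1)
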
@@ -161,19 +161,7 @@ static const struct mempolicy_operations {
 /* Check that the nodemask contains at least one populated zone */
 static int is_valid_nodemask(const nodemask_t *nodemask)
 {
-	int nd, k;
-
-	for_each_node_mask(nd, *nodemask) {
-		struct zone *z;
-
-		for (k = 0; k <= policy_zone; k++) {
-			z = &NODE_DATA(nd)->node_zones[k];
-			if (z->present_pages > 0)
-				return 1;
-		}
-	}
-
-	return 0;
+	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
 }
 
 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
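
The rewritten is_valid_nodemask() leans on two existing primitives: node_states[N_MEMORY], the mask of nodes that currently have memory, and nodes_intersects(), a thin wrapper over a bitmap intersection test. Roughly, from include/linux/nodemask.h:

    #define nodes_intersects(src1, src2) \
            __nodes_intersects(&(src1), &(src2), MAX_NUMNODES)

    static inline int __nodes_intersects(const nodemask_t *src1p,
                    const nodemask_t *src2p, int nbits)
    {
            return bitmap_intersects(src1p->bits, src2p->bits, nbits);
    }

So the function still answers the question in its comment, whether the mask contains at least one node with memory, but without walking every zone of every node.
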
@@ -270,7 +258,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 	struct mempolicy *policy;
 
 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
-		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
+		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
 
 	if (mode == MPOL_DEFAULT) {
 		if (nodes && !nodes_empty(*nodes))
@@ -508,9 +496,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		/*
 		 * vm_normal_page() filters out zero pages, but there might
 		 * still be PageReserved pages to skip, perhaps in a VDSO.
-		 * And we cannot move PageKsm pages sensibly or safely yet.
 		 */
-		if (PageReserved(page) || PageKsm(page))
+		if (PageReserved(page))
 			continue;
 		nid = page_to_nid(page);
 		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
@@ -1027,8 +1014,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_node_page, dest,
-					false, MIGRATE_SYNC,
-					MR_SYSCALL);
+					MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
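
Both migrate_pages() call sites in this file shrink by one argument because the "offlining" bool was dropped from the function in the same series. A sketch of the resulting prototype in include/linux/migrate.h at this point:

    int migrate_pages(struct list_head *from, new_page_t get_new_page,
                      unsigned long private, enum migrate_mode mode,
                      int reason);

MR_SYSCALL here (and MR_MEMPOLICY_MBIND below) fill the "reason" slot, which feeds the migration tracepoints.
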
@@ -1235,7 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
 		 start, start + len, mode, mode_flags,
-		 nmask ? nodes_addr(*nmask)[0] : -1);
+		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
 
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 
@@ -1272,9 +1258,8 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (!list_empty(&pagelist)) {
 		WARN_ON_ONCE(flags & MPOL_MF_LAZY);
 		nr_failed = migrate_pages(&pagelist, new_vma_page,
 					(unsigned long)vma,
-					false, MIGRATE_SYNC,
-					MR_MEMPOLICY_MBIND);
+					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 		if (nr_failed)
 			putback_lru_pages(&pagelist);
 	}
@@ -1644,6 +1629,26 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
 	return pol;
 }
 
+static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
+{
+	enum zone_type dynamic_policy_zone = policy_zone;
+
+	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
+
+	/*
+	 * If policy->v.nodes has movable memory only,
+	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
+	 *
+	 * policy->v.nodes intersects with node_states[N_MEMORY],
+	 * so if the following test fails, it implies
+	 * policy->v.nodes has movable memory only.
+	 */
+	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
+		dynamic_policy_zone = ZONE_MOVABLE;
+
+	return zone >= dynamic_policy_zone;
+}
+
 /*
  * Return a nodemask representing a mempolicy for filtering nodes for
  * page allocation
@@ -1652,7 +1657,7 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 {
 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
 	if (unlikely(policy->mode == MPOL_BIND) &&
-			gfp_zone(gfp) >= policy_zone &&
+			apply_policy_zone(policy, gfp_zone(gfp)) &&
 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
 		return &policy->v.nodes;
 
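
To see the effect of the policy_nodemask() change, consider a hypothetical layout where node 1 is a movable-only node, so an MPOL_BIND policy with v.nodes = {1} has no intersection with node_states[N_HIGH_MEMORY]:

    /* Hypothetical example; pol is MPOL_BIND over a movable-only node. */
    apply_policy_zone(pol, gfp_zone(GFP_KERNEL));
    /* -> false: GFP_KERNEL maps below ZONE_MOVABLE, so the bind mask is
     *    not applied and the allocation can fall back to other nodes.
     *    Previously this combination enforced the mask and could only
     *    fail, since the bound node has no lower-zone memory. */

    apply_policy_zone(pol, gfp_zone(GFP_HIGHUSER_MOVABLE));
    /* -> true: the request maps to ZONE_MOVABLE, which the bound node
     *    can satisfy, so the mask is enforced as before. */
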
@@ -2308,7 +2313,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 		 * it less likely we act on an unlikely task<->page
 		 * relation.
 		 */
-		last_nid = page_xchg_last_nid(page, polnid);
+		last_nid = page_nid_xchg_last(page, polnid);
 		if (last_nid != polnid)
 			goto out;
 	}
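
page_nid_xchg_last() is the same accessor as the old page_xchg_last_nid(), renamed as part of a wider nid-accessor cleanup: it atomically records which node last accessed the page and hands back the previous value, so the check above only migrates once two consecutive faults agree on the node. A rough sketch of the simpler of its variants (when the nid lives in a dedicated struct page field rather than packed into page->flags):

    static inline int page_nid_xchg_last(struct page *page, int nid)
    {
            return xchg(&page->_last_nid, nid);
    }
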
@@ -2483,7 +2488,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
 		 vma->vm_pgoff,
 		 sz, npol ? npol->mode : -1,
 		 npol ? npol->flags : -1,
-		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
+		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
 
 	if (npol) {
 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);