Diffstat (limited to 'mm/mempolicy.c')
 mm/mempolicy.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 49355a970be2..b53ec99f1428 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1524,10 +1524,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 }
 
 /* Return a zonelist indicated by gfp for node representing a mempolicy */
-static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
+static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
+	int nd)
 {
-	int nd = numa_node_id();
-
 	switch (policy->mode) {
 	case MPOL_PREFERRED:
 		if (!(policy->flags & MPOL_F_LOCAL))
@@ -1679,7 +1678,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
 				huge_page_shift(hstate_vma(vma))), gfp_flags);
 	} else {
-		zl = policy_zonelist(gfp_flags, *mpol);
+		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
 		if ((*mpol)->mode == MPOL_BIND)
 			*nodemask = &(*mpol)->v.nodes;
 	}
@@ -1820,7 +1819,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  */
 struct page *
 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
-		unsigned long addr)
+		unsigned long addr, int node)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 	struct zonelist *zl;
@@ -1836,7 +1835,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 		put_mems_allowed();
 		return page;
 	}
-	zl = policy_zonelist(gfp, pol);
+	zl = policy_zonelist(gfp, pol, node);
 	if (unlikely(mpol_needs_cond_ref(pol))) {
 		/*
 		 * slow path: ref counted shared policy
@@ -1892,7 +1891,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
 		page = __alloc_pages_nodemask(gfp, order,
-			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
+			policy_zonelist(gfp, pol, numa_node_id()),
+			policy_nodemask(gfp, pol));
 	put_mems_allowed();
 	return page;
 }
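
Since the diffstat is limited to mm/mempolicy.c, the matching caller-side updates elsewhere in the tree are not shown here. As a hedged sketch only (the call below is illustrative and not taken from this diff), a caller of alloc_pages_vma() is now expected to supply the policy node explicitly, typically the local node, just as the in-file callers above pass numa_node_id():

	/*
	 * Hypothetical caller-side sketch, not part of this diff: with the
	 * extra 'node' argument, the allocation site chooses the node used
	 * for local policy itself instead of policy_zonelist() defaulting
	 * to numa_node_id() internally.
	 */
	struct page *page;

	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id());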
