author	Mel Gorman <mgorman@suse.de>	2014-04-03 17:47:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-03 19:20:58 -0400
commit	d26914d11751b23ca2e8747725f2cae10c2f2c1b (patch)
tree	020b606fb9223e29292f54922a11111239e3a3f4 /mm/mempolicy.c
parent	91ca9186484809c57303b33778d841cc28f696ed (diff)
mm: optimize put_mems_allowed() usage
Since put_mems_allowed() is strictly optional (it is only a seqcount retry), we
don't need to evaluate the function if the allocation was in fact successful,
saving an smp_rmb(), some loads, and some comparisons on relatively fast paths.
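The saving comes from plain C short-circuit evaluation: with the allocation
result tested first, the seqcount check (and its smp_rmb(), loads, and
comparisons) is only reached on the failure path. A minimal userspace sketch of
the two operand orderings, using hypothetical stubs in place of the real cpuset
helpers so it compiles on its own:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stubs, not the kernel helpers: both print when they run so
 * the difference in evaluation order is visible. */
static bool put_mems_allowed_stub(void)	/* old: true => cookie still valid */
{
	puts("  seqcount check evaluated");
	return true;
}

static bool read_mems_allowed_retry_stub(void)	/* new: true => retry needed */
{
	puts("  seqcount check evaluated");
	return false;
}

int main(void)
{
	void *page = "page";	/* pretend the allocation succeeded */

	puts("old operand order:");
	if (!put_mems_allowed_stub() && !page)	/* check always runs */
		puts("  retry");

	puts("new operand order:");
	if (!page && read_mems_allowed_retry_stub())	/* skipped when page != NULL */
		puts("  retry");

	return 0;
}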
Since the naming of get/put_mems_allowed() does suggest a mandatory pairing,
rename the interface, as suggested by Mel, to resemble the seqcount interface.
This gives us read_mems_allowed_begin() and read_mems_allowed_retry(); note
that the return value of the latter is inverted relative to its previous
incarnation.
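The new names mirror a seqcount read section: take a cookie with
read_mems_allowed_begin(), attempt the allocation, and only on failure ask
read_mems_allowed_retry() whether mems_allowed changed underneath us.
read_mems_allowed_retry() returns true when a retry is needed, whereas
put_mems_allowed() returned true when the cookie was still valid. Below is a
self-contained sketch of the caller pattern, using a hypothetical userspace
model of the seqcount and allocator rather than the kernel implementation:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical model of the mems_allowed sequence counter; the real helpers
 * read a seqcount protecting the task's mems_allowed. */
static unsigned int mems_allowed_seq;

static unsigned int read_mems_allowed_begin(void)
{
	return mems_allowed_seq;		/* snapshot the cookie */
}

static bool read_mems_allowed_retry(unsigned int cookie)
{
	/* Inverted sense vs. the old put_mems_allowed(): true means
	 * mems_allowed changed since *begin(), so the caller must retry. */
	return mems_allowed_seq != cookie;
}

/* Stand-in allocator: fails once while a "concurrent" cpuset update bumps
 * the sequence, then succeeds. Purely for demonstration. */
static void *try_alloc(int attempt)
{
	if (attempt == 0) {
		mems_allowed_seq++;	/* simulate a concurrent mems_allowed change */
		return NULL;
	}
	return "page";
}

int main(void)
{
	unsigned int cookie;
	void *page;
	int attempt = 0;

retry_cpuset:
	cookie = read_mems_allowed_begin();
	page = try_alloc(attempt++);

	/* Check the cheap condition first: the seqcount comparison only
	 * happens when the allocation actually failed. */
	if (!page && read_mems_allowed_retry(cookie))
		goto retry_cpuset;

	printf("allocated after %d attempt(s)\n", attempt);
	return 0;
}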
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4755c8576942..e3ab02822799 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1899,7 +1899,7 @@ int node_random(const nodemask_t *maskp)
  * If the effective policy is 'BIND, returns a pointer to the mempolicy's
  * @nodemask for filtering the zonelist.
  *
- * Must be protected by get_mems_allowed()
+ * Must be protected by read_mems_allowed_begin()
  */
 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                 gfp_t gfp_flags, struct mempolicy **mpol,
@@ -2063,7 +2063,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 retry_cpuset:
         pol = get_vma_policy(current, vma, addr);
-        cpuset_mems_cookie = get_mems_allowed();
+        cpuset_mems_cookie = read_mems_allowed_begin();
 
         if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
                 unsigned nid;
@@ -2071,7 +2071,7 @@ retry_cpuset:
                 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
                 mpol_cond_put(pol);
                 page = alloc_page_interleave(gfp, order, nid);
-                if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+                if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                         goto retry_cpuset;
 
                 return page;
@@ -2081,7 +2081,7 @@ retry_cpuset:
                         policy_nodemask(gfp, pol));
         if (unlikely(mpol_needs_cond_ref(pol)))
                 __mpol_put(pol);
-        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                 goto retry_cpuset;
         return page;
 }
@@ -2115,7 +2115,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
                 pol = &default_policy;
 
 retry_cpuset:
-        cpuset_mems_cookie = get_mems_allowed();
+        cpuset_mems_cookie = read_mems_allowed_begin();
 
         /*
          * No reference counting needed for current->mempolicy
@@ -2128,7 +2128,7 @@ retry_cpuset:
                         policy_zonelist(gfp, pol, numa_node_id()),
                         policy_nodemask(gfp, pol));
 
-        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                 goto retry_cpuset;
 
         return page;