author     Jason Low <jason.low2@hp.com>  2015-04-15 19:14:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 19:35:18 -0400
commit     4db0c3c2983cc6b7a08a33542af5e14de8a9258c
tree       66cfeaeae432f904c09af45e030b7e1e00476011
parent     9d8c47e4bb1c20dbceee437f9fa7d76dafee80a2
mm: remove rest of ACCESS_ONCE() usages
We converted some of the usages of ACCESS_ONCE to READ_ONCE in the mm/
tree since it doesn't work reliably on non-scalar types.

This patch removes the rest of the ACCESS_ONCE usages and uses the new
READ_ONCE() API for the read accesses. This makes things cleaner, since we
now use one API instead of separate/multiple sets of APIs.

Signed-off-by: Jason Low <jason.low2@hp.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
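For background (an illustration, not part of the patch): ACCESS_ONCE() is a bare
volatile cast, and some gcc versions were observed to drop the volatile qualifier
when the accessed type is an aggregate (e.g. pmd_t), whereas READ_ONCE() dispatches
on the object's size and also covers those cases. Below is a simplified stand-alone
C sketch of the two styles; pmd_like_t and read_once_pmd are made-up names, and the
real definitions live in include/linux/compiler.h.

/* Stand-alone userspace sketch, not kernel code. */
#include <stdio.h>
#include <string.h>

/* Old style: a bare volatile cast.  Reliable for scalars, but fragile when
 * x is an aggregate type. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

/* Hypothetical aggregate, standing in for kernel types such as pmd_t. */
typedef struct { unsigned long val; } pmd_like_t;

/* READ_ONCE-style helper for this one type: take a single snapshot of the
 * object.  The kernel's __read_once_size() uses a volatile scalar access
 * for 1/2/4/8-byte objects and roughly barrier() + __builtin_memcpy()
 * otherwise; this sketch only shows the overall shape. */
static pmd_like_t read_once_pmd(const volatile pmd_like_t *p)
{
	pmd_like_t ret;

	memcpy(&ret, (const void *)p, sizeof(ret));
	return ret;
}

int main(void)
{
	unsigned long counter = 42;
	pmd_like_t pmd = { .val = 7 };

	unsigned long c = ACCESS_ONCE(counter);	/* scalar: fine */
	pmd_like_t snap = read_once_pmd(&pmd);		/* aggregate: avoid the bare cast */

	printf("%lu %lu\n", c, snap.val);
	return 0;
}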
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 06a6076c92e5..e65cbe0d64fc 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1133,7 +1133,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
  * by another page fault trying to merge _that_. But that's ok: if it
  * is being set up, that automatically means that it will be a singleton
  * acceptable for merging, so we can do all of this optimistically. But
- * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
+ * we do that READ_ONCE() to make sure that we never re-load the pointer.
  *
  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
@@ -1147,7 +1147,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
 {
 	if (anon_vma_compatible(a, b)) {
-		struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
+		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
 
 		if (anon_vma && list_is_singular(&old->anon_vma_chain))
 			return anon_vma;
@@ -2100,7 +2100,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 	actual_size = size;
 	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
 		actual_size -= PAGE_SIZE;
-	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -2108,7 +2108,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 		unsigned long locked;
 		unsigned long limit;
 		locked = mm->locked_vm + grow;
-		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+		limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
 		limit >>= PAGE_SHIFT;
 		if (locked > limit && !capable(CAP_IPC_LOCK))
 			return -ENOMEM;
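A closing note on the reusable_anon_vma() hunk above: the comment's point about
never re-loading the pointer is that, without the ONCE accessor, the compiler may
read old->anon_vma separately for the NULL check and for the return, and a
concurrent writer could change it in between. Below is a stand-alone sketch of that
shape; struct node, shared, lookup_racy and lookup_once are made-up names used only
for illustration.

#include <stddef.h>

struct node { int ready; };

static struct node *shared;	/* assume another thread may rewrite this */

/* Racy shape: `shared` may be loaded once for the check and again for the
 * return, so the value checked and the value returned can differ. */
static struct node *lookup_racy(void)
{
	if (shared && shared->ready)
		return shared;
	return NULL;
}

/* Converted shape: one READ_ONCE-style volatile load pins a snapshot that
 * both the check and the return then use. */
static struct node *lookup_once(void)
{
	struct node *n = *(struct node * volatile *)&shared;

	if (n && n->ready)
		return n;
	return NULL;
}

int main(void)
{
	static struct node n = { .ready = 1 };

	shared = &n;
	return lookup_racy() == lookup_once() ? 0 : 1;
}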