diff options
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 21 |
1 file changed, 10 insertions, 11 deletions
@@ -1133,7 +1133,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct * | |||
1133 | * by another page fault trying to merge _that_. But that's ok: if it | 1133 | * by another page fault trying to merge _that_. But that's ok: if it |
1134 | * is being set up, that automatically means that it will be a singleton | 1134 | * is being set up, that automatically means that it will be a singleton |
1135 | * acceptable for merging, so we can do all of this optimistically. But | 1135 | * acceptable for merging, so we can do all of this optimistically. But |
1136 | * we do that ACCESS_ONCE() to make sure that we never re-load the pointer. | 1136 | * we do that READ_ONCE() to make sure that we never re-load the pointer. |
1137 | * | 1137 | * |
1138 | * IOW: that the "list_is_singular()" test on the anon_vma_chain only | 1138 | * IOW: that the "list_is_singular()" test on the anon_vma_chain only |
1139 | * matters for the 'stable anon_vma' case (ie the thing we want to avoid | 1139 | * matters for the 'stable anon_vma' case (ie the thing we want to avoid |
@@ -1147,7 +1147,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct * | |||
1147 | static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) | 1147 | static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) |
1148 | { | 1148 | { |
1149 | if (anon_vma_compatible(a, b)) { | 1149 | if (anon_vma_compatible(a, b)) { |
1150 | struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma); | 1150 | struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); |
1151 | 1151 | ||
1152 | if (anon_vma && list_is_singular(&old->anon_vma_chain)) | 1152 | if (anon_vma && list_is_singular(&old->anon_vma_chain)) |
1153 | return anon_vma; | 1153 | return anon_vma; |
@@ -1551,11 +1551,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr, | |||
1551 | 1551 | ||
1552 | /* Clear old maps */ | 1552 | /* Clear old maps */ |
1553 | error = -ENOMEM; | 1553 | error = -ENOMEM; |
1554 | munmap_back: | 1554 | while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, |
1555 | if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { | 1555 | &rb_parent)) { |
1556 | if (do_munmap(mm, addr, len)) | 1556 | if (do_munmap(mm, addr, len)) |
1557 | return -ENOMEM; | 1557 | return -ENOMEM; |
1558 | goto munmap_back; | ||
1559 | } | 1558 | } |
1560 | 1559 | ||
1561 | /* | 1560 | /* |
@@ -1571,7 +1570,8 @@ munmap_back: | |||
1571 | /* | 1570 | /* |
1572 | * Can we just expand an old mapping? | 1571 | * Can we just expand an old mapping? |
1573 | */ | 1572 | */ |
1574 | vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL); | 1573 | vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, |
1574 | NULL); | ||
1575 | if (vma) | 1575 | if (vma) |
1576 | goto out; | 1576 | goto out; |
1577 | 1577 | ||
@@ -2100,7 +2100,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns | |||
2100 | actual_size = size; | 2100 | actual_size = size; |
2101 | if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) | 2101 | if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) |
2102 | actual_size -= PAGE_SIZE; | 2102 | actual_size -= PAGE_SIZE; |
2103 | if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) | 2103 | if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) |
2104 | return -ENOMEM; | 2104 | return -ENOMEM; |
2105 | 2105 | ||
2106 | /* mlock limit tests */ | 2106 | /* mlock limit tests */ |
@@ -2108,7 +2108,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns | |||
2108 | unsigned long locked; | 2108 | unsigned long locked; |
2109 | unsigned long limit; | 2109 | unsigned long limit; |
2110 | locked = mm->locked_vm + grow; | 2110 | locked = mm->locked_vm + grow; |
2111 | limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); | 2111 | limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); |
2112 | limit >>= PAGE_SHIFT; | 2112 | limit >>= PAGE_SHIFT; |
2113 | if (locked > limit && !capable(CAP_IPC_LOCK)) | 2113 | if (locked > limit && !capable(CAP_IPC_LOCK)) |
2114 | return -ENOMEM; | 2114 | return -ENOMEM; |
@@ -2739,11 +2739,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) | |||
2739 | /* | 2739 | /* |
2740 | * Clear old maps. this also does some error checking for us | 2740 | * Clear old maps. this also does some error checking for us |
2741 | */ | 2741 | */ |
2742 | munmap_back: | 2742 | while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, |
2743 | if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { | 2743 | &rb_parent)) { |
2744 | if (do_munmap(mm, addr, len)) | 2744 | if (do_munmap(mm, addr, len)) |
2745 | return -ENOMEM; | 2745 | return -ENOMEM; |
2746 | goto munmap_back; | ||
2747 | } | 2746 | } |
2748 | 2747 | ||
2749 | /* Check against address space limits *after* clearing old maps... */ | 2748 | /* Check against address space limits *after* clearing old maps... */ |