Diffstat (limited to 'mm')

 mm/memory.c | 19 ++++++++-----------
 mm/mlock.c  |  5 +----
 mm/mmap.c   | 11 +++++++----
 mm/slub.c   |  4 ++--
 4 files changed, 18 insertions(+), 21 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 607098d47e74..61e66f026563 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1359,7 +1359,7 @@ split_fallthrough:
 		 */
 		mark_page_accessed(page);
 	}
-	if (flags & FOLL_MLOCK) {
+	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * The preliminary mapping check is mainly to avoid the
 		 * pointless overhead of lock_page on the ZERO_PAGE
@@ -1412,9 +1412,8 @@ no_page_table:
 
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_stack_continue(vma->vm_prev, addr);
+	return stack_guard_page_start(vma, addr) ||
+	       stack_guard_page_end(vma, addr+PAGE_SIZE);
 }
 
 /**
@@ -1551,13 +1550,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			continue;
 		}
 
-		/*
-		 * If we don't actually want the page itself,
-		 * and it's the stack guard page, just skip it.
-		 */
-		if (!pages && stack_guard_page(vma, start))
-			goto next_page;
-
 		do {
 			struct page *page;
 			unsigned int foll_flags = gup_flags;
@@ -1574,6 +1566,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				int ret;
 				unsigned int fault_flags = 0;
 
+				/* For mlock, just skip the stack guard page. */
+				if (foll_flags & FOLL_MLOCK) {
+					if (stack_guard_page(vma, start))
+						goto next_page;
+				}
 				if (foll_flags & FOLL_WRITE)
 					fault_flags |= FAULT_FLAG_WRITE;
 				if (nonblocking)
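
The open-coded test that stack_guard_page() used to carry only recognized guard pages of downward-growing stacks; the rewrite delegates to a pair of helpers so upward-growing (VM_GROWSUP) stacks are covered too. As a rough sketch of what those helpers check: the GROWSDOWN half below restates the lines removed above, while the GROWSUP half is inferred by symmetry rather than quoted from include/linux/mm.h:

/* Sketch only, not the verbatim header definitions. */
static inline int stack_guard_page_start(struct vm_area_struct *vma,
					 unsigned long addr)
{
	/* First page of a downward-growing stack is the guard page,
	 * unless the previous vma already extends up to it. */
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_stack_continue(vma->vm_prev, addr);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
				       unsigned long addr)
{
	/* Mirror image for upward-growing stacks: the page ending at
	 * vm_end is the guard page (inferred by symmetry). */
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr);
}

Note that the caller passes addr+PAGE_SIZE to stack_guard_page_end(), so the comparison is against the end of the page being looked up.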
diff --git a/mm/mlock.c b/mm/mlock.c
index 6b55e3efe0df..516b2c2ddd5a 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -162,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end > vma->vm_end);
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = FOLL_TOUCH;
+	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW
@@ -178,9 +178,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
 		gup_flags |= FOLL_FORCE;
 
-	if (vma->vm_flags & VM_LOCKED)
-		gup_flags |= FOLL_MLOCK;
-
 	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
 				NULL, NULL, nonblocking);
 }
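
Net effect of this hunk together with the first mm/memory.c hunk: FOLL_MLOCK is now set unconditionally on the mlock path, and the VM_LOCKED test moves into follow_page(), where it is evaluated against the vma's current flags at each page lookup instead of being latched once per call. Condensed, not literal kernel code:

/* before: the caller decided once, up front */
if (vma->vm_flags & VM_LOCKED)
	gup_flags |= FOLL_MLOCK;

/* after: the flag is always passed along... */
gup_flags = FOLL_TOUCH | FOLL_MLOCK;

/* ...and follow_page() re-checks the vma when each page is found */
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
	/* ...body that mlocks the page (elided in the hunk above)... */
}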
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1767,10 +1767,13 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		size = address - vma->vm_start;
 		grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
-		error = acct_stack_growth(vma, size, grow);
-		if (!error) {
-			vma->vm_end = address;
-			perf_event_mmap(vma);
+		error = -ENOMEM;
+		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
+			error = acct_stack_growth(vma, size, grow);
+			if (!error) {
+				vma->vm_end = address;
+				perf_event_mmap(vma);
+			}
 		}
 	}
 	vma_unlock_anon_vma(vma);
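
The added condition is the standard unsigned wrap-around guard: for unsigned values, a + b >= a holds exactly when the addition did not wrap, so an upward expansion that would push vm_pgoff past the top of the type now fails with -ENOMEM instead of creating a vma whose page offsets overflow. A minimal standalone illustration of the idiom (hypothetical values, not kernel code):

#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* stand-ins for vma->vm_pgoff and (size >> PAGE_SHIFT) */
	unsigned long pgoff = ULONG_MAX - 5;
	unsigned long pages = 16;

	if (pgoff + pages >= pgoff)
		printf("no wrap: growth may proceed\n");
	else
		printf("pgoff would wrap: fail with -ENOMEM\n");	/* taken here */
	return 0;
}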
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1940,7 +1940,7 @@ redo:
 	 * Since this is without lock semantics the protection is only against
 	 * code executing on this cpu *not* from access by other cpus.
 	 */
-	if (unlikely(!this_cpu_cmpxchg_double(
+	if (unlikely(!irqsafe_cpu_cmpxchg_double(
 			s->cpu_slab->freelist, s->cpu_slab->tid,
 			object, tid,
 			get_freepointer(s, object), next_tid(tid)))) {
@@ -2145,7 +2145,7 @@ redo:
 	set_freepointer(s, object, c->freelist);
 
 #ifdef CONFIG_CMPXCHG_LOCAL
-	if (unlikely(!this_cpu_cmpxchg_double(
+	if (unlikely(!irqsafe_cpu_cmpxchg_double(
 			s->cpu_slab->freelist, s->cpu_slab->tid,
 			c->freelist, tid,
 			object, next_tid(tid)))) {
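
Why the irqsafe variant matters here: on configurations without a native double-word cmpxchg instruction, the operation is emulated in software, and the SLUB fast path can be re-entered from an interrupt handler allocating or freeing on the same per-cpu freelist between the compare and the store. The sketch below shows the shape of an irq-protected fallback; the function name and structure are illustrative, not the kernel's implementation:

/* Illustrative only: emulate a two-word compare-and-exchange that
 * cannot be torn by an interrupt on this cpu. */
static inline int emulated_cmpxchg_double(void **pfreelist, unsigned long *ptid,
					  void *old_list, unsigned long old_tid,
					  void *new_list, unsigned long new_tid)
{
	unsigned long flags;
	int ret = 0;

	local_irq_save(flags);	/* an irq here could touch the same freelist */
	if (*pfreelist == old_list && *ptid == old_tid) {
		*pfreelist = new_list;	/* both words must move together */
		*ptid = new_tid;
		ret = 1;
	}
	local_irq_restore(flags);
	return ret;
}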