Diffstat (limited to 'mm/memory.c')

 -rw-r--r--  mm/memory.c | 21 +++++++++------------

 1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index ce22a250926f..61e66f026563 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1359,7 +1359,7 @@ split_fallthrough:
 		 */
 		mark_page_accessed(page);
 	}
-	if (flags & FOLL_MLOCK) {
+	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * The preliminary mapping check is mainly to avoid the
 		 * pointless overhead of lock_page on the ZERO_PAGE
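
With this change, follow_page() attempts its mlock bookkeeping only when the vma is actually VM_LOCKED, rather than for every FOLL_MLOCK lookup. For context, the remainder of this block (untouched by the hunk, quoted approximately from mm/memory.c of this era):

		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push any cached pages to the LRU */
			/*
			 * Because we lock page here and migration is
			 * blocked by the pte's page reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
		}
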
@@ -1412,9 +1412,8 @@ no_page_table:
 
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_stack_continue(vma->vm_prev, addr);
+	return stack_guard_page_start(vma, addr) ||
+	       stack_guard_page_end(vma, addr+PAGE_SIZE);
 }
 
 /**
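
stack_guard_page() now treats a guard page at either end of a stack vma as special, covering VM_GROWSUP stacks (e.g. PA-RISC) as well as the common VM_GROWSDOWN case. The stack_guard_page_start()/stack_guard_page_end() helpers live in include/linux/mm.h and look roughly like this:

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					 unsigned long addr)
{
	/* First page of a grows-down stack, with no stack vma just above it. */
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
				       unsigned long addr)
{
	/* Last page of a grows-up stack, with no stack vma just below it. */
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}
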
@@ -1551,13 +1550,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			continue;
 		}
 
-		/*
-		 * If we don't actually want the page itself,
-		 * and it's the stack guard page, just skip it.
-		 */
-		if (!pages && stack_guard_page(vma, start))
-			goto next_page;
-
 		do {
 			struct page *page;
 			unsigned int foll_flags = gup_flags;
@@ -1574,6 +1566,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				int ret;
 				unsigned int fault_flags = 0;
 
+				/* For mlock, just skip the stack guard page. */
+				if (foll_flags & FOLL_MLOCK) {
+					if (stack_guard_page(vma, start))
+						goto next_page;
+				}
 				if (foll_flags & FOLL_WRITE)
 					fault_flags |= FAULT_FLAG_WRITE;
 				if (nonblocking)
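
Together with the previous hunk, this moves the guard-page skip out of the generic "!pages" path and into the fault loop, applied only to FOLL_MLOCK callers: mlock() quietly skips the stack guard page instead of faulting it in and locking it, while other get_user_pages() users still fault it normally. A minimal user-space sketch of the intended behavior (a hypothetical test program, not part of the patch):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	char buf[4096];

	/* Touch the buffer so the stack vma certainly covers it. */
	memset(buf, 0, sizeof(buf));

	/*
	 * Lock a range of the stack. With this patch the kernel skips
	 * the stack guard page if the range happens to include it,
	 * rather than mlocking it (which would defeat the guard page).
	 */
	if (mlock(buf, sizeof(buf)) != 0) {
		perror("mlock");	/* may fail under RLIMIT_MEMLOCK */
		return 1;
	}
	puts("stack range mlocked");
	munlock(buf, sizeof(buf));
	return 0;
}
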
@@ -3396,7 +3393,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * run pte_offset_map on the pmd, if an huge pmd could
 	 * materialize from under us from a different thread.
 	 */
-	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+	if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
 		return VM_FAULT_OOM;
 	/* if an huge pmd materialized from under us just retry later */
 	if (unlikely(pmd_trans_huge(*pmd)))
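
handle_mm_fault() now tests pmd_none() before calling __pte_alloc(), so a pte page is allocated only when the pmd is genuinely empty; if a huge pmd has materialized from under us from another thread, the allocate-then-free round trip is avoided. __pte_alloc() itself still rechecks under page_table_lock, roughly like this (an abridged sketch of mm/memory.c from the same era, huge-page-split handling omitted):

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* make the new page table visible before populating */

	spin_lock(&mm->page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it? */
		mm->nr_ptes++;
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	if (new)
		pte_free(mm, new);	/* lost the race: drop the unused pte page */
	return 0;
}
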