author		Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2015-02-11 18:25:22 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 20:06:01 -0500
commit		e66f17ff71772b209eed39de35aaa99ba819c93d
tree		fcf2db6086cb68e0c10354947c1c3d61bcd9f100 /mm/hugetlb.c
parent		cbef8478bee55775ac312a574aad48af7bb9cf9f
mm/hugetlb: take page table lock in follow_huge_pmd()
We have a race condition between move_pages() and freeing hugepages:
move_pages() calls follow_page(FOLL_GET) for hugepages internally and tries
to take a refcount on the page without preventing it from being freed
concurrently. This race can crash the kernel, so this patch fixes it by
moving the FOLL_GET handling for hugepages into follow_huge_pmd(), where it
is done under the page table lock.
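In outline, the new follow_huge_pmd() (a condensed sketch of the full hunk
shown in the diff at the bottom of this page) looks up the page and takes the
reference only while the pmd lock is held, retrying if the entry is a
migration entry:

	struct page *page = NULL;
	spinlock_t *ptl;
retry:
	ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	if (!pmd_huge(*pmd))		/* the range may have been unmapped meanwhile */
		goto out;
	if (pmd_present(*pmd)) {
		page = pte_page(*(pte_t *)pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);	/* refcount taken while the pmd cannot change */
	} else if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
		spin_unlock(ptl);
		__migration_entry_wait(mm, (pte_t *)pmd, ptl);
		goto retry;		/* the hugepage was being migrated; try again */
	}
	/* a hwpoisoned entry falls through and returns NULL */
out:
	spin_unlock(ptl);
	return page;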
This patch intentionally removes the page==NULL check after pte_page().
This is justified because pte_page() never returns NULL on any architecture
or configuration.
This patch changes the behavior of follow_huge_pmd() for tail pages, so that
tail pages can now be pinned and returned. The caller must therefore be
changed to handle the returned tail pages properly.
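Purely as an illustration (this is not the caller change itself, which lives
outside mm/hugetlb.c), a FOLL_GET user of this interface now has to cope with
getting a tail page back, roughly along these lines, assuming put_page() is
used to drop the reference once done:

	struct page *page;

	page = follow_huge_pmd(mm, address, pmd, FOLL_GET);
	if (page) {
		/*
		 * page may be any tail page of the hugepage, not necessarily
		 * the head; per-hugepage state has to go via compound_head(page).
		 */
		put_page(page);		/* drop the reference taken under the pmd lock */
	}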
We could add similar locking to follow_huge_addr() and follow_huge_pud() for
consistency, but it is not necessary because these functions currently do not
support the FOLL_GET flag, so let's leave that for future development.
Here is the reproducer:
$ cat movepages.c
#include <stdio.h>
#include <stdlib.h>
#include <err.h>
#include <numa.h>
#include <numaif.h>

#define ADDR_INPUT 0x700000000000UL
#define HPS 0x200000	/* hugepage size: 2MB */
#define PS 0x1000	/* base page size: 4KB */

int main(int argc, char *argv[]) {
	int i;
	int nr_hp = strtol(argv[1], NULL, 0);	/* number of hugepages */
	int nr_p = nr_hp * HPS / PS;		/* same range counted in base pages */
	int ret;
	void **addrs;
	int *status;
	int *nodes;
	pid_t pid;

	pid = strtol(argv[2], NULL, 0);		/* pid of the hugepage program */
	addrs = malloc(sizeof(void *) * (nr_p + 1));
	status = malloc(sizeof(int) * (nr_p + 1));
	nodes = malloc(sizeof(int) * (nr_p + 1));

	/* keep bouncing the target's pages between node 1 and node 0 */
	while (1) {
		for (i = 0; i < nr_p; i++) {
			addrs[i] = (char *)ADDR_INPUT + i * PS;
			nodes[i] = 1;
			status[i] = 0;
		}
		ret = numa_move_pages(pid, nr_p, addrs, nodes, status,
				      MPOL_MF_MOVE_ALL);
		if (ret == -1)
			err(1, "move_pages");

		for (i = 0; i < nr_p; i++) {
			addrs[i] = (char *)ADDR_INPUT + i * PS;
			nodes[i] = 0;
			status[i] = 0;
		}
		ret = numa_move_pages(pid, nr_p, addrs, nodes, status,
				      MPOL_MF_MOVE_ALL);
		if (ret == -1)
			err(1, "move_pages");
	}
	return 0;
}
$ cat hugepage.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define ADDR_INPUT 0x700000000000UL
#define HPS 0x200000	/* hugepage size: 2MB */

int main(int argc, char *argv[]) {
	int nr_hp = strtol(argv[1], NULL, 0);	/* number of hugepages to map */
	char *p;

	/* map, touch and unmap the hugepage range in a tight loop */
	while (1) {
		p = mmap((void *)ADDR_INPUT, nr_hp * HPS, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
		if (p != (void *)ADDR_INPUT) {
			perror("mmap");
			break;
		}
		memset(p, 0, nr_hp * HPS);
		munmap(p, nr_hp * HPS);
	}
	return 0;
}
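For reference, the reproducers can be built with something like the following
(exact flags are an assumption; libnuma is needed for numa_move_pages()):
$ gcc -o hugepage hugepage.c
$ gcc -o movepages movepages.c -lnuma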
$ sysctl vm.nr_hugepages=40
$ ./hugepage 10 &
$ ./movepages 10 $(pgrep -f hugepage)
Fixes: e632a938d914 ("mm: migrate: add hugepage migration code to move_pages()")
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reported-by: Hugh Dickins <hughd@google.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: <stable@vger.kernel.org> [3.12+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	48
1 file changed, 34 insertions(+), 14 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d96b8bfa748f..5aca3707450f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3675,28 +3675,48 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address,
 
 struct page * __weak
 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd, int write)
+		pmd_t *pmd, int flags)
 {
-	struct page *page;
-
-	if (!pmd_present(*pmd))
-		return NULL;
-	page = pte_page(*(pte_t *)pmd);
-	if (page)
-		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+	struct page *page = NULL;
+	spinlock_t *ptl;
+retry:
+	ptl = pmd_lockptr(mm, pmd);
+	spin_lock(ptl);
+	/*
+	 * make sure that the address range covered by this pmd is not
+	 * unmapped from other threads.
+	 */
+	if (!pmd_huge(*pmd))
+		goto out;
+	if (pmd_present(*pmd)) {
+		page = pte_page(*(pte_t *)pmd) +
+			((address & ~PMD_MASK) >> PAGE_SHIFT);
+		if (flags & FOLL_GET)
+			get_page(page);
+	} else {
+		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+			spin_unlock(ptl);
+			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
+			goto retry;
+		}
+		/*
+		 * hwpoisoned entry is treated as no_page_table in
+		 * follow_page_mask().
+		 */
+	}
+out:
+	spin_unlock(ptl);
 	return page;
 }
 
 struct page * __weak
 follow_huge_pud(struct mm_struct *mm, unsigned long address,
-		pud_t *pud, int write)
+		pud_t *pud, int flags)
 {
-	struct page *page;
+	if (flags & FOLL_GET)
+		return NULL;
 
-	page = pte_page(*(pte_t *)pud);
-	if (page)
-		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
-	return page;
+	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
 }
 
 #ifdef CONFIG_MEMORY_FAILURE