diff options
author | Wanpeng Li <liwanp@linux.vnet.ibm.com> | 2013-09-30 16:45:21 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-30 17:31:02 -0400 |
commit | 20cb6cab52a21b46e3c0dc7bd23f004f810fb421 (patch) | |
tree | a0adabe2b403a45cdb9dcf034a1f1a01f1468168 | |
parent | 080506ad0aa9c9fbc7879cdd290d55624da08c60 (diff) |
mm/hwpoison: fix traversal of hugetlbfs pages to avoid printk flood
madvise_hwpoison doesn't check whether the page is a small page or a
huge page, and unconditionally traverses the range in small-page
granularity, which results in a printk flood "MCE xxx: already hardware
poisoned" if the page is a huge page.
This patch fixes it by using compound_order(compound_head(page)) for
huge page iterator.
Testcase:
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

/*
 * Testcase: map a few anonymous huge pages and poison the whole range
 * with MADV_HWPOISON.  Before the fix this flooded the kernel log with
 * "MCE xxx: already hardware poisoned" because the kernel iterated the
 * range in small-page steps.
 *
 * Requires CAP_SYS_ADMIN and available hugetlb pages.
 * Returns 0 on success, -1 on any failure.
 */
#define PAGES_TO_TEST	3
/* One hugetlb page: 512 base pages of 4 KiB = 2 MiB (x86-64 default).
 * Named HPAGE_SIZE to avoid colliding with the PAGE_SIZE macro that
 * some system headers define; expansion parenthesized for safety. */
#define HPAGE_SIZE	(4096UL * 512)

int main(void)
{
	char *mem;

	/* fd must be -1 (not 0) for portable MAP_ANONYMOUS usage. */
	mem = mmap(NULL, PAGES_TO_TEST * HPAGE_SIZE,
		   PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (mem == MAP_FAILED)	/* e.g. no hugetlb pages reserved */
		return -1;

	if (madvise(mem, PAGES_TO_TEST * HPAGE_SIZE, MADV_HWPOISON) == -1) {
		munmap(mem, PAGES_TO_TEST * HPAGE_SIZE);
		return -1;
	}

	munmap(mem, PAGES_TO_TEST * HPAGE_SIZE);
	return 0;
}
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/madvise.c | 5 |
1 files changed, 3 insertions, 2 deletions
diff --git a/mm/madvise.c b/mm/madvise.c
index 6975bc812542..539eeb96b323 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -343,10 +343,11 @@ static long madvise_remove(struct vm_area_struct *vma,
 	 */
 static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
 {
+	struct page *p;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-	for (; start < end; start += PAGE_SIZE) {
-		struct page *p;
+	for (; start < end; start += PAGE_SIZE <<
+				compound_order(compound_head(p))) {
 		int ret;
 
 		ret = get_user_pages_fast(start, 1, 0, &p);