Diffstat (limited to 'include')
 include/asm-ia64/page.h |  1 +
 include/linux/hugetlb.h | 16 ++++++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 732cf3086741..3ab27333dae4 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -57,6 +57,7 @@
 
 # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 # define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
 # define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
 #endif /* CONFIG_HUGETLB_PAGE */
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5db25ffdb3eb..d6f1019625af 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -36,7 +36,6 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 			int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 				pmd_t *pmd, int write);
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
 int pmd_huge(pmd_t pmd);
 void hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot);
@@ -54,8 +53,18 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
 #endif
 
 #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define prepare_hugepage_range(addr, len)	\
-	is_aligned_hugepage_range(addr, len)
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
 #else
 int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
@@ -95,7 +104,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)			0
 #define hugetlb_report_node_meminfo(n, buf)		0
 #define follow_huge_pmd(mm, addr, pmd, write)	NULL
-#define is_aligned_hugepage_range(addr, len)	0
 #define prepare_hugepage_range(addr, len)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(mm, addr, len)	0
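
For reference, the following is a minimal user-space sketch (not part of the patch) of the generic prepare_hugepage_range() check introduced above. It assumes a hypothetical 2 MB huge page size via HPAGE_SHIFT chosen only for illustration; in the kernel, HPAGE_MASK is derived from the architecture's real huge page configuration.

#include <errno.h>
#include <stdio.h>

#define HPAGE_SHIFT	21			/* assumption: 2 MB huge pages */
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

/*
 * Mirrors the generic inline added above: both addr and len must be
 * hugepage aligned, otherwise the range is rejected with -EINVAL.
 */
static int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* Aligned address and length: accepted, prints 0. */
	printf("%d\n", prepare_hugepage_range(2 * HPAGE_SIZE, HPAGE_SIZE));
	/* Length not a multiple of the huge page size: prints -EINVAL. */
	printf("%d\n", prepare_hugepage_range(2 * HPAGE_SIZE, HPAGE_SIZE + 4096));
	return 0;
}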