about | summary | refs | log | tree | commit | diff | stats
path: root/include/linux/hugetlb.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/hugetlb.h')
-rw-r--r--  include/linux/hugetlb.h | 41
1 file changed, 36 insertions(+), 5 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0393270466c3..bd7e98752222 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
 void hugepage_put_subpool(struct hugepage_subpool *spool);
 
 int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page_head);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
 int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -69,7 +70,6 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 bool is_hugepage_active(struct page *page);
-void copy_huge_page(struct page *dst, struct page *src);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -104,6 +104,11 @@ static inline int PageHuge(struct page *page)
 	return 0;
 }
 
+static inline int PageHeadHuge(struct page *page_head)
+{
+	return 0;
+}
+
 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 }
@@ -137,12 +142,12 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
 	return 0;
 }
 
-#define isolate_huge_page(p, l) false
-#define putback_active_hugepage(p)	do {} while (0)
-#define is_hugepage_active(x)	false
-static inline void copy_huge_page(struct page *dst, struct page *src)
+static inline bool isolate_huge_page(struct page *page, struct list_head *list)
 {
+	return false;
 }
+#define putback_active_hugepage(p)	do {} while (0)
+#define is_hugepage_active(x)	false
 
 static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			unsigned long address, unsigned long end, pgprot_t newprot)
@@ -392,6 +397,15 @@ static inline int hugepage_migration_support(struct hstate *h)
 	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
 }
 
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+					struct mm_struct *mm, pte_t *pte)
+{
+	if (huge_page_size(h) == PMD_SIZE)
+		return pmd_lockptr(mm, (pmd_t *) pte);
+	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
+	return &mm->page_table_lock;
+}
+
 #else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
@@ -401,6 +415,7 @@ struct hstate {};
 #define hstate_sizelog(s) NULL
 #define hstate_vma(v) NULL
 #define hstate_inode(i) NULL
+#define page_hstate(page) NULL
 #define huge_page_size(h) PAGE_SIZE
 #define huge_page_mask(h) PAGE_MASK
 #define vma_kernel_pagesize(v) PAGE_SIZE
@@ -421,6 +436,22 @@ static inline pgoff_t basepage_index(struct page *page)
 #define dissolve_free_huge_pages(s, e)	do {} while (0)
 #define pmd_huge_support()	0
 #define hugepage_migration_support(h)	0
+
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+					struct mm_struct *mm, pte_t *pte)
+{
+	return &mm->page_table_lock;
+}
 #endif	/* CONFIG_HUGETLB_PAGE */
 
+static inline spinlock_t *huge_pte_lock(struct hstate *h,
+					struct mm_struct *mm, pte_t *pte)
+{
+	spinlock_t *ptl;
+
+	ptl = huge_pte_lockptr(h, mm, pte);
+	spin_lock(ptl);
+	return ptl;
+}
+
 #endif /* _LINUX_HUGETLB_H */