Diffstat (limited to 'include/linux/hugetlb.h')
-rw-r--r--	include/linux/hugetlb.h	36
1 file changed, 32 insertions(+), 4 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0393270466c3..9649ff0c63f8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
 void hugepage_put_subpool(struct hugepage_subpool *spool);
 
 int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page_head);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
 int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -69,7 +70,6 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 bool is_hugepage_active(struct page *page);
-void copy_huge_page(struct page *dst, struct page *src);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -104,6 +104,11 @@ static inline int PageHuge(struct page *page)
 	return 0;
 }
 
+static inline int PageHeadHuge(struct page *page_head)
+{
+	return 0;
+}
+
 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 }
@@ -140,9 +145,6 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
 #define isolate_huge_page(p, l) false
 #define putback_active_hugepage(p)	do {} while (0)
 #define is_hugepage_active(x)	false
-static inline void copy_huge_page(struct page *dst, struct page *src)
-{
-}
 
 static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			unsigned long address, unsigned long end, pgprot_t newprot)
@@ -392,6 +394,15 @@ static inline int hugepage_migration_support(struct hstate *h)
 	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
 }
 
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+					struct mm_struct *mm, pte_t *pte)
+{
+	if (huge_page_size(h) == PMD_SIZE)
+		return pmd_lockptr(mm, (pmd_t *) pte);
+	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
+	return &mm->page_table_lock;
+}
+
 #else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
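Note on the hunk above: huge_pte_lockptr() selects the split per-PMD spinlock (via pmd_lockptr()) when the huge page size equals PMD_SIZE, and falls back to the single mm->page_table_lock for larger page sizes; the VM_BUG_ON documents that base-size pages must never be passed in. A minimal caller sketch follows, assuming a hypothetical helper name and arguments that are not part of this patch:

#include <linux/mm.h>
#include <linux/hugetlb.h>

/* Hypothetical illustration: serialize a huge-PTE update with the
 * lock that huge_pte_lockptr() picks for this page size.
 */
static void example_set_huge_pte(struct vm_area_struct *vma,
				 unsigned long addr, pte_t *ptep, pte_t entry)
{
	struct hstate *h = hstate_vma(vma);
	spinlock_t *ptl = huge_pte_lockptr(h, vma->vm_mm, ptep);

	spin_lock(ptl);		/* split PMD lock for PMD-sized pages */
	set_huge_pte_at(vma->vm_mm, addr, ptep, entry);
	spin_unlock(ptl);
}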
@@ -401,6 +412,7 @@ struct hstate {};
 #define hstate_sizelog(s) NULL
 #define hstate_vma(v) NULL
 #define hstate_inode(i) NULL
+#define page_hstate(page) NULL
 #define huge_page_size(h) PAGE_SIZE
 #define huge_page_mask(h) PAGE_MASK
 #define vma_kernel_pagesize(v) PAGE_SIZE
@@ -421,6 +433,22 @@ static inline pgoff_t basepage_index(struct page *page)
 #define dissolve_free_huge_pages(s, e)	do {} while (0)
 #define pmd_huge_support()	0
 #define hugepage_migration_support(h)	0
+
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+					struct mm_struct *mm, pte_t *pte)
+{
+	return &mm->page_table_lock;
+}
 #endif	/* CONFIG_HUGETLB_PAGE */
 
+static inline spinlock_t *huge_pte_lock(struct hstate *h,
+					struct mm_struct *mm, pte_t *pte)
+{
+	spinlock_t *ptl;
+
+	ptl = huge_pte_lockptr(h, mm, pte);
+	spin_lock(ptl);
+	return ptl;
+}
+
 #endif	/* _LINUX_HUGETLB_H */
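huge_pte_lock() in the final hunk bundles the lockptr lookup with spin_lock(), so a call site takes the correct lock in one step and receives it back for the later spin_unlock(). A hedged usage sketch; the function and variable names below are illustrative assumptions, not part of this diff:

#include <linux/hugetlb.h>

/* Hypothetical illustration: read a huge PTE under the proper lock. */
static pte_t example_read_huge_pte(struct hstate *h, struct mm_struct *mm,
				   pte_t *ptep)
{
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, mm, ptep);	/* find and take the right lock */
	entry = huge_ptep_get(ptep);		/* read is stable under ptl */
	spin_unlock(ptl);
	return entry;
}

Keeping the lock choice inside huge_pte_lockptr()/huge_pte_lock() means a later change of policy (for example, split locks for PUD-sized pages) would only touch these helpers, not every call site.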