author     Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /include/linux/huge_mm.h
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'include/linux/huge_mm.h')
-rw-r--r--  include/linux/huge_mm.h  78
1 file changed, 14 insertions, 64 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1d76f8ca90f..48c32ebf65a 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -8,31 +8,22 @@ extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
 extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct mm_struct *mm,
-				  struct vm_area_struct *vma,
-				  unsigned long address, pmd_t *pmd,
-				  pmd_t orig_pmd, int dirty);
 extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
-extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
+extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
					   unsigned long addr,
					   pmd_t *pmd,
					   unsigned int flags);
 extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
-			pmd_t *pmd, unsigned long addr);
+			pmd_t *pmd);
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    unsigned char *vec);
-extern int move_huge_pmd(struct vm_area_struct *vma,
-			 struct vm_area_struct *new_vma,
-			 unsigned long old_addr,
-			 unsigned long new_addr, unsigned long old_end,
-			 pmd_t *old_pmd, pmd_t *new_pmd);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-			unsigned long addr, pgprot_t newprot,
-			int prot_numa);
+			unsigned long addr, pgprot_t newprot);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
@@ -40,7 +31,6 @@ enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
-	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
 #ifdef CONFIG_DEBUG_VM
 	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
 #endif
@@ -56,16 +46,11 @@ extern pmd_t *page_check_address_pmd(struct page *page,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag);
 
-#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
-#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define HPAGE_PMD_SHIFT HPAGE_SHIFT
 #define HPAGE_PMD_MASK HPAGE_MASK
 #define HPAGE_PMD_SIZE HPAGE_SIZE
 
-extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
-
 #define transparent_hugepage_enabled(__vma)			\
 	((transparent_hugepage_flags &				\
 	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||			\
@@ -80,9 +65,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
 	 (transparent_hugepage_flags &				\
 	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&	\
 	  (__vma)->vm_flags & VM_HUGEPAGE))
-#define transparent_hugepage_use_zero_page()			\
-	(transparent_hugepage_flags &				\
-	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
 #ifdef CONFIG_DEBUG_VM
 #define transparent_hugepage_debug_cow()			\
 	(transparent_hugepage_flags &				\
@@ -100,25 +82,23 @@ extern int handle_pte_fault(struct mm_struct *mm,
			    struct vm_area_struct *vma, unsigned long address,
			    pte_t *pte, pmd_t *pmd, unsigned int flags);
 extern int split_huge_page(struct page *page);
-extern void __split_huge_page_pmd(struct vm_area_struct *vma,
-		unsigned long address, pmd_t *pmd);
-#define split_huge_page_pmd(__vma, __address, __pmd)		\
+extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
+#define split_huge_page_pmd(__mm, __pmd)			\
 	do {							\
 		pmd_t *____pmd = (__pmd);			\
 		if (unlikely(pmd_trans_huge(*____pmd)))		\
-			__split_huge_page_pmd(__vma, __address,	\
-					      ____pmd);		\
+			__split_huge_page_pmd(__mm, ____pmd);	\
 	} while (0)
 #define wait_split_huge_page(__anon_vma, __pmd)			\
 	do {							\
 		pmd_t *____pmd = (__pmd);			\
-		anon_vma_lock_write(__anon_vma);		\
+		anon_vma_lock(__anon_vma);			\
 		anon_vma_unlock(__anon_vma);			\
 		BUG_ON(pmd_trans_splitting(*____pmd) ||		\
 		       pmd_trans_huge(*____pmd));		\
 	} while (0)
-extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd);
+#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
+#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
 #if HPAGE_PMD_ORDER > MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
@@ -128,18 +108,6 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
-extern int __pmd_trans_huge_lock(pmd_t *pmd,
-				 struct vm_area_struct *vma);
-/* mmap_sem must be held on entry */
-static inline int pmd_trans_huge_lock(pmd_t *pmd,
-				      struct vm_area_struct *vma)
-{
-	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
-	if (pmd_trans_huge(*pmd))
-		return __pmd_trans_huge_lock(pmd, vma);
-	else
-		return 0;
-}
 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
@@ -172,14 +140,10 @@ static inline struct page *compound_trans_head(struct page *page)
 	}
 	return page;
 }
-
-extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
-				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);
-
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
-#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
-#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
-#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
+#define HPAGE_PMD_SHIFT ({ BUG(); 0; })
+#define HPAGE_PMD_MASK ({ BUG(); 0; })
+#define HPAGE_PMD_SIZE ({ BUG(); 0; })
 
 #define hpage_nr_pages(x) 1
 
@@ -190,12 +154,10 @@ static inline int split_huge_page(struct page *page)
 {
 	return 0;
 }
-#define split_huge_page_pmd(__vma, __address, __pmd)	\
+#define split_huge_page_pmd(__mm, __pmd)	\
 	do { } while (0)
 #define wait_split_huge_page(__anon_vma, __pmd)	\
 	do { } while (0)
-#define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
-	do { } while (0)
 #define compound_trans_head(page) compound_head(page)
 static inline int hugepage_madvise(struct vm_area_struct *vma,
				    unsigned long *vm_flags, int advice)
@@ -209,18 +171,6 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 long adjust_next)
 {
 }
-static inline int pmd_trans_huge_lock(pmd_t *pmd,
-				      struct vm_area_struct *vma)
-{
-	return 0;
-}
-
-static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
-					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
-{
-	return 0;
-}
-
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */
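
For reference, a minimal caller sketch of the interface this patch restores: after the change, split_huge_page_pmd() takes the mm_struct and the pmd entry directly, rather than a vma and a faulting address as in the newer API being reverted. The function example_demote_pmd() below is hypothetical and not part of this commit; it only illustrates the macro as defined in the patched header.

/*
 * Hypothetical caller sketch (not part of this commit): with the header as
 * patched above, callers pass the mm and the pmd entry, not a vma/address.
 */
#include <linux/mm.h>

static void example_demote_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	/* No-op unless *pmd is a transparent huge pmd; otherwise it is split. */
	split_huge_page_pmd(mm, pmd);
}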