author		James Hogan <james.hogan@imgtec.com>	2012-10-09 05:54:17 -0400
committer	James Hogan <james.hogan@imgtec.com>	2013-03-02 15:09:20 -0500
commit		e624e95bd88f94fc70bbe612789bcac44c6f0923 (patch)
tree		04fd5485d400ca9fa3aedcbbf61a81e9a434dc72 /arch/metag
parent		373cd784d0fc83f076c899ca7da50ecca7286e42 (diff)
metag: Huge TLB
Add huge TLB support to the metag architecture.
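For illustration only (not part of this patch): with this support in
place, a huge mapping can be requested from userspace through the
standard mmap(MAP_HUGETLB) interface. A minimal sketch, assuming the
4MB huge page size used by this patch:

	#include <sys/mman.h>
	#include <stdio.h>

	#define HPAGE_SIZE	(4UL * 1024 * 1024)	/* metag huge page */

	int main(void)
	{
		/* Ask for one anonymous huge page. */
		void *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
			       -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		((char *)p)[0] = 1;	/* touch it to fault the huge page in */
		munmap(p, HPAGE_SIZE);
		return 0;
	}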
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Diffstat (limited to 'arch/metag')
-rw-r--r--	arch/metag/include/asm/hugetlb.h	 86
-rw-r--r--	arch/metag/mm/hugetlbpage.c		291
2 files changed, 377 insertions, 0 deletions
diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h
new file mode 100644
index 000000000000..f545477e61f3
--- /dev/null
+++ b/arch/metag/include/asm/hugetlb.h
@@ -0,0 +1,86 @@
#ifndef _ASM_METAG_HUGETLB_H
#define _ASM_METAG_HUGETLB_H

#include <asm/page.h>
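/*
 * Huge pages on Metag are represented by ordinary ptes at a larger page
 * size, so most of these hooks simply wrap the generic pte operations.
 */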

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}

int prepare_hugepage_range(struct file *file, unsigned long addr,
			   unsigned long len);

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					   unsigned long addr, unsigned long end,
					   unsigned long floor,
					   unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#endif /* _ASM_METAG_HUGETLB_H */
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
new file mode 100644
index 000000000000..24ceed4f4eed
--- /dev/null
+++ b/arch/metag/mm/hugetlbpage.c
@@ -0,0 +1,291 @@
/*
 * arch/metag/mm/hugetlbpage.c
 *
 * METAG HugeTLB page support.
 *
 * Cloned from SuperH
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
int prepare_hugepage_range(struct file *file, unsigned long addr,
			   unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct hstate *h = hstate_file(file);
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	if (TASK_SIZE - len < addr)
		return -EINVAL;

	vma = find_vma(mm, ALIGN_HUGEPT(addr));
	if (vma && !(vma->vm_flags & VM_HUGETLB))
		return -EINVAL;

	vma = find_vma(mm, addr);
	if (vma) {
		if (addr + len > vma->vm_start)
			return -EINVAL;
		if (!(vma->vm_flags & VM_HUGETLB) &&
		    (ALIGN_HUGEPT(addr + len) > vma->vm_start))
			return -EINVAL;
	}
	return 0;
}

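/*
 * Allocate a pte for a huge mapping.  The page size lives in the
 * top-level (pgd) entry, so once the second-level table exists we
 * rewrite the pgd's size field to _PAGE_SZHUGE; every pte reached via
 * this entry then maps a huge page.
 */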
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_alloc_map(mm, NULL, pmd, addr);
	pgd->pgd &= ~_PAGE_SZ_MASK;
	pgd->pgd |= _PAGE_SZHUGE;

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_offset_kernel(pmd, addr);

	return pte;
}

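/* Huge pmds are never shared between processes here, so this is a no-op. */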
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

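/* A pmd maps huge pages when its page-size field exceeds the base page size. */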
int pmd_huge(pmd_t pmd)
{
	return pmd_page_shift(pmd) > PAGE_SHIFT;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA

/*
 * Look for an unmapped area starting after another hugetlb vma.
 * There are guaranteed to be no spare huge PTEs if all the huge pages are
 * full size (4MB), so in that case compile out this search.
 */
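/*
 * mm->context.part_huge caches the end of the most recent partially
 * filled huge-pagetable block, so that later allocations are packed
 * into it before a fresh block is claimed.
 */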
#if HPAGE_SHIFT == HUGEPT_SHIFT
static inline unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	return 0;
}
#else
static unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;
	int after_huge;

	if (mm->context.part_huge) {
		start_addr = mm->context.part_huge;
		after_huge = 1;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		after_huge = 0;
	}
new_search:
	addr = start_addr;

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto new_search;
			}
			return 0;
		}
		/* skip ahead if we've aligned right over some vmas */
		if (vma && vma->vm_end <= addr)
			continue;
		/* space before the next vma? */
		if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
				   <= vma->vm_start)) {
			unsigned long end = addr + len;
			if (end & HUGEPT_MASK)
				mm->context.part_huge = end;
			else if (addr == mm->context.part_huge)
				mm->context.part_huge = 0;
			return addr;
		}
		if (vma && (vma->vm_flags & VM_HUGETLB)) {
			/* space after a huge vma in 2nd level page table? */
			if (vma->vm_end & HUGEPT_MASK) {
				after_huge = 1;
				/* no need to align to the next PT block */
				addr = vma->vm_end;
				continue;
			}
		}
		after_huge = 0;
		addr = ALIGN_HUGEPT(vma->vm_end);
	}
}
#endif

/* Do a full search to find an area without any nearby normal pages. */
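/*
 * Both ends of the returned area are HUGEPT-aligned, so none of its
 * second-level page tables can be shared with normal-sized mappings.
 */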
static unsigned long
hugetlb_get_unmapped_area_new_pmd(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;

	if (ALIGN_HUGEPT(len) > mm->cached_hole_size)
		start_addr = mm->free_area_cache;
	else
		start_addr = TASK_UNMAPPED_BASE;

new_search:
	addr = ALIGN_HUGEPT(start_addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto new_search;
			}
			return 0;
		}
		/* skip ahead if we've aligned right over some vmas */
		if (vma && vma->vm_end <= addr)
			continue;
		if (!vma || ALIGN_HUGEPT(addr + len) <= vma->vm_start) {
#if HPAGE_SHIFT < HUGEPT_SHIFT
			if (len & HUGEPT_MASK)
				mm->context.part_huge = addr + len;
#endif
			return addr;
		}
		addr = ALIGN_HUGEPT(vma->vm_end);
	}
}

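/*
 * Top-level unmapped-area search: validate the length, honour MAP_FIXED
 * and any hint address via prepare_hugepage_range(), then prefer space
 * beside an existing huge mapping before claiming a fresh naturally
 * aligned block.
 */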
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		if (!prepare_hugepage_range(file, addr, len))
			return addr;
	}

	/*
	 * Look for an existing hugetlb vma with space after it (this is to
	 * minimise fragmentation caused by huge pages).
	 */
	addr = hugetlb_get_unmapped_area_existing(len);
	if (addr)
		return addr;

	/*
	 * Find an unmapped naturally aligned set of 4MB blocks that we can use
	 * for huge pages.
	 */
	addr = hugetlb_get_unmapped_area_new_pmd(len);
	if (likely(addr))
		return addr;

	return -EINVAL;
}

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/* necessary for boot time 4MB huge page allocation */
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == (1 << HPAGE_SHIFT)) {
		hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
	} else {
		pr_err("hugepagesz: Unsupported page size %lu M\n",
		       ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);