path: root/arch/i386/mm/hugetlbpage.c
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d  /arch/i386/mm/hugetlbpage.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/i386/mm/hugetlbpage.c')
-rw-r--r--  arch/i386/mm/hugetlbpage.c  431
1 file changed, 431 insertions(+), 0 deletions(-)
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
new file mode 100644
index 000000000000..a8c45143088b
--- /dev/null
+++ b/arch/i386/mm/hugetlbpage.c
@@ -0,0 +1,431 @@
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

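/*
 * Walk (and, where needed, allocate) the page tables down to the pmd
 * level for @addr.  On i386 a huge page is mapped by a single pmd
 * entry with the PSE bit set, so the pmd entry is returned cast to a
 * pte_t pointer.
 */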
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        pmd = pmd_alloc(mm, pud, addr);
        return (pte_t *) pmd;
}

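/*
 * Lookup-only variant of the above: walk the existing page tables and
 * return the pmd entry covering @addr without allocating anything.
 */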
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        return (pte_t *) pmd;
}

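/*
 * Build and install the pte for one huge page: charge HPAGE_SIZE worth
 * of base pages against the mm's rss counter, make the entry
 * writable/dirty or write-protected according to @write_access, mark
 * it young and huge (PSE), and write it into the page table.
 */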
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, pte_t * page_table, int write_access)
{
        pte_t entry;

        add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
        if (write_access) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);
        set_pte(page_table, entry);
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

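/*
 * Called at fork() time for hugetlb VMAs: mirror every huge pte of the
 * parent VMA into the child mm, taking an extra reference on each huge
 * page and charging the child's rss.  Page tables are allocated in the
 * child as needed; -ENOMEM is returned if that allocation fails.
 */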
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                set_pte(dst_pte, entry);
                add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;

nomem:
        return -ENOMEM;
}

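/*
 * Back end for get_user_pages() on hugetlb VMAs: starting at *position,
 * fill in up to *length entries of the pages[] and vmas[] arrays, one
 * per base page, by indexing into the underlying huge pages.  Hugetlb
 * mappings are prefaulted at mmap time, so every pte is expected to be
 * present.  Returns the updated array index i and writes back the
 * remaining length and the next address.
 */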
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct page **pages, struct vm_area_struct **vmas,
                    unsigned long *position, int *length, int i)
{
        unsigned long vpfn, vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        vpfn = vaddr/PAGE_SIZE;
        while (vaddr < vma->vm_end && remainder) {

                if (pages) {
                        pte_t *pte;
                        struct page *page;

                        pte = huge_pte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        WARN_ON(!pte || pte_none(*pte));

                        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++vpfn;
                --remainder;
                ++i;
        }

        *length = remainder;
        *position = vaddr;

        return i;
}

#if 0 /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[(address & ~HPAGE_MASK) >> PAGE_SHIFT];

        WARN_ON(!PageCompound(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

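/*
 * Live definitions used by the kernel: i386 huge pages live at the pmd
 * level, so follow_huge_addr() is not supported, pmd_huge() tests the
 * PSE bit, and follow_huge_pmd() returns the constituent base page of
 * the huge page that covers @address.
 */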
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
#endif

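/*
 * Tear down all huge page mappings between @start and @end (both must
 * be HPAGE_SIZE aligned): clear each huge pte, drop the reference the
 * mapping held on the page, subtract the range from the rss counter
 * and flush the TLB for the range.
 */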
void unmap_hugepage_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t pte, *ptep;
        struct page *page;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
                pte = ptep_get_and_clear(mm, address, ptep);
                if (pte_none(pte))
                        continue;
                page = pte_page(pte);
                put_page(page);
        }
        add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
        flush_tlb_range(vma, start, end);
}

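/*
 * Populate an entire hugetlb mapping at mmap() time, under
 * mm->page_table_lock.  For every huge-page-sized slot in the VMA:
 * allocate the pmd, release any regular page table already installed
 * there, look the huge page up in the file's page cache (allocating
 * and inserting a fresh one, with quota charged, if it is not present)
 * and finally install the huge pte.
 */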
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }

                if (!pte_none(*pte)) {
                        pmd_t *pmd = (pmd_t *) pte;

                        page = pmd_page(*pmd);
                        pmd_clear(pmd);
                        mm->nr_ptes--;
                        dec_page_state(nr_page_table_pages);
                        page_cache_release(page);
                }

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (!ret) {
                                unlock_page(page);
                        } else {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
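/*
 * Bottom-up search for a free, HPAGE_SIZE aligned range of @len bytes,
 * starting from the cached free-area hint and wrapping back to
 * TASK_UNMAPPED_BASE once before giving up with -ENOMEM.
 */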
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        start_addr = mm->free_area_cache;

full_search:
        addr = ALIGN(start_addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}

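/*
 * Top-down counterpart: search downwards from the cached hint below
 * mm->mmap_base for an HPAGE_SIZE aligned hole of @len bytes, retrying
 * once from the base and finally falling back to the bottom-up search
 * above before restoring the top-down hint.
 */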
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & HPAGE_MASK;
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                            (!prev_vma || (addr >= prev_vma->vm_end)))
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                else
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end)
                                mm->free_area_cache = vma->vm_start;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & HPAGE_MASK;
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;

        return addr;
}

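/*
 * Arch hook behind HAVE_ARCH_HUGETLB_UNMAPPED_AREA: validate the
 * requested length against HPAGE_SIZE alignment and TASK_SIZE, honour
 * a usable address hint directly, and otherwise delegate to the
 * bottom-up or top-down search depending on which mmap layout the mm
 * is using.
 */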
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/