Diffstat (limited to 'arch/s390/mm/hugetlbpage.c')
-rw-r--r--  arch/s390/mm/hugetlbpage.c  134
1 file changed, 134 insertions, 0 deletions
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
new file mode 100644
index 000000000000..f4b6124fdb75
--- /dev/null
+++ b/arch/s390/mm/hugetlbpage.c
@@ -0,0 +1,134 @@
/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright 2007 IBM Corp.
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

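/*
 * A "huge pte" on s390 is really a segment table (pmd) entry. With
 * hardware large pages (MACHINE_HAS_HPAGE) the value is written into
 * the pmd directly; without them the segment entry is pointed at the
 * page table that arch_prepare_hugepage() stashed in page[1].index,
 * carrying over the invalid/read-only bits. The shadow segment table
 * is updated as well when the mm uses noexec shadow tables.
 */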
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *pteptr, pte_t pteval)
{
        pmd_t *pmdp = (pmd_t *) pteptr;
        pte_t shadow_pteval = pteval;
        unsigned long mask;

        if (!MACHINE_HAS_HPAGE) {
                pteptr = (pte_t *) pte_page(pteval)[1].index;
                mask = pte_val(pteval) &
                        (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
                pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
                if (mm->context.noexec) {
                        pteptr += PTRS_PER_PTE;
                        pte_val(shadow_pteval) =
                                (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
                }
        }

        pmd_val(*pmdp) = pte_val(pteval);
        if (mm->context.noexec) {
                pmdp = get_shadow_table(pmdp);
                pmd_val(*pmdp) = pte_val(shadow_pteval);
        }
}

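/*
 * Without hardware large pages a huge page is emulated: allocate a
 * page table, map each of the PTRS_PER_PTE 4K pages backing the huge
 * page, and remember the page table pointer in page[1].index so
 * set_huge_pte_at() and arch_release_hugepage() can find it.
 */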
int arch_prepare_hugepage(struct page *page)
{
        unsigned long addr = page_to_phys(page);
        pte_t pte;
        pte_t *ptep;
        int i;

        if (MACHINE_HAS_HPAGE)
                return 0;

        ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
        if (!ptep)
                return -ENOMEM;

        pte = mk_pte(page, PAGE_RW);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
                pte_val(pte) += PAGE_SIZE;
        }
        page[1].index = (unsigned long) ptep;
        return 0;
}

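/*
 * Release the page table attached to the huge page by
 * arch_prepare_hugepage(); nothing to do with hardware large pages.
 */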
void arch_release_hugepage(struct page *page)
{
        pte_t *ptep;

        if (MACHINE_HAS_HPAGE)
                return;

        ptep = (pte_t *) page[1].index;
        if (!ptep)
                return;
        pte_free(&init_mm, ptep);
        page[1].index = 0;
}

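/*
 * Allocate the table levels down to the segment (pmd) level; the pmd
 * entry itself serves as the huge pte.
 */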
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        pudp = pud_alloc(mm, pgdp, addr);
        if (pudp)
                pmdp = pmd_alloc(mm, pudp, addr);
        return (pte_t *) pmdp;
}

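/*
 * Walk the existing page tables and return a pointer to the pmd entry
 * for addr, or NULL if the upper levels are not populated.
 */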
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        if (pgd_present(*pgdp)) {
                pudp = pud_offset(pgdp, addr);
                if (pud_present(*pudp))
                        pmdp = pmd_offset(pudp, addr);
        }
        return (pte_t *) pmdp;
}

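/* Huge pmd sharing is not implemented on s390. */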
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

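/*
 * Not used on s390: returning -EINVAL makes the generic follow_page()
 * code fall back to the pmd_huge()/follow_huge_pmd() path.
 */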
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        return ERR_PTR(-EINVAL);
}

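/*
 * A pmd maps a huge page only when hardware large pages are in use
 * and the segment entry has the LARGE bit set; the emulated case
 * points to a real page table and is not reported as huge here.
 */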
int pmd_huge(pmd_t pmd)
{
        if (!MACHINE_HAS_HPAGE)
                return 0;

        return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

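/*
 * Return the struct page of the 4K page within the huge page that
 * contains address (hardware large pages only).
 */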
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmdp, int write)
{
        struct page *page;

        if (!MACHINE_HAS_HPAGE)
                return NULL;

        page = pmd_page(*pmdp);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}