author	Gerald Schaefer <geraldsc@de.ibm.com>	2008-04-30 07:38:46 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-04-30 07:38:47 -0400
commit	53492b1de46a7576170e865062ffcfc93bb5650b (patch)
tree	bee94e5b2e8c19c1a094a25023cb82572707feb4 /include/asm-s390/hugetlb.h
parent	2e5061e40af88070984e3769eafb5a06022375fd (diff)
[S390] System z large page support.
This adds hugetlbfs support on System z, using hardware large page support
if available and software large page emulation on older hardware. Shared
(large) page tables are implemented in software emulation mode by using
page->index of the first tail page of a compound large page to store page
table information.

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
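For context, here is a minimal userspace sketch of how a large page would be
mapped through hugetlbfs on such a kernel. The mount point /mnt/huge and the
file name are assumptions, as is the hard-coded 1 MB page size (the System z
segment size; the running kernel reports the actual value as Hugepagesize in
/proc/meminfo):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(1UL << 20)	/* assumed: 1 MB segment size */

int main(void)
{
	/* Assumes hugetlbfs is mounted: mount -t hugetlbfs none /mnt/huge */
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * The length must be a multiple of the huge page size; the kernel
	 * backs the mapping with a large page (hardware or emulated).
	 */
	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, HPAGE_SIZE);	/* touch the page */
	munmap(p, HPAGE_SIZE);
	close(fd);
	return 0;
}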
Diffstat (limited to 'include/asm-s390/hugetlb.h')
-rw-r--r--	include/asm-s390/hugetlb.h | 183
1 file changed, 183 insertions(+), 0 deletions(-)
diff --git a/include/asm-s390/hugetlb.h b/include/asm-s390/hugetlb.h
new file mode 100644
index 000000000000..600a776f8f75
--- /dev/null
+++ b/include/asm-s390/hugetlb.h
@@ -0,0 +1,183 @@
/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2008
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>


#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range

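/* Install a huge pte at the given address; implemented out of line. */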
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

#define hugetlb_prefault_arch_hook(mm)		do { } while (0)

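/*
 * Prepare/release a compound page for use as a huge page. In software
 * emulation mode this is where the page table information kept in
 * page->index of the first tail page is set up and torn down.
 */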
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);

static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}

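/* Write protect a huge pte by setting the read-only bit. */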
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_RO;
	return pte;
}

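/*
 * An entry is empty when the segment-invalid bit is set and the
 * read-only bit is not.
 */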
static inline int huge_pte_none(pte_t pte)
{
	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
	       !(pte_val(pte) & _SEGMENT_ENTRY_RO);
}

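/*
 * Read a huge pte. Without hardware large pages the segment entry
 * points to a software page table; build an effective huge pte from
 * its first entry and merge in the invalid/read-only bits of the
 * segment entry.
 */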
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
		if (ptep) {
			mask = pte_val(pte) &
				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
			pte = pte_mkhuge(*ptep);
			pte_val(pte) |= mask;
		}
	}
	return pte;
}

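/* Read the current huge pte, then clear the underlying segment entry. */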
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);

	pmd_clear((pmd_t *) ptep);
	return pte;
}

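/*
 * Invalidate a segment table entry with compare-and-swap-and-purge
 * (csp), which also purges the TLB, then mark the entry as empty.
 */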
static inline void __pmd_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INV;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

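/*
 * Invalidate a segment table entry and flush matching TLB entries
 * with idte (invalidate DAT table entry); sto is the segment table
 * origin recovered from the entry address.
 */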
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
				pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
		);
	}
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

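/*
 * Invalidate a huge pte: use idte when available, csp otherwise, and
 * repeat the operation on the shadow table when the mm uses one for
 * the noexec feature.
 */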
static inline void huge_ptep_invalidate(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;

	if (!MACHINE_HAS_IDTE) {
		__pmd_csp(pmdp);
		if (mm->context.noexec) {
			pmdp = get_shadow_table(pmdp);
			__pmd_csp(pmdp);
		}
		return;
	}

	__pmd_idte(address, pmdp);
	if (mm->context.noexec) {
		pmdp = get_shadow_table(pmdp);
		__pmd_idte(address, pmdp);
	}
}

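/* Replace a huge pte only if the new entry differs from the old one. */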
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({									\
	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	\
	if (__changed) {						\
		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
	}								\
	__changed;							\
})

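/*
 * Write protect a huge pte, invalidating it first when the mm may be
 * in use on other CPUs.
 */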
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = huge_ptep_get(__ptep);				\
	if (pte_write(__pte)) {						\
		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
		    (__mm) != current->active_mm)			\
			huge_ptep_invalidate(__mm, __addr, __ptep);	\
		set_huge_pte_at(__mm, __addr, __ptep,			\
				huge_pte_wrprotect(__pte));		\
	}								\
})

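/* Clearing a huge pte requires a TLB flush; huge_ptep_invalidate() does both. */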
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_invalidate(vma->vm_mm, address, ptep);
}

#endif /* _ASM_S390_HUGETLB_H */