author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2007-10-22 06:52:47 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2007-10-22 06:52:49 -0400
commit		3610cce87af0693603db171d5b6f6735f5e3dc5b (patch)
tree		9aa7d9a0924b2f075c1b95ed57bb63ed512165c9 /include/asm-s390/pgalloc.h
parent		e4aa402e7a3b6b87d8df6243a37171cdcd2f01c2 (diff)
[S390] Cleanup page table definitions.
- De-confuse the defines for the address-space-control-elements and the
  segment/region table entries.
- Create out of line functions for page table allocation / freeing.
- Simplify get_shadow_xxx functions.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
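The header itself now only declares the allocation helpers; their out-of-line definitions are added elsewhere in the patch and do not appear in this file's diff. As a rough sketch of the shape such an allocator takes, reconstructed from the inline pgd_alloc() logic that the first hunk below removes (the body here is an assumption for illustration, not the patch's actual code; the removed inline version used allocation order 2 on 64 bit and parked the noexec shadow table in page->lru.next):

/* Hypothetical sketch only, modelled on the removed inline pgd_alloc(). */
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	unsigned long *table, *shadow;

	table = (unsigned long *) __get_free_pages(GFP_KERNEL, 2);
	if (!table)
		return NULL;
	if (noexec) {
		shadow = (unsigned long *) __get_free_pages(GFP_KERNEL, 2);
		if (!shadow) {
			free_pages((unsigned long) table, 2);
			return NULL;
		}
		/* the removed inline code kept the shadow reachable from
		 * the table's struct page */
		virt_to_page(table)->lru.next = (void *) shadow;
	}
	return table;
}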
Diffstat (limited to 'include/asm-s390/pgalloc.h')
-rw-r--r--	include/asm-s390/pgalloc.h	213
1 file changed, 70 insertions(+), 143 deletions(-)
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 6cbbfe4f6749..229b0bd59331 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -19,114 +19,75 @@
 
 #define check_pgt_cache()	do {} while (0)
 
-/*
- * Page allocation orders.
- */
-#ifndef __s390x__
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	0
-# define PGD_ALLOC_ORDER	1
-#else /* __s390x__ */
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	2
-# define PGD_ALLOC_ORDER	2
-#endif /* __s390x__ */
+unsigned long *crst_table_alloc(struct mm_struct *, int);
+void crst_table_free(unsigned long *);
 
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
+unsigned long *page_table_alloc(int);
+void page_table_free(unsigned long *);
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-	int i;
-
-	if (!pgd)
-		return NULL;
-	if (s390_noexec) {
-		pgd_t *shadow_pgd = (pgd_t *)
-			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pgd);
-
-		if (!shadow_pgd) {
-			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pgd;
-	}
-	for (i = 0; i < PTRS_PER_PGD; i++)
-#ifndef __s390x__
-		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
+	*s = val;
+	n = (n / 256) - 1;
+	asm volatile(
+#ifdef CONFIG_64BIT
+		"	mvc	8(248,%0),0(%0)\n"
 #else
-		pgd_clear(pgd + i);
+		"	mvc	4(252,%0),0(%0)\n"
 #endif
-	return pgd;
+		"0:	mvc	256(256,%0),0(%0)\n"
+		"	la	%0,256(%0)\n"
+		"	brct	%1,0b\n"
+		: "+a" (s), "+d" (n));
 }
 
-static inline void pgd_free(pgd_t *pgd)
+static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-
-	if (shadow_pgd)
-		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
-	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+	clear_table(crst, entry, sizeof(unsigned long)*2048);
+	crst = get_shadow_table(crst);
+	if (crst)
+		clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__
-/*
- * page middle directory allocation/free routines.
- * We use pmd cache only on s390x, so these are dummy routines. This
- * code never triggers because the pgd will always be present.
- */
-#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x)			do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
+{
+	return _SEGMENT_ENTRY_EMPTY;
+}
+
+#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)			do { } while (0)
+
+#define pgd_populate(mm, pmd, pte)	BUG()
 #define pgd_populate_kernel(mm, pmd, pte)	BUG()
+
 #else /* __s390x__ */
-static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-	int i;
-
-	if (!pmd)
-		return NULL;
-	if (s390_noexec) {
-		pmd_t *shadow_pmd = (pmd_t *)
-			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pmd);
-
-		if (!shadow_pmd) {
-			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pmd;
-	}
-	for (i=0; i < PTRS_PER_PMD; i++)
-		pmd_clear(pmd + i);
-	return pmd;
+	return _REGION3_ENTRY_EMPTY;
 }
 
-static inline void pmd_free (pmd_t *pmd)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
-
-	if (shadow_pmd)
-		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
-	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
+	return (pmd_t *) crst;
 }
+#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
 
-static inline void
-pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pgd_populate_kernel(struct mm_struct *mm,
+				       pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
+	pgd_val(*pgd) = _REGION3_ENTRY | __pa(pmd);
 }
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pgd_t *shadow_pgd = get_shadow_table(pgd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 
 	if (shadow_pgd && shadow_pmd)
 		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
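The clear_table() helper added above fills a table with copies of one template entry using block moves: the first mvc is a deliberately overlapping copy (on 64 bit it moves 248 bytes from offset 0 to offset 8, byte by byte, so the seeded 8-byte entry propagates through the first 256 bytes), and the loop at label 0 then replicates each completed 256-byte block into the next one, n/256 - 1 times via brct. The net effect matches this plain-C loop (an illustrative equivalent, not code from the patch):

/* C equivalent of the mvc sequence in clear_table(): every slot of an
 * n-byte table receives the same template entry. */
static inline void clear_table_c(unsigned long *s, unsigned long val,
				 size_t n)
{
	size_t i;

	for (i = 0; i < n / sizeof(unsigned long); i++)
		s[i] = val;
}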
@@ -135,17 +96,26 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 
 #endif /* __s390x__ */
 
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, pgd_entry_type(mm));
+	return (pgd_t *) crst;
+}
+#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
+
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
 #ifndef __s390x__
-	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
-	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
-	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
-	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
+	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
+	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
+	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
 #else /* __s390x__ */
-	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
-	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
+	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
 #endif /* __s390x__ */
 }
 
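The 31-bit branch of pmd_populate_kernel() fills four segment-table entries at a time because a 31-bit page table holds 256 entries of 4 bytes each: pte + 256 advances exactly 1 KB, so pmd[0] through pmd[3] point at the four 1 KB page tables packed into a single 4 KB page, each covering one 1 MB segment. A standalone sanity check of that arithmetic (illustration only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 31-bit pte_t is 4 bytes: 256 entries span 1 KB, so the four
	 * pmd slots land at 0, 1, 2 and 3 KB inside one 4 KB page. */
	assert(256 * sizeof(uint32_t) == 1024);
	assert(4 * 256 * sizeof(uint32_t) == 4096);
	return 0;
}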
@@ -153,7 +123,7 @@ static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
 	pte_t *pte = (pte_t *)page_to_phys(page);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 	pte_t *shadow_pte = get_shadow_pte(pte);
 
 	pmd_populate_kernel(mm, pmd, pte);
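pmd_populate() mirrors each update into the shadow tables that back execute protection when s390_noexec is set; the unified get_shadow_table() (replacing the separate get_shadow_pgd()/get_shadow_pmd()) is defined in pgtable.h, not in this header. One plausible shape for it, assuming the shadow is still recovered via the table's struct page the way the removed inline allocators stored it (an assumption for illustration, not the patch's code):

/* Hypothetical helper: recover a table's noexec shadow from its struct
 * page, where the old inline allocators stashed it via lru.next. */
static inline void *get_shadow_table_sketch(void *table)
{
	struct page *page = virt_to_page(table);

	return s390_noexec ? (void *) page->lru.next : NULL;
}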
@@ -164,57 +134,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 /*
  * page table entry allocation/free routines.
  */
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	int i;
-
-	if (!pte)
-		return NULL;
-	if (s390_noexec) {
-		pte_t *shadow_pte = (pte_t *)
-			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		struct page *page = virt_to_page(pte);
-
-		if (!shadow_pte) {
-			free_page((unsigned long) pte);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pte;
-	}
-	for (i=0; i < PTRS_PER_PTE; i++) {
-		pte_clear(mm, vmaddr, pte + i);
-		vmaddr += PAGE_SIZE;
-	}
-	return pte;
-}
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
-	if (pte)
-		return virt_to_page(pte);
-	return NULL;
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-	pte_t *shadow_pte = get_shadow_pte(pte);
-
-	if (shadow_pte)
-		free_page((unsigned long) shadow_pte);
-	free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
-	struct page *shadow_page = get_shadow_page(pte);
-
-	if (shadow_page)
-		__free_page(shadow_page);
-	__free_page(pte);
-}
+#define pte_alloc_one_kernel(mm, vmaddr) \
+	((pte_t *) page_table_alloc(s390_noexec))
+#define pte_alloc_one(mm, vmaddr) \
+	virt_to_page(page_table_alloc(s390_noexec))
+
+#define pte_free_kernel(pte) \
+	page_table_free((unsigned long *) pte)
+#define pte_free(pte) \
+	page_table_free((unsigned long *) page_to_phys((struct page *) pte))
 
 #endif /* _S390_PGALLOC_H */
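After this conversion the pte-level helpers are thin macro wrappers, so every caller funnels into the shared page_table_alloc()/page_table_free() pair declared at the top of the header. An illustrative kernel-style usage under that assumption (not code from the patch):

/* Illustration only: how generic code routes through the new macros. */
static int pgalloc_example(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr); /* page_table_alloc() */

	if (!pte)
		return -ENOMEM;
	pte_free_kernel(pte);	/* page_table_free() */
	return 0;
}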