author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-22 22:23:34 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-22 22:23:34 -0400
commit     56d61a0e26c5a61c66d1ac259a59960295939da9 (patch)
tree       a23a30a966fe4220060682179294087cba1f9c57 /include/asm-s390/pgalloc.h
parent     5f48b338cd28f4095697a174d7e3e72084aca893 (diff)
parent     190a1d722a59725706daf832bc8a511ed62f249d (diff)
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
[S390] 4level-fixup cleanup
[S390] Cleanup page table definitions.
[S390] Introduce follow_table in uaccess_pt.c
[S390] Remove unused user_seg from thread structure.
[S390] tlb flush fix.
[S390] kernel: Fix dump on panic for DASDs under LPAR.
[S390] struct class_device -> struct device conversion.
[S390] cio: Fix incomplete commit for uevent suppression.
[S390] cio: Use to_channelpath() for device to channel path conversion.
[S390] Add per-cpu idle time / idle count sysfs attributes.
[S390] Update default configuration.
Diffstat (limited to 'include/asm-s390/pgalloc.h')
-rw-r--r--  include/asm-s390/pgalloc.h  250
1 file changed, 86 insertions(+), 164 deletions(-)
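
For orientation before the hunks: this cleanup retires the per-level page allocation orders and replaces them with two shared helper pairs, crst_table_alloc()/crst_table_free() for region and segment (CRST) tables and page_table_alloc()/page_table_free() for pte tables. A condensed before/after sketch, using only identifiers that appear in the diff below (a reading aid, not compilable on its own):

    /* before: one buddy allocation per table level; noexec shadow
     * tables were duplicated by hand at every call site */
    pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);

    /* after: common helpers; the int argument says whether a shadow
     * table for s390_noexec is allocated alongside the primary one */
    unsigned long *crst = crst_table_alloc(mm, s390_noexec);
    crst_table_init(crst, pgd_entry_type(mm));  /* fill with empty entries */
    unsigned long *pte = page_table_alloc(s390_noexec);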
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index e45d3c9a4b7e..709dd1740956 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -19,140 +19,115 @@
 
 #define check_pgt_cache() do {} while (0)
 
-/*
- * Page allocation orders.
- */
-#ifndef __s390x__
-# define PTE_ALLOC_ORDER 0
-# define PMD_ALLOC_ORDER 0
-# define PGD_ALLOC_ORDER 1
-#else /* __s390x__ */
-# define PTE_ALLOC_ORDER 0
-# define PMD_ALLOC_ORDER 2
-# define PGD_ALLOC_ORDER 2
-#endif /* __s390x__ */
+unsigned long *crst_table_alloc(struct mm_struct *, int);
+void crst_table_free(unsigned long *);
 
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
+unsigned long *page_table_alloc(int);
+void page_table_free(unsigned long *);
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-        pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-        int i;
-
-        if (!pgd)
-                return NULL;
-        if (s390_noexec) {
-                pgd_t *shadow_pgd = (pgd_t *)
-                        __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-                struct page *page = virt_to_page(pgd);
-
-                if (!shadow_pgd) {
-                        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
-                        return NULL;
-                }
-                page->lru.next = (void *) shadow_pgd;
-        }
-        for (i = 0; i < PTRS_PER_PGD; i++)
-#ifndef __s390x__
-                pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
+        *s = val;
+        n = (n / 256) - 1;
+        asm volatile(
+#ifdef CONFIG_64BIT
+        "       mvc     8(248,%0),0(%0)\n"
 #else
-                pgd_clear(pgd + i);
+        "       mvc     4(252,%0),0(%0)\n"
 #endif
-        return pgd;
+        "0:     mvc     256(256,%0),0(%0)\n"
+        "       la      %0,256(%0)\n"
+        "       brct    %1,0b\n"
+        : "+a" (s), "+d" (n));
 }
 
-static inline void pgd_free(pgd_t *pgd)
+static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-        pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-
-        if (shadow_pgd)
-                free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
-        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+        clear_table(crst, entry, sizeof(unsigned long)*2048);
+        crst = get_shadow_table(crst);
+        if (crst)
+                clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__
-/*
- * page middle directory allocation/free routines.
- * We use pmd cache only on s390x, so these are dummy routines. This
- * code never triggers because the pgd will always be present.
- */
-#define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x)                     do { } while (0)
-#define __pmd_free_tlb(tlb,x)           do { } while (0)
-#define pgd_populate(mm, pmd, pte)      BUG()
-#define pgd_populate_kernel(mm, pmd, pte)       BUG()
-#else /* __s390x__ */
-static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-        pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-        int i;
-
-        if (!pmd)
-                return NULL;
-        if (s390_noexec) {
-                pmd_t *shadow_pmd = (pmd_t *)
-                        __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-                struct page *page = virt_to_page(pmd);
-
-                if (!shadow_pmd) {
-                        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-                        return NULL;
-                }
-                page->lru.next = (void *) shadow_pmd;
-        }
-        for (i=0; i < PTRS_PER_PMD; i++)
-                pmd_clear(pmd + i);
-        return pmd;
+        return _SEGMENT_ENTRY_EMPTY;
 }
 
-static inline void pmd_free (pmd_t *pmd)
+#define pud_alloc_one(mm,address)       ({ BUG(); ((pud_t *)2); })
+#define pud_free(x)                     do { } while (0)
+
+#define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)                     do { } while (0)
+
+#define pgd_populate(mm, pgd, pud)      BUG()
+#define pgd_populate_kernel(mm, pgd, pud)       BUG()
+
+#define pud_populate(mm, pud, pmd)      BUG()
+#define pud_populate_kernel(mm, pud, pmd)       BUG()
+
+#else /* __s390x__ */
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-        pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+        return _REGION3_ENTRY_EMPTY;
+}
+
+#define pud_alloc_one(mm,address)       ({ BUG(); ((pud_t *)2); })
+#define pud_free(x)                     do { } while (0)
 
-        if (shadow_pmd)
-                free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
-        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+{
+        unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+        if (crst)
+                crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
+        return (pmd_t *) crst;
 }
+#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
 
-#define __pmd_free_tlb(tlb,pmd)                 \
-        do {                                    \
-                tlb_flush_mmu(tlb, 0, 0);       \
-                pmd_free(pmd);                  \
-        } while (0)
+#define pgd_populate(mm, pgd, pud)      BUG()
+#define pgd_populate_kernel(mm, pgd, pud)       BUG()
 
-static inline void
-pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pud_populate_kernel(struct mm_struct *mm,
+                                       pud_t *pud, pmd_t *pmd)
 {
-        pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
+        pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-        pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-        pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+        pud_t *shadow_pud = get_shadow_table(pud);
+        pmd_t *shadow_pmd = get_shadow_table(pmd);
 
-        if (shadow_pgd && shadow_pmd)
-                pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
-        pgd_populate_kernel(mm, pgd, pmd);
+        if (shadow_pud && shadow_pmd)
+                pud_populate_kernel(mm, shadow_pud, shadow_pmd);
+        pud_populate_kernel(mm, pud, pmd);
 }
 
 #endif /* __s390x__ */
 
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+        unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+        if (crst)
+                crst_table_init(crst, pgd_entry_type(mm));
+        return (pgd_t *) crst;
+}
+#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
+
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
 #ifndef __s390x__
-        pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
-        pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
-        pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
-        pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
+        pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
+        pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
+        pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
+        pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
 #else /* __s390x__ */
-        pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
-        pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
+        pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+        pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
 #endif /* __s390x__ */
 }
 
@@ -160,7 +135,7 @@ static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
         pte_t *pte = (pte_t *)page_to_phys(page);
-        pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+        pmd_t *shadow_pmd = get_shadow_table(pmd);
         pte_t *shadow_pte = get_shadow_pte(pte);
 
         pmd_populate_kernel(mm, pmd, pte);
@@ -171,67 +146,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 /*
  * page table entry allocation/free routines.
  */
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
-{
-        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        int i;
-
-        if (!pte)
-                return NULL;
-        if (s390_noexec) {
-                pte_t *shadow_pte = (pte_t *)
-                        __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-                struct page *page = virt_to_page(pte);
-
-                if (!shadow_pte) {
-                        free_page((unsigned long) pte);
-                        return NULL;
-                }
-                page->lru.next = (void *) shadow_pte;
-        }
-        for (i=0; i < PTRS_PER_PTE; i++) {
-                pte_clear(mm, vmaddr, pte + i);
-                vmaddr += PAGE_SIZE;
-        }
-        return pte;
-}
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
-{
-        pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
-        if (pte)
-                return virt_to_page(pte);
-        return NULL;
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-        pte_t *shadow_pte = get_shadow_pte(pte);
-
-        if (shadow_pte)
-                free_page((unsigned long) shadow_pte);
-        free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
-        struct page *shadow_page = get_shadow_page(pte);
-
-        if (shadow_page)
-                __free_page(shadow_page);
-        __free_page(pte);
-}
-
-#define __pte_free_tlb(tlb, pte)                                \
-({                                                              \
-        struct mmu_gather *__tlb = (tlb);                       \
-        struct page *__pte = (pte);                             \
-        struct page *shadow_page = get_shadow_page(__pte);      \
-        if (shadow_page)                                        \
-                tlb_remove_page(__tlb, shadow_page);            \
-        tlb_remove_page(__tlb, __pte);                          \
-})
+#define pte_alloc_one_kernel(mm, vmaddr) \
+        ((pte_t *) page_table_alloc(s390_noexec))
+#define pte_alloc_one(mm, vmaddr) \
+        virt_to_page(page_table_alloc(s390_noexec))
+
+#define pte_free_kernel(pte) \
+        page_table_free((unsigned long *) pte)
+#define pte_free(pte) \
+        page_table_free((unsigned long *) page_to_phys((struct page *) pte))
 
 #endif /* _S390_PGALLOC_H */
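
The least obvious piece of the new code is clear_table(). It stores the entry value once, lets an overlapping mvc (whose destination runs one entry, 8 or 4 bytes, ahead of its source) smear that value across the first 256 bytes, and then copies the filled 256-byte block forward n/256 - 1 times under brct. A plain-C restatement of the same semantics, given here only as a reading aid (the loop is a sketch, not kernel code):

    #include <stddef.h>

    /* Fill a table of n bytes with the repeated entry value val.
     * Matches the effect of the s390 clear_table() above, which uses
     * overlapping MVC instructions in 256-byte strides instead of a
     * word-at-a-time loop. */
    static inline void clear_table_sketch(unsigned long *s,
                                          unsigned long val, size_t n)
    {
            size_t i;

            for (i = 0; i < n / sizeof(unsigned long); i++)
                    s[i] = val;
    }

With entries of sizeof(unsigned long) bytes, crst_table_init()'s call clear_table(crst, entry, sizeof(unsigned long)*2048) therefore writes the empty entry type into all 2048 slots of a CRST table.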