Diffstat (limited to 'include/asm-s390/pgtable.h')
-rw-r--r--	include/asm-s390/pgtable.h	813
1 files changed, 813 insertions, 0 deletions

diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
new file mode 100644
index 000000000000..1633cb75f057
--- /dev/null
+++ b/include/asm-s390/pgtable.h
@@ -0,0 +1,813 @@
/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/bug.h>
#include <asm/processor.h>
#include <linux/threads.h>

struct vm_area_struct; /* forward declaration (include/linux/mm.h) */

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	22
# define PGDIR_SHIFT	22
#else /* __s390x__ */
# define PMD_SHIFT	21
# define PGDIR_SHIFT	31
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 mmu in 31 bit mode is
 * two-level, so we don't really have any PMD directory physically.
 * For the 31 bit case, four hardware segment-table entries are combined
 * into one pgd entry, which leads to 1024 ptes per pgd.
 */
#ifndef __s390x__
# define PTRS_PER_PTE	1024
# define PTRS_PER_PMD	1
# define PTRS_PER_PGD	512
#else /* __s390x__ */
# define PTRS_PER_PTE	512
# define PTRS_PER_PMD	1024
# define PTRS_PER_PGD	2048
#endif /* __s390x__ */
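
/*
 * A quick sanity check of these values: a pgd maps
 * PTRS_PER_PGD * PGDIR_SIZE bytes, i.e. 512 * 4MB = 2GB on 31 bit
 * and 2048 * 2GB = 4TB on 64 bit; the full 31 bit address space and
 * the reach of the three used translation levels, respectively.
 */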

/*
 * pgd entries used up by user/kernel:
 */
#ifndef __s390x__
# define USER_PTRS_PER_PGD	512
# define USER_PGD_PTRS		512
# define KERNEL_PGD_PTRS	512
# define FIRST_USER_PGD_NR	0
#else /* __s390x__ */
# define USER_PTRS_PER_PGD	2048
# define USER_PGD_PTRS		2048
# define KERNEL_PGD_PTRS	2048
# define FIRST_USER_PGD_NR	0
#endif /* __s390x__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + VMALLOC_OFFSET) \
			 & ~(VMALLOC_OFFSET-1))
#ifndef __s390x__
# define VMALLOC_END	(0x7fffffffL)
#else /* __s390x__ */
# define VMALLOC_END	(0x40000000000L)
#endif /* __s390x__ */
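
/*
 * Worked example: with 128MB of RAM, high_memory is 0x8000000, so
 * VMALLOC_START = (0x8000000 + 0x800000) & ~0x7fffff = 0x8800000,
 * leaving exactly the 8MB hole described above.
 */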

/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                     PFRA                         |0IP0|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_RO	0x200		/* HW read-only			    */
#define _PAGE_INVALID	0x400		/* HW invalid			    */

/*
 * Mask and four different kinds of invalid pages: software uses these
 * encodings to distinguish empty ptes (pte_none), inaccessible
 * PROT_NONE pages, swap entries and nonlinear file ptes, all of which
 * have the hardware invalid bit set.
 */
#define _PAGE_INVALID_MASK	0x601
#define _PAGE_INVALID_EMPTY	0x400
#define _PAGE_INVALID_NONE	0x401
#define _PAGE_INVALID_SWAP	0x600
#define _PAGE_INVALID_FILE	0x601

#ifndef __s390x__

/* Bits in the segment table entry */
#define _PAGE_TABLE_LEN 0xf            /* only full page-tables            */
#define _PAGE_TABLE_COM 0x10           /* common page-table                */
#define _PAGE_TABLE_INV 0x20           /* invalid page-table               */
#define _SEG_PRESENT    0x001          /* Software (overlap with PTL)      */

/* Bits in the storage key */
#define _PAGE_CHANGED    0x02          /* HW changed bit                   */
#define _PAGE_REFERENCED 0x04          /* HW referenced bit                */

#define _USER_SEG_TABLE_LEN    0x7f    /* user-segment-table up to 2 GB    */
#define _KERNEL_SEG_TABLE_LEN  0x7f    /* kernel-segment-table up to 2 GB  */

/*
 * User and kernel page tables are identical
 */
#define _PAGE_TABLE	_PAGE_TABLE_LEN
#define _KERNPG_TABLE	_PAGE_TABLE_LEN

/*
 * The kernel segment table includes the user segment table
 */
#define _SEGMENT_TABLE	(_USER_SEG_TABLE_LEN|0x80000000|0x100)
#define _KERNSEG_TABLE	_KERNEL_SEG_TABLE_LEN

#define USER_STD_MASK	0x00000080UL

#else /* __s390x__ */

/* Bits in the segment table entry */
#define _PMD_ENTRY_INV	0x20		/* invalid segment table entry      */
#define _PMD_ENTRY	0x00

/* Bits in the region third table entry */
#define _PGD_ENTRY_INV	0x20		/* invalid region table entry       */
#define _PGD_ENTRY	0x07

/*
 * User and kernel page directory
 */
#define _REGION_THIRD		0x4
#define _REGION_THIRD_LEN	0x3
#define _REGION_TABLE		(_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
#define _KERN_REGION_TABLE	(_REGION_THIRD|_REGION_THIRD_LEN)

#define USER_STD_MASK		0x0000000000000080UL

/* Bits in the storage key */
#define _PAGE_CHANGED    0x02          /* HW changed bit                   */
#define _PAGE_REFERENCED 0x04          /* HW referenced bit                */

#endif /* __s390x__ */

/*
 * No mapping available
 */
#define PAGE_NONE_SHARED  __pgprot(_PAGE_INVALID_NONE)
#define PAGE_NONE_PRIVATE __pgprot(_PAGE_INVALID_NONE)
#define PAGE_RO_SHARED	  __pgprot(_PAGE_RO)
#define PAGE_RO_PRIVATE	  __pgprot(_PAGE_RO)
#define PAGE_COPY	  __pgprot(_PAGE_RO)
#define PAGE_SHARED	  __pgprot(0)
#define PAGE_KERNEL	  __pgprot(0)

/*
 * The S390 can't do page protection for execute, and considers execute
 * to be the same as read. Also, write permissions imply read permissions.
 * This is the closest we can get. Note that private writable mappings
 * get PAGE_COPY (read-only), so the first write faults and can be
 * handled as copy-on-write.
 */
         /*xwr*/
#define __P000	PAGE_NONE_PRIVATE
#define __P001	PAGE_RO_PRIVATE
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_RO_PRIVATE
#define __P101	PAGE_RO_PRIVATE
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE_SHARED
#define __S001	PAGE_RO_SHARED
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_RO_SHARED
#define __S101	PAGE_RO_SHARED
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
extern inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	*pteptr = pteval;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

extern inline int pgd_present(pgd_t pgd) { return 1; }
extern inline int pgd_none(pgd_t pgd)    { return 0; }
extern inline int pgd_bad(pgd_t pgd)     { return 0; }

extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
extern inline int pmd_none(pmd_t pmd)    { return pmd_val(pmd) & _PAGE_TABLE_INV; }
extern inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE;
}

#else /* __s390x__ */

extern inline int pgd_present(pgd_t pgd)
{
	return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY;
}

extern inline int pgd_none(pgd_t pgd)
{
	return pgd_val(pgd) & _PGD_ENTRY_INV;
}

extern inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY;
}

extern inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY;
}

extern inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) & _PMD_ENTRY_INV;
}

extern inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY;
}

#endif /* __s390x__ */

extern inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_EMPTY;
}

extern inline int pte_present(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_INVALID) ||
		(pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_NONE;
}

extern inline int pte_file(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_FILE;
}

#define pte_same(a,b)	(pte_val(a) == pte_val(b))

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

extern inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_and_clear_dirty for
	 * details.
	 */
	return 0;
}

extern inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

extern inline int pte_read(pte_t pte)
{
	/* All pages are readable since we don't use the fetch
	 * protection bit in the storage key.
	 */
	return 1;
}
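
/*
 * Illustrative consequence for generic code (a sketch, not part of the
 * original header): dirty/young state is tested per physical page via
 * the storage key helpers defined below, e.g.
 *
 *	if (page_test_and_clear_dirty(page))
 *		set_page_dirty(page);
 *
 * rather than by inspecting individual ptes.
 */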

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

extern inline void pgd_clear(pgd_t * pgdp) { }

extern inline void pmd_clear(pmd_t * pmdp)
{
	/* A pmd covers 4MB but hardware segments cover 1MB, so a pmd
	 * consists of four segment-table entries; invalidate all four.
	 */
	pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
	pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
	pmd_val(pmdp[2]) = _PAGE_TABLE_INV;
	pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
}

#else /* __s390x__ */

extern inline void pgd_clear(pgd_t * pgdp)
{
	pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
}

extern inline void pmd_clear(pmd_t * pmdp)
{
	/* The 64 bit pmd is two words wide; invalidate both halves. */
	pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
	pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
}

#endif /* __s390x__ */

extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= PAGE_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

extern inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_INVALID_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

extern inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

extern inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings. So we just do nothing. */
	return pte;
}

extern inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

extern inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

extern inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline int
ptep_clear_flush_young(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB; bits are in storage key */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline int
ptep_clear_flush_dirty(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB; bits are in storage key */
	return ptep_test_and_clear_dirty(vma, address, ptep);
}

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
}

static inline pte_t
ptep_clear_flush(struct vm_area_struct *vma,
		 unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
#ifndef __s390x__
	if (!(pte_val(pte) & _PAGE_INVALID)) {
		/* S390 has 1mb segments, we are emulating 4MB segments.
		 * Round the pte pointer down to the 1kb hardware page
		 * table containing it; ipte wants the page-table origin
		 * and the virtual address.
		 */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
		__asm__ __volatile__ ("ipte %2,%3"
				      : "=m" (*ptep) : "m" (*ptep),
					"a" (pto), "a" (address) );
	}
#else /* __s390x__ */
	if (!(pte_val(pte) & _PAGE_INVALID))
		__asm__ __volatile__ ("ipte %2,%3"
				      : "=m" (*ptep) : "m" (*ptep),
					"a" (ptep), "a" (address) );
#endif /* __s390x__ */
	pte_val(*ptep) = _PAGE_INVALID_EMPTY;
	return pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}

static inline void
ptep_establish(struct vm_area_struct *vma,
	       unsigned long address, pte_t *ptep,
	       pte_t entry)
{
	ptep_clear_flush(vma, address, ptep);
	set_pte(ptep, entry);
}

#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	ptep_establish(__vma, __address, __ptep, __entry)

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if the page is not mapped in any
 * address space.
 */
#define page_test_and_clear_dirty(_page)				   \
({									   \
	struct page *__page = (_page);					   \
	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);  \
	int __skey = page_get_storage_key(__physpage);			   \
	if (__skey & _PAGE_CHANGED)					   \
		page_set_storage_key(__physpage, __skey & ~_PAGE_CHANGED);\
	(__skey & _PAGE_CHANGED);					   \
})

/*
 * Test and clear referenced bit in storage key: rrbe resets the
 * reference bit and reflects its old value in the condition code,
 * ipm/srl extract the condition code, and condition codes 2 and 3
 * (bit value 2) mean the reference bit was set.
 */
#define page_test_and_clear_young(page)					   \
({									   \
	struct page *__page = (page);					   \
	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);   \
	int __ccode;							   \
	asm volatile ("rrbe 0,%1\n\t"					   \
		      "ipm %0\n\t"					   \
		      "srl %0,28\n\t"					   \
		      : "=d" (__ccode) : "a" (__physpage) : "cc" );	   \
	(__ccode & 2);							   \
})

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

#define mk_pte(pg, pgprot)						   \
({									   \
	struct page *__page = (pg);					   \
	pgprot_t __pgprot = (pgprot);					   \
	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);   \
	pte_t __pte = mk_pte_phys(__physpage, __pgprot);		   \
	__pte;								   \
})

#define pfn_pte(pfn, pgprot)						   \
({									   \
	pgprot_t __pgprot = (pgprot);					   \
	unsigned long __physpage = __pa((pfn) << PAGE_SHIFT);		   \
	pte_t __pte = mk_pte_phys(__physpage, __pgprot);		   \
	__pte;								   \
})
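
/*
 * Usage sketch (illustrative only): build a read-only pte for page
 * frame number 0x1234 and install it.
 *
 *	pte_t pte = pfn_pte(0x1234UL, PAGE_RO_SHARED);
 *	set_pte_at(mm, addr, ptep, pte);
 */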

#define SetPageUptodate(_page)						   \
do {									   \
	struct page *__page = (_page);					   \
	if (!test_and_set_bit(PG_uptodate, &__page->flags))		   \
		page_test_and_clear_dirty(_page);			   \
} while (0)

#ifdef __s390x__

#define pfn_pmd(pfn, pgprot)						   \
({									   \
	pgprot_t __pgprot = (pgprot);					   \
	unsigned long __physpage = __pa((pfn) << PAGE_SHIFT);		   \
	pmd_t __pmd = __pmd(__physpage + pgprot_val(__pgprot));		   \
	__pmd;								   \
})

#endif /* __s390x__ */

#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page_kernel(pmd) (pmd_val(pmd) & PAGE_MASK)

#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))

#define pgd_page_kernel(pgd) (pgd_val(pgd) & PAGE_MASK)

/* to find an entry in a page-table-directory */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

#else /* __s390x__ */

/* Find an entry in the second-level page table.. */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir,addr) \
	((pmd_t *) pgd_page_kernel(*(dir)) + pmd_index(addr))

#endif /* __s390x__ */

/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
	((pte_t *) pmd_page_kernel(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
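
/*
 * Putting the lookup macros together: an illustrative helper (a sketch,
 * not part of the original header) that walks from a mm's pgd down to
 * the pte mapping a given address.
 */
static inline pte_t *pte_lookup(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}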

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_INVALID_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}
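
/*
 * Worked example (31 bit): type 3, offset 0x123 encodes as
 * 0x600 | (3 << 2) | (1 << 7) | (0x122 << 11) = 0x9168c, and the
 * __swp_type()/__swp_offset() macros below recover 3 and 0x123 from
 * that value.
 */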

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_INVALID_FILE })
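
/*
 * Worked example: pgoff 0x1ff encodes as
 * ((0x1ff & 0x7f) << 1) + ((0x1ff >> 7) << 12) | 0x601 = 0x36ff,
 * and pte_to_pgoff() recovers (3 << 7) + 0x7f = 0x1ff from it.
 */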

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#define __HAVE_ARCH_PTEP_ESTABLISH
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */