Diffstat (limited to 'arch/s390/include/asm/pgtable.h')
-rw-r--r--  arch/s390/include/asm/pgtable.h | 1093
1 file changed, 1093 insertions, 0 deletions
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
new file mode 100644
index 000000000000..0bdb704ae051
--- /dev/null
+++ b/arch/s390/include/asm/pgtable.h
@@ -0,0 +1,1093 @@
/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
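
/*
 * Illustrative arithmetic, not part of the original header: with 4K
 * pages these shifts give PMD_SIZE = 1UL << 20 = 1 MiB on both
 * builds; on 64 bit, PUD_SIZE = 1UL << 31 = 2 GiB and PGDIR_SIZE =
 * 1UL << 42 = 4 TiB per top-level entry.
 */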

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390 segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048
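
/*
 * Illustrative check, not in the original: the entry counts multiply
 * out to the full address space. 31 bit: 2048 pgd entries * 1 MiB
 * (PGDIR_SIZE) = 2 GiB. 64 bit: 2048 * 2048 * 2048 * 256 entries *
 * 4096 bytes = 2^53 bytes = 8 PiB of virtual space.
 */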

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */
#ifndef __s390x__
#define VMALLOC_START	0x78000000UL
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_START	0x3e000000000UL
#define VMALLOC_END	0x3e040000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)
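
/*
 * Worked example, illustrative only: on 31 bit with 4K pages, and
 * assuming sizeof(struct page) == 32, VMEM_MAX_PAGES is
 * (0x80000000UL - 0x7e000000UL) / 32 = 0x100000 pages, while
 * VMALLOC_START >> PAGE_SHIFT = 0x78000. The min() picks 0x78000, so
 * VMEM_MAX_PHYS = (0x78000 << 12) & ~((16 << 20) - 1) = 0x78000000:
 * the 1:1 mapping may grow right up to VMALLOC_START.
 */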

/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                     PFRA                         |0IP0|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
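
/*
 * Worked decode, not part of the original header: _PAGE_TYPE_RO is
 * 0x200 (irxt = 0100). After ipte the hw invalid bit 0x400 is set,
 * giving 0x600 (irxt = 1100); pte_none() then sees _PAGE_INVALID set
 * and _PAGE_SWT clear and reports the pte as none, matching the
 * 0100 -> 1100 transition in the table above.
 */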

/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED	0x02	/* HW changed bit */
#define _PAGE_REFERENCED 0x04	/* HW referenced bit */

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that execute permission currently
 * implies read permission as well.
 */
        /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW
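
/*
 * Illustrative reading of the tables above, not from the original: a
 * private PROT_READ|PROT_WRITE mapping uses __P011 = PAGE_RO, so the
 * first write faults and is resolved by copy-on-write, while a shared
 * PROT_READ|PROT_WRITE mapping uses __S011 = PAGE_RW and can be
 * written directly.
 */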

#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */

static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
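
/*
 * Illustrative note, not in the original: the address of a table's
 * shadow is stashed in page->index of the page holding the primary
 * table. The function above masks off the offset within the
 * (PAGE_SIZE << PxD_SHADOW_SHIFT) sized table, looks up that page and
 * re-applies the offset, yielding NULL when no shadow is attached.
 */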

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}
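
/*
 * Sketch of the effect above, illustrative only: for a noexec mm the
 * shadow pte at ptep[PTRS_PER_PTE] mirrors the primary pte, but is
 * made read-only for executable (_PAGE_SWX) ptes and emptied for
 * everything else, so the shadow space only ever maps pages that were
 * installed with execute permission.
 */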

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */
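
/*
 * Illustrative example, not from the original: with dynamic page
 * table levels a small address space may start directly at a region
 * third or segment table. Its entries then carry a type below R2, so
 * the type check in pgd_present()/pgd_none() short-circuits and the
 * pgd behaves as a folded, always-present level.
 */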

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))

static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}
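
/*
 * Illustrative note, not in the original: the page status table entry
 * (pgste) for a pte sits one page table length behind it, which is
 * why ptep + PTRS_PER_PTE addresses it directly; RCP_PCL_BIT acts as
 * a per-pte lock bit serializing updates of the host and guest
 * referenced/changed bits.
 */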

/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED)
		set_bit_simple(RCP_GC_BIT, pgste);
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
		SetPageDirty(page);
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_and_clear_dirty for
	 * details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	if (mm->context.pgstes)
		ptep_rcp_copy(ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings. So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.pgstes)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB;
	 * on s390 reference bits are in the storage key and never in the TLB.
	 * With virtualization we handle the reference bit, without it
	 * we can simply return. */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}
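
/*
 * Illustrative note, not in the original: because the reference bit
 * lives in the storage key rather than in the TLB, "clearing young"
 * never needs a TLB flush here; under CONFIG_PGSTE the host's view of
 * the bit is folded into the pgste under the rcp lock instead.
 */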

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte %2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
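
/*
 * Illustrative note, not in the original: ipte takes the page table
 * origin and the virtual address, sets the hw invalid bit in the pte
 * and purges the matching TLB entries on all CPUs. On 31 bit the
 * origin must be the 1 KB aligned start of the page table (256
 * entries * 4 bytes), hence the 0x7ffffc00 mask above.
 */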

static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.pgstes) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})
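
/*
 * Illustrative reading of the macro above, not from the original: the
 * mm_users/active_mm test asks whether another CPU could have this mm
 * active. Only then is the broadcast ipte in ptep_invalidate() paid
 * for; a strictly local mm gets away with a plain pte_clear().
 */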

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
	}								\
})

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if it is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}

/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}
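
/*
 * Illustrative note, not in the original: rrbe resets the reference
 * bit in the storage key and reports its previous state in the
 * condition code (cc 2 or 3 means the bit was set); ipm/srl move the
 * cc into a register, so "ccode & 2" extracts exactly the old
 * reference bit.
 */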

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
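
/*
 * Worked round trip, illustrative only: pgoff_to_pte(0x1ab) puts the
 * low 7 offset bits at bit 1 and the rest at bit 12, giving
 * 0x3056 | 0x601 = 0x3657; pte_to_pgoff() then recovers
 * ((0x3657 >> 12) << 7) + ((0x3657 >> 1) & 0x7f) = 0x180 + 0x2b =
 * 0x1ab.
 */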

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */