author     Gerald Schaefer <geraldsc@de.ibm.com>        2006-09-20 09:59:37 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-09-20 09:59:37 -0400
commit     9282ed929758b82f448a40d3c17319d794970624
tree       f3db7796f0ea7afddc853ab4294033b4fdd6d785
parent     31b58088292c7f00f0b81088bfb557285b0b6247
[S390] Cleanup in page table related code.
Changed and simplified some page table related #defines and code.

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--  arch/s390/mm/init.c          36
-rw-r--r--  include/asm-s390/pgalloc.h   67
-rw-r--r--  include/asm-s390/pgtable.h  124
3 files changed, 106 insertions(+), 121 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 6e6b6de77770..cfd9b8f7a523 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -108,16 +108,23 @@ void __init paging_init(void)
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
         static const int ssm_mask = 0x04000000L;
         unsigned long ro_start_pfn, ro_end_pfn;
+        unsigned long zones_size[MAX_NR_ZONES];
 
         ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
         ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
+        memset(zones_size, 0, sizeof(zones_size));
+        zones_size[ZONE_DMA] = max_low_pfn;
+        free_area_init_node(0, &contig_page_data, zones_size,
+                            __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+                            zholes_size);
+
         /* unmap whole virtual address space */
 
         pg_dir = swapper_pg_dir;
 
-        for (i=0;i<KERNEL_PGD_PTRS;i++)
-                pmd_clear((pmd_t*)pg_dir++);
+        for (i = 0; i < PTRS_PER_PGD; i++)
+                pmd_clear((pmd_t *) pg_dir++);
 
         /*
          * map whole physical memory to virtual memory (identity mapping)
@@ -131,10 +138,7 @@ void __init paging_init(void)
          */
         pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 
-        pg_dir->pgd0 = (_PAGE_TABLE | __pa(pg_table));
-        pg_dir->pgd1 = (_PAGE_TABLE | (__pa(pg_table)+1024));
-        pg_dir->pgd2 = (_PAGE_TABLE | (__pa(pg_table)+2048));
-        pg_dir->pgd3 = (_PAGE_TABLE | (__pa(pg_table)+3072));
+        pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
         pg_dir++;
 
         for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
@@ -143,8 +147,8 @@ void __init paging_init(void)
                 else
                         pte = pfn_pte(pfn, PAGE_KERNEL);
                 if (pfn >= max_low_pfn)
-                        pte_clear(&init_mm, 0, &pte);
+                        pte_val(pte) = _PAGE_TYPE_EMPTY;
                 set_pte(pg_table, pte);
                 pfn++;
         }
 }
@@ -159,16 +163,6 @@ void __init paging_init(void)
159 : : "m" (pgdir_k), "m" (ssm_mask)); 163 : : "m" (pgdir_k), "m" (ssm_mask));
160 164
161 local_flush_tlb(); 165 local_flush_tlb();
162
163 {
164 unsigned long zones_size[MAX_NR_ZONES];
165
166 memset(zones_size, 0, sizeof(zones_size));
167 zones_size[ZONE_DMA] = max_low_pfn;
168 free_area_init_node(0, &contig_page_data, zones_size,
169 __pa(PAGE_OFFSET) >> PAGE_SHIFT,
170 zholes_size);
171 }
172 return; 166 return;
173} 167}
174 168
@@ -236,10 +230,8 @@ void __init paging_init(void)
                         pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
                 else
                         pte = pfn_pte(pfn, PAGE_KERNEL);
-                if (pfn >= max_low_pfn) {
-                        pte_clear(&init_mm, 0, &pte);
-                        continue;
-                }
+                if (pfn >= max_low_pfn)
+                        pte_val(pte) = _PAGE_TYPE_EMPTY;
                 set_pte(pt_dir, pte);
                 pfn++;
         }
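Two readability notes on the init.c changes above: free_area_init_node() now runs before the identity mapping is built instead of in a block of its own at the end, and the out-of-range case stores _PAGE_TYPE_EMPTY into the local pte directly rather than calling pte_clear() with dummy mm/address arguments. The two are equivalent because pte_clear() itself now just stores the empty type (see the pgtable.h hunk below). A minimal standalone sketch, with simplified stand-ins for the kernel types:

    #include <assert.h>

    /* Simplified stand-ins for the kernel types; illustration only. */
    typedef struct { unsigned long pte; } pte_t;
    #define pte_val(x)       ((x).pte)
    #define _PAGE_TYPE_EMPTY 0x400

    /* What pte_clear() boils down to after this patch: store the empty
     * page type into the pte.  For a local pte, assigning the value
     * directly is the same operation without the unused arguments. */
    static void pte_clear_sketch(pte_t *ptep)
    {
            pte_val(*ptep) = _PAGE_TYPE_EMPTY;
    }

    int main(void)
    {
            pte_t pte = { .pte = 0x12345000UL };

            pte_clear_sketch(&pte);
            assert(pte_val(pte) == _PAGE_TYPE_EMPTY);
            return 0;
    }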
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index a78e853e0dd5..803bc7064418 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -22,6 +22,16 @@
 extern void diag10(unsigned long addr);
 
 /*
+ * Page allocation orders.
+ */
+#ifndef __s390x__
+# define PGD_ALLOC_ORDER        1
+#else /* __s390x__ */
+# define PMD_ALLOC_ORDER        2
+# define PGD_ALLOC_ORDER        2
+#endif /* __s390x__ */
+
+/*
  * Allocate and free page tables. The xxx_kernel() versions are
  * used to allocate a kernel page table - this turns on ASN bits
  * if any.
@@ -29,30 +39,23 @@ extern void diag10(unsigned long addr);
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-        pgd_t *pgd;
+        pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
         int i;
 
+        if (!pgd)
+                return NULL;
+        for (i = 0; i < PTRS_PER_PGD; i++)
 #ifndef __s390x__
-        pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,1);
-        if (pgd != NULL)
-                for (i = 0; i < USER_PTRS_PER_PGD; i++)
-                        pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
-#else /* __s390x__ */
-        pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,2);
-        if (pgd != NULL)
-                for (i = 0; i < PTRS_PER_PGD; i++)
-                        pgd_clear(pgd + i);
-#endif /* __s390x__ */
+                pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
+#else
+                pgd_clear(pgd + i);
+#endif
         return pgd;
 }
 
 static inline void pgd_free(pgd_t *pgd)
 {
-#ifndef __s390x__
-        free_pages((unsigned long) pgd, 1);
-#else /* __s390x__ */
-        free_pages((unsigned long) pgd, 2);
-#endif /* __s390x__ */
+        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
 }
 
 #ifndef __s390x__
@@ -68,20 +71,19 @@ static inline void pgd_free(pgd_t *pgd)
 #else /* __s390x__ */
 static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-        pmd_t *pmd;
+        pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
         int i;
 
-        pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 2);
-        if (pmd != NULL) {
-                for (i=0; i < PTRS_PER_PMD; i++)
-                        pmd_clear(pmd+i);
-        }
+        if (!pmd)
+                return NULL;
+        for (i=0; i < PTRS_PER_PMD; i++)
+                pmd_clear(pmd + i);
         return pmd;
 }
 
 static inline void pmd_free (pmd_t *pmd)
 {
-        free_pages((unsigned long) pmd, 2);
+        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
 }
 
 #define __pmd_free_tlb(tlb,pmd) \
87#define __pmd_free_tlb(tlb,pmd) \ 89#define __pmd_free_tlb(tlb,pmd) \
@@ -123,15 +125,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
 {
-        pte_t *pte;
+        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
         int i;
 
-        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        if (pte != NULL) {
-                for (i=0; i < PTRS_PER_PTE; i++) {
-                        pte_clear(mm, vmaddr, pte+i);
-                        vmaddr += PAGE_SIZE;
-                }
+        if (!pte)
+                return NULL;
+        for (i=0; i < PTRS_PER_PTE; i++) {
+                pte_clear(mm, vmaddr, pte + i);
+                vmaddr += PAGE_SIZE;
         }
         return pte;
 }
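The new PGD_ALLOC_ORDER and PMD_ALLOC_ORDER constants name the allocation orders that were previously hard-coded as 1 and 2 in both the alloc and free paths, so the two sides can no longer drift apart. An order-n allocation is 2^n contiguous pages. As a plausibility check (the 16-byte 31-bit pgd entry follows from the pgd0..pgd3 words removed in init.c above; the 8-byte 64-bit entry size is an assumption of this sketch):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL        /* 4KB pages on s390 */

    /* Smallest order such that 2^order pages cover 'bytes'. */
    static unsigned int alloc_order(unsigned long bytes)
    {
            unsigned long pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
            unsigned int order = 0;

            while ((1UL << order) < pages)
                    order++;
            return order;
    }

    int main(void)
    {
            /* 31-bit: 512 pgd entries of 16 bytes (pgd0..pgd3) -> 8KB. */
            printf("31-bit pgd order: %u\n", alloc_order(512 * 16));  /* 1 */
            /* 64-bit: 2048 entries of 8 bytes (assumed) -> 16KB. */
            printf("64-bit pgd order: %u\n", alloc_order(2048 * 8));  /* 2 */
            return 0;
    }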
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 24312387fa24..1a07028d575e 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -89,19 +89,6 @@ extern char empty_zero_page[PAGE_SIZE];
 # define PTRS_PER_PGD    2048
 #endif /* __s390x__ */
 
-/*
- * pgd entries used up by user/kernel:
- */
-#ifndef __s390x__
-# define USER_PTRS_PER_PGD  512
-# define USER_PGD_PTRS      512
-# define KERNEL_PGD_PTRS    512
-#else /* __s390x__ */
-# define USER_PTRS_PER_PGD  2048
-# define USER_PGD_PTRS      2048
-# define KERNEL_PGD_PTRS    2048
-#endif /* __s390x__ */
-
 #define FIRST_USER_ADDRESS  0
 
 #define pte_ERROR(e) \
@@ -216,12 +203,14 @@ extern char empty_zero_page[PAGE_SIZE];
 #define _PAGE_RO        0x200          /* HW read-only */
 #define _PAGE_INVALID   0x400          /* HW invalid */
 
-/* Mask and four different kinds of invalid pages. */
-#define _PAGE_INVALID_MASK      0x601
-#define _PAGE_INVALID_EMPTY     0x400
-#define _PAGE_INVALID_NONE      0x401
-#define _PAGE_INVALID_SWAP      0x600
-#define _PAGE_INVALID_FILE      0x601
+/* Mask and six different types of pages. */
+#define _PAGE_TYPE_MASK         0x601
+#define _PAGE_TYPE_EMPTY        0x400
+#define _PAGE_TYPE_NONE         0x401
+#define _PAGE_TYPE_SWAP         0x600
+#define _PAGE_TYPE_FILE         0x601
+#define _PAGE_TYPE_RO           0x200
+#define _PAGE_TYPE_RW           0x000
 
 #ifndef __s390x__
 
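The renamed constants make the encoding easier to read: bit 0x400 is the hardware invalid bit, 0x200 the hardware read-only bit, and bit 0x001 serves as a software discriminator, so masking with _PAGE_TYPE_MASK (0x601) is enough to tell the six types apart. A self-contained sketch of that classification (constants copied from the hunk, the helper itself is illustrative):

    #include <stdio.h>

    #define _PAGE_TYPE_MASK  0x601
    #define _PAGE_TYPE_EMPTY 0x400
    #define _PAGE_TYPE_NONE  0x401
    #define _PAGE_TYPE_SWAP  0x600
    #define _PAGE_TYPE_FILE  0x601
    #define _PAGE_TYPE_RO    0x200
    #define _PAGE_TYPE_RW    0x000

    /* Illustrative classifier: keep only the two hardware bits
     * (0x400 invalid, 0x200 read-only) and the low software bit. */
    static const char *pte_type(unsigned long pte)
    {
            switch (pte & _PAGE_TYPE_MASK) {
            case _PAGE_TYPE_EMPTY: return "empty";
            case _PAGE_TYPE_NONE:  return "none (PROT_NONE)";
            case _PAGE_TYPE_SWAP:  return "swap entry";
            case _PAGE_TYPE_FILE:  return "file entry";
            case _PAGE_TYPE_RO:    return "mapped read-only";
            case _PAGE_TYPE_RW:    return "mapped read-write";
            default:               return "unknown";
            }
    }

    int main(void)
    {
            printf("%s\n", pte_type(0x12345000UL | _PAGE_TYPE_RO));
            printf("%s\n", pte_type(_PAGE_TYPE_EMPTY));
            return 0;
    }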
@@ -280,15 +269,14 @@ extern char empty_zero_page[PAGE_SIZE];
 #endif /* __s390x__ */
 
 /*
- * No mapping available
+ * Page protection definitions.
  */
-#define PAGE_NONE_SHARED  __pgprot(_PAGE_INVALID_NONE)
-#define PAGE_NONE_PRIVATE __pgprot(_PAGE_INVALID_NONE)
-#define PAGE_RO_SHARED    __pgprot(_PAGE_RO)
-#define PAGE_RO_PRIVATE   __pgprot(_PAGE_RO)
-#define PAGE_COPY         __pgprot(_PAGE_RO)
-#define PAGE_SHARED       __pgprot(0)
-#define PAGE_KERNEL       __pgprot(0)
+#define PAGE_NONE       __pgprot(_PAGE_TYPE_NONE)
+#define PAGE_RO         __pgprot(_PAGE_TYPE_RO)
+#define PAGE_RW         __pgprot(_PAGE_TYPE_RW)
+
+#define PAGE_KERNEL     PAGE_RW
+#define PAGE_COPY       PAGE_RO
 
 /*
  * The S390 can't do page protection for execute, and considers that the
@@ -296,23 +284,23 @@ extern char empty_zero_page[PAGE_SIZE];
  * the closest we can get..
  */
          /*xwr*/
-#define __P000  PAGE_NONE_PRIVATE
-#define __P001  PAGE_RO_PRIVATE
-#define __P010  PAGE_COPY
-#define __P011  PAGE_COPY
-#define __P100  PAGE_RO_PRIVATE
-#define __P101  PAGE_RO_PRIVATE
-#define __P110  PAGE_COPY
-#define __P111  PAGE_COPY
+#define __P000  PAGE_NONE
+#define __P001  PAGE_RO
+#define __P010  PAGE_RO
+#define __P011  PAGE_RO
+#define __P100  PAGE_RO
+#define __P101  PAGE_RO
+#define __P110  PAGE_RO
+#define __P111  PAGE_RO
 
-#define __S000  PAGE_NONE_SHARED
-#define __S001  PAGE_RO_SHARED
-#define __S010  PAGE_SHARED
-#define __S011  PAGE_SHARED
-#define __S100  PAGE_RO_SHARED
-#define __S101  PAGE_RO_SHARED
-#define __S110  PAGE_SHARED
-#define __S111  PAGE_SHARED
+#define __S000  PAGE_NONE
+#define __S001  PAGE_RO
+#define __S010  PAGE_RW
+#define __S011  PAGE_RW
+#define __S100  PAGE_RO
+#define __S101  PAGE_RO
+#define __S110  PAGE_RW
+#define __S111  PAGE_RW
 
 /*
  * Certain architectures need to do special things when PTEs
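The collapsed protection maps read naturally now: there is no execute permission bit on s390, so any readable or executable combination becomes PAGE_RO; a writable private mapping also starts out PAGE_RO so that the first store faults and copy-on-write kicks in; only writable shared mappings get PAGE_RW outright. A sketch of the same lookup with the table shrunk to the three values (the enum and names are hypothetical):

    #include <stdio.h>

    enum s390_pgprot { S390_PAGE_NONE, S390_PAGE_RO, S390_PAGE_RW };

    /* The __P/__S tables from the hunk above; the index bits are xwr.
     * Private writable mappings map to read-only so the first store
     * faults and COW can run; execute is indistinguishable from read. */
    static const enum s390_pgprot prot_private[8] = {
            S390_PAGE_NONE, S390_PAGE_RO, S390_PAGE_RO, S390_PAGE_RO,
            S390_PAGE_RO,   S390_PAGE_RO, S390_PAGE_RO, S390_PAGE_RO,
    };
    static const enum s390_pgprot prot_shared[8] = {
            S390_PAGE_NONE, S390_PAGE_RO, S390_PAGE_RW, S390_PAGE_RW,
            S390_PAGE_RO,   S390_PAGE_RO, S390_PAGE_RW, S390_PAGE_RW,
    };

    int main(void)
    {
            /* xwr = 011: read+write */
            printf("private rw -> %d (RO, COW)\n", prot_private[3]);
            printf("shared  rw -> %d (RW)\n", prot_shared[3]);
            return 0;
    }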
@@ -377,18 +365,18 @@ static inline int pmd_bad(pmd_t pmd)
 
 static inline int pte_none(pte_t pte)
 {
-        return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_EMPTY;
+        return (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_EMPTY;
 }
 
 static inline int pte_present(pte_t pte)
 {
         return !(pte_val(pte) & _PAGE_INVALID) ||
-                (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_NONE;
+                (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_NONE;
 }
 
 static inline int pte_file(pte_t pte)
 {
-        return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_FILE;
+        return (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_FILE;
 }
 
 #define pte_same(a,b)  (pte_val(a) == pte_val(b))
@@ -461,7 +449,7 @@ static inline void pmd_clear(pmd_t * pmdp)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-        pte_val(*ptep) = _PAGE_INVALID_EMPTY;
+        pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 }
 
 /*
@@ -477,7 +465,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-        /* Do not clobber _PAGE_INVALID_NONE pages! */
+        /* Do not clobber _PAGE_TYPE_NONE pages! */
         if (!(pte_val(pte) & _PAGE_INVALID))
                 pte_val(pte) |= _PAGE_RO;
         return pte;
@@ -556,26 +544,30 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
         return pte;
 }
 
-static inline pte_t
-ptep_clear_flush(struct vm_area_struct *vma,
-                 unsigned long address, pte_t *ptep)
+static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
-        pte_t pte = *ptep;
+        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 #ifndef __s390x__
-        if (!(pte_val(pte) & _PAGE_INVALID)) {
                 /* S390 has 1mb segments, we are emulating 4MB segments */
                 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
-                __asm__ __volatile__ ("ipte %2,%3"
-                                      : "=m" (*ptep) : "m" (*ptep),
-                                        "a" (pto), "a" (address) );
+#else
+                /* ipte in zarch mode can do the math */
+                pte_t *pto = ptep;
+#endif
+                asm volatile ("ipte %2,%3"
+                              : "=m" (*ptep) : "m" (*ptep),
+                                "a" (pto), "a" (address) );
         }
-#else /* __s390x__ */
-        if (!(pte_val(pte) & _PAGE_INVALID))
-                __asm__ __volatile__ ("ipte %2,%3"
-                                      : "=m" (*ptep) : "m" (*ptep),
-                                        "a" (ptep), "a" (address) );
-#endif /* __s390x__ */
-        pte_val(*ptep) = _PAGE_INVALID_EMPTY;
+        pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+}
+
+static inline pte_t
+ptep_clear_flush(struct vm_area_struct *vma,
+                 unsigned long address, pte_t *ptep)
+{
+        pte_t pte = *ptep;
+
+        __ptep_ipte(address, ptep);
         return pte;
 }
 
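Pulling the IPTE sequence into __ptep_ipte() leaves a single copy of the inline assembly; only the computation of the page-table origin still differs per mode. On 31-bit, a 1MB hardware segment is described by a 1KB page table and Linux emulates 4MB segments with four consecutive ones, so the origin the ipte instruction needs is recovered by masking the pte pointer down to a 1KB boundary within the 31-bit address space. A standalone sketch of just that mask (the pointer value is made up):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical pte address inside a 31-bit address space. */
            unsigned long ptep = 0x01234a8cUL;

            /* Same mask as in __ptep_ipte(): clear the low 10 bits to
             * get the 1KB-aligned origin of the hardware page table,
             * keeping only 31 address bits. */
            unsigned long pto = ptep & 0x7ffffc00UL;

            printf("ptep=%#lx -> page-table origin %#lx\n", ptep, pto);
            return 0;
    }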
@@ -755,7 +747,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 {
         pte_t pte;
         offset &= __SWP_OFFSET_MASK;
-        pte_val(pte) = _PAGE_INVALID_SWAP | ((type & 0x1f) << 2) |
+        pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
                 ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
         return pte;
 }
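Aside from the rename, the swap encoding is unchanged: the swap type occupies pte bits 2-6, offset bit 0 lands in pte bit 7, and the remaining offset bits start at pte bit 12 (the expression ((offset & ~1UL) << 11) moves offset bit 1 to pte bit 12), while _PAGE_TYPE_SWAP keeps the entry invalid to the hardware. A worked example of the formula (the type and offset values are arbitrary):

    #include <assert.h>

    #define _PAGE_TYPE_SWAP 0x600

    /* Encoding from mk_swap_pte(): type in bits 2-6, offset bit 0 in
     * pte bit 7, offset bits 1 and up starting at pte bit 12. */
    static unsigned long mk_swap_pte_sketch(unsigned long type,
                                            unsigned long offset)
    {
            return _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
                    ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
    }

    int main(void)
    {
            /* Arbitrary example: swap type 3, offset 5 (binary 101). */
            unsigned long pte = mk_swap_pte_sketch(3, 5);

            /* type 3 -> 0xc in bits 2-6; offset bit0=1 -> 0x80;
             * (5 & ~1) = 4, shifted by 11 -> 0x2000. */
            assert(pte == (0x600UL | 0xcUL | 0x80UL | 0x2000UL));
            return 0;
    }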
@@ -778,7 +770,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 
 #define pgoff_to_pte(__off) \
         ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
-                   | _PAGE_INVALID_FILE })
+                   | _PAGE_TYPE_FILE })
 
 #endif /* !__ASSEMBLY__ */
 