author    Martin Schwidefsky <schwidefsky@de.ibm.com>    2007-10-22 06:52:47 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>    2007-10-22 06:52:49 -0400
commit    3610cce87af0693603db171d5b6f6735f5e3dc5b (patch)
tree      9aa7d9a0924b2f075c1b95ed57bb63ed512165c9
parent    e4aa402e7a3b6b87d8df6243a37171cdcd2f01c2 (diff)
[S390] Cleanup page table definitions.
- De-confuse the defines for the address-space-control-elements and the
  segment/region table entries.
- Create out of line functions for page table allocation / freeing.
- Simplify get_shadow_xxx functions.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
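For reference, the allocation interface that this patch moves out of line
into the new arch/s390/mm/pgtable.c is:

    unsigned long *crst_table_alloc(struct mm_struct *, int noexec);
    void crst_table_free(unsigned long *);
    unsigned long *page_table_alloc(int noexec);
    void page_table_free(unsigned long *);

A minimal sketch of a caller, mirroring the new pgd_alloc() further down;
the name example_pgd_alloc is illustrative only and not part of the patch:

    static pgd_t *example_pgd_alloc(struct mm_struct *mm)
    {
            /* allocate a region/segment table (4 pages on 64 bit) */
            unsigned long *crst = crst_table_alloc(mm, s390_noexec);

            if (!crst)
                    return NULL;
            /* mark every entry as empty, shadow table included */
            crst_table_init(crst, pgd_entry_type(mm));
            return (pgd_t *) crst;
    }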
-rw-r--r--  arch/s390/mm/Makefile             2
-rw-r--r--  arch/s390/mm/init.c              28
-rw-r--r--  arch/s390/mm/pgtable.c           94
-rw-r--r--  arch/s390/mm/vmem.c              19
-rw-r--r--  include/asm-s390/mmu_context.h   50
-rw-r--r--  include/asm-s390/pgalloc.h      213
-rw-r--r--  include/asm-s390/pgtable.h      219
-rw-r--r--  include/asm-s390/processor.h      6
-rw-r--r--  include/asm-s390/tlbflush.h       2
9 files changed, 305 insertions(+), 328 deletions(-)
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index f95449b29fa5..66401930f83e 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,6 +2,6 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-obj-y    := init.o fault.o extmem.o mmap.o vmem.o
+obj-y    := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
 obj-$(CONFIG_CMM) += cmm.o
 
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3a25bbf2eb0a..90ec058aa7db 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -103,32 +103,28 @@ static void __init setup_ro_region(void)
  */
 void __init paging_init(void)
 {
-	pgd_t *pg_dir;
-	int i;
-	unsigned long pgdir_k;
 	static const int ssm_mask = 0x04000000L;
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	unsigned long pgd_type;
 
-	pg_dir = swapper_pg_dir;
-
+	init_mm.pgd = swapper_pg_dir;
+	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
 #ifdef CONFIG_64BIT
-	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
-	for (i = 0; i < PTRS_PER_PGD; i++)
-		pgd_clear_kernel(pg_dir + i);
+	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+	pgd_type = _REGION3_ENTRY_EMPTY;
 #else
-	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
-	for (i = 0; i < PTRS_PER_PGD; i++)
-		pmd_clear_kernel((pmd_t *)(pg_dir + i));
+	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
+	pgd_type = _SEGMENT_ENTRY_EMPTY;
 #endif
+	clear_table((unsigned long *) init_mm.pgd, pgd_type,
+		    sizeof(unsigned long)*2048);
 	vmem_map_init();
 	setup_ro_region();
 
-	S390_lowcore.kernel_asce = pgdir_k;
-
 	/* enable virtual mapping in kernel mode */
-	__ctl_load(pgdir_k, 1, 1);
-	__ctl_load(pgdir_k, 7, 7);
-	__ctl_load(pgdir_k, 13, 13);
+	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
new file mode 100644
index 000000000000..e60e0ae13402
--- /dev/null
+++ b/arch/s390/mm/pgtable.c
@@ -0,0 +1,94 @@
+/*
+ *  arch/s390/mm/pgtable.c
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/quicklist.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+#ifndef CONFIG_64BIT
+#define ALLOC_ORDER	1
+#else
+#define ALLOC_ORDER	2
+#endif
+
+unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
+{
+	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+
+	if (!page)
+		return NULL;
+	page->index = 0;
+	if (noexec) {
+		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+		if (!shadow) {
+			__free_pages(page, ALLOC_ORDER);
+			return NULL;
+		}
+		page->index = page_to_phys(shadow);
+	}
+	return (unsigned long *) page_to_phys(page);
+}
+
+void crst_table_free(unsigned long *table)
+{
+	unsigned long *shadow = get_shadow_table(table);
+
+	if (shadow)
+		free_pages((unsigned long) shadow, ALLOC_ORDER);
+	free_pages((unsigned long) table, ALLOC_ORDER);
+}
+
+/*
+ * page table entry allocation/free routines.
+ */
+unsigned long *page_table_alloc(int noexec)
+{
+	struct page *page = alloc_page(GFP_KERNEL);
+	unsigned long *table;
+
+	if (!page)
+		return NULL;
+	page->index = 0;
+	if (noexec) {
+		struct page *shadow = alloc_page(GFP_KERNEL);
+		if (!shadow) {
+			__free_page(page);
+			return NULL;
+		}
+		table = (unsigned long *) page_to_phys(shadow);
+		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+		page->index = (addr_t) table;
+	}
+	table = (unsigned long *) page_to_phys(page);
+	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+	return table;
+}
+
+void page_table_free(unsigned long *table)
+{
+	unsigned long *shadow = get_shadow_pte(table);
+
+	if (shadow)
+		free_page((unsigned long) shadow);
+	free_page((unsigned long) table);
+
+}
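A note on the bookkeeping above: the pairing between a table and its noexec
shadow no longer goes through page->lru.next. The allocators store the
physical address of the shadow table in page->index of the primary table's
struct page, or 0 if no shadow exists, which is what lets the get_shadow_xxx
helpers collapse into the two simple lookups in pgtable.h. A sketch of the
idea for a page-aligned table address (illustrative only; the real
get_shadow_table() also preserves the offset into a multi-page table):

    static unsigned long *shadow_of(unsigned long *table)	/* hypothetical */
    {
            struct page *page = virt_to_page(table);

            /* page->index: physical address of the shadow, or 0 */
            return (unsigned long *)(addr_t) page->index;
    }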
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fd594d5fe142..1bd51d840484 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -75,29 +75,24 @@ static void __init_refok *vmem_alloc_pages(unsigned int order)
 
 static inline pmd_t *vmem_pmd_alloc(void)
 {
-	pmd_t *pmd;
-	int i;
+	pmd_t *pmd = NULL;
 
-	pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
+#ifdef CONFIG_64BIT
+	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
-	for (i = 0; i < PTRS_PER_PMD; i++)
-		pmd_clear_kernel(pmd + i);
+	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
+#endif
 	return pmd;
 }
 
 static inline pte_t *vmem_pte_alloc(void)
 {
-	pte_t *pte;
-	pte_t empty_pte;
-	int i;
+	pte_t *pte = vmem_alloc_pages(0);
 
-	pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
 	if (!pte)
 		return NULL;
-	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
-	for (i = 0; i < PTRS_PER_PTE; i++)
-		pte[i] = empty_pte;
+	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
 	return pte;
 }
 
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 501cb9b06314..05b842126b99 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -21,45 +21,43 @@
 
 #ifndef __s390x__
 #define LCTL_OPCODE "lctl"
-#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
 #else
 #define LCTL_OPCODE "lctlg"
-#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
 #endif
 
-static inline void enter_lazy_tlb(struct mm_struct *mm,
-                                  struct task_struct *tsk)
+static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 {
+	pgd_t *pgd = mm->pgd;
+	unsigned long asce_bits;
+
+	/* Calculate asce bits from the first pgd table entry. */
+	asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+#ifdef CONFIG_64BIT
+	asce_bits |= _ASCE_TYPE_REGION3;
+#endif
+	S390_lowcore.user_asce = asce_bits | __pa(pgd);
+	if (switch_amode) {
+		/* Load primary space page table origin. */
+		pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
+		S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
+		asm volatile(LCTL_OPCODE" 1,1,%0\n"
+			     : : "m" (S390_lowcore.user_exec_asce) );
+	} else
+		/* Load home space page table origin. */
+		asm volatile(LCTL_OPCODE" 13,13,%0"
+			     : : "m" (S390_lowcore.user_asce) );
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
-
-	if (prev != next) {
-		S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
-					 PGTABLE_BITS;
-		if (shadow_pgd) {
-			/* Load primary/secondary space page table origin. */
-			S390_lowcore.user_exec_asce =
-				(__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
-			asm volatile(LCTL_OPCODE" 1,1,%0\n"
-				     LCTL_OPCODE" 7,7,%1"
-				     : : "m" (S390_lowcore.user_exec_asce),
-					 "m" (S390_lowcore.user_asce) );
-		} else if (switch_amode) {
-			/* Load primary space page table origin. */
-			asm volatile(LCTL_OPCODE" 1,1,%0"
-				     : : "m" (S390_lowcore.user_asce) );
-		} else
-			/* Load home space page table origin. */
-			asm volatile(LCTL_OPCODE" 13,13,%0"
-				     : : "m" (S390_lowcore.user_asce) );
-	}
+	if (unlikely(prev == next))
+		return;
 	cpu_set(smp_processor_id(), next->cpu_vm_mask);
+	update_mm(next, tsk);
 }
 
+#define enter_lazy_tlb(mm,tsk)	do { } while (0)
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
 static inline void activate_mm(struct mm_struct *prev,
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 6cbbfe4f6749..229b0bd59331 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -19,114 +19,75 @@
 
 #define check_pgt_cache()	do {} while (0)
 
-/*
- * Page allocation orders.
- */
-#ifndef __s390x__
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	0
-# define PGD_ALLOC_ORDER	1
-#else /* __s390x__ */
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	2
-# define PGD_ALLOC_ORDER	2
-#endif /* __s390x__ */
+unsigned long *crst_table_alloc(struct mm_struct *, int);
+void crst_table_free(unsigned long *);
 
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
+unsigned long *page_table_alloc(int);
+void page_table_free(unsigned long *);
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-	int i;
-
-	if (!pgd)
-		return NULL;
-	if (s390_noexec) {
-		pgd_t *shadow_pgd = (pgd_t *)
-			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pgd);
-
-		if (!shadow_pgd) {
-			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pgd;
-	}
-	for (i = 0; i < PTRS_PER_PGD; i++)
-#ifndef __s390x__
-		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
-#else
-		pgd_clear(pgd + i);
-#endif
-	return pgd;
+	*s = val;
+	n = (n / 256) - 1;
+	asm volatile(
+#ifdef CONFIG_64BIT
+		"	mvc	8(248,%0),0(%0)\n"
+#else
+		"	mvc	4(252,%0),0(%0)\n"
+#endif
+		"0:	mvc	256(256,%0),0(%0)\n"
+		"	la	%0,256(%0)\n"
+		"	brct	%1,0b\n"
+		: "+a" (s), "+d" (n));
 }
 
-static inline void pgd_free(pgd_t *pgd)
+static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-
-	if (shadow_pgd)
-		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
-	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+	clear_table(crst, entry, sizeof(unsigned long)*2048);
+	crst = get_shadow_table(crst);
+	if (crst)
+		clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__
-/*
- * page middle directory allocation/free routines.
- * We use pmd cache only on s390x, so these are dummy routines. This
- * code never triggers because the pgd will always be present.
- */
-#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x)			do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
+{
+	return _SEGMENT_ENTRY_EMPTY;
+}
+
+#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)			do { } while (0)
+
+#define pgd_populate(mm, pmd, pte)	BUG()
 #define pgd_populate_kernel(mm, pmd, pte)	BUG()
+
 #else /* __s390x__ */
-static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-	int i;
-
-	if (!pmd)
-		return NULL;
-	if (s390_noexec) {
-		pmd_t *shadow_pmd = (pmd_t *)
-			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pmd);
-
-		if (!shadow_pmd) {
-			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pmd;
-	}
-	for (i=0; i < PTRS_PER_PMD; i++)
-		pmd_clear(pmd + i);
-	return pmd;
+	return _REGION3_ENTRY_EMPTY;
 }
 
-static inline void pmd_free (pmd_t *pmd)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
-
-	if (shadow_pmd)
-		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
-	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
+	return (pmd_t *) crst;
 }
+#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
 
-static inline void
-pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pgd_populate_kernel(struct mm_struct *mm,
+				       pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
+	pgd_val(*pgd) = _REGION3_ENTRY | __pa(pmd);
 }
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pgd_t *shadow_pgd = get_shadow_table(pgd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 
 	if (shadow_pgd && shadow_pmd)
 		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
@@ -135,17 +96,26 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 
 #endif /* __s390x__ */
 
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, pgd_entry_type(mm));
+	return (pgd_t *) crst;
+}
+#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
+
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
 #ifndef __s390x__
-	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
-	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
-	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
-	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
+	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
+	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
+	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
 #else /* __s390x__ */
-	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
-	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
+	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
 #endif /* __s390x__ */
 }
 
@@ -153,7 +123,7 @@ static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
 	pte_t *pte = (pte_t *)page_to_phys(page);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 	pte_t *shadow_pte = get_shadow_pte(pte);
 
 	pmd_populate_kernel(mm, pmd, pte);
@@ -164,57 +134,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 /*
  * page table entry allocation/free routines.
  */
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	int i;
-
-	if (!pte)
-		return NULL;
-	if (s390_noexec) {
-		pte_t *shadow_pte = (pte_t *)
-			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		struct page *page = virt_to_page(pte);
-
-		if (!shadow_pte) {
-			free_page((unsigned long) pte);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pte;
-	}
-	for (i=0; i < PTRS_PER_PTE; i++) {
-		pte_clear(mm, vmaddr, pte + i);
-		vmaddr += PAGE_SIZE;
-	}
-	return pte;
-}
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
-	if (pte)
-		return virt_to_page(pte);
-	return NULL;
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-	pte_t *shadow_pte = get_shadow_pte(pte);
-
-	if (shadow_pte)
-		free_page((unsigned long) shadow_pte);
-	free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
-	struct page *shadow_page = get_shadow_page(pte);
-
-	if (shadow_page)
-		__free_page(shadow_page);
-	__free_page(pte);
-}
+#define pte_alloc_one_kernel(mm, vmaddr) \
+	((pte_t *) page_table_alloc(s390_noexec))
+#define pte_alloc_one(mm, vmaddr) \
+	virt_to_page(page_table_alloc(s390_noexec))
+
+#define pte_free_kernel(pte) \
+	page_table_free((unsigned long *) pte)
+#define pte_free(pte) \
+	page_table_free((unsigned long *) page_to_phys((struct page *) pte))
 
 #endif /* _S390_PGALLOC_H */
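The clear_table() helper introduced above replaces the old per-entry
initialization loops. It stores val into the first entry, lets one mvc
propagate it through the rest of the first 256-byte block (an overlapping
move: 8(248,...) on 64 bit, 4(252,...) on 31 bit), then copies each
256-byte block onto the next n/256 - 1 times. A plain-C equivalent, for
reading only (the kernel uses the mvc version; clear_table_c is a
hypothetical name):

    static void clear_table_c(unsigned long *s, unsigned long val, size_t n)
    {
            size_t i;

            /* write val into every entry of the n-byte table */
            for (i = 0; i < n / sizeof(unsigned long); i++)
                    s[i] = val;
    }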
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index b424ab21f8bd..f9f59a805e5d 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -35,9 +35,6 @@
 #include <asm/bug.h>
 #include <asm/processor.h>
 
-struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
-struct mm_struct;
-
 extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
 extern void paging_init(void);
 extern void vmem_map_init(void);
@@ -221,6 +218,8 @@ extern unsigned long vmalloc_end;
 /* Hardware bits in the page table entry */
 #define _PAGE_RO	0x200		/* HW read-only bit  */
 #define _PAGE_INVALID	0x400		/* HW invalid bit    */
+
+/* Software bits in the page table entry */
 #define _PAGE_SWT	0x001		/* SW pte type bit t */
 #define _PAGE_SWX	0x002		/* SW pte type bit x */
 
@@ -264,60 +263,75 @@ extern unsigned long vmalloc_end;
 
 #ifndef __s390x__
 
-/* Bits in the segment table entry */
-#define _PAGE_TABLE_LEN 0xf            /* only full page-tables */
-#define _PAGE_TABLE_COM 0x10           /* common page-table */
-#define _PAGE_TABLE_INV 0x20           /* invalid page-table */
-#define _SEG_PRESENT    0x001          /* Software (overlap with PTL) */
-
-/* Bits int the storage key */
-#define _PAGE_CHANGED    0x02          /* HW changed bit */
-#define _PAGE_REFERENCED 0x04          /* HW referenced bit */
-
-#define _USER_SEG_TABLE_LEN    0x7f    /* user-segment-table up to 2 GB */
-#define _KERNEL_SEG_TABLE_LEN  0x7f    /* kernel-segment-table up to 2 GB */
-
-/*
- * User and Kernel pagetables are identical
- */
-#define _PAGE_TABLE	_PAGE_TABLE_LEN
-#define _KERNPG_TABLE	_PAGE_TABLE_LEN
-
-/*
- * The Kernel segment-tables includes the User segment-table
- */
+/* Bits in the segment table address-space-control-element */
+#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event */
+#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin */
+#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
+#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
+#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k */
 
-#define _SEGMENT_TABLE	(_USER_SEG_TABLE_LEN|0x80000000|0x100)
-#define _KERNSEG_TABLE	_KERNEL_SEG_TABLE_LEN
+/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin */
+#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
+#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit */
+#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length */
 
-#define USER_STD_MASK	0x00000080UL
+#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
+#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
 
 #else /* __s390x__ */
 
+/* Bits in the segment/region table address-space-control-element */
+#define _ASCE_ORIGIN		~0xfffUL/* segment table origin */
+#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
+#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
+#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
+#define _ASCE_REAL_SPACE	0x20	/* real space control */
+#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
+#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
+#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
+#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
+#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
+#define _ASCE_TABLE_LENGTH	0x03	/* region table length */
+
+/* Bits in the region table entry */
+#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin */
+#define _REGION_ENTRY_INV	0x20	/* invalid region table entry */
+#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
+#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
+#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
+#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
+#define _REGION_ENTRY_LENGTH	0x03	/* region third length */
+
+#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
+#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
+#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
+#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
+#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+
 /* Bits in the segment table entry */
-#define _PMD_ENTRY_INV   0x20          /* invalid segment table entry */
-#define _PMD_ENTRY       0x00
+#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin */
+#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
+#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
 
-/* Bits in the region third table entry */
-#define _PGD_ENTRY_INV   0x20          /* invalid region table entry */
-#define _PGD_ENTRY       0x07
+#define _SEGMENT_ENTRY		(0)
+#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
+
+#endif /* __s390x__ */
 
 /*
- * User and kernel page directory
+ * A user page table pointer has the space-switch-event bit, the
+ * private-space-control bit and the storage-alteration-event-control
+ * bit set. A kernel page table pointer doesn't need them.
  */
-#define _REGION_THIRD       0x4
-#define _REGION_THIRD_LEN   0x3
-#define _REGION_TABLE       (_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
-#define _KERN_REGION_TABLE  (_REGION_THIRD|_REGION_THIRD_LEN)
-
-#define USER_STD_MASK 0x0000000000000080UL
+#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
+				 _ASCE_ALT_EVENT)
 
-/* Bits in the storage key */
+/* Bits int the storage key */
 #define _PAGE_CHANGED		0x02	/* HW changed bit */
 #define _PAGE_REFERENCED	0x04	/* HW referenced bit */
 
-#endif /* __s390x__ */
-
 /*
  * Page protection definitions.
  */
@@ -358,65 +372,38 @@ extern unsigned long vmalloc_end;
 #define __S111	PAGE_EX_RW
 
 #ifndef __s390x__
-# define PMD_SHADOW_SHIFT	1
-# define PGD_SHADOW_SHIFT	1
+# define PxD_SHADOW_SHIFT	1
 #else /* __s390x__ */
-# define PMD_SHADOW_SHIFT	2
-# define PGD_SHADOW_SHIFT	2
+# define PxD_SHADOW_SHIFT	2
 #endif /* __s390x__ */
 
 static inline struct page *get_shadow_page(struct page *page)
 {
-	if (s390_noexec && !list_empty(&page->lru))
-		return virt_to_page(page->lru.next);
-	return NULL;
-}
-
-static inline pte_t *get_shadow_pte(pte_t *ptep)
-{
-	unsigned long pteptr = (unsigned long) (ptep);
-
-	if (s390_noexec) {
-		unsigned long offset = pteptr & (PAGE_SIZE - 1);
-		void *addr = (void *) (pteptr ^ offset);
-		struct page *page = virt_to_page(addr);
-		if (!list_empty(&page->lru))
-			return (pte_t *) ((unsigned long) page->lru.next |
-					  offset);
-	}
+	if (s390_noexec && page->index)
+		return virt_to_page((void *)(addr_t) page->index);
 	return NULL;
 }
 
-static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
+static inline void *get_shadow_pte(void *table)
 {
-	unsigned long pmdptr = (unsigned long) (pmdp);
+	unsigned long addr, offset;
+	struct page *page;
 
-	if (s390_noexec) {
-		unsigned long offset = pmdptr &
-				       ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
-		void *addr = (void *) (pmdptr ^ offset);
-		struct page *page = virt_to_page(addr);
-		if (!list_empty(&page->lru))
-			return (pmd_t *) ((unsigned long) page->lru.next |
-					  offset);
-	}
-	return NULL;
+	addr = (unsigned long) table;
+	offset = addr & (PAGE_SIZE - 1);
+	page = virt_to_page((void *)(addr ^ offset));
+	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
 }
 
-static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
+static inline void *get_shadow_table(void *table)
 {
-	unsigned long pgdptr = (unsigned long) (pgdp);
+	unsigned long addr, offset;
+	struct page *page;
 
-	if (s390_noexec) {
-		unsigned long offset = pgdptr &
-				       ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
-		void *addr = (void *) (pgdptr ^ offset);
-		struct page *page = virt_to_page(addr);
-		if (!list_empty(&page->lru))
-			return (pgd_t *) ((unsigned long) page->lru.next |
-					  offset);
-	}
-	return NULL;
+	addr = (unsigned long) table;
+	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
+	page = virt_to_page((void *)(addr ^ offset));
+	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
 }
 
 /*
@@ -448,47 +435,42 @@ static inline int pgd_present(pgd_t pgd) { return 1; }
 static inline int pgd_none(pgd_t pgd)         { return 0; }
 static inline int pgd_bad(pgd_t pgd)          { return 0; }
 
-static inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
-static inline int pmd_none(pmd_t pmd)    { return pmd_val(pmd) & _PAGE_TABLE_INV; }
-static inline int pmd_bad(pmd_t pmd)
-{
-	return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE;
-}
-
 #else /* __s390x__ */
 
 static inline int pgd_present(pgd_t pgd)
 {
-	return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY;
+	return pgd_val(pgd) & _REGION_ENTRY_ORIGIN;
 }
 
 static inline int pgd_none(pgd_t pgd)
 {
-	return pgd_val(pgd) & _PGD_ENTRY_INV;
+	return pgd_val(pgd) & _REGION_ENTRY_INV;
 }
 
 static inline int pgd_bad(pgd_t pgd)
 {
-	return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY;
+	unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
+	return (pgd_val(pgd) & mask) != _REGION3_ENTRY;
 }
 
+#endif /* __s390x__ */
+
 static inline int pmd_present(pmd_t pmd)
 {
-	return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY;
+	return pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-	return pmd_val(pmd) & _PMD_ENTRY_INV;
+	return pmd_val(pmd) & _SEGMENT_ENTRY_INV;
 }
 
 static inline int pmd_bad(pmd_t pmd)
 {
-	return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY;
+	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
+	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
 }
 
-#endif /* __s390x__ */
-
 static inline int pte_none(pte_t pte)
 {
 	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
@@ -548,31 +530,22 @@ static inline void pgd_clear(pgd_t * pgdp) { }
 
 static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
-	pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
-	pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
-	pmd_val(pmdp[2]) = _PAGE_TABLE_INV;
-	pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
-}
-
-static inline void pmd_clear(pmd_t * pmdp)
-{
-	pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
-
-	pmd_clear_kernel(pmdp);
-	if (shadow_pmd)
-		pmd_clear_kernel(shadow_pmd);
+	pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
 }
 
 #else /* __s390x__ */
 
 static inline void pgd_clear_kernel(pgd_t * pgdp)
 {
-	pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
+	pgd_val(*pgdp) = _REGION3_ENTRY_EMPTY;
 }
 
 static inline void pgd_clear(pgd_t * pgdp)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgdp);
+	pgd_t *shadow_pgd = get_shadow_table(pgdp);
 
 	pgd_clear_kernel(pgdp);
 	if (shadow_pgd)
@@ -581,21 +554,21 @@ static inline void pgd_clear(pgd_t * pgdp)
 
 static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
-	pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
-	pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 }
 
+#endif /* __s390x__ */
+
 static inline void pmd_clear(pmd_t * pmdp)
 {
-	pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
+	pmd_t *shadow_pmd = get_shadow_table(pmdp);
 
 	pmd_clear_kernel(pmdp);
 	if (shadow_pmd)
 		pmd_clear_kernel(shadow_pmd);
 }
 
-#endif /* __s390x__ */
-
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t *shadow_pte = get_shadow_pte(ptep);
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 81efccc63942..21d40a19355e 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -127,12 +127,6 @@ struct stack_frame {
 
 #define ARCH_MIN_TASKALIGN	8
 
-#ifndef __s390x__
-# define __SWAPPER_PG_DIR	__pa(&swapper_pg_dir[0]) + _SEGMENT_TABLE
-#else /* __s390x__ */
-# define __SWAPPER_PG_DIR	__pa(&swapper_pg_dir[0]) + _REGION_TABLE
-#endif /* __s390x__ */
-
 #define INIT_THREAD {						\
 	.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
 }
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 3a9985fbc8af..a69bd2490d52 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -61,7 +61,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * only ran on the local cpu.
 	 */
 	if (MACHINE_HAS_IDTE) {
-		pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
+		pgd_t *shadow_pgd = get_shadow_table(mm->pgd);
 
 		if (shadow_pgd)
 			__tlb_flush_idte(shadow_pgd);