author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-09 14:15:23 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-09 14:15:23 -0500
commit		531021f2ca681faf58f926771f85bb5c76f13eba (patch)
tree		b730cd35a1bde5f2ddd85e080a6269df96c50074 /include
parent		451688ba0b488faf274e13dc591734b1e695642c (diff)
parent		6252d702c5311ce916caf75ed82e5c8245171c92 (diff)
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] dynamic page tables.
  [S390] Add four level page tables for CONFIG_64BIT=y.
  [S390] 1K/2K page table pages.
  [S390] Remove a.out header file.
  [S390] sclp_vt220: Fix vt220 initialization
  [S390] qdio: avoid hang when establishing qdio queues
  [S390] VMEM_MAX_PHYS overflow on 31 bit.
  [S390] zcrypt: Do not start ap poll thread per default
  [S390] Fix __ffs_word_loop/__ffz_word_loop inline assembly.
  [S390] Wire up new timerfd syscalls.
  [S390] Update default configuration.
Diffstat (limited to 'include')
-rw-r--r--  include/asm-s390/a.out.h         32
-rw-r--r--  include/asm-s390/bitops.h         4
-rw-r--r--  include/asm-s390/elf.h           22
-rw-r--r--  include/asm-s390/mmu.h            9
-rw-r--r--  include/asm-s390/mmu_context.h   20
-rw-r--r--  include/asm-s390/page.h          36
-rw-r--r--  include/asm-s390/pgalloc.h      116
-rw-r--r--  include/asm-s390/pgtable.h      191
-rw-r--r--  include/asm-s390/processor.h     45
-rw-r--r--  include/asm-s390/tlb.h           49
-rw-r--r--  include/asm-s390/tlbflush.h      11
-rw-r--r--  include/asm-s390/unistd.h         5
12 files changed, 275 insertions, 265 deletions
diff --git a/include/asm-s390/a.out.h b/include/asm-s390/a.out.h
deleted file mode 100644
index 8d6bd9c2952e..000000000000
--- a/include/asm-s390/a.out.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- *  include/asm-s390/a.out.h
- *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *
- *  Derived from "include/asm-i386/a.out.h"
- *    Copyright (C) 1992, Linus Torvalds
- *
- * I don't think we'll ever need a.out ...
- */
-
-#ifndef __S390_A_OUT_H__
-#define __S390_A_OUT_H__
-
-struct exec
-{
-	unsigned long a_info;	/* Use macros N_MAGIC, etc for access */
-	unsigned a_text;	/* length of text, in bytes */
-	unsigned a_data;	/* length of data, in bytes */
-	unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
-	unsigned a_syms;	/* length of symbol table data in file, in bytes */
-	unsigned a_entry;	/* start address */
-	unsigned a_trsize;	/* length of relocation info for text, in bytes */
-	unsigned a_drsize;	/* length of relocation info for data, in bytes */
-};
-
-#define N_TRSIZE(a)	((a).a_trsize)
-#define N_DRSIZE(a)	((a).a_drsize)
-#define N_SYMSIZE(a)	((a).a_syms)
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 882db054110c..ab83c844d04c 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -472,7 +472,7 @@ static inline unsigned long __ffz_word_loop(const unsigned long *addr,
472 " brct %1,0b\n" 472 " brct %1,0b\n"
473 "1:\n" 473 "1:\n"
474#endif 474#endif
475 : "+a" (bytes), "+d" (size) 475 : "+&a" (bytes), "+&d" (size)
476 : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr) 476 : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
477 : "cc" ); 477 : "cc" );
478 return bytes; 478 return bytes;
@@ -507,7 +507,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
507 " brct %1,0b\n" 507 " brct %1,0b\n"
508 "1:\n" 508 "1:\n"
509#endif 509#endif
510 : "+a" (bytes), "+a" (size) 510 : "+&a" (bytes), "+&a" (size)
511 : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr) 511 : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
512 : "cc" ); 512 : "cc" );
513 return bytes; 513 return bytes;
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h
index b73a424d0f97..b3ac262c4582 100644
--- a/include/asm-s390/elf.h
+++ b/include/asm-s390/elf.h
@@ -115,6 +115,7 @@ typedef s390_regs elf_gregset_t;
 
 #include <linux/sched.h>	/* for task_struct */
 #include <asm/system.h>		/* for save_access_regs */
+#include <asm/mmu_context.h>
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
@@ -137,14 +138,7 @@ typedef s390_regs elf_gregset_t;
    use of this is to invoke "./ld.so someprog" to test out a new version of
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk. */
-
-#ifndef __s390x__
-#define ELF_ET_DYN_BASE         ((TASK_SIZE & 0x80000000) \
-                                ? TASK_SIZE / 3 * 2 \
-                                : 2 * TASK_SIZE / 3)
-#else /* __s390x__ */
-#define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
-#endif /* __s390x__ */
+#define ELF_ET_DYN_BASE		(STACK_TOP / 3 * 2)
 
 /* Wow, the "main" arch needs arch dependent functions too.. :) */
 
@@ -214,4 +208,16 @@ do { \
 } while (0)
 #endif /* __s390x__ */
 
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ */
+#define elf_read_implies_exec(ex, executable_stack)	\
+({							\
+	if (current->mm->context.noexec &&		\
+	    executable_stack != EXSTACK_DISABLE_X)	\
+		disable_noexec(current->mm, current);	\
+	current->mm->context.noexec == 0;		\
+})
+
 #endif
diff --git a/include/asm-s390/mmu.h b/include/asm-s390/mmu.h
index ccd36d26615a..1698e29c5b20 100644
--- a/include/asm-s390/mmu.h
+++ b/include/asm-s390/mmu.h
@@ -1,7 +1,12 @@
 #ifndef __MMU_H
 #define __MMU_H
 
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef struct {
+	struct list_head crst_list;
+	struct list_head pgtable_list;
+	unsigned long asce_bits;
+	unsigned long asce_limit;
+	int noexec;
+} mm_context_t;
 
 #endif
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index a77d4ba3c8eb..b5a34c6f91a9 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -10,15 +10,19 @@
 #define __S390_MMU_CONTEXT_H
 
 #include <asm/pgalloc.h>
+#include <asm/uaccess.h>
 #include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
-	mm->context = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
 #ifdef CONFIG_64BIT
-	mm->context |= _ASCE_TYPE_REGION3;
+	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
+	mm->context.noexec = s390_noexec;
+	mm->context.asce_limit = STACK_TOP_MAX;
+	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;
 }
 
@@ -32,24 +36,25 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 {
-	S390_lowcore.user_asce = mm->context | __pa(mm->pgd);
+	pgd_t *pgd = mm->pgd;
+
+	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
 	if (switch_amode) {
 		/* Load primary space page table origin. */
-		pgd_t *shadow_pgd = get_shadow_table(mm->pgd) ? : mm->pgd;
-		S390_lowcore.user_exec_asce = mm->context | __pa(shadow_pgd);
+		pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
+		S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
 		asm volatile(LCTL_OPCODE" 1,1,%0\n"
 			     : : "m" (S390_lowcore.user_exec_asce) );
 	} else
 		/* Load home space page table origin. */
 		asm volatile(LCTL_OPCODE" 13,13,%0"
 			     : : "m" (S390_lowcore.user_asce) );
+	set_fs(current->thread.mm_segment);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	if (unlikely(prev == next))
-		return;
 	cpu_set(smp_processor_id(), next->cpu_vm_mask);
 	update_mm(next, tsk);
 }
@@ -61,7 +66,6 @@ static inline void activate_mm(struct mm_struct *prev,
 			       struct mm_struct *next)
 {
 	switch_mm(prev, next, current);
-	set_fs(current->thread.mm_segment);
 }
 
 #endif /* __S390_MMU_CONTEXT_H */
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 7f29a981f48c..fe7f92b6ae6d 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -74,43 +74,17 @@ static inline void copy_page(void *to, void *from)
 
 typedef struct { unsigned long pgprot; } pgprot_t;
 typedef struct { unsigned long pte; } pte_t;
-
-#define pte_val(x)      ((x).pte)
-#define pgprot_val(x)   ((x).pgprot)
-
-#ifndef __s390x__
-
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pud; } pud_t;
-typedef struct {
-        unsigned long pgd0;
-        unsigned long pgd1;
-        unsigned long pgd2;
-        unsigned long pgd3;
-        } pgd_t;
-
-#define pmd_val(x)      ((x).pmd)
-#define pud_val(x)      ((x).pud)
-#define pgd_val(x)      ((x).pgd0)
-
-#else /* __s390x__ */
-
-typedef struct {
-        unsigned long pmd0;
-        unsigned long pmd1;
-        } pmd_t;
-typedef struct { unsigned long pud; } pud_t;
 typedef struct { unsigned long pgd; } pgd_t;
+typedef pte_t *pgtable_t;
 
-#define pmd_val(x)      ((x).pmd0)
-#define pmd_val1(x)     ((x).pmd1)
+#define pgprot_val(x)	((x).pgprot)
+#define pte_val(x)	((x).pte)
+#define pmd_val(x)	((x).pmd)
 #define pud_val(x)	((x).pud)
 #define pgd_val(x)	((x).pgd)
 
-#endif /* __s390x__ */
-
-typedef struct page *pgtable_t;
-
 #define __pte(x)	((pte_t) { (x) } )
 #define __pmd(x)	((pmd_t) { (x) } )
 #define __pgd(x)	((pgd_t) { (x) } )
@@ -167,7 +141,7 @@ static inline int pfn_valid(unsigned long pfn)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #include <asm-generic/memory_model.h>
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 900d44807e10..f5b2bf3d7c1d 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -20,10 +20,11 @@
 #define check_pgt_cache()	do {} while (0)
 
 unsigned long *crst_table_alloc(struct mm_struct *, int);
-void crst_table_free(unsigned long *);
+void crst_table_free(struct mm_struct *, unsigned long *);
 
-unsigned long *page_table_alloc(int);
-void page_table_free(unsigned long *);
+unsigned long *page_table_alloc(struct mm_struct *);
+void page_table_free(struct mm_struct *, unsigned long *);
+void disable_noexec(struct mm_struct *, struct task_struct *);
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
@@ -72,23 +73,49 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-	return _REGION3_ENTRY_EMPTY;
+	if (mm->context.asce_limit <= (1UL << 31))
+		return _SEGMENT_ENTRY_EMPTY;
+	if (mm->context.asce_limit <= (1UL << 42))
+		return _REGION3_ENTRY_EMPTY;
+	return _REGION2_ENTRY_EMPTY;
 }
 
-#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
-#define pud_free(mm, x)				do { } while (0)
+int crst_table_upgrade(struct mm_struct *, unsigned long limit);
+void crst_table_downgrade(struct mm_struct *, unsigned long limit);
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+	if (table)
+		crst_table_init(table, _REGION3_ENTRY_EMPTY);
+	return (pud_t *) table;
+}
+#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
-	if (crst)
-		crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
-	return (pmd_t *) crst;
+	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+	if (table)
+		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+	return (pmd_t *) table;
 }
-#define pmd_free(mm, pmd) crst_table_free((unsigned long *)pmd)
+#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
 
-#define pgd_populate(mm, pgd, pud)		BUG()
-#define pgd_populate_kernel(mm, pgd, pud)	BUG()
+static inline void pgd_populate_kernel(struct mm_struct *mm,
+				       pgd_t *pgd, pud_t *pud)
+{
+	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+	pgd_populate_kernel(mm, pgd, pud);
+	if (mm->context.noexec) {
+		pgd = get_shadow_table(pgd);
+		pud = get_shadow_table(pud);
+		pgd_populate_kernel(mm, pgd, pud);
+	}
+}
 
 static inline void pud_populate_kernel(struct mm_struct *mm,
 				       pud_t *pud, pmd_t *pmd)
@@ -98,63 +125,50 @@ static inline void pud_populate_kernel(struct mm_struct *mm,
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	pud_t *shadow_pud = get_shadow_table(pud);
-	pmd_t *shadow_pmd = get_shadow_table(pmd);
-
-	if (shadow_pud && shadow_pmd)
-		pud_populate_kernel(mm, shadow_pud, shadow_pmd);
 	pud_populate_kernel(mm, pud, pmd);
+	if (mm->context.noexec) {
+		pud = get_shadow_table(pud);
+		pmd = get_shadow_table(pmd);
+		pud_populate_kernel(mm, pud, pmd);
+	}
 }
 
 #endif /* __s390x__ */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
-	if (crst)
-		crst_table_init(crst, pgd_entry_type(mm));
-	return (pgd_t *) crst;
+	INIT_LIST_HEAD(&mm->context.crst_list);
+	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
 }
-#define pgd_free(mm, pgd) crst_table_free((unsigned long *) pgd)
+#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
-static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+				       pmd_t *pmd, pte_t *pte)
 {
-#ifndef __s390x__
-	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
-	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
-	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
-	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
-#else /* __s390x__ */
 	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
-	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
-#endif /* __s390x__ */
 }
 
-static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
+static inline void pmd_populate(struct mm_struct *mm,
+				pmd_t *pmd, pgtable_t pte)
 {
-	pte_t *pte = (pte_t *)page_to_phys(page);
-	pmd_t *shadow_pmd = get_shadow_table(pmd);
-	pte_t *shadow_pte = get_shadow_pte(pte);
-
 	pmd_populate_kernel(mm, pmd, pte);
-	if (shadow_pmd && shadow_pte)
-		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
+	if (mm->context.noexec) {
+		pmd = get_shadow_table(pmd);
+		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
+	}
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#define pmd_pgtable(pmd) \
+	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
 
 /*
  * page table entry allocation/free routines.
  */
-#define pte_alloc_one_kernel(mm, vmaddr) \
-	((pte_t *) page_table_alloc(s390_noexec))
-#define pte_alloc_one(mm, vmaddr) \
-	virt_to_page(page_table_alloc(s390_noexec))
-
-#define pte_free_kernel(mm, pte) \
-	page_table_free((unsigned long *) pte)
-#define pte_free(mm, pte) \
-	page_table_free((unsigned long *) page_to_phys((struct page *) pte))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+
+#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
+#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
 
 #endif /* _S390_PGALLOC_H */
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 3f520754e71c..65154dc9a9e5 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -57,21 +57,21 @@ extern char empty_zero_page[PAGE_SIZE];
  * PGDIR_SHIFT determines what a third-level page table entry can map
  */
 #ifndef __s390x__
-# define PMD_SHIFT	22
-# define PUD_SHIFT	22
-# define PGDIR_SHIFT	22
+# define PMD_SHIFT	20
+# define PUD_SHIFT	20
+# define PGDIR_SHIFT	20
 #else /* __s390x__ */
-# define PMD_SHIFT	21
+# define PMD_SHIFT	20
 # define PUD_SHIFT	31
-# define PGDIR_SHIFT	31
+# define PGDIR_SHIFT	42
 #endif /* __s390x__ */
 
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PUD_SIZE	(1UL << PUD_SHIFT)
 #define PUD_MASK	(~(PUD_SIZE-1))
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
 /*
  * entries per page directory level: the S390 is two-level, so
@@ -79,17 +79,15 @@ extern char empty_zero_page[PAGE_SIZE];
  * for S390 segment-table entries are combined to one PGD
  * that leads to 1024 pte per pgd
  */
+#define PTRS_PER_PTE	256
 #ifndef __s390x__
-# define PTRS_PER_PTE	1024
-# define PTRS_PER_PMD	1
-# define PTRS_PER_PUD	1
-# define PTRS_PER_PGD	512
+#define PTRS_PER_PMD	1
+#define PTRS_PER_PUD	1
 #else /* __s390x__ */
-# define PTRS_PER_PTE	512
-# define PTRS_PER_PMD	1024
-# define PTRS_PER_PUD	1
-# define PTRS_PER_PGD	2048
+#define PTRS_PER_PMD	2048
+#define PTRS_PER_PUD	2048
 #endif /* __s390x__ */
+#define PTRS_PER_PGD	2048
 
 #define FIRST_USER_ADDRESS  0
 
@@ -127,8 +125,9 @@ extern char empty_zero_page[PAGE_SIZE];
  * mapping. This needs to be calculated at compile time since the size of the
  * VMEM_MAP is static but the size of struct page can change.
  */
-#define VMEM_MAX_PHYS	min(VMALLOC_START, ((VMEM_MAP_END - VMALLOC_END) / \
-			    sizeof(struct page) * PAGE_SIZE) & ~((16 << 20) - 1))
+#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
+#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
+#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
 #define VMEM_MAP	((struct page *) VMALLOC_END)
 
 /*
@@ -375,24 +374,6 @@ extern char empty_zero_page[PAGE_SIZE];
 # define PxD_SHADOW_SHIFT	2
 #endif /* __s390x__ */
 
-static inline struct page *get_shadow_page(struct page *page)
-{
-	if (s390_noexec && page->index)
-		return virt_to_page((void *)(addr_t) page->index);
-	return NULL;
-}
-
-static inline void *get_shadow_pte(void *table)
-{
-	unsigned long addr, offset;
-	struct page *page;
-
-	addr = (unsigned long) table;
-	offset = addr & (PAGE_SIZE - 1);
-	page = virt_to_page((void *)(addr ^ offset));
-	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
-}
-
 static inline void *get_shadow_table(void *table)
 {
 	unsigned long addr, offset;
@@ -410,17 +391,16 @@ static inline void *get_shadow_table(void *table)
  * hook is made available.
  */
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *pteptr, pte_t pteval)
+			      pte_t *ptep, pte_t entry)
 {
-	pte_t *shadow_pte = get_shadow_pte(pteptr);
-
-	*pteptr = pteval;
-	if (shadow_pte) {
-		if (!(pte_val(pteval) & _PAGE_INVALID) &&
-		    (pte_val(pteval) & _PAGE_SWX))
-			pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
+	*ptep = entry;
+	if (mm->context.noexec) {
+		if (!(pte_val(entry) & _PAGE_INVALID) &&
+		    (pte_val(entry) & _PAGE_SWX))
+			pte_val(entry) |= _PAGE_RO;
 		else
-			pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
+			pte_val(entry) = _PAGE_TYPE_EMPTY;
+		ptep[PTRS_PER_PTE] = entry;
 	}
 }
 
@@ -439,24 +419,58 @@ static inline int pud_bad(pud_t pud) { return 0; }
 
 #else /* __s390x__ */
 
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline int pgd_none(pgd_t pgd)	 { return 0; }
-static inline int pgd_bad(pgd_t pgd)	 { return 0; }
+static inline int pgd_present(pgd_t pgd)
+{
+	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
+		return 1;
+	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
+		return 0;
+	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+	/*
+	 * With dynamic page table levels the pgd can be a region table
+	 * entry or a segment table entry. Check for the bit that are
+	 * invalid for either table entry.
+	 */
+	unsigned long mask =
+		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
+	return (pgd_val(pgd) & mask) != 0;
+}
 
 static inline int pud_present(pud_t pud)
 {
+	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
+		return 1;
 	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
 }
 
 static inline int pud_none(pud_t pud)
 {
+	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
+		return 0;
 	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
 }
 
 static inline int pud_bad(pud_t pud)
 {
-	unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
-	return (pud_val(pud) & mask) != _REGION3_ENTRY;
+	/*
+	 * With dynamic page table levels the pud can be a region table
+	 * entry or a segment table entry. Check for the bit that are
+	 * invalid for either table entry.
+	 */
+	unsigned long mask =
+		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
+	return (pud_val(pud) & mask) != 0;
 }
 
 #endif /* __s390x__ */
@@ -535,24 +549,30 @@ static inline int pte_young(pte_t pte)
 #define pgd_clear(pgd)		do { } while (0)
 #define pud_clear(pud)		do { } while (0)
 
-static inline void pmd_clear_kernel(pmd_t * pmdp)
+#else /* __s390x__ */
+
+static inline void pgd_clear_kernel(pgd_t * pgd)
 {
-	pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
+	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
 }
 
-#else /* __s390x__ */
+static inline void pgd_clear(pgd_t * pgd)
+{
+	pgd_t *shadow = get_shadow_table(pgd);
 
-#define pgd_clear(pgd)		do { } while (0)
+	pgd_clear_kernel(pgd);
+	if (shadow)
+		pgd_clear_kernel(shadow);
+}
 
 static inline void pud_clear_kernel(pud_t *pud)
 {
-	pud_val(*pud) = _REGION3_ENTRY_EMPTY;
+	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
 }
 
-static inline void pud_clear(pud_t * pud)
+static inline void pud_clear(pud_t *pud)
 {
 	pud_t *shadow = get_shadow_table(pud);
 
@@ -561,30 +581,27 @@ static inline void pud_clear(pud_t * pud)
 		pud_clear_kernel(shadow);
 }
 
+#endif /* __s390x__ */
+
 static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
 	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 }
 
-#endif /* __s390x__ */
-
-static inline void pmd_clear(pmd_t * pmdp)
+static inline void pmd_clear(pmd_t *pmd)
 {
-	pmd_t *shadow_pmd = get_shadow_table(pmdp);
+	pmd_t *shadow = get_shadow_table(pmd);
 
-	pmd_clear_kernel(pmdp);
-	if (shadow_pmd)
-		pmd_clear_kernel(shadow_pmd);
+	pmd_clear_kernel(pmd);
+	if (shadow)
+		pmd_clear_kernel(shadow);
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	pte_t *shadow_pte = get_shadow_pte(ptep);
-
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-	if (shadow_pte)
-		pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
+	if (mm->context.noexec)
+		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
 }
 
 /*
@@ -665,7 +682,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 #ifndef __s390x__
-		/* S390 has 1mb segments, we are emulating 4MB segments */
+		/* pto must point to the start of the segment table */
 		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
 #else
 		/* ipte in zarch mode can do the math */
@@ -679,12 +696,12 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 }
 
-static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
+static inline void ptep_invalidate(struct mm_struct *mm,
+				   unsigned long address, pte_t *ptep)
 {
 	__ptep_ipte(address, ptep);
-	ptep = get_shadow_pte(ptep);
-	if (ptep)
-		__ptep_ipte(address, ptep);
+	if (mm->context.noexec)
+		__ptep_ipte(address, ptep + PTRS_PER_PTE);
 }
 
 /*
@@ -706,7 +723,7 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
 	pte_t __pte = *(__ptep);					\
 	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
 	    (__mm) != current->active_mm)				\
-		ptep_invalidate(__address, __ptep);			\
+		ptep_invalidate(__mm, __address, __ptep);		\
 	else								\
 		pte_clear((__mm), (__address), (__ptep));		\
 	__pte;								\
@@ -717,7 +734,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 					    unsigned long address, pte_t *ptep)
 {
 	pte_t pte = *ptep;
-	ptep_invalidate(address, ptep);
+	ptep_invalidate(vma->vm_mm, address, ptep);
 	return pte;
 }
 
@@ -738,7 +755,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	if (full)
 		pte_clear(mm, addr, ptep);
 	else
-		ptep_invalidate(addr, ptep);
+		ptep_invalidate(mm, addr, ptep);
 	return pte;
 }
 
@@ -749,7 +766,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	if (pte_write(__pte)) {						\
 		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
 		    (__mm) != current->active_mm)			\
-			ptep_invalidate(__addr, __ptep);		\
+			ptep_invalidate(__mm, __addr, __ptep);		\
 		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
 	}								\
 })
@@ -759,7 +776,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 ({									\
 	int __changed = !pte_same(*(__ptep), __entry);			\
 	if (__changed) {						\
-		ptep_invalidate(__addr, __ptep);			\
+		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
 		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
 	}								\
 	__changed;							\
@@ -840,13 +857,21 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 
 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
-#define pgd_deref(pgd) ({ BUG(); 0UL; })
+#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
 
-#define pud_offset(pgd, address) ((pud_t *) pgd)
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+	pud_t *pud = (pud_t *) pgd;
+	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+		pud = (pud_t *) pgd_deref(*pgd);
+	return pud + pud_index(address);
+}
 
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 {
-	pmd_t *pmd = (pmd_t *) pud_deref(*pud);
+	pmd_t *pmd = (pmd_t *) pud;
+	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+		pmd = (pmd_t *) pud_deref(*pud);
 	return pmd + pmd_index(address);
 }
 
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index e8785634cbdb..51d88912aa20 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -64,24 +64,29 @@ extern int get_cpu_capability(unsigned int *);
  */
 #ifndef __s390x__
 
-# define TASK_SIZE		(0x80000000UL)
-# define TASK_UNMAPPED_BASE	(TASK_SIZE / 2)
-# define DEFAULT_TASK_SIZE	(0x80000000UL)
+#define TASK_SIZE		(1UL << 31)
+#define TASK_UNMAPPED_BASE	(1UL << 30)
 
 #else /* __s390x__ */
 
-# define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_31BIT) ? \
-					(0x80000000UL) : (0x40000000000UL))
-# define TASK_SIZE		TASK_SIZE_OF(current)
-# define TASK_UNMAPPED_BASE	(TASK_SIZE / 2)
-# define DEFAULT_TASK_SIZE	(0x40000000000UL)
+#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk,TIF_31BIT) ? \
+					(1UL << 31) : (1UL << 53))
+#define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
+					(1UL << 30) : (1UL << 41))
+#define TASK_SIZE		TASK_SIZE_OF(current)
 
 #endif /* __s390x__ */
 
 #ifdef __KERNEL__
 
-#define STACK_TOP		TASK_SIZE
-#define STACK_TOP_MAX		DEFAULT_TASK_SIZE
+#ifndef __s390x__
+#define STACK_TOP		(1UL << 31)
+#define STACK_TOP_MAX		(1UL << 31)
+#else /* __s390x__ */
+#define STACK_TOP		(1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
+#define STACK_TOP_MAX		(1UL << 42)
+#endif /* __s390x__ */
+
 
 #endif
 
@@ -138,8 +143,6 @@ struct stack_frame {
 /*
  * Do necessary setup to start up a new thread.
  */
-#ifndef __s390x__
-
 #define start_thread(regs, new_psw, new_stackp) do {		\
 	set_fs(USER_DS);					\
 	regs->psw.mask	= psw_user_bits;			\
@@ -147,24 +150,6 @@ struct stack_frame {
 	regs->gprs[15]	= new_stackp ;				\
 } while (0)
 
-#else /* __s390x__ */
-
-#define start_thread(regs, new_psw, new_stackp) do {		\
-	set_fs(USER_DS);					\
-	regs->psw.mask	= psw_user_bits;			\
-	regs->psw.addr	= new_psw;				\
-	regs->gprs[15]	= new_stackp;				\
-} while (0)
-
-#define start_thread31(regs, new_psw, new_stackp) do {		\
-	set_fs(USER_DS);					\
-	regs->psw.mask	= psw_user32_bits;			\
-	regs->psw.addr	= new_psw;				\
-	regs->gprs[15]	= new_stackp;				\
-} while (0)
-
-#endif /* __s390x__ */
-
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
diff --git a/include/asm-s390/tlb.h b/include/asm-s390/tlb.h
index 3c8177fa9e06..3d8a96d39d9d 100644
--- a/include/asm-s390/tlb.h
+++ b/include/asm-s390/tlb.h
@@ -38,7 +38,7 @@ struct mmu_gather {
 	struct mm_struct *mm;
 	unsigned int fullmm;
 	unsigned int nr_ptes;
-	unsigned int nr_pmds;
+	unsigned int nr_pxds;
 	void *array[TLB_NR_PTRS];
 };
 
@@ -53,7 +53,7 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
 	tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
 		(atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
 	tlb->nr_ptes = 0;
-	tlb->nr_pmds = TLB_NR_PTRS;
+	tlb->nr_pxds = TLB_NR_PTRS;
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
 	return tlb;
@@ -62,12 +62,13 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
 static inline void tlb_flush_mmu(struct mmu_gather *tlb,
 				 unsigned long start, unsigned long end)
 {
-	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pmds < TLB_NR_PTRS))
+	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
 		__tlb_flush_mm(tlb->mm);
 	while (tlb->nr_ptes > 0)
 		pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
-	while (tlb->nr_pmds < TLB_NR_PTRS)
-		pmd_free(tlb->mm, (pmd_t *) tlb->array[tlb->nr_pmds++]);
+	while (tlb->nr_pxds < TLB_NR_PTRS)
+		/* pgd_free frees the pointer as region or segment table */
+		pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
@@ -95,33 +96,57 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
  */
-static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
 {
 	if (!tlb->fullmm) {
-		tlb->array[tlb->nr_ptes++] = page;
-		if (tlb->nr_ptes >= tlb->nr_pmds)
+		tlb->array[tlb->nr_ptes++] = pte;
+		if (tlb->nr_ptes >= tlb->nr_pxds)
 			tlb_flush_mmu(tlb, 0, 0);
 	} else
-		pte_free(tlb->mm, page);
+		pte_free(tlb->mm, pte);
 }
 
 /*
  * pmd_free_tlb frees a pmd table and clears the CRSTE for the
  * segment table entry from the tlb.
+ * If the mm uses a two level page table the single pmd is freed
+ * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
+ * to avoid the double free of the pmd in this case.
  */
 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
 #ifdef __s390x__
+	if (tlb->mm->context.asce_limit <= (1UL << 31))
+		return;
 	if (!tlb->fullmm) {
-		tlb->array[--tlb->nr_pmds] = (struct page *) pmd;
-		if (tlb->nr_ptes >= tlb->nr_pmds)
+		tlb->array[--tlb->nr_pxds] = pmd;
+		if (tlb->nr_ptes >= tlb->nr_pxds)
 			tlb_flush_mmu(tlb, 0, 0);
 	} else
 		pmd_free(tlb->mm, pmd);
 #endif
 }
 
-#define pud_free_tlb(tlb, pud)			do { } while (0)
+/*
+ * pud_free_tlb frees a pud table and clears the CRSTE for the
+ * region third table entry from the tlb.
+ * If the mm uses a three level page table the single pud is freed
+ * as the pgd. pud_free_tlb checks the asce_limit against 4TB
+ * to avoid the double free of the pud in this case.
+ */
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+{
+#ifdef __s390x__
+	if (tlb->mm->context.asce_limit <= (1UL << 42))
+		return;
+	if (!tlb->fullmm) {
+		tlb->array[--tlb->nr_pxds] = pud;
+		if (tlb->nr_ptes >= tlb->nr_pxds)
+			tlb_flush_mmu(tlb, 0, 0);
+	} else
+		pud_free(tlb->mm, pud);
+#endif
+}
 
 #define tlb_start_vma(tlb, vma)			do { } while (0)
 #define tlb_end_vma(tlb, vma)			do { } while (0)
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 70fa5ae58180..35fb4f9127b2 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -61,11 +61,12 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * only ran on the local cpu.
 	 */
 	if (MACHINE_HAS_IDTE) {
-		pgd_t *shadow = get_shadow_table(mm->pgd);
-
-		if (shadow)
-			__tlb_flush_idte((unsigned long) shadow | mm->context);
-		__tlb_flush_idte((unsigned long) mm->pgd | mm->context);
+		if (mm->context.noexec)
+			__tlb_flush_idte((unsigned long)
+					 get_shadow_table(mm->pgd) |
+					 mm->context.asce_bits);
+		__tlb_flush_idte((unsigned long) mm->pgd |
+				 mm->context.asce_bits);
 		return;
 	}
 	preempt_disable();
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index f04acb2670a8..583da807ea97 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -256,7 +256,10 @@
 #define __NR_signalfd		316
 #define __NR_timerfd		317
 #define __NR_eventfd		318
-#define NR_syscalls 319
+#define __NR_timerfd_create	319
+#define __NR_timerfd_settime	320
+#define __NR_timerfd_gettime	321
+#define NR_syscalls 322
 
 /*
  * There are some system calls that are not present on 64 bit, some