aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390
diff options
context:
space:
mode:
authorMartin Schwidefsky <schwidefsky@de.ibm.com>2012-07-26 02:53:06 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2012-07-26 10:24:14 -0400
commit0f6f281b731d20bfe75c13f85d33f3f05b440222 (patch)
tree0f8c23d3fca50dd2830ddc978501cd45a0057f45 /arch/s390
parent8143adafd2d00b13f1db96ce06b6bf479e0bfe5b (diff)
s390/mm: downgrade page table after fork of a 31 bit process
The downgrade of the 4 level page table created by init_new_context is currently done only in start_thread31. If a 31 bit process forks, the new mm keeps using a 4 level page table, including the task size of 2**42 that goes along with it. This is incorrect, as the forked 31 bit process can now map memory beyond 2GB. Define arch_dup_mmap to do the downgrade after fork.

Cc: stable@vger.kernel.org
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--arch/s390/include/asm/mmu_context.h14
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/mm/mmap.c12
-rw-r--r--arch/s390/mm/pgtable.c5
4 files changed, 25 insertions, 8 deletions
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5c63615f1349..077de7efc82f 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -11,7 +11,6 @@
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/ctl_reg.h>
-#include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
@@ -91,4 +90,17 @@ static inline void activate_mm(struct mm_struct *prev,
 	switch_mm(prev, next, current);
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+#ifdef CONFIG_64BIT
+	if (oldmm->context.asce_limit < mm->context.asce_limit)
+		crst_table_downgrade(mm, oldmm->context.asce_limit);
+#endif
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
 #endif /* __S390_MMU_CONTEXT_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c40fa91e38a8..11e4e3236937 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -120,7 +120,9 @@ struct stack_frame {
 	regs->psw.mask	= psw_user_bits | PSW_MASK_BA;	\
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;	\
 	regs->gprs[15]	= new_stackp;			\
+	__tlb_flush_mm(current->mm);			\
 	crst_table_downgrade(current->mm, 1UL << 31);	\
+	update_mm(current->mm, current);		\
 } while (0)
 
 /* Forward declaration, a strange C thing */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 573384256c5c..c59a5efa58b1 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -103,9 +103,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 int s390_mmap_check(unsigned long addr, unsigned long len)
 {
+	int rc;
+
 	if (!is_compat_task() &&
-	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
-		return crst_table_upgrade(current->mm, 1UL << 53);
+	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+		rc = crst_table_upgrade(current->mm, 1UL << 53);
+		if (rc)
+			return rc;
+		update_mm(current->mm, current);
+	}
 	return 0;
 }
 
@@ -125,6 +131,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	}
 	return area;
@@ -147,6 +154,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
 						      pgoff, flags);
 	}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1cab221077cc..e9ac2d60b7e5 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -85,7 +85,6 @@ repeat:
 		crst_table_free(mm, table);
 	if (mm->context.asce_limit < limit)
 		goto repeat;
-	update_mm(mm, current);
 	return 0;
 }
 
@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
 	pgd_t *pgd;
 
-	if (mm->context.asce_limit <= limit)
-		return;
-	__tlb_flush_mm(mm);
 	while (mm->context.asce_limit > limit) {
 		pgd = mm->pgd;
 		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		mm->task_size = mm->context.asce_limit;
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
-	update_mm(mm, current);
 }
 #endif
 