author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-03-18 08:27:37 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-03-18 08:28:13 -0400
commit		0fb1d9bcbcf701a45835aa150c57ca54ea685bfa (patch)
tree		a2821e3d10918d4b76e6329da42a45cfbb9f19cd /arch
parent		f481bfafd36e621d6cbc62d4b25f74811410aef7 (diff)
[S390] make page table upgrade work again
Now that TASK_SIZE gives the current size of the address space, the
upgrade of a 64 bit process from 3 to 4 levels of page tables needs
to use the arch_mmap_check hook to catch large mmap lengths. The
get_unmapped_area* functions need to check for -ENOMEM from
arch_get_unmapped_area*, upgrade the page table and retry.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
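
For illustration only (not part of the patch): the case this change targets can be
triggered from user space by a single anonymous mapping that does not fit below the
initial 3-level address space limit. The 4 TB figure for that limit and the use of
MAP_NORESERVE are assumptions for this sketch, which either hits arch_mmap_check()
(len >= TASK_SIZE) or the -ENOMEM retry in s390_get_unmapped_area*().

/* Hypothetical user-space sketch: request a 5 TB anonymous mapping,
 * which on s390x is assumed to force the 3-to-4 level upgrade. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 5UL << 40;	/* 5 TB, beyond the assumed 4 TB 3-level limit */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("mapped %zu bytes at %p\n", len, p);
	munmap(p, len);
	return 0;
}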
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/include/asm/mman.h	5
-rw-r--r--	arch/s390/mm/mmap.c	44
2 files changed, 35 insertions, 14 deletions
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 7839767d837e..da01432e8f44 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -22,4 +22,9 @@
 #define MCL_CURRENT	1		/* lock all current mappings */
 #define MCL_FUTURE	2		/* lock all future mappings */
 
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
+int s390_mmap_check(unsigned long addr, unsigned long len);
+#define arch_mmap_check(addr,len,flags)	s390_mmap_check(addr,len)
+#endif
+
 #endif /* __S390_MMAN_H__ */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 346dd0c5cbde..e008d236cc15 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -89,42 +89,58 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
 
 #else
 
+int s390_mmap_check(unsigned long addr, unsigned long len)
+{
+	if (!test_thread_flag(TIF_31BIT) &&
+	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
+		return crst_table_upgrade(current->mm, 1UL << 53);
+	return 0;
+}
+
 static unsigned long
 s390_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
+	unsigned long area;
 	int rc;
 
-	addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
-	if (addr & ~PAGE_MASK)
-		return addr;
-	if (unlikely(mm->context.asce_limit < addr + len)) {
-		rc = crst_table_upgrade(mm, addr + len);
+	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+	if (!(area & ~PAGE_MASK))
+		return area;
+	if (area == -ENOMEM &&
+	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
+		/* Upgrade the page table to 4 levels and retry. */
+		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	}
-	return addr;
+	return area;
 }
 
 static unsigned long
-s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 		const unsigned long len, const unsigned long pgoff,
 		const unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0;
+	unsigned long area;
 	int rc;
 
-	addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
-	if (addr & ~PAGE_MASK)
-		return addr;
-	if (unlikely(mm->context.asce_limit < addr + len)) {
-		rc = crst_table_upgrade(mm, addr + len);
+	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+	if (!(area & ~PAGE_MASK))
+		return area;
+	if (area == -ENOMEM &&
+	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
+		/* Upgrade the page table to 4 levels and retry. */
+		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		area = arch_get_unmapped_area_topdown(filp, addr, len,
+						      pgoff, flags);
 	}
-	return addr;
+	return area;
 }
 /*
  * This function, called very early during the creation of a new