author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-01-14 11:51:17 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-01-22 06:15:59 -0500
commit		1f6b83e5e4d3aed46eac1d219322fba9c7341cd8 (patch)
tree		a7839cd769ec8637746d5e20a7eb7117373caaf0
parent		f8b2dcbd9e6d1479b9b5a9e9e78bbaf783bde819 (diff)
s390: avoid z13 cache aliasing
Avoid cache aliasing on z13 by aligning shared objects to multiples of 512K. The virtual addresses of a page from a shared file need to have identical bits in the range 2^12 to 2^18.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
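For illustration only (not part of the patch; the macro names below are invented for the example), the arithmetic behind "multiples of 512K": PAGE_SHIFT is 12 on s390, and the patch picks a page-level alignment mask of 0x7f, i.e. 128 pages, so the alignment granule is 128 * 4 KiB = 512 KiB and the address bits that must agree between any two mappings of the same file page are exactly bits 12..18.

#define EXAMPLE_PAGE_SHIFT	12
#define EXAMPLE_ALIGN_MASK	0x7fUL					/* 128 pages, as set up for z13 below */
#define EXAMPLE_COLOR_BITS	(EXAMPLE_ALIGN_MASK << EXAMPLE_PAGE_SHIFT)	/* 0x7f000: bits 12..18 */
#define EXAMPLE_GRANULE		(EXAMPLE_COLOR_BITS + (1UL << EXAMPLE_PAGE_SHIFT))	/* 0x80000 = 512 KiB */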
-rw-r--r--	arch/s390/include/asm/elf.h	8
-rw-r--r--	arch/s390/include/asm/pgtable.h	4
-rw-r--r--	arch/s390/kernel/process.c	10
-rw-r--r--	arch/s390/mm/init.c	9
-rw-r--r--	arch/s390/mm/mmap.c	142
5 files changed, 155 insertions(+), 18 deletions(-)
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index f6e43d39e3d8..c9df40b5c0ac 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -163,8 +163,8 @@ extern unsigned int vdso_enabled;
    the loader. We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk. */
 
-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE	(randomize_et_dyn(STACK_TOP / 3 * 2))
+extern unsigned long randomize_et_dyn(void);
+#define ELF_ET_DYN_BASE	randomize_et_dyn()
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
@@ -209,7 +209,9 @@ do { \
 } while (0)
 #endif /* CONFIG_COMPAT */
 
-#define STACK_RND_MASK	0x7ffUL
+extern unsigned long mmap_rnd_mask;
+
+#define STACK_RND_MASK	(mmap_rnd_mask)
 
 #define ARCH_DLINFO \
 do { \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5e102422c9ab..b8641b41e19c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1779,6 +1779,10 @@ extern int s390_enable_sie(void);
 extern int s390_enable_skey(void);
 extern void s390_reset_cmma(struct mm_struct *mm);
 
+/* s390 has a private copy of get unmapped area to deal with cache synonyms */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 /*
  * No page table caches to initialise
  */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index aa7a83948c7b..2c1eb4f3aaf5 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -243,13 +243,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	ret = PAGE_ALIGN(mm->brk + brk_rnd());
 	return (ret > mm->brk) ? ret : mm->brk;
 }
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
-	unsigned long ret;
-
-	if (!(current->flags & PF_RANDOMIZE))
-		return base;
-	ret = PAGE_ALIGN(base + brk_rnd());
-	return (ret > base) ? ret : base;
-}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7235e01fd67..d35b15113b17 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -71,13 +71,16 @@ static void __init setup_zero_pages(void)
 		break;
 	case 0x2827:	/* zEC12 */
 	case 0x2828:	/* zEC12 */
-	default:
 		order = 5;
 		break;
+	case 0x2964:	/* z13 */
+	default:
+		order = 7;
+		break;
 	}
 	/* Limit number of empty zero pages for small memory sizes */
-	if (order > 2 && totalram_pages <= 16384)
-		order = 2;
+	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
+		order--;
 
 	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!empty_zero_page)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 9b436c21195e..d008f638b2cd 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -28,8 +28,12 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/compat.h>
+#include <linux/security.h>
 #include <asm/pgalloc.h>
 
+unsigned long mmap_rnd_mask;
+unsigned long mmap_align_mask;
+
 static unsigned long stack_maxrandom_size(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
@@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
 		return 0;
-	/* 8MB randomization for mmap_base */
-	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+	if (is_32bit_task())
+		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
+	else
+		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
 }
 
 static unsigned long mmap_base_legacy(void)
@@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void)
 	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
 }
 
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	struct vm_unmapped_area_info info;
+	int do_color_align;
+
+	if (len > TASK_SIZE - mmap_min_addr)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = !is_32bit_task();
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			       const unsigned long len, const unsigned long pgoff,
+			       const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
+	int do_color_align;
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE - mmap_min_addr)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	/* requesting a specific address */
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = !is_32bit_task();
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
+
+	return addr;
+}
+
+unsigned long randomize_et_dyn(void)
+{
+	unsigned long base;
+
+	base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+	return base + mmap_rnd();
+}
+
 #ifndef CONFIG_64BIT
 
 /*
@@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	}
 }
 
+static int __init setup_mmap_rnd(void)
+{
+	struct cpuid cpu_id;
+
+	get_cpu_id(&cpu_id);
+	switch (cpu_id.machine) {
+	case 0x9672:
+	case 0x2064:
+	case 0x2066:
+	case 0x2084:
+	case 0x2086:
+	case 0x2094:
+	case 0x2096:
+	case 0x2097:
+	case 0x2098:
+	case 0x2817:
+	case 0x2818:
+	case 0x2827:
+	case 0x2828:
+		mmap_rnd_mask = 0x7ffUL;
+		mmap_align_mask = 0UL;
+		break;
+	case 0x2964:	/* z13 */
+	default:
+		mmap_rnd_mask = 0x3ff80UL;
+		mmap_align_mask = 0x7fUL;
+		break;
+	}
+	return 0;
+}
+early_initcall(setup_mmap_rnd);
+
 #endif
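
A minimal userspace sketch (not part of the patch) of the colouring that the new arch_get_unmapped_area() requests through align_mask/align_offset. The helper color_align() and the sample numbers are invented for illustration; it is a simplified model of the adjustment vm_unmapped_area() applies to a candidate gap.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define COLOR_MASK	(0x7fUL << PAGE_SHIFT)	/* mmap_align_mask << PAGE_SHIFT on z13 */

/* bump a page-aligned candidate address to the next colour-matching slot */
static unsigned long color_align(unsigned long addr, unsigned long align_offset)
{
	return addr + ((align_offset - addr) & COLOR_MASK);
}

int main(void)
{
	unsigned long pgoff = 0x123;		/* page index within the mapped file */
	unsigned long align_offset = pgoff << PAGE_SHIFT;
	unsigned long candidate = 0x3ffff000UL;	/* some page-aligned free gap */
	unsigned long addr = color_align(candidate, align_offset);

	/* bits 2^12..2^18 of the address now equal those of the file offset */
	assert((addr & COLOR_MASK) == (align_offset & COLOR_MASK));
	printf("candidate %#lx -> %#lx, colour %#lx\n",
	       candidate, addr, (addr & COLOR_MASK) >> PAGE_SHIFT);
	return 0;
}

Any two processes that map the same page of a shared file this way agree in bits 12..18 of the virtual address, so the page lands in the same cache colour everywhere and the z13 cache sees no aliases.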