diff options
author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2015-01-14 11:51:17 -0500 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2015-01-22 06:15:59 -0500 |
commit | 1f6b83e5e4d3aed46eac1d219322fba9c7341cd8 (patch) | |
tree | a7839cd769ec8637746d5e20a7eb7117373caaf0 /arch/s390/mm | |
parent | f8b2dcbd9e6d1479b9b5a9e9e78bbaf783bde819 (diff) |
s390: avoid z13 cache aliasing
Avoid cache aliasing on z13 by aligning shared objects to multiples
of 512K. The virtual addresses of a page from a shared file need
to have identical bits in the range 2^12 to 2^18.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r-- | arch/s390/mm/init.c | 9 | ||||
-rw-r--r-- | arch/s390/mm/mmap.c | 142 |
2 files changed, 146 insertions, 5 deletions
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index c7235e01fd67..d35b15113b17 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -71,13 +71,16 @@ static void __init setup_zero_pages(void) | |||
71 | break; | 71 | break; |
72 | case 0x2827: /* zEC12 */ | 72 | case 0x2827: /* zEC12 */ |
73 | case 0x2828: /* zEC12 */ | 73 | case 0x2828: /* zEC12 */ |
74 | default: | ||
75 | order = 5; | 74 | order = 5; |
76 | break; | 75 | break; |
76 | case 0x2964: /* z13 */ | ||
77 | default: | ||
78 | order = 7; | ||
79 | break; | ||
77 | } | 80 | } |
78 | /* Limit number of empty zero pages for small memory sizes */ | 81 | /* Limit number of empty zero pages for small memory sizes */ |
79 | if (order > 2 && totalram_pages <= 16384) | 82 | while (order > 2 && (totalram_pages >> 10) < (1UL << order)) |
80 | order = 2; | 83 | order--; |
81 | 84 | ||
82 | empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | 85 | empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
83 | if (!empty_zero_page) | 86 | if (!empty_zero_page) |
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 9b436c21195e..d008f638b2cd 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c | |||
@@ -28,8 +28,12 @@ | |||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/random.h> | 29 | #include <linux/random.h> |
30 | #include <linux/compat.h> | 30 | #include <linux/compat.h> |
31 | #include <linux/security.h> | ||
31 | #include <asm/pgalloc.h> | 32 | #include <asm/pgalloc.h> |
32 | 33 | ||
34 | unsigned long mmap_rnd_mask; | ||
35 | unsigned long mmap_align_mask; | ||
36 | |||
33 | static unsigned long stack_maxrandom_size(void) | 37 | static unsigned long stack_maxrandom_size(void) |
34 | { | 38 | { |
35 | if (!(current->flags & PF_RANDOMIZE)) | 39 | if (!(current->flags & PF_RANDOMIZE)) |
@@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void) | |||
60 | { | 64 | { |
61 | if (!(current->flags & PF_RANDOMIZE)) | 65 | if (!(current->flags & PF_RANDOMIZE)) |
62 | return 0; | 66 | return 0; |
63 | /* 8MB randomization for mmap_base */ | 67 | if (is_32bit_task()) |
64 | return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; | 68 | return (get_random_int() & 0x7ff) << PAGE_SHIFT; |
69 | else | ||
70 | return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT; | ||
65 | } | 71 | } |
66 | 72 | ||
67 | static unsigned long mmap_base_legacy(void) | 73 | static unsigned long mmap_base_legacy(void) |
@@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void) | |||
81 | return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap; | 87 | return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap; |
82 | } | 88 | } |
83 | 89 | ||
90 | unsigned long | ||
91 | arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
92 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
93 | { | ||
94 | struct mm_struct *mm = current->mm; | ||
95 | struct vm_area_struct *vma; | ||
96 | struct vm_unmapped_area_info info; | ||
97 | int do_color_align; | ||
98 | |||
99 | if (len > TASK_SIZE - mmap_min_addr) | ||
100 | return -ENOMEM; | ||
101 | |||
102 | if (flags & MAP_FIXED) | ||
103 | return addr; | ||
104 | |||
105 | if (addr) { | ||
106 | addr = PAGE_ALIGN(addr); | ||
107 | vma = find_vma(mm, addr); | ||
108 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && | ||
109 | (!vma || addr + len <= vma->vm_start)) | ||
110 | return addr; | ||
111 | } | ||
112 | |||
113 | do_color_align = 0; | ||
114 | if (filp || (flags & MAP_SHARED)) | ||
115 | do_color_align = !is_32bit_task(); | ||
116 | |||
117 | info.flags = 0; | ||
118 | info.length = len; | ||
119 | info.low_limit = mm->mmap_base; | ||
120 | info.high_limit = TASK_SIZE; | ||
121 | info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; | ||
122 | info.align_offset = pgoff << PAGE_SHIFT; | ||
123 | return vm_unmapped_area(&info); | ||
124 | } | ||
125 | |||
126 | unsigned long | ||
127 | arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | ||
128 | const unsigned long len, const unsigned long pgoff, | ||
129 | const unsigned long flags) | ||
130 | { | ||
131 | struct vm_area_struct *vma; | ||
132 | struct mm_struct *mm = current->mm; | ||
133 | unsigned long addr = addr0; | ||
134 | struct vm_unmapped_area_info info; | ||
135 | int do_color_align; | ||
136 | |||
137 | /* requested length too big for entire address space */ | ||
138 | if (len > TASK_SIZE - mmap_min_addr) | ||
139 | return -ENOMEM; | ||
140 | |||
141 | if (flags & MAP_FIXED) | ||
142 | return addr; | ||
143 | |||
144 | /* requesting a specific address */ | ||
145 | if (addr) { | ||
146 | addr = PAGE_ALIGN(addr); | ||
147 | vma = find_vma(mm, addr); | ||
148 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && | ||
149 | (!vma || addr + len <= vma->vm_start)) | ||
150 | return addr; | ||
151 | } | ||
152 | |||
153 | do_color_align = 0; | ||
154 | if (filp || (flags & MAP_SHARED)) | ||
155 | do_color_align = !is_32bit_task(); | ||
156 | |||
157 | info.flags = VM_UNMAPPED_AREA_TOPDOWN; | ||
158 | info.length = len; | ||
159 | info.low_limit = max(PAGE_SIZE, mmap_min_addr); | ||
160 | info.high_limit = mm->mmap_base; | ||
161 | info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; | ||
162 | info.align_offset = pgoff << PAGE_SHIFT; | ||
163 | addr = vm_unmapped_area(&info); | ||
164 | |||
165 | /* | ||
166 | * A failed mmap() very likely causes application failure, | ||
167 | * so fall back to the bottom-up function here. This scenario | ||
168 | * can happen with large stack limits and large mmap() | ||
169 | * allocations. | ||
170 | */ | ||
171 | if (addr & ~PAGE_MASK) { | ||
172 | VM_BUG_ON(addr != -ENOMEM); | ||
173 | info.flags = 0; | ||
174 | info.low_limit = TASK_UNMAPPED_BASE; | ||
175 | info.high_limit = TASK_SIZE; | ||
176 | addr = vm_unmapped_area(&info); | ||
177 | } | ||
178 | |||
179 | return addr; | ||
180 | } | ||
181 | |||
182 | unsigned long randomize_et_dyn(void) | ||
183 | { | ||
184 | unsigned long base; | ||
185 | |||
186 | base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT); | ||
187 | return base + mmap_rnd(); | ||
188 | } | ||
189 | |||
84 | #ifndef CONFIG_64BIT | 190 | #ifndef CONFIG_64BIT |
85 | 191 | ||
86 | /* | 192 | /* |
@@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | |||
177 | } | 283 | } |
178 | } | 284 | } |
179 | 285 | ||
286 | static int __init setup_mmap_rnd(void) | ||
287 | { | ||
288 | struct cpuid cpu_id; | ||
289 | |||
290 | get_cpu_id(&cpu_id); | ||
291 | switch (cpu_id.machine) { | ||
292 | case 0x9672: | ||
293 | case 0x2064: | ||
294 | case 0x2066: | ||
295 | case 0x2084: | ||
296 | case 0x2086: | ||
297 | case 0x2094: | ||
298 | case 0x2096: | ||
299 | case 0x2097: | ||
300 | case 0x2098: | ||
301 | case 0x2817: | ||
302 | case 0x2818: | ||
303 | case 0x2827: | ||
304 | case 0x2828: | ||
305 | mmap_rnd_mask = 0x7ffUL; | ||
306 | mmap_align_mask = 0UL; | ||
307 | break; | ||
308 | case 0x2964: /* z13 */ | ||
309 | default: | ||
310 | mmap_rnd_mask = 0x3ff80UL; | ||
311 | mmap_align_mask = 0x7fUL; | ||
312 | break; | ||
313 | } | ||
314 | return 0; | ||
315 | } | ||
316 | early_initcall(setup_mmap_rnd); | ||
317 | |||
180 | #endif | 318 | #endif |