 arch/powerpc/kernel/process.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0a3216433051..1168c5f440ab 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1165,7 +1165,22 @@ static inline unsigned long brk_rnd(void)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+	unsigned long base = mm->brk;
+	unsigned long ret;
+
+#ifdef CONFIG_PPC64
+	/*
+	 * If we are using 1TB segments and we are allowed to randomise
+	 * the heap, we can put it above 1TB so it is backed by a 1TB
+	 * segment. Otherwise the heap will be in the bottom 1TB
+	 * which always uses 256MB segments and this may result in a
+	 * performance penalty.
+	 */
+	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+#endif
+
+	ret = PAGE_ALIGN(base + brk_rnd());
 
 	if (ret < mm->brk)
 		return mm->brk;
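
For context, below is a minimal standalone sketch of the address arithmetic the hunk introduces. It is illustration only, not kernel code: it assumes SID_SHIFT_1T is 40 (so 1UL << SID_SHIFT_1T is the 1TB boundary), and clamp_brk_base() is a made-up helper mirroring the max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T) expression in the patch.

/*
 * Standalone sketch, not kernel code. Assumes SID_SHIFT_1T == 40,
 * i.e. 1UL << SID_SHIFT_1T is the 1TB boundary. clamp_brk_base()
 * is a hypothetical helper mirroring the max_t() clamp above.
 */
#include <stdio.h>

#define SID_SHIFT_1T 40

static unsigned long clamp_brk_base(unsigned long brk)
{
	unsigned long one_tb = 1UL << SID_SHIFT_1T;

	/* push the randomisation base up to at least 1TB */
	return brk > one_tb ? brk : one_tb;
}

int main(void)
{
	/* a heap starting below 1TB is lifted to the 1TB mark */
	printf("%#lx -> %#lx\n", 0x10000000UL, clamp_brk_base(0x10000000UL));
	/* a heap already above 1TB is left where it is */
	printf("%#lx -> %#lx\n", 0x20000000000UL, clamp_brk_base(0x20000000000UL));
	return 0;
}

The effect, per the comment in the patch, is that on a 64-bit task using 1TB segments the randomised brk lands at or above the 1TB mark and is therefore backed by 1TB segments, while a brk already above 1TB is left unchanged.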
