-rw-r--r--  arch/x86/include/asm/elf.h |  2
-rw-r--r--  arch/x86/mm/mmap.c         | 17
-rw-r--r--  arch/x86/mm/pat.c          | 12
3 files changed, 19 insertions, 12 deletions
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 83c1bc8d2e8a..456a304b8172 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -299,6 +299,8 @@ do { \
 
 #ifdef CONFIG_X86_32
 
+#define STACK_RND_MASK (0x7ff)
+
 #define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
 
 #define ARCH_DLINFO ARCH_DLINFO_IA32(vdso_enabled)
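
For context, the new STACK_RND_MASK bounds how far the 32-bit stack top can be randomized: the mask is applied in pages, so the ceiling works out to just under 8 MiB with 4 KiB pages. A standalone arithmetic check (PAGE_SHIFT of 12 is an assumption here, and the program below is illustrative, not part of the patch):

#include <stdio.h>

/* Values mirrored from the hunk above; PAGE_SHIFT = 12 assumes 4 KiB pages. */
#define STACK_RND_MASK	(0x7ff)
#define PAGE_SHIFT	12

int main(void)
{
	/* Same expression as stack_maxrandom_size() in the mmap.c hunk below. */
	unsigned int max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;

	/* 0x7ff << 12 = 0x7ff000 bytes, i.e. 8188 KiB of possible stack jitter. */
	printf("max stack randomization: %#x bytes (%u KiB)\n", max, max >> 10);
	return 0;
}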
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 165829600566..c8191defc38a 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -29,13 +29,26 @@
 #include <linux/random.h>
 #include <linux/limits.h>
 #include <linux/sched.h>
+#include <asm/elf.h>
+
+static unsigned int stack_maxrandom_size(void)
+{
+	unsigned int max = 0;
+	if ((current->flags & PF_RANDOMIZE) &&
+		!(current->personality & ADDR_NO_RANDOMIZE)) {
+		max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
+	}
+
+	return max;
+}
+
 
 /*
  * Top of mmap area (just below the process stack).
  *
- * Leave an at least ~128 MB hole.
+ * Leave an at least ~128 MB hole with possible stack randomization.
  */
-#define MIN_GAP (128*1024*1024)
+#define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
 #define MAX_GAP (TASK_SIZE/6*5)
 
 /*
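
With stack_maxrandom_size() folded into MIN_GAP, the gap reserved below TASK_SIZE is still clamped to [MIN_GAP, MAX_GAP] before the mmap base is placed; the point of the change is that a fully randomized stack can no longer eat into the ~128 MB hole. A standalone sketch of that clamp (TASK_SIZE, the 0x7ff000 randomization bound, and clamp_gap() itself are illustrative assumptions, not the kernel's mmap_base()):

#include <stdio.h>

#define TASK_SIZE	0xC0000000UL	/* assumed 3 GiB user split on 32-bit */
#define STACK_MAXRAND	0x7ff000UL	/* upper bound from stack_maxrandom_size() */
#define MIN_GAP		(128UL*1024*1024 + STACK_MAXRAND)
#define MAX_GAP		(TASK_SIZE/6*5)

/* Hypothetical helper mirroring how the gap below TASK_SIZE is bounded. */
static unsigned long clamp_gap(unsigned long stack_rlimit)
{
	unsigned long gap = stack_rlimit;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	return gap;
}

int main(void)
{
	/* An 8 MiB RLIMIT_STACK is lifted to MIN_GAP, so the mmap area still
	 * starts below even a maximally randomized stack. */
	printf("gap for 8 MiB rlimit: %#lx\n", clamp_gap(8UL * 1024 * 1024));
	printf("gap for 1 GiB rlimit: %#lx\n", clamp_gap(1UL << 30));
	return 0;
}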
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index d7ebc3a10f2f..7257cf3decf9 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -424,17 +424,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
-	entry = memtype_rb_search(&memtype_rbroot, new->start);
-	if (likely(entry != NULL)) {
-		/* To work correctly with list_for_each_entry_continue */
-		entry = list_entry(entry->nd.prev, struct memtype, nd);
-	} else {
-		entry = list_entry(&memtype_list, struct memtype, nd);
-	}
-
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
-	list_for_each_entry_continue(entry, &memtype_list, nd) {
+	list_for_each_entry(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
 			break;
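
The deleted rbtree lookup only existed to seed list_for_each_entry_continue() with a starting element; the replacement walks the whole list from its head with list_for_each_entry(). A minimal userspace sketch of the difference between the two iteration styles (the list helpers below are simplified stand-ins for <linux/list.h>, not the kernel macros):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the <linux/list.h> helpers used in pat.c. */
struct list_head { struct list_head *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Visits every element, starting right after the list head. */
#define list_for_each_entry(pos, head, type, member)			\
	for (pos = list_entry((head)->next, type, member);		\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, type, member))

/* Resumes the walk *after* whatever element pos already points at. */
#define list_for_each_entry_continue(pos, head, type, member)		\
	for (pos = list_entry(pos->member.next, type, member);		\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, type, member))

struct memtype { unsigned long start; struct list_head nd; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct memtype a = { 0x1000 }, b = { 0x2000 }, c = { 0x3000 };
	struct memtype *items[] = { &a, &b, &c }, *pos;
	int i;

	/* Build head -> a -> b -> c by hand (tail insertion). */
	for (i = 0; i < 3; i++) {
		items[i]->nd.next = &head;
		items[i]->nd.prev = head.prev;
		head.prev->next = &items[i]->nd;
		head.prev = &items[i]->nd;
	}

	list_for_each_entry(pos, &head, struct memtype, nd)
		printf("full walk:        %#lx\n", pos->start);	/* a, b, c */

	pos = &a;
	list_for_each_entry_continue(pos, &head, struct memtype, nd)
		printf("continue after a: %#lx\n", pos->start);	/* b, c */

	return 0;
}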
@@ -532,7 +524,7 @@ int free_memtype(u64 start, u64 end)
 	 * in sorted start address
 	 */
 	saved_entry = entry;
-	list_for_each_entry(entry, &memtype_list, nd) {
+	list_for_each_entry_from(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
 			rb_erase(&entry->rb, &memtype_rbroot);
 			list_del(&entry->nd);