aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2009-09-17 23:58:11 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-09-17 23:58:11 -0400
commitca043a66ae48c74fa628ec92178f7a54f5b9a106 (patch)
tree37e9019bb99ed0f59debc426456e71befd4b7a9c /arch/x86/mm
parent1218259b2d09c79ed1113d3a6dbb9a1d6391f5cb (diff)
parent3bb045f1e2e51124200ef043256df4c7ad86bebd (diff)
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, pat: don't use rb-tree based lookup in reserve_memtype()
  x86: Increase MIN_GAP to include randomized stack
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--arch/x86/mm/mmap.c17
-rw-r--r--arch/x86/mm/pat.c12
2 files changed, 17 insertions, 12 deletions
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 165829600566..c8191defc38a 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -29,13 +29,26 @@
29#include <linux/random.h> 29#include <linux/random.h>
30#include <linux/limits.h> 30#include <linux/limits.h>
31#include <linux/sched.h> 31#include <linux/sched.h>
32#include <asm/elf.h>
33
34static unsigned int stack_maxrandom_size(void)
35{
36 unsigned int max = 0;
37 if ((current->flags & PF_RANDOMIZE) &&
38 !(current->personality & ADDR_NO_RANDOMIZE)) {
39 max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
40 }
41
42 return max;
43}
44
32 45
33/* 46/*
34 * Top of mmap area (just below the process stack). 47 * Top of mmap area (just below the process stack).
35 * 48 *
36 * Leave an at least ~128 MB hole. 49 * Leave an at least ~128 MB hole with possible stack randomization.
37 */ 50 */
38#define MIN_GAP (128*1024*1024) 51#define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
39#define MAX_GAP (TASK_SIZE/6*5) 52#define MAX_GAP (TASK_SIZE/6*5)
40 53
41/* 54/*
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index d7ebc3a10f2f..7257cf3decf9 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -424,17 +424,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
424 424
425 spin_lock(&memtype_lock); 425 spin_lock(&memtype_lock);
426 426
427 entry = memtype_rb_search(&memtype_rbroot, new->start);
428 if (likely(entry != NULL)) {
429 /* To work correctly with list_for_each_entry_continue */
430 entry = list_entry(entry->nd.prev, struct memtype, nd);
431 } else {
432 entry = list_entry(&memtype_list, struct memtype, nd);
433 }
434
435 /* Search for existing mapping that overlaps the current range */ 427 /* Search for existing mapping that overlaps the current range */
436 where = NULL; 428 where = NULL;
437 list_for_each_entry_continue(entry, &memtype_list, nd) { 429 list_for_each_entry(entry, &memtype_list, nd) {
438 if (end <= entry->start) { 430 if (end <= entry->start) {
439 where = entry->nd.prev; 431 where = entry->nd.prev;
440 break; 432 break;
@@ -532,7 +524,7 @@ int free_memtype(u64 start, u64 end)
532 * in sorted start address 524 * in sorted start address
533 */ 525 */
534 saved_entry = entry; 526 saved_entry = entry;
535 list_for_each_entry(entry, &memtype_list, nd) { 527 list_for_each_entry_from(entry, &memtype_list, nd) {
536 if (entry->start == start && entry->end == end) { 528 if (entry->start == start && entry->end == end) {
537 rb_erase(&entry->rb, &memtype_rbroot); 529 rb_erase(&entry->rb, &memtype_rbroot);
538 list_del(&entry->nd); 530 list_del(&entry->nd);