diff options
author | Jiri Kosina <jkosina@suse.cz> | 2008-01-30 07:31:07 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:31:07 -0500 |
commit | cc503c1b43e002e3f1fed70f46d947e2bf349bb6 (patch) | |
tree | df0d77b7bccf0148c7b7cdd0363354499b259f99 /arch/x86 | |
parent | 82f74e7159749cc511ebf5954a7b9ea6ad634949 (diff) |
x86: PIE executable randomization
This patch maps the main executable of (specially compiled/linked -pie/-fpie)
ET_DYN binaries onto a random address (in cases in which mmap() is allowed
to perform a randomization).
The code has been extracted from Ingo's exec-shield patch
http://people.redhat.com/mingo/exec-shield/
[akpm@linux-foundation.org: fix used-uninitialised warning]
[kamezawa.hiroyu@jp.fujitsu.com: fixed ia32 ELF on x86_64 handling]
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Roland McGrath <roland@redhat.com>
Cc: Jakub Jelinek <jakub@redhat.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/sys_x86_64.c | 98 | ||||
-rw-r--r-- | arch/x86/mm/mmap_64.c | 119 |
2 files changed, 200 insertions, 17 deletions
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 907942ee6e76..95485e63fd2f 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/file.h> | 12 | #include <linux/file.h> |
13 | #include <linux/utsname.h> | 13 | #include <linux/utsname.h> |
14 | #include <linux/personality.h> | 14 | #include <linux/personality.h> |
15 | #include <linux/random.h> | ||
15 | 16 | ||
16 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
17 | #include <asm/ia32.h> | 18 | #include <asm/ia32.h> |
@@ -65,6 +66,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
65 | unsigned long *end) | 66 | unsigned long *end) |
66 | { | 67 | { |
67 | if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { | 68 | if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { |
69 | unsigned long new_begin; | ||
68 | /* This is usually used needed to map code in small | 70 | /* This is usually used needed to map code in small |
69 | model, so it needs to be in the first 31bit. Limit | 71 | model, so it needs to be in the first 31bit. Limit |
70 | it to that. This means we need to move the | 72 | it to that. This means we need to move the |
@@ -74,6 +76,11 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
74 | of playground for now. -AK */ | 76 | of playground for now. -AK */ |
75 | *begin = 0x40000000; | 77 | *begin = 0x40000000; |
76 | *end = 0x80000000; | 78 | *end = 0x80000000; |
79 | if (current->flags & PF_RANDOMIZE) { | ||
80 | new_begin = randomize_range(*begin, *begin + 0x02000000, 0); | ||
81 | if (new_begin) | ||
82 | *begin = new_begin; | ||
83 | } | ||
77 | } else { | 84 | } else { |
78 | *begin = TASK_UNMAPPED_BASE; | 85 | *begin = TASK_UNMAPPED_BASE; |
79 | *end = TASK_SIZE; | 86 | *end = TASK_SIZE; |
@@ -143,6 +150,97 @@ full_search: | |||
143 | } | 150 | } |
144 | } | 151 | } |
145 | 152 | ||
153 | |||
154 | unsigned long | ||
155 | arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | ||
156 | const unsigned long len, const unsigned long pgoff, | ||
157 | const unsigned long flags) | ||
158 | { | ||
159 | struct vm_area_struct *vma; | ||
160 | struct mm_struct *mm = current->mm; | ||
161 | unsigned long addr = addr0; | ||
162 | |||
163 | /* requested length too big for entire address space */ | ||
164 | if (len > TASK_SIZE) | ||
165 | return -ENOMEM; | ||
166 | |||
167 | if (flags & MAP_FIXED) | ||
168 | return addr; | ||
169 | |||
170 | /* for MAP_32BIT mappings we force the legact mmap base */ | ||
171 | if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) | ||
172 | goto bottomup; | ||
173 | |||
174 | /* requesting a specific address */ | ||
175 | if (addr) { | ||
176 | addr = PAGE_ALIGN(addr); | ||
177 | vma = find_vma(mm, addr); | ||
178 | if (TASK_SIZE - len >= addr && | ||
179 | (!vma || addr + len <= vma->vm_start)) | ||
180 | return addr; | ||
181 | } | ||
182 | |||
183 | /* check if free_area_cache is useful for us */ | ||
184 | if (len <= mm->cached_hole_size) { | ||
185 | mm->cached_hole_size = 0; | ||
186 | mm->free_area_cache = mm->mmap_base; | ||
187 | } | ||
188 | |||
189 | /* either no address requested or can't fit in requested address hole */ | ||
190 | addr = mm->free_area_cache; | ||
191 | |||
192 | /* make sure it can fit in the remaining address space */ | ||
193 | if (addr > len) { | ||
194 | vma = find_vma(mm, addr-len); | ||
195 | if (!vma || addr <= vma->vm_start) | ||
196 | /* remember the address as a hint for next time */ | ||
197 | return (mm->free_area_cache = addr-len); | ||
198 | } | ||
199 | |||
200 | if (mm->mmap_base < len) | ||
201 | goto bottomup; | ||
202 | |||
203 | addr = mm->mmap_base-len; | ||
204 | |||
205 | do { | ||
206 | /* | ||
207 | * Lookup failure means no vma is above this address, | ||
208 | * else if new region fits below vma->vm_start, | ||
209 | * return with success: | ||
210 | */ | ||
211 | vma = find_vma(mm, addr); | ||
212 | if (!vma || addr+len <= vma->vm_start) | ||
213 | /* remember the address as a hint for next time */ | ||
214 | return (mm->free_area_cache = addr); | ||
215 | |||
216 | /* remember the largest hole we saw so far */ | ||
217 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
218 | mm->cached_hole_size = vma->vm_start - addr; | ||
219 | |||
220 | /* try just below the current vma->vm_start */ | ||
221 | addr = vma->vm_start-len; | ||
222 | } while (len < vma->vm_start); | ||
223 | |||
224 | bottomup: | ||
225 | /* | ||
226 | * A failed mmap() very likely causes application failure, | ||
227 | * so fall back to the bottom-up function here. This scenario | ||
228 | * can happen with large stack limits and large mmap() | ||
229 | * allocations. | ||
230 | */ | ||
231 | mm->cached_hole_size = ~0UL; | ||
232 | mm->free_area_cache = TASK_UNMAPPED_BASE; | ||
233 | addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); | ||
234 | /* | ||
235 | * Restore the topdown base: | ||
236 | */ | ||
237 | mm->free_area_cache = mm->mmap_base; | ||
238 | mm->cached_hole_size = ~0UL; | ||
239 | |||
240 | return addr; | ||
241 | } | ||
242 | |||
243 | |||
146 | asmlinkage long sys_uname(struct new_utsname __user * name) | 244 | asmlinkage long sys_uname(struct new_utsname __user * name) |
147 | { | 245 | { |
148 | int err; | 246 | int err; |
diff --git a/arch/x86/mm/mmap_64.c b/arch/x86/mm/mmap_64.c index ffb71a31bb6e..8cf03ea651f8 100644 --- a/arch/x86/mm/mmap_64.c +++ b/arch/x86/mm/mmap_64.c | |||
@@ -1,32 +1,117 @@ | |||
1 | /* Copyright 2005 Andi Kleen, SuSE Labs. | 1 | /* |
2 | * Licensed under GPL, v.2 | 2 | * linux/arch/x86-64/mm/mmap.c |
3 | * | ||
4 | * flexible mmap layout support | ||
5 | * | ||
6 | * Based on code by Ingo Molnar and Andi Kleen, copyrighted | ||
7 | * as follows: | ||
8 | * | ||
9 | * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. | ||
10 | * All Rights Reserved. | ||
11 | * Copyright 2005 Andi Kleen, SUSE Labs. | ||
12 | * Copyright 2007 Jiri Kosina, SUSE Labs. | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
27 | * | ||
3 | */ | 28 | */ |
29 | |||
30 | #include <linux/personality.h> | ||
4 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
5 | #include <linux/sched.h> | ||
6 | #include <linux/random.h> | 32 | #include <linux/random.h> |
33 | #include <linux/limits.h> | ||
34 | #include <linux/sched.h> | ||
7 | #include <asm/ia32.h> | 35 | #include <asm/ia32.h> |
8 | 36 | ||
9 | /* Notebook: move the mmap code from sys_x86_64.c over here. */ | 37 | /* |
38 | * Top of mmap area (just below the process stack). | ||
39 | * | ||
40 | * Leave an at least ~128 MB hole. | ||
41 | */ | ||
42 | #define MIN_GAP (128*1024*1024) | ||
43 | #define MAX_GAP (TASK_SIZE/6*5) | ||
10 | 44 | ||
11 | void arch_pick_mmap_layout(struct mm_struct *mm) | 45 | static inline unsigned long mmap_base(void) |
46 | { | ||
47 | unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; | ||
48 | |||
49 | if (gap < MIN_GAP) | ||
50 | gap = MIN_GAP; | ||
51 | else if (gap > MAX_GAP) | ||
52 | gap = MAX_GAP; | ||
53 | |||
54 | return TASK_SIZE - (gap & PAGE_MASK); | ||
55 | } | ||
56 | |||
57 | static inline int mmap_is_32(void) | ||
12 | { | 58 | { |
13 | #ifdef CONFIG_IA32_EMULATION | 59 | #ifdef CONFIG_IA32_EMULATION |
14 | if (current_thread_info()->flags & _TIF_IA32) | 60 | if (test_thread_flag(TIF_IA32)) |
15 | return ia32_pick_mmap_layout(mm); | 61 | return 1; |
16 | #endif | 62 | #endif |
17 | mm->mmap_base = TASK_UNMAPPED_BASE; | 63 | return 0; |
64 | } | ||
65 | |||
66 | static inline int mmap_is_legacy(void) | ||
67 | { | ||
68 | if (current->personality & ADDR_COMPAT_LAYOUT) | ||
69 | return 1; | ||
70 | |||
71 | if (current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) | ||
72 | return 1; | ||
73 | |||
74 | return sysctl_legacy_va_layout; | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * This function, called very early during the creation of a new | ||
79 | * process VM image, sets up which VM layout function to use: | ||
80 | */ | ||
81 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
82 | { | ||
83 | int rnd = 0; | ||
18 | if (current->flags & PF_RANDOMIZE) { | 84 | if (current->flags & PF_RANDOMIZE) { |
19 | /* | 85 | /* |
20 | * Add 28bit randomness which is about 40bits of | 86 | * Add 28bit randomness which is about 40bits of address space |
21 | * address space because mmap base has to be page | 87 | * because mmap base has to be page aligned. |
22 | * aligned. or ~1/128 of the total user VM (total | 88 | * or ~1/128 of the total user VM |
23 | * user address space is 47bits) | 89 | * (total user address space is 47bits) |
24 | */ | 90 | */ |
25 | unsigned rnd = get_random_int() & 0xfffffff; | 91 | rnd = get_random_int() & 0xfffffff; |
92 | } | ||
26 | 93 | ||
27 | mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT; | 94 | /* |
95 | * Fall back to the standard layout if the personality | ||
96 | * bit is set, or if the expected stack growth is unlimited: | ||
97 | */ | ||
98 | if (mmap_is_32()) { | ||
99 | #ifdef CONFIG_IA32_EMULATION | ||
100 | /* ia32_pick_mmap_layout has its own. */ | ||
101 | return ia32_pick_mmap_layout(mm); | ||
102 | #endif | ||
103 | } else if(mmap_is_legacy()) { | ||
104 | mm->mmap_base = TASK_UNMAPPED_BASE; | ||
105 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
106 | mm->unmap_area = arch_unmap_area; | ||
107 | } else { | ||
108 | mm->mmap_base = mmap_base(); | ||
109 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | ||
110 | mm->unmap_area = arch_unmap_area_topdown; | ||
111 | if (current->flags & PF_RANDOMIZE) | ||
112 | rnd = -rnd; | ||
113 | } | ||
114 | if (current->flags & PF_RANDOMIZE) { | ||
115 | mm->mmap_base += ((long)rnd) << PAGE_SHIFT; | ||
28 | } | 116 | } |
29 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
30 | mm->unmap_area = arch_unmap_area; | ||
31 | } | 117 | } |
32 | |||