author		Jiri Kosina <jkosina@suse.cz>	2008-01-30 07:31:07 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:31:07 -0500
commit		cc503c1b43e002e3f1fed70f46d947e2bf349bb6 (patch)
tree		df0d77b7bccf0148c7b7cdd0363354499b259f99
parent		82f74e7159749cc511ebf5954a7b9ea6ad634949 (diff)
x86: PIE executable randomization
Map the main executable of (specially compiled/linked -pie/-fpie) ET_DYN
binaries onto a random address (in cases in which mmap() is allowed to
perform a randomization).
The code has been extracted from Ingo's exec-shield patch:
http://people.redhat.com/mingo/exec-shield/
[akpm@linux-foundation.org: fix used-uninitialised warning]
[kamezawa.hiroyu@jp.fujitsu.com: fixed ia32 ELF on x86_64 handling]
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Roland McGrath <roland@redhat.com>
Cc: Jakub Jelinek <jakub@redhat.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	arch/ia64/ia32/binfmt_elf32.c	2
-rw-r--r--	arch/x86/kernel/sys_x86_64.c	98
-rw-r--r--	arch/x86/mm/mmap_64.c		119
-rw-r--r--	fs/binfmt_elf.c			107
-rw-r--r--	include/asm-x86/pgtable_64.h	1
5 files changed, 287 insertions, 40 deletions
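For context, and not part of the commit: once the main executable of a -pie/-fpie binary is loaded at a randomized base, the effect is directly observable from userspace. A minimal sketch, assuming a gcc toolchain and address-space randomization enabled (the file name pie_demo.c is hypothetical):

/* pie_demo.c - print the address main() was loaded at.
 * Build: gcc -fpie -pie pie_demo.c -o pie_demo
 * With this patch applied (and PF_RANDOMIZE set for the process),
 * successive runs of a -pie binary print different addresses. */
#include <stdio.h>

int main(void)
{
	printf("main loaded at %p\n", (void *)main);
	return 0;
}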
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 3e35987af458..2a662215359c 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -222,7 +222,7 @@ elf32_set_personality (void)
 }
 
 static unsigned long
-elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
+elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type, unsigned long unused)
 {
 	unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
 
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 907942ee6e76..95485e63fd2f 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -12,6 +12,7 @@
 #include <linux/file.h>
 #include <linux/utsname.h>
 #include <linux/personality.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/ia32.h>
@@ -65,6 +66,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
 			   unsigned long *end)
 {
 	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+		unsigned long new_begin;
 		/* This is usually used needed to map code in small
 		   model, so it needs to be in the first 31bit. Limit
 		   it to that. This means we need to move the
@@ -74,6 +76,11 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
 		   of playground for now. -AK */
 		*begin = 0x40000000;
 		*end = 0x80000000;
+		if (current->flags & PF_RANDOMIZE) {
+			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
+			if (new_begin)
+				*begin = new_begin;
+		}
 	} else {
 		*begin = TASK_UNMAPPED_BASE;
 		*end = TASK_SIZE;
@@ -143,6 +150,97 @@ full_search:
 	}
 }
 
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	/* for MAP_32BIT mappings we force the legact mmap base */
+	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+		goto bottomup;
+
+	/* requesting a specific address */
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+				(!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache;
+
+	/* make sure it can fit in the remaining address space */
+	if (addr > len) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start)
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+	}
+
+	if (mm->mmap_base < len)
+		goto bottomup;
+
+	addr = mm->mmap_base-len;
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (!vma || addr+len <= vma->vm_start)
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start-len;
+	} while (len < vma->vm_start);
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
+
+
 asmlinkage long sys_uname(struct new_utsname __user * name)
 {
 	int err;
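The MAP_32BIT branch above shifts the start of the 31-bit search window by up to 32 MB (0x02000000 bytes) using randomize_range(), which returns a page-aligned address in the requested range, or 0 on failure (hence the fallback to the unshifted *begin). A userspace model of that arithmetic, with lrand48() standing in for the kernel's random source (randomize_range_model is a made-up name; the real helper lives in the kernel):

/* Model of the randomized MAP_32BIT window base (illustration only). */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_MASK (~0xfffUL)

static unsigned long randomize_range_model(unsigned long start,
					   unsigned long end,
					   unsigned long len)
{
	unsigned long range = end - len - start;

	/* pick a page-aligned address in [start, end - len) */
	return (start + ((unsigned long)lrand48() % range)) & PAGE_MASK;
}

int main(void)
{
	unsigned long begin = 0x40000000UL;

	srand48(time(NULL));
	/* same bounds as find_start_end(): shift by at most 32 MB */
	begin = randomize_range_model(begin, begin + 0x02000000, 0);
	printf("randomized MAP_32BIT window starts at 0x%lx\n", begin);
	return 0;
}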
diff --git a/arch/x86/mm/mmap_64.c b/arch/x86/mm/mmap_64.c
index ffb71a31bb6e..8cf03ea651f8 100644
--- a/arch/x86/mm/mmap_64.c
+++ b/arch/x86/mm/mmap_64.c
@@ -1,32 +1,117 @@
-/* Copyright 2005 Andi Kleen, SuSE Labs.
- * Licensed under GPL, v.2
+/*
+ * linux/arch/x86-64/mm/mmap.c
+ *
+ * flexible mmap layout support
+ *
+ * Based on code by Ingo Molnar and Andi Kleen, copyrighted
+ * as follows:
+ *
+ * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ * Copyright 2005 Andi Kleen, SUSE Labs.
+ * Copyright 2007 Jiri Kosina, SUSE Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
  */
+
+#include <linux/personality.h>
 #include <linux/mm.h>
-#include <linux/sched.h>
 #include <linux/random.h>
+#include <linux/limits.h>
+#include <linux/sched.h>
 #include <asm/ia32.h>
 
-/* Notebook: move the mmap code from sys_x86_64.c over here. */
+/*
+ * Top of mmap area (just below the process stack).
+ *
+ * Leave an at least ~128 MB hole.
+ */
+#define MIN_GAP (128*1024*1024)
+#define MAX_GAP (TASK_SIZE/6*5)
 
-void arch_pick_mmap_layout(struct mm_struct *mm)
+static inline unsigned long mmap_base(void)
+{
+	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+
+	if (gap < MIN_GAP)
+		gap = MIN_GAP;
+	else if (gap > MAX_GAP)
+		gap = MAX_GAP;
+
+	return TASK_SIZE - (gap & PAGE_MASK);
+}
+
+static inline int mmap_is_32(void)
 {
 #ifdef CONFIG_IA32_EMULATION
-	if (current_thread_info()->flags & _TIF_IA32)
-		return ia32_pick_mmap_layout(mm);
+	if (test_thread_flag(TIF_IA32))
+		return 1;
 #endif
-	mm->mmap_base = TASK_UNMAPPED_BASE;
+	return 0;
+}
+
+static inline int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	if (current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
+		return 1;
+
+	return sysctl_legacy_va_layout;
+}
+
+/*
+ * This function, called very early during the creation of a new
+ * process VM image, sets up which VM layout function to use:
+ */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	int rnd = 0;
 	if (current->flags & PF_RANDOMIZE) {
 		/*
-		 * Add 28bit randomness which is about 40bits of
-		 * address space because mmap base has to be page
-		 * aligned. or ~1/128 of the total user VM (total
-		 * user address space is 47bits)
+		 * Add 28bit randomness which is about 40bits of address space
+		 * because mmap base has to be page aligned.
+		 * or ~1/128 of the total user VM
+		 * (total user address space is 47bits)
 		 */
-		unsigned rnd = get_random_int() & 0xfffffff;
+		rnd = get_random_int() & 0xfffffff;
+	}
 
-		mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT;
+	/*
+	 * Fall back to the standard layout if the personality
+	 * bit is set, or if the expected stack growth is unlimited:
+	 */
+	if (mmap_is_32()) {
+#ifdef CONFIG_IA32_EMULATION
+		/* ia32_pick_mmap_layout has its own. */
+		return ia32_pick_mmap_layout(mm);
+#endif
+	} else if(mmap_is_legacy()) {
+		mm->mmap_base = TASK_UNMAPPED_BASE;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+		mm->unmap_area = arch_unmap_area;
+	} else {
+		mm->mmap_base = mmap_base();
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		mm->unmap_area = arch_unmap_area_topdown;
+		if (current->flags & PF_RANDOMIZE)
+			rnd = -rnd;
+	}
+	if (current->flags & PF_RANDOMIZE) {
+		mm->mmap_base += ((long)rnd) << PAGE_SHIFT;
 	}
-	mm->get_unmapped_area = arch_get_unmapped_area;
-	mm->unmap_area = arch_unmap_area;
 }
-
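To make the entropy arithmetic in arch_pick_mmap_layout() concrete: 28 random bits select a page, and with 12-bit pages that spans 2^(28+12) = 2^40 bytes, roughly 1/128 of the 2^47-byte user address space. In the top-down case rnd is negated, so the base moves down from the stack gap rather than up from TASK_UNMAPPED_BASE. A standalone sketch of the top-down base computation follows; TASK_SIZE, PAGE_SHIFT, and the 8 MB stack rlimit are assumed x86-64 values, and the fixed rnd stands in for get_random_int():

/* Userspace model of mmap_base() plus the 28-bit randomization. */
#include <stdio.h>

#define TASK_SIZE	(1UL << 47)
#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define MIN_GAP		(128UL * 1024 * 1024)
#define MAX_GAP		(TASK_SIZE / 6 * 5)

int main(void)
{
	unsigned long gap = 8UL * 1024 * 1024;		/* typical RLIMIT_STACK */
	unsigned long rnd = 0x1234567UL & 0xfffffff;	/* stand-in for get_random_int() */
	unsigned long base;

	/* clamp the stack gap exactly as mmap_base() does */
	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	base = TASK_SIZE - (gap & PAGE_MASK);
	/* top-down layout: the kernel negates rnd, i.e. the base drops */
	base -= rnd << PAGE_SHIFT;
	printf("randomized top-down mmap base: 0x%lx\n", base);
	return 0;
}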
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 043a800c8f71..8193d24be159 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -45,7 +45,7 @@
 
 static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
 static int load_elf_library(struct file *);
-static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
+static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
 
 /*
  * If we don't support core dumping, then supply a NULL so we
@@ -298,33 +298,70 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 #ifndef elf_map
 
 static unsigned long elf_map(struct file *filep, unsigned long addr,
-		struct elf_phdr *eppnt, int prot, int type)
+		struct elf_phdr *eppnt, int prot, int type,
+		unsigned long total_size)
 {
 	unsigned long map_addr;
-	unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);
+	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
+	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
+	addr = ELF_PAGESTART(addr);
+	size = ELF_PAGEALIGN(size);
 
-	down_write(&current->mm->mmap_sem);
 	/* mmap() will return -EINVAL if given a zero size, but a
 	 * segment with zero filesize is perfectly valid */
-	if (eppnt->p_filesz + pageoffset)
-		map_addr = do_mmap(filep, ELF_PAGESTART(addr),
-				   eppnt->p_filesz + pageoffset, prot, type,
-				   eppnt->p_offset - pageoffset);
-	else
-		map_addr = ELF_PAGESTART(addr);
+	if (!size)
+		return addr;
+
+	down_write(&current->mm->mmap_sem);
+	/*
+	 * total_size is the size of the ELF (interpreter) image.
+	 * The _first_ mmap needs to know the full size, otherwise
+	 * randomization might put this image into an overlapping
+	 * position with the ELF binary image. (since size < total_size)
+	 * So we first map the 'big' image - and unmap the remainder at
+	 * the end. (which unmap is needed for ELF images with holes.)
+	 */
+	if (total_size) {
+		total_size = ELF_PAGEALIGN(total_size);
+		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
+		if (!BAD_ADDR(map_addr))
+			do_munmap(current->mm, map_addr+size, total_size-size);
+	} else
+		map_addr = do_mmap(filep, addr, size, prot, type, off);
+
 	up_write(&current->mm->mmap_sem);
 	return(map_addr);
 }
 
 #endif /* !elf_map */
 
+static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
+{
+	int i, first_idx = -1, last_idx = -1;
+
+	for (i = 0; i < nr; i++) {
+		if (cmds[i].p_type == PT_LOAD) {
+			last_idx = i;
+			if (first_idx == -1)
+				first_idx = i;
+		}
+	}
+	if (first_idx == -1)
+		return 0;
+
+	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
+				ELF_PAGESTART(cmds[first_idx].p_vaddr);
+}
+
+
 /* This is much more generalized than the library routine read function,
    so we keep this separate.  Technically the library read function
    is only provided so that we can read a.out libraries that have
    an ELF header */
 
 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
-		struct file *interpreter, unsigned long *interp_load_addr)
+		struct file *interpreter, unsigned long *interp_map_addr,
+		unsigned long no_base)
 {
 	struct elf_phdr *elf_phdata;
 	struct elf_phdr *eppnt;
@@ -332,6 +369,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 	int load_addr_set = 0;
 	unsigned long last_bss = 0, elf_bss = 0;
 	unsigned long error = ~0UL;
+	unsigned long total_size;
 	int retval, i, size;
 
 	/* First of all, some simple consistency checks */
@@ -370,6 +408,12 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 		goto out_close;
 	}
 
+	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
+	if (!total_size) {
+		error = -EINVAL;
+		goto out_close;
+	}
+
 	eppnt = elf_phdata;
 	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
 		if (eppnt->p_type == PT_LOAD) {
@@ -387,9 +431,14 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 			vaddr = eppnt->p_vaddr;
 			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
 				elf_type |= MAP_FIXED;
+			else if (no_base && interp_elf_ex->e_type == ET_DYN)
+				load_addr = -vaddr;
 
 			map_addr = elf_map(interpreter, load_addr + vaddr,
-					   eppnt, elf_prot, elf_type);
+					eppnt, elf_prot, elf_type, total_size);
+			total_size = 0;
+			if (!*interp_map_addr)
+				*interp_map_addr = map_addr;
 			error = map_addr;
 			if (BAD_ADDR(map_addr))
 				goto out_close;
@@ -455,8 +504,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 		goto out_close;
 	}
 
-	*interp_load_addr = load_addr;
-	error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;
+	error = load_addr;
 
 out_close:
 	kfree(elf_phdata);
@@ -553,7 +601,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	int elf_exec_fileno;
 	int retval, i;
 	unsigned int size;
-	unsigned long elf_entry, interp_load_addr = 0;
+	unsigned long elf_entry;
+	unsigned long interp_load_addr = 0;
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long reloc_func_desc = 0;
 	char passed_fileno[6];
@@ -825,9 +874,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	current->mm->start_stack = bprm->p;
 
 	/* Now we do a little grungy work by mmaping the ELF image into
-	   the correct location in memory.  At this point, we assume that
-	   the image should be loaded at fixed address, not at a variable
-	   address. */
+	   the correct location in memory. */
 	for(i = 0, elf_ppnt = elf_phdata;
 	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
 		int elf_prot = 0, elf_flags;
@@ -881,11 +928,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 			 * default mmap base, as well as whatever program they
 			 * might try to exec.  This is because the brk will
 			 * follow the loader, and is not movable.  */
+#ifdef CONFIG_X86
+			load_bias = 0;
+#else
 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+#endif
 		}
 
 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-				elf_prot, elf_flags);
+				elf_prot, elf_flags,0);
 		if (BAD_ADDR(error)) {
 			send_sig(SIGKILL, current, 0);
 			retval = IS_ERR((void *)error) ?
@@ -961,13 +1012,25 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	}
 
 	if (elf_interpreter) {
-		if (interpreter_type == INTERPRETER_AOUT)
+		if (interpreter_type == INTERPRETER_AOUT) {
 			elf_entry = load_aout_interp(&loc->interp_ex,
 						     interpreter);
-		else
+		} else {
+			unsigned long uninitialized_var(interp_map_addr);
+
 			elf_entry = load_elf_interp(&loc->interp_elf_ex,
 						    interpreter,
-						    &interp_load_addr);
+						    &interp_map_addr,
+						    load_bias);
+			if (!IS_ERR((void *)elf_entry)) {
+				/*
+				 * load_elf_interp() returns relocation
+				 * adjustment
+				 */
+				interp_load_addr = elf_entry;
+				elf_entry += loc->interp_elf_ex.e_entry;
+			}
+		}
 		if (BAD_ADDR(elf_entry)) {
 			force_sig(SIGSEGV, current);
 			retval = IS_ERR((void *)elf_entry) ?
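The reason load_elf_interp() now computes total_mapping_size() is given in the comment inside elf_map(): the first mmap of the interpreter must reserve the whole span from the first to the last PT_LOAD segment, otherwise a randomized placement could let a later segment collide with mappings made in between. The span arithmetic can be checked standalone; below is a sketch with a toy program-header table (the struct and values are made up, with field names mirroring Elf64_Phdr and a 4 KB page assumed):

/* Standalone check of the total_mapping_size() logic above. */
#include <stdio.h>

#define PT_NULL 0
#define PT_LOAD 1
#define ELF_PAGESTART(v) ((v) & ~0xfffUL)

struct phdr { unsigned int p_type; unsigned long p_vaddr, p_memsz; };

static unsigned long total_mapping_size(struct phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	/* find the first and last PT_LOAD entries */
	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
		ELF_PAGESTART(cmds[first_idx].p_vaddr);
}

int main(void)
{
	struct phdr ph[] = {
		{ PT_NULL, 0, 0 },
		{ PT_LOAD, 0x400000, 0x1000 },	/* text */
		{ PT_LOAD, 0x600000, 0x2000 },	/* data + bss */
	};

	/* spans first PT_LOAD page start to end of last PT_LOAD: 0x202000 */
	printf("total mapping size: 0x%lx\n", total_mapping_size(ph, 3));
	return 0;
}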
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 9d4f11dd566f..6cf40dec0932 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -413,6 +413,7 @@ pte_t *lookup_address(unsigned long addr);
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
 #define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
 #define pgtable_cache_init()   do { } while (0)
 #define check_pgt_cache()      do { } while (0)