path: root/arch/x86/kernel/sys_x86_64.c
author		Jiri Kosina <jkosina@suse.cz>	2008-01-30 07:31:07 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:31:07 -0500
commit		cc503c1b43e002e3f1fed70f46d947e2bf349bb6 (patch)
tree		df0d77b7bccf0148c7b7cdd0363354499b259f99 /arch/x86/kernel/sys_x86_64.c
parent		82f74e7159749cc511ebf5954a7b9ea6ad634949 (diff)
x86: PIE executable randomization
Map the main executable of (specially compiled/linked -pie/-fpie) ET_DYN binaries onto a random address (in cases in which mmap() is allowed to perform a randomization).

The code has been extracted from Ingo's exec-shield patch
http://people.redhat.com/mingo/exec-shield/

[akpm@linux-foundation.org: fix used-uninitialised warning]
[kamezawa.hiroyu@jp.fujitsu.com: fixed ia32 ELF on x86_64 handling]

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Roland McGrath <roland@redhat.com>
Cc: Jakub Jelinek <jakub@redhat.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/sys_x86_64.c')
-rw-r--r--	arch/x86/kernel/sys_x86_64.c	98
1 file changed, 98 insertions(+), 0 deletions(-)
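Before the diff itself, here is a minimal user-space sketch of the randomization the patch adds to find_start_end(): when PF_RANDOMIZE is set, the 0x40000000 base of the MAP_32BIT search window is shifted up by at most 32 MB (0x02000000) to a page-aligned address, while the 0x80000000 upper bound stays fixed. The helper names and the use of rand() below are illustrative assumptions only; the kernel code calls randomize_range() and draws on its own entropy source.

#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/*
 * Illustrative stand-in for randomize_range(start, end, 0): pick a
 * page-aligned address inside [start, end), or return 0 if the range
 * is too small.  rand() is only a placeholder for the kernel's
 * entropy source.
 */
static unsigned long randomize_range_sketch(unsigned long start,
					    unsigned long end)
{
	if (end <= start + PAGE_SIZE)
		return 0;
	return PAGE_ALIGN(start +
			  (unsigned long)rand() % (end - start - PAGE_SIZE));
}

/* MAP_32BIT window: the search base may move anywhere in [1 GB, 1 GB + 32 MB) */
static unsigned long randomized_map32_begin(void)
{
	unsigned long begin = 0x40000000UL;
	unsigned long new_begin =
		randomize_range_sketch(begin, begin + 0x02000000UL);

	return new_begin ? new_begin : begin;
}

Only the starting point of the search moves; the patch leaves *end at 0x80000000, so MAP_32BIT allocations still end up below 2 GB.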
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 907942ee6e76..95485e63fd2f 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -12,6 +12,7 @@
 #include <linux/file.h>
 #include <linux/utsname.h>
 #include <linux/personality.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/ia32.h>
@@ -65,6 +66,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
 			       unsigned long *end)
 {
 	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+		unsigned long new_begin;
 		/* This is usually used needed to map code in small
 		   model, so it needs to be in the first 31bit. Limit
 		   it to that. This means we need to move the
@@ -74,6 +76,11 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
 		   of playground for now. -AK */
 		*begin = 0x40000000;
 		*end = 0x80000000;
+		if (current->flags & PF_RANDOMIZE) {
+			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
+			if (new_begin)
+				*begin = new_begin;
+		}
 	} else {
 		*begin = TASK_UNMAPPED_BASE;
 		*end = TASK_SIZE;
@@ -143,6 +150,97 @@ full_search:
 	}
 }
 
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	/* for MAP_32BIT mappings we force the legacy mmap base */
+	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+		goto bottomup;
+
+	/* requesting a specific address */
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+				(!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache;
+
+	/* make sure it can fit in the remaining address space */
+	if (addr > len) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start)
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+	}
+
+	if (mm->mmap_base < len)
+		goto bottomup;
+
+	addr = mm->mmap_base-len;
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (!vma || addr+len <= vma->vm_start)
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start-len;
+	} while (len < vma->vm_start);
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
+
+
 asmlinkage long sys_uname(struct new_utsname __user * name)
 {
 	int err;
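To close, a self-contained sketch of the search that the new arch_get_unmapped_area_topdown() performs above: starting just below a base address, walk the existing mappings downward and return the first gap large enough for the request. struct region, find_region() and topdown_search() are illustrative names, not kernel API, and the free_area_cache/cached_hole_size caching as well as the bottom-up fallback are deliberately left out.

#include <stddef.h>

struct region {			/* an existing mapping, [start, end) */
	unsigned long start, end;
};

/*
 * Like find_vma(): return the lowest mapping whose end lies above addr,
 * or NULL.  The array is assumed sorted by address and non-overlapping.
 */
static const struct region *find_region(const struct region *map, size_t n,
					unsigned long addr)
{
	for (size_t i = 0; i < n; i++)
		if (map[i].end > addr)
			return &map[i];
	return NULL;
}

/*
 * Find the highest gap of at least 'len' bytes that fits entirely below
 * 'base'.  Returns 0 on failure; the kernel code instead falls back to
 * the bottom-up arch_get_unmapped_area() from TASK_UNMAPPED_BASE.
 */
static unsigned long topdown_search(const struct region *map, size_t n,
				    unsigned long base, unsigned long len)
{
	unsigned long addr;
	const struct region *r;

	if (base < len)
		return 0;
	addr = base - len;

	for (;;) {
		r = find_region(map, n, addr);
		/* no mapping above addr, or the request fits below it */
		if (!r || addr + len <= r->start)
			return addr;
		/* otherwise retry just below the blocking mapping */
		if (r->start < len)
			return 0;
		addr = r->start - len;
	}
}

Growing the mmap area downward from just below mm->mmap_base leaves more room for the heap and makes a randomized mmap base practical; when no gap is found, the real function retries bottom-up, exactly as the comment in the hunk above explains.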