-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c |  3
-rw-r--r--  arch/x86/kernel/sys_x86_64.c  | 14
-rw-r--r--  arch/x86/mm/mmap_64.c         |  5
-rw-r--r--  fs/binfmt_elf.c               |  7
4 files changed, 15 insertions, 14 deletions
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 2a662215359c..4f0c30c38e99 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -222,7 +222,8 @@ elf32_set_personality (void)
 }
 
 static unsigned long
-elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type, unsigned long unused)
+elf32_map(struct file *filep, unsigned long addr, struct elf_phdr *eppnt,
+		int prot, int type, unsigned long unused)
 {
 	unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
 
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 95485e63fd2f..bd802a5e1aa3 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -182,9 +182,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 	/* check if free_area_cache is useful for us */
 	if (len <= mm->cached_hole_size) {
-                mm->cached_hole_size = 0;
-                mm->free_area_cache = mm->mmap_base;
-        }
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
 
 	/* either no address requested or can't fit in requested address hole */
 	addr = mm->free_area_cache;
@@ -213,9 +213,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr);
 
-                /* remember the largest hole we saw so far */
-                if (addr + mm->cached_hole_size < vma->vm_start)
-                        mm->cached_hole_size = vma->vm_start - addr;
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
 
 		/* try just below the current vma->vm_start */
 		addr = vma->vm_start-len;
@@ -229,7 +229,7 @@ bottomup:
 	 * allocations.
 	 */
 	mm->cached_hole_size = ~0UL;
-        mm->free_area_cache = TASK_UNMAPPED_BASE;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 	/*
 	 * Restore the topdown base:
diff --git a/arch/x86/mm/mmap_64.c b/arch/x86/mm/mmap_64.c
index 8cf03ea651f8..65b34f226f14 100644
--- a/arch/x86/mm/mmap_64.c
+++ b/arch/x86/mm/mmap_64.c
@@ -100,7 +100,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		/* ia32_pick_mmap_layout has its own. */
 		return ia32_pick_mmap_layout(mm);
 #endif
-	} else if(mmap_is_legacy()) {
+	} else if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 		mm->unmap_area = arch_unmap_area;
@@ -111,7 +111,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		if (current->flags & PF_RANDOMIZE)
 			rnd = -rnd;
 	}
-	if (current->flags & PF_RANDOMIZE) {
+	if (current->flags & PF_RANDOMIZE)
 		mm->mmap_base += ((long)rnd) << PAGE_SHIFT;
-	}
 }
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8193d24be159..b8bca1ebc1a0 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -45,7 +45,8 @@
 
 static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
 static int load_elf_library(struct file *);
-static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
+static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
+				int, int, unsigned long);
 
 /*
  * If we don't support core dumping, then supply a NULL so we
@@ -435,7 +436,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 		load_addr = -vaddr;
 
 		map_addr = elf_map(interpreter, load_addr + vaddr,
-                        eppnt, elf_prot, elf_type, total_size);
+				eppnt, elf_prot, elf_type, total_size);
 		total_size = 0;
 		if (!*interp_map_addr)
 			*interp_map_addr = map_addr;
@@ -936,7 +937,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 		}
 
 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-				elf_prot, elf_flags,0);
+				elf_prot, elf_flags, 0);
 		if (BAD_ADDR(error)) {
 			send_sig(SIGKILL, current, 0);
 			retval = IS_ERR((void *)error) ?