author	Michael Davidson <md@google.com>	2015-04-14 18:47:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 19:49:04 -0400
commit	a87938b2e246b81b4fb713edb371a9fa3c5c3c86 (patch)
tree	860ecd1a0da8d345122aba2787523b01c23262ce /fs
parent	b1b0deabbffa922fed808d4a5d99d03372a4c701 (diff)
fs/binfmt_elf.c: fix bug in loading of PIE binaries
With CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE enabled, and a normal top-down address allocation strategy, load_elf_binary() will attempt to map a PIE binary into an address range immediately below mm->mmap_base.

Unfortunately, load_elf_binary() does not take account of the need to allocate sufficient space for the entire binary, which means that, while the first PT_LOAD segment is mapped below mm->mmap_base, the subsequent PT_LOAD segment(s) end up being mapped above mm->mmap_base into the area that is supposed to be the "gap" between the stack and the binary.

Since the size of the "gap" on x86_64 is only guaranteed to be 128MB, binaries with data segments larger than 128MB can end up mapping part of their data segment over their stack, resulting in corruption of the stack (and of the data segment once the binary starts to run).

Any PIE binary with a data segment larger than 128MB is vulnerable to this, although address randomization means that the actual gap between the stack and the end of the binary is normally greater than 128MB. The larger the data segment of the binary, the higher the probability of failure.

Fix this by calculating the total size of the binary in the same way as load_elf_interp().

Signed-off-by: Michael Davidson <md@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Kees Cook <keescook@chromium.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
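For reference, the size calculation that load_elf_interp() relies on, and that this patch reuses for the main binary, walks the program headers and returns the span from the page-aligned start of the first PT_LOAD segment to the end (p_vaddr + p_memsz) of the last one. The following is a rough sketch of such a helper; it is an illustrative reconstruction of total_mapping_size(), not a verbatim copy of fs/binfmt_elf.c:

/*
 * Illustrative sketch: total size that all PT_LOAD segments of an ELF
 * image will occupy, from the page-aligned start of the first segment
 * to the end of the last one.  Returns 0 if there is nothing to load.
 */
static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	/* Find the first and last PT_LOAD program headers. */
	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
	       ELF_PAGESTART(cmds[first_idx].p_vaddr);
}

With this value in hand, load_elf_binary() can ask for enough address space for the whole image up front instead of sizing the request to the first segment only.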
Diffstat (limited to 'fs')
-rw-r--r--	fs/binfmt_elf.c	9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 995986b8e36b..d925f55e4857 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -862,6 +862,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 	     i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
 		int elf_prot = 0, elf_flags;
 		unsigned long k, vaddr;
+		unsigned long total_size = 0;
 
 		if (elf_ppnt->p_type != PT_LOAD)
 			continue;
@@ -924,10 +925,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
 #else
 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
 #endif
+			total_size = total_mapping_size(elf_phdata,
+							loc->elf_ex.e_phnum);
+			if (!total_size) {
+				error = -EINVAL;
+				goto out_free_dentry;
+			}
 		}
 
 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-				elf_prot, elf_flags, 0);
+				elf_prot, elf_flags, total_size);
 		if (BAD_ADDR(error)) {
 			retval = IS_ERR((void *)error) ?
 				PTR_ERR((void*)error) : -EINVAL;
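Passing a non-zero total_size down to elf_map() is what prevents the overflow: for the first mapping of an ET_DYN binary the kernel can now reserve address space for the whole image at once and trim the unused tail, so the remaining PT_LOAD segments are placed with MAP_FIXED inside that reserved range rather than spilling above mm->mmap_base into the stack gap. A rough sketch of that reserve-and-trim step follows; it is illustrative and not the verbatim elf_map() implementation:

/*
 * Illustrative sketch of how elf_map() can use total_size.  When
 * total_size is non-zero, the first mmap reserves room for the entire
 * image so that later segments cannot land outside it; the tail beyond
 * this segment is then unmapped again.
 */
static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);

	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* A segment with zero filesize is valid; nothing to map. */
	if (!size)
		return addr;

	if (total_size) {
		/* Reserve the whole image, then give back the tail. */
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr + size, total_size - size);
	} else {
		map_addr = vm_mmap(filep, addr, size, prot, type, off);
	}

	return map_addr;
}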