author     Jesper Juhl <jesper.juhl@gmail.com>      2006-06-23 05:05:35 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-06-23 10:43:05 -0400
commit     f4e5cc2c44bf760c02875cf48c886c50ec7d2734 (patch)
tree       768df1296b1294c2c84b82f3b0239e5bae3fd5fc /fs/binfmt_elf.c
parent     11420211b8123d0e2f71945ad022e8eec28ebfce (diff)
[PATCH] binfmt_elf: CodingStyle cleanup and remove some pointless casts
Do a CodingStyle cleanup of fs/binfmt_elf.c and also remove some pointless
casts of kmalloc() return values in the same file.
Signed-off-by: Jesper Juhl <jesper.juhl@gmail.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
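For readers unfamiliar with the second point: kmalloc() returns void *, which C converts implicitly to any object pointer type, so an explicit cast on its result adds nothing. A minimal illustrative sketch of the before/after style follows; the helper function and its size calculation are hypothetical, not code taken from this patch.

#include <linux/slab.h>         /* kmalloc(), GFP_KERNEL */
#include <linux/elf.h>          /* struct elf_phdr */

/* Hypothetical helper, for illustration only. */
static struct elf_phdr *alloc_phdrs(unsigned int nr)
{
        struct elf_phdr *phdata;

        /* Old style: the cast is redundant, kmalloc() already returns void *. */
        /* phdata = (struct elf_phdr *) kmalloc(nr * sizeof(*phdata), GFP_KERNEL); */

        /* Style after this cleanup: assign the void * result directly. */
        phdata = kmalloc(nr * sizeof(*phdata), GFP_KERNEL);
        if (!phdata)
                return NULL;    /* callers must check for allocation failure */
        return phdata;
}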
Diffstat (limited to 'fs/binfmt_elf.c')
-rw-r--r--  fs/binfmt_elf.c | 344
1 file changed, 183 insertions, 161 deletions
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8a04216e8b4d..451c04fecb43 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -38,15 +38,13 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/random.h>
-
+#include <linux/elf.h>
 #include <asm/uaccess.h>
 #include <asm/param.h>
 #include <asm/page.h>

-#include <linux/elf.h>
-
-static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
-static int load_elf_library(struct file*);
+static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
+static int load_elf_library(struct file *);
 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

@@ -59,15 +57,15 @@ extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
 * don't even try.
 */
 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
+static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
 #else
 #define elf_core_dump NULL
 #endif

 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
-# define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
+#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
 #else
-# define ELF_MIN_ALIGN PAGE_SIZE
+#define ELF_MIN_ALIGN PAGE_SIZE
 #endif

 #ifndef ELF_CORE_EFLAGS
@@ -86,7 +84,7 @@ static struct linux_binfmt elf_format = {
 .min_coredump = ELF_EXEC_PAGESIZE
 };

-#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
+#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)

 static int set_brk(unsigned long start, unsigned long end)
 {
@@ -104,13 +102,11 @@ static int set_brk(unsigned long start, unsigned long end)
 return 0;
 }

-
 /* We need to explicitly zero any fractional pages
 after the data section (i.e. bss). This would
 contain the junk from the file that should not
-be in memory */
-
-
+be in memory
+*/
 static int padzero(unsigned long elf_bss)
 {
 unsigned long nbyte;
@@ -129,7 +125,9 @@ static int padzero(unsigned long elf_bss)
 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
 #define STACK_ROUND(sp, items) \
 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
-#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
+#define STACK_ALLOC(sp, len) ({ \
+elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
+old_sp; })
 #else
 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
 #define STACK_ROUND(sp, items) \
@@ -138,7 +136,7 @@ static int padzero(unsigned long elf_bss)
 #endif

 static int
-create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
+create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 int interp_aout, unsigned long load_addr,
 unsigned long interp_load_addr)
 {
@@ -161,7 +159,6 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
 * for userspace to get any other way, in others (i386) it is
 * merely difficult.
 */
-
 u_platform = NULL;
 if (k_platform) {
 size_t len = strlen(k_platform) + 1;
@@ -171,7 +168,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
 * evictions by the processes running on the same package. One
 * thing we can do is to shuffle the initial stack for them.
 */
-
+
 p = arch_align_stack(p);

 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
@@ -182,7 +179,9 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
 /* Create the ELF interpreter info */
 elf_info = (elf_addr_t *) current->mm->saved_auxv;
 #define NEW_AUX_ENT(id, val) \
-do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
+do { \
+elf_info[ei_index++] = id; elf_info[ei_index++] = val; \
+} while (0)

 #ifdef ARCH_DLINFO
 /*
@@ -195,21 +194,22 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
-NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
+NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
 NEW_AUX_ENT(AT_BASE, interp_load_addr);
 NEW_AUX_ENT(AT_FLAGS, 0);
 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
-NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
-NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
-NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
-NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
-NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
+NEW_AUX_ENT(AT_UID, (elf_addr_t)tsk->uid);
+NEW_AUX_ENT(AT_EUID, (elf_addr_t)tsk->euid);
+NEW_AUX_ENT(AT_GID, (elf_addr_t)tsk->gid);
+NEW_AUX_ENT(AT_EGID, (elf_addr_t)tsk->egid);
+NEW_AUX_ENT(AT_SECURE, (elf_addr_t)security_bprm_secureexec(bprm));
 if (k_platform) {
-NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
+NEW_AUX_ENT(AT_PLATFORM,
+(elf_addr_t)(unsigned long)u_platform);
 }
 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
-NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
+NEW_AUX_ENT(AT_EXECFD, (elf_addr_t)bprm->interp_data);
 }
 #undef NEW_AUX_ENT
 /* AT_NULL is zero; clear the rest too */
@@ -232,7 +232,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
 /* Point sp at the lowest address on the stack */
 #ifdef CONFIG_STACK_GROWSUP
 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
-bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
+bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
 #else
 sp = (elf_addr_t __user *)bprm->p;
 #endif
@@ -285,7 +285,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
 #ifndef elf_map

 static unsigned long elf_map(struct file *filep, unsigned long addr,
-struct elf_phdr *eppnt, int prot, int type)
+struct elf_phdr *eppnt, int prot, int type)
 {
 unsigned long map_addr;
 unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);
@@ -310,9 +310,8 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
 is only provided so that we can read a.out libraries that have
 an ELF header */

-static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
-struct file * interpreter,
-unsigned long *interp_load_addr)
+static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+struct file *interpreter, unsigned long *interp_load_addr)
 {
 struct elf_phdr *elf_phdata;
 struct elf_phdr *eppnt;
@@ -342,15 +341,15 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
 goto out;

 /* Now read in all of the header information */
-
 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
 if (size > ELF_MIN_ALIGN)
 goto out;
-elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
+elf_phdata = kmalloc(size, GFP_KERNEL);
 if (!elf_phdata)
 goto out;

-retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
+retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
+(char *)elf_phdata,size);
 error = -EIO;
 if (retval != size) {
 if (retval < 0)
@@ -359,58 +358,65 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
 }

 eppnt = elf_phdata;
-for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
+for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
 if (eppnt->p_type == PT_LOAD) {
 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
 int elf_prot = 0;
 unsigned long vaddr = 0;
 unsigned long k, map_addr;

-if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
-if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
-if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
-vaddr = eppnt->p_vaddr;
-if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
-elf_type |= MAP_FIXED;
-
-map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
-error = map_addr;
-if (BAD_ADDR(map_addr))
-goto out_close;
-
-if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
-load_addr = map_addr - ELF_PAGESTART(vaddr);
-load_addr_set = 1;
-}
-
-/*
-* Check to see if the section's size will overflow the
-* allowed task size. Note that p_filesz must always be
-* <= p_memsize so it is only necessary to check p_memsz.
-*/
-k = load_addr + eppnt->p_vaddr;
-if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
-eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
-error = -ENOMEM;
-goto out_close;
-}
-
-/*
-* Find the end of the file mapping for this phdr, and keep
-* track of the largest address we see for this.
-*/
-k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
-if (k > elf_bss)
-elf_bss = k;
-
-/*
-* Do the same thing for the memory mapping - between
-* elf_bss and last_bss is the bss section.
-*/
-k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
-if (k > last_bss)
-last_bss = k;
-}
+if (eppnt->p_flags & PF_R)
+elf_prot = PROT_READ;
+if (eppnt->p_flags & PF_W)
+elf_prot |= PROT_WRITE;
+if (eppnt->p_flags & PF_X)
+elf_prot |= PROT_EXEC;
+vaddr = eppnt->p_vaddr;
+if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
+elf_type |= MAP_FIXED;
+
+map_addr = elf_map(interpreter, load_addr + vaddr,
+eppnt, elf_prot, elf_type);
+error = map_addr;
+if (BAD_ADDR(map_addr))
+goto out_close;
+
+if (!load_addr_set &&
+interp_elf_ex->e_type == ET_DYN) {
+load_addr = map_addr - ELF_PAGESTART(vaddr);
+load_addr_set = 1;
+}
+
+/*
+* Check to see if the section's size will overflow the
+* allowed task size. Note that p_filesz must always be
+* <= p_memsize so it's only necessary to check p_memsz.
+*/
+k = load_addr + eppnt->p_vaddr;
+if (k > TASK_SIZE ||
+eppnt->p_filesz > eppnt->p_memsz ||
+eppnt->p_memsz > TASK_SIZE ||
+TASK_SIZE - eppnt->p_memsz < k) {
+error = -ENOMEM;
+goto out_close;
+}
+
+/*
+* Find the end of the file mapping for this phdr, and
+* keep track of the largest address we see for this.
+*/
+k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
+if (k > elf_bss)
+elf_bss = k;
+
+/*
+* Do the same thing for the memory mapping - between
+* elf_bss and last_bss is the bss section.
+*/
+k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
+if (k > last_bss)
+last_bss = k;
+}
 }

 /*
@@ -424,7 +430,8 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
 goto out_close;
 }

-elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
+/* What we have mapped so far */
+elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

 /* Map the last of the bss segment */
 if (last_bss > elf_bss) {
@@ -436,7 +443,7 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
 }

 *interp_load_addr = load_addr;
-error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
+error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;

 out_close:
 kfree(elf_phdata);
@@ -444,8 +451,8 @@ out:
 return error;
 }

-static unsigned long load_aout_interp(struct exec * interp_ex,
-struct file * interpreter)
+static unsigned long load_aout_interp(struct exec *interp_ex,
+struct file *interpreter)
 {
 unsigned long text_data, elf_entry = ~0UL;
 char __user * addr;
@@ -464,7 +471,7 @@ static unsigned long load_aout_interp(struct exec * interp_ex,
 case ZMAGIC:
 case QMAGIC:
 offset = N_TXTOFF(*interp_ex);
-addr = (char __user *) N_TXTADDR(*interp_ex);
+addr = (char __user *)N_TXTADDR(*interp_ex);
 break;
 default:
 goto out;
@@ -480,7 +487,6 @@ static unsigned long load_aout_interp(struct exec * interp_ex,
 flush_icache_range((unsigned long)addr,
 (unsigned long)addr + text_data);

-
 down_write(&current->mm->mmap_sem);
 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
 interp_ex->a_bss);
@@ -519,7 +525,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
 #endif
 }

-static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 {
 struct file *interpreter = NULL; /* to shut gcc up */
 unsigned long load_addr = 0, load_bias = 0;
@@ -528,7 +534,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 unsigned int interpreter_type = INTERPRETER_NONE;
 unsigned char ibcs2_interpreter = 0;
 unsigned long error;
-struct elf_phdr * elf_ppnt, *elf_phdata;
+struct elf_phdr *elf_ppnt, *elf_phdata;
 unsigned long elf_bss, elf_brk;
 int elf_exec_fileno;
 int retval, i;
@@ -553,7 +559,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 }

 /* Get the exec-header */
-loc->elf_ex = *((struct elfhdr *) bprm->buf);
+loc->elf_ex = *((struct elfhdr *)bprm->buf);

 retval = -ENOEXEC;
 /* First of all, some simple consistency checks */
@@ -568,7 +574,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 goto out;

 /* Now read in all of the header information */
-
 if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
 goto out;
 if (loc->elf_ex.e_phnum < 1 ||
@@ -576,18 +581,19 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 goto out;
 size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
 retval = -ENOMEM;
-elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
+elf_phdata = kmalloc(size, GFP_KERNEL);
 if (!elf_phdata)
 goto out;

-retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
+retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
+(char *)elf_phdata, size);
 if (retval != size) {
 if (retval >= 0)
 retval = -EIO;
 goto out_free_ph;
 }

 files = current->files; /* Refcounted so ok */
 retval = unshare_files();
 if (retval < 0)
 goto out_free_ph;
@@ -598,7 +604,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)

 /* exec will make our files private anyway, but for the a.out
 loader stuff we need to do it earlier */
-
 retval = get_unused_fd();
 if (retval < 0)
 goto out_free_fh;
@@ -620,7 +625,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 * shared libraries - for now assume that this
 * is an a.out format binary
 */
-
 retval = -ENOEXEC;
 if (elf_ppnt->p_filesz > PATH_MAX ||
 elf_ppnt->p_filesz < 2)
@@ -628,13 +632,13 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)

 retval = -ENOMEM;
 elf_interpreter = kmalloc(elf_ppnt->p_filesz,
-GFP_KERNEL);
+GFP_KERNEL);
 if (!elf_interpreter)
 goto out_free_file;

 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
 elf_interpreter,
 elf_ppnt->p_filesz);
 if (retval != elf_ppnt->p_filesz) {
 if (retval >= 0)
 retval = -EIO;
@@ -678,7 +682,8 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 retval = PTR_ERR(interpreter);
 if (IS_ERR(interpreter))
 goto out_free_interp;
-retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
+retval = kernel_read(interpreter, 0, bprm->buf,
+BINPRM_BUF_SIZE);
 if (retval != BINPRM_BUF_SIZE) {
 if (retval >= 0)
 retval = -EIO;
@@ -686,8 +691,8 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 }

 /* Get the exec headers */
-loc->interp_ex = *((struct exec *) bprm->buf);
-loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
+loc->interp_ex = *((struct exec *)bprm->buf);
+loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
 break;
 }
 elf_ppnt++;
@@ -739,7 +744,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)

 /* OK, we are done with that, now set up the arg stuff,
 and then start this sucker up */
-
 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
 char *passed_p = passed_fileno;
 sprintf(passed_fileno, "%d", elf_exec_fileno);
@@ -777,7 +781,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
 current->personality |= READ_IMPLIES_EXEC;

-if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
 current->flags |= PF_RANDOMIZE;
 arch_pick_mmap_layout(current->mm);

@@ -798,8 +802,8 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 the correct location in memory. At this point, we assume that
 the image should be loaded at fixed address, not at a variable
 address. */
-
-for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+for(i = 0, elf_ppnt = elf_phdata;
+i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
 int elf_prot = 0, elf_flags;
 unsigned long k, vaddr;

@@ -827,30 +831,35 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 load_bias, nbyte)) {
 /*
 * This bss-zeroing can fail if the ELF
 * file specifies odd protections. So
 * we don't check the return value
 */
 }
 }
 }

-if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
-if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
-if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
+if (elf_ppnt->p_flags & PF_R)
+elf_prot |= PROT_READ;
+if (elf_ppnt->p_flags & PF_W)
+elf_prot |= PROT_WRITE;
+if (elf_ppnt->p_flags & PF_X)
+elf_prot |= PROT_EXEC;

-elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
+elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

 vaddr = elf_ppnt->p_vaddr;
 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
 elf_flags |= MAP_FIXED;
 } else if (loc->elf_ex.e_type == ET_DYN) {
-/* Try and get dynamic programs out of the way of the default mmap
-base, as well as whatever program they might try to exec. This
-is because the brk will follow the loader, and is not movable. */
+/* Try and get dynamic programs out of the way of the
+* default mmap base, as well as whatever program they
+* might try to exec. This is because the brk will
+* follow the loader, and is not movable. */
 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
 }

-error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
+error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+elf_prot, elf_flags);
 if (BAD_ADDR(error)) {
 send_sig(SIGKILL, current, 0);
 goto out_free_dentry;
@@ -867,8 +876,10 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 }
 }
 k = elf_ppnt->p_vaddr;
-if (k < start_code) start_code = k;
-if (start_data < k) start_data = k;
+if (k < start_code)
+start_code = k;
+if (start_data < k)
+start_data = k;

 /*
 * Check to see if the section's size will overflow the
@@ -878,7 +889,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
 elf_ppnt->p_memsz > TASK_SIZE ||
 TASK_SIZE - elf_ppnt->p_memsz < k) {
-/* set_brk can never work. Avoid overflows. */
+/* set_brk can never work. Avoid overflows. */
 send_sig(SIGKILL, current, 0);
 goto out_free_dentry;
 }
@@ -966,8 +977,9 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)

 compute_creds(bprm);
 current->flags &= ~PF_FORKNOEXEC;
-create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
-load_addr, interp_load_addr);
+create_elf_tables(bprm, &loc->elf_ex,
+(interpreter_type == INTERPRETER_AOUT),
+load_addr, interp_load_addr);
 /* N.B. passed_fileno might not be initialized? */
 if (interpreter_type == INTERPRETER_AOUT)
 current->mm->arg_start += strlen(passed_fileno) + 1;
@@ -981,7 +993,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
 and some applications "depend" upon this behavior.
 Since we do not have the power to recompile these, we
-emulate the SVr4 behavior. Sigh. */
+emulate the SVr4 behavior. Sigh. */
 down_write(&current->mm->mmap_sem);
 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
 MAP_FIXED | MAP_PRIVATE, 0);
@@ -1036,7 +1048,6 @@ out_free_ph:

 /* This is really simpleminded and specialized - we are loading an
 a.out library that is given an ELF header. */
-
 static int load_elf_library(struct file *file)
 {
 struct elf_phdr *elf_phdata;
@@ -1046,7 +1057,7 @@ static int load_elf_library(struct file *file)
 struct elfhdr elf_ex;

 error = -ENOEXEC;
-retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
+retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
 if (retval != sizeof(elf_ex))
 goto out;

@@ -1055,7 +1066,7 @@ static int load_elf_library(struct file *file)

 /* First of all, some simple consistency checks */
 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
-!elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
+!elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
 goto out;

 /* Now read in all of the header information */
@@ -1103,7 +1114,8 @@ static int load_elf_library(struct file *file)
 goto out_free_ph;
 }

-len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
+len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
+ELF_MIN_ALIGN - 1);
 bss = eppnt->p_memsz + eppnt->p_vaddr;
 if (bss > len) {
 down_write(&current->mm->mmap_sem);
@@ -1162,7 +1174,7 @@ static int maydump(struct vm_area_struct *vma)
 if (vma->vm_flags & (VM_IO | VM_RESERVED))
 return 0;

-/* Dump shared memory only if mapped from an anonymous file. */
+/* Dump shared memory only if mapped from an anonymous file. */
 if (vma->vm_flags & VM_SHARED)
 return vma->vm_file->f_dentry->d_inode->i_nlink == 0;

@@ -1173,7 +1185,7 @@ static int maydump(struct vm_area_struct *vma)
 return 1;
 }

-#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

 /* An ELF note in memory */
 struct memelfnote
@@ -1276,11 +1288,11 @@ static void fill_note(struct memelfnote *note, const char *name, int type,
 }

 /*
-* fill up all the fields in prstatus from the given task struct, except registers
-* which need to be filled up separately.
+* fill up all the fields in prstatus from the given task struct, except
+* registers which need to be filled up separately.
 */
 static void fill_prstatus(struct elf_prstatus *prstatus,
-struct task_struct *p, long signr)
+struct task_struct *p, long signr)
 {
 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
 prstatus->pr_sigpend = p->pending.signal.sig[0];
@@ -1365,8 +1377,8 @@ struct elf_thread_status

 /*
 * In order to add the specific thread information for the elf file format,
-* we need to keep a linked list of every threads pr_status and then
-* create a single section for them in the final core file.
+* we need to keep a linked list of every threads pr_status and then create
+* a single section for them in the final core file.
 */
 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
 {
@@ -1377,19 +1389,23 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
 fill_prstatus(&t->prstatus, p, signr);
 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

-fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
+fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
+&(t->prstatus));
 t->num_notes++;
 sz += notesize(&t->notes[0]);

-if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
-fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
+if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
+&t->fpu))) {
+fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
+&(t->fpu));
 t->num_notes++;
 sz += notesize(&t->notes[1]);
 }

 #ifdef ELF_CORE_COPY_XFPREGS
 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
-fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
+fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
+&t->xfpu);
 t->num_notes++;
 sz += notesize(&t->notes[2]);
 }
@@ -1404,7 +1420,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
 * and then they are actually written out. If we run out of core limit
 * we just truncate.
 */
-static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
+static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
 {
 #define NUM_NOTES 6
 int has_dumped = 0;
@@ -1433,12 +1449,12 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
 /*
 * We no longer stop all VM operations.
 *
-* This is because those proceses that could possibly change map_count or
-* the mmap / vma pages are now blocked in do_exit on current finishing
-* this core dump.
+* This is because those proceses that could possibly change map_count
+* or the mmap / vma pages are now blocked in do_exit on current
+* finishing this core dump.
 *
 * Only ptrace can touch these memory addresses, but it doesn't change
 * the map_count or the pages allocated. So no possibility of crashing
 * exists while dumping the mm->vm_next areas to the core file.
 */

@@ -1500,7 +1516,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
 #endif

 /* Set up header */
-fill_elf_header(elf, segs+1); /* including notes section */
+fill_elf_header(elf, segs + 1); /* including notes section */

 has_dumped = 1;
 current->flags |= PF_DUMPCORE;
@@ -1510,24 +1526,24 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
 * with info from their /proc.
 */

-fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
-
+fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
 fill_psinfo(psinfo, current->group_leader, current->mm);
-fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

 numnote = 2;

-auxv = (elf_addr_t *) current->mm->saved_auxv;
+auxv = (elf_addr_t *)current->mm->saved_auxv;

 i = 0;
 do
 i += 2;
 while (auxv[i - 2] != AT_NULL);
 fill_note(&notes[numnote++], "CORE", NT_AUXV,
-i * sizeof (elf_addr_t), auxv);
+i * sizeof(elf_addr_t), auxv);

 /* Try to dump the FPU. */
-if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
+if ((prstatus->pr_fpvalid =
+elf_core_copy_task_fpregs(current, regs, fpu)))
 fill_note(notes + numnote++,
 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
 #ifdef ELF_CORE_COPY_XFPREGS
@@ -1576,8 +1592,10 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
 phdr.p_memsz = sz;
 offset += phdr.p_filesz;
 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
-if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
+if (vma->vm_flags & VM_WRITE)
+phdr.p_flags |= PF_W;
+if (vma->vm_flags & VM_EXEC)
+phdr.p_flags |= PF_X;
 phdr.p_align = ELF_EXEC_PAGESIZE;

 DUMP_WRITE(&phdr, sizeof(phdr));
@@ -1594,7 +1612,9 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)

 /* write out the thread status notes section */
 list_for_each(t, &thread_list) {
-struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
+struct elf_thread_status *tmp =
+list_entry(t, struct elf_thread_status, list);
+
 for (i = 0; i < tmp->num_notes; i++)
 if (!writenote(&tmp->notes[i], file))
 goto end_coredump;
@@ -1611,18 +1631,19 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
 for (addr = vma->vm_start;
 addr < vma->vm_end;
 addr += PAGE_SIZE) {
-struct page* page;
+struct page *page;
 struct vm_area_struct *vma;

 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
 &page, &vma) <= 0) {
-DUMP_SEEK (file->f_pos + PAGE_SIZE);
+DUMP_SEEK(file->f_pos + PAGE_SIZE);
 } else {
 if (page == ZERO_PAGE(addr)) {
-DUMP_SEEK (file->f_pos + PAGE_SIZE);
+DUMP_SEEK(file->f_pos + PAGE_SIZE);
 } else {
 void *kaddr;
-flush_cache_page(vma, addr, page_to_pfn(page));
+flush_cache_page(vma, addr,
+page_to_pfn(page));
 kaddr = kmap(page);
 if ((size += PAGE_SIZE) > limit ||
 !dump_write(file, kaddr,
@@ -1644,7 +1665,8 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)

 if ((off_t)file->f_pos != offset) {
 /* Sanity check */
-printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
+printk(KERN_WARNING
+"elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
 (off_t)file->f_pos, offset);
 }
