Diffstat (limited to 'arch/mips/kernel/vpe.c')
-rw-r--r--  arch/mips/kernel/vpe.c | 29 ++++++++++++++++++-----------
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 39804c584edd..2794501ff302 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -269,7 +269,7 @@ static void *alloc_progmem(unsigned long len)
          * This means you must tell Linux to use less memory than you
          * physically have, for example by passing a mem= boot argument.
          */
-        addr = pfn_to_kaddr(max_pfn);
+        addr = pfn_to_kaddr(max_low_pfn);
         memset(addr, 0, len);
 #else
         /* simple grab some mem for now */
@@ -781,10 +781,15 @@ static int vpe_run(struct vpe * v)
         /* take system out of configuration state */
         clear_c0_mvpcontrol(MVPCONTROL_VPC);
 
+        /*
+         * SMTC/SMVP kernels manage VPE enable independently,
+         * but uniprocessor kernels need to turn it on, even
+         * if that wasn't the pre-dvpe() state.
+         */
 #ifdef CONFIG_SMP
-        evpe(EVPE_ENABLE);
-#else
         evpe(vpeflags);
+#else
+        evpe(EVPE_ENABLE);
 #endif
         emt(dmt_flag);
         local_irq_restore(flags);
@@ -840,7 +845,7 @@ static int vpe_elfload(struct vpe * v)
 
         /* Sanity checks against insmoding binaries or wrong arch,
            weird elf version */
-        if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
+        if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
             || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
             || !elf_check_arch(hdr)
             || hdr->e_shentsize != sizeof(*sechdrs)) {
@@ -947,12 +952,14 @@ static int vpe_elfload(struct vpe * v)
                 struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
 
                 for (i = 0; i < hdr->e_phnum; i++) {
-                        if (phdr->p_type != PT_LOAD)
-                                continue;
-
-                        memcpy((void *)phdr->p_paddr, (char *)hdr + phdr->p_offset, phdr->p_filesz);
-                        memset((void *)phdr->p_paddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
-                        phdr++;
+                        if (phdr->p_type == PT_LOAD) {
+                                memcpy((void *)phdr->p_paddr,
+                                       (char *)hdr + phdr->p_offset,
+                                       phdr->p_filesz);
+                                memset((void *)phdr->p_paddr + phdr->p_filesz,
+                                       0, phdr->p_memsz - phdr->p_filesz);
+                        }
+                        phdr++;
                 }
 
                 for (i = 0; i < hdr->e_shnum; i++) {
@@ -1107,7 +1114,7 @@ static int vpe_release(struct inode *inode, struct file *filp)
                 return -ENODEV;
 
         hdr = (Elf_Ehdr *) v->pbuffer;
-        if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) {
+        if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
                 if (vpe_elfload(v) >= 0) {
                         vpe_run(v);
                 } else {
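
The two memcmp() hunks only replace the bare length 4 with SELFMAG, the constant from <linux/elf.h> (also in userspace <elf.h>) that names the size of the ELF magic bytes in e_ident, so the check spells out what the 4 meant. A minimal sketch of the same check, assuming userspace <elf.h> and a hypothetical looks_like_elf() helper (not part of vpe.c):

#include <elf.h>
#include <string.h>

/* SELFMAG (== 4) is the length of the "\177ELF" magic at the start of
 * e_ident; ELFMAG is the expected byte string itself. */
static int looks_like_elf(const Elf32_Ehdr *hdr)
{
        return memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0;
}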
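
The program-header hunk is not just a reflow of long lines: in the old loop a non-PT_LOAD entry hit continue before the trailing phdr++, so the iterator could stall on that header and skip any later loadable segments; the rewrite copies only PT_LOAD segments but advances phdr on every iteration. A self-contained sketch of the resulting loading loop, using the userspace Elf32_* types and a hypothetical load_segments() helper, and assuming (as the VPE loader does) that each segment's p_paddr is directly writable from the caller's address space:

#include <elf.h>
#include <string.h>

/* Copy every PT_LOAD segment to its p_paddr and zero the tail between
 * p_filesz and p_memsz (the .bss portion). Non-loadable headers are
 * skipped, but phdr still advances once per iteration. */
static void load_segments(const Elf32_Ehdr *hdr)
{
        const Elf32_Phdr *phdr =
                (const Elf32_Phdr *)((const char *)hdr + hdr->e_phoff);
        int i;

        for (i = 0; i < hdr->e_phnum; i++) {
                if (phdr->p_type == PT_LOAD) {
                        char *dst = (char *)(unsigned long)phdr->p_paddr;

                        memcpy(dst, (const char *)hdr + phdr->p_offset,
                               phdr->p_filesz);
                        memset(dst + phdr->p_filesz, 0,
                               phdr->p_memsz - phdr->p_filesz);
                }
                phdr++;
        }
}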