aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel
diff options
context:
space:
mode:
authorSteffen Rumler <steffen.rumler.ext@nsn.com>2012-06-06 10:37:17 -0400
committerLuis Henriques <luis.henriques@canonical.com>2012-07-03 11:29:06 -0400
commit3d7020a5f360d8d18fc63600d5961c121f4178e4 (patch)
tree7082398228faa3113611801dd7d1330f9a5ae4d2 /arch/powerpc/kernel
parent2d5982fb7f6898f882eea6a05739111a29fb5090 (diff)
powerpc: Fix kernel panic during kernel module load
BugLink: http://bugs.launchpad.net/bugs/1014712 commit 3c75296562f43e6fbc6cddd3de948a7b3e4e9bcf upstream. This fixes a problem which can cause kernel oopses while loading a kernel module. According to the PowerPC EABI specification, GPR r11 is assigned the dedicated function to point to the previous stack frame. In the powerpc-specific kernel module loader, do_plt_call() (in arch/powerpc/kernel/module_32.c), GPR r11 is also used to generate trampoline code. This combination crashes the kernel, in the case where the compiler chooses to use a helper function for saving GPRs on entry, and the module loader has placed the .init.text section far away from the .text section, meaning that it has to generate a trampoline for functions in the .init.text section to call the GPR save helper. Because the trampoline trashes r11, references to the stack frame using r11 can cause an oops. The fix just uses GPR r12 instead of GPR r11 for generating the trampoline code. According to the statements from Freescale, this is safe from an EABI perspective. I've tested the fix for kernel 2.6.33 on MPC8541. Signed-off-by: Steffen Rumler <steffen.rumler.ext@nsn.com> [paulus@samba.org: reworded the description] Signed-off-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--arch/powerpc/kernel/module_32.c11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index f832773fc28..449a7e053e6 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -187,8 +187,8 @@ int apply_relocate(Elf32_Shdr *sechdrs,
 
 static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
 {
-	if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
-	    && entry->jump[1] == 0x396b0000 + (val & 0xffff))
+	if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
+	    && entry->jump[1] == 0x398c0000 + (val & 0xffff))
 		return 1;
 	return 0;
 }
@@ -215,10 +215,9 @@ static uint32_t do_plt_call(void *location,
 		entry++;
 	}
 
-	/* Stolen from Paul Mackerras as well... */
-	entry->jump[0] = 0x3d600000+((val+0x8000)>>16);	/* lis r11,sym@ha */
-	entry->jump[1] = 0x396b0000 + (val&0xffff);	/* addi r11,r11,sym@l*/
-	entry->jump[2] = 0x7d6903a6;			/* mtctr r11 */
+	entry->jump[0] = 0x3d800000+((val+0x8000)>>16);	/* lis r12,sym@ha */
+	entry->jump[1] = 0x398c0000 + (val&0xffff);	/* addi r12,r12,sym@l*/
+	entry->jump[2] = 0x7d8903a6;			/* mtctr r12 */
 	entry->jump[3] = 0x4e800420;			/* bctr */
 
 	DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);