Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/Kconfig                   | 13
-rw-r--r--  arch/powerpc/Makefile                  |  4
-rw-r--r--  arch/powerpc/boot/Makefile             |  3
-rw-r--r--  arch/powerpc/boot/elf_util.c           |  6
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h  |  2
-rw-r--r--  arch/powerpc/include/asm/page.h        | 14
-rw-r--r--  arch/powerpc/include/asm/sections.h    |  6
-rw-r--r--  arch/powerpc/kernel/Makefile           |  1
-rw-r--r--  arch/powerpc/kernel/head_64.S          | 26
-rw-r--r--  arch/powerpc/kernel/paca.c             |  3
-rw-r--r--  arch/powerpc/kernel/prom.c             |  3
-rw-r--r--  arch/powerpc/kernel/prom_init.c        | 11
-rw-r--r--  arch/powerpc/kernel/reloc_64.S         | 87
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S      | 15
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c        |  2
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c  |  4
16 files changed, 181 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 587da5e0990f..17c988b678d1 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -806,6 +806,19 @@ config PIN_TLB
 endmenu
 
 if PPC64
+config RELOCATABLE
+	bool "Build a relocatable kernel"
+	help
+	  This builds a kernel image that is capable of running anywhere
+	  in the RMA (real memory area) at any 16k-aligned base address.
+	  The kernel is linked as a position-independent executable (PIE)
+	  and contains dynamic relocations which are processed early
+	  in the bootup process.
+
+	  One use is for the kexec on panic case where the recovery kernel
+	  must live at a different physical address than the primary
+	  kernel.
+
 config PAGE_OFFSET
 	hex
 	default "0xc000000000000000"
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index c6be19e9ceae..4df38cbb4149 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -63,7 +63,9 @@ override CC += -m$(CONFIG_WORD_SIZE)
 override AR := GNUTARGET=elf$(CONFIG_WORD_SIZE)-powerpc $(AR)
 endif
 
-LDFLAGS_vmlinux := -Bstatic
+LDFLAGS_vmlinux-yy := -Bstatic
+LDFLAGS_vmlinux-$(CONFIG_PPC64)$(CONFIG_RELOCATABLE) := -pie
+LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-yy)
 
 CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none -mcall-aixdesc
 CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 717a3bc1352e..6403275553ea 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -310,8 +310,11 @@ $(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb
 $(obj)/vmlinux.strip: vmlinux
 	$(STRIP) -s -R .comment $< -o $@
 
+# The iseries hypervisor won't take an ET_DYN executable, so this
+# changes the type (byte 17) in the file to ET_EXEC (2).
 $(obj)/zImage.iseries: vmlinux
 	$(STRIP) -s -R .comment $< -o $@
+	printf "\x02" | dd of=$@ conv=notrunc bs=1 seek=17
 
 $(obj)/uImage: vmlinux $(wrapperbits)
 	$(call if_changed,wrap,uboot)
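The dd recipe above relies on the ELF header layout: e_ident occupies bytes
0-15 and the 16-bit e_type field sits at bytes 16-17, so on a big-endian image
the low-order byte of e_type is byte 17. Overwriting it with 0x02 turns the
ET_DYN (3) type produced by the -pie link back into ET_EXEC (2). A rough
stand-alone C sketch of the same byte patch (illustration only, not part of
the patch; assumes a big-endian ELF file passed as argv[1]):

	#include <elf.h>
	#include <stddef.h>
	#include <stdio.h>

	int main(int argc, char **argv)
	{
		FILE *f;

		if (argc < 2 || !(f = fopen(argv[1], "r+b")))
			return 1;
		/* e_type is at offset 16; patch its low (big-endian) byte */
		fseek(f, offsetof(Elf64_Ehdr, e_type) + 1, SEEK_SET);
		fputc(ET_EXEC, f);	/* ET_EXEC == 2 */
		return fclose(f);
	}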
diff --git a/arch/powerpc/boot/elf_util.c b/arch/powerpc/boot/elf_util.c
index 7454aa4cc20c..1567a0c0f05c 100644
--- a/arch/powerpc/boot/elf_util.c
+++ b/arch/powerpc/boot/elf_util.c
@@ -27,7 +27,8 @@ int parse_elf64(void *hdr, struct elf_info *info)
 	      elf64->e_ident[EI_MAG3]  == ELFMAG3	&&
 	      elf64->e_ident[EI_CLASS] == ELFCLASS64	&&
 	      elf64->e_ident[EI_DATA]  == ELFDATA2MSB	&&
-	      elf64->e_type            == ET_EXEC	&&
+	      (elf64->e_type           == ET_EXEC ||
+	       elf64->e_type           == ET_DYN)	&&
 	      elf64->e_machine         == EM_PPC64))
 		return 0;
 
@@ -58,7 +59,8 @@ int parse_elf32(void *hdr, struct elf_info *info)
 	      elf32->e_ident[EI_MAG3]  == ELFMAG3	&&
 	      elf32->e_ident[EI_CLASS] == ELFCLASS32	&&
 	      elf32->e_ident[EI_DATA]  == ELFDATA2MSB	&&
-	      elf32->e_type            == ET_EXEC	&&
+	      (elf32->e_type           == ET_EXEC ||
+	       elf32->e_type           == ET_DYN)	&&
 	      elf32->e_machine         == EM_PPC))
 		return 0;
 
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index c2df53c5ceb9..5a441742ffba 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -437,7 +437,7 @@ typedef struct {
 	})
 #endif /* 1 */
 
-/* This is only valid for addresses >= KERNELBASE */
+/* This is only valid for addresses >= PAGE_OFFSET */
 static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
 {
 	if (ssize == MMU_SEGSIZE_256M)
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index e088545cb3f5..64e144505f65 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -71,15 +71,21 @@
 #define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
 #define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
 
-#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_FLATMEM)
+#if defined(CONFIG_RELOCATABLE)
 #ifndef __ASSEMBLY__
 extern phys_addr_t memstart_addr;
 extern phys_addr_t kernstart_addr;
 #endif
 #define PHYSICAL_START	kernstart_addr
-#define MEMORY_START	memstart_addr
 #else
 #define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
+#endif
+
+#ifdef CONFIG_PPC64
+#define MEMORY_START	0UL
+#elif defined(CONFIG_RELOCATABLE)
+#define MEMORY_START	memstart_addr
+#else
 #define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
 #endif
 
@@ -92,8 +98,8 @@ extern phys_addr_t kernstart_addr;
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define __va(x) ((void *)((unsigned long)(x) - PHYSICAL_START + KERNELBASE))
-#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - MEMORY_START))
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
 
 /*
  * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
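With this change the linear-mapping translation no longer depends on where the
kernel image itself was loaded: on 64-bit MEMORY_START is 0, so __va()/__pa()
simply add or subtract PAGE_OFFSET regardless of PHYSICAL_START. A small
user-space sketch of the arithmetic (illustration only; the constants mirror
the 64-bit defaults and the physical address is made up):

	#include <stdio.h>

	#define PAGE_OFFSET	0xc000000000000000UL
	#define MEMORY_START	0UL

	#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - MEMORY_START))
	#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)

	int main(void)
	{
		unsigned long phys = 0x4000000UL;	/* e.g. RAM at 64MB */

		printf("__va(0x%lx) = %p\n", phys, __va(phys));
		printf("__pa(__va(0x%lx)) = 0x%lx\n", phys, __pa(__va(phys)));
		return 0;
	}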
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 7710e9e6660f..baf318aec533 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -16,6 +16,12 @@ static inline int in_kernel_text(unsigned long addr)
 		return 0;
 }
 
+static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
+{
+	return start < (unsigned long)__init_end &&
+		(unsigned long)_stext < end;
+}
+
 #undef dereference_function_descriptor
 void *dereference_function_descriptor(void *);
 
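overlaps_kernel_text() is the usual half-open interval test: [start, end)
intersects [_stext, __init_end) exactly when each range starts before the
other ends. It is used further down in htab_bolt_mapping() with
(vaddr, vaddr + step), so a bolted page that covers any part of the kernel
text is mapped executable even when its base address lies outside the text.
A tiny sketch of the same predicate with made-up addresses (illustration
only):

	#include <assert.h>

	static int overlaps(unsigned long start, unsigned long end,
			    unsigned long lo, unsigned long hi)
	{
		return start < hi && lo < end;
	}

	int main(void)
	{
		/* a 16MB page at 0 covers a text range 0x0..0x900000 */
		assert(overlaps(0x0, 0x1000000, 0x0, 0x900000));
		/* a page entirely above the text range does not */
		assert(!overlaps(0x1000000, 0x2000000, 0x0, 0x900000));
		return 0;
	}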
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 49b49c0707f0..16326fd92f99 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   paca.o cpu_setup_ppc970.o \
 				   cpu_setup_pa6t.o \
 				   firmware.o nvram_64.o
+obj64-$(CONFIG_RELOCATABLE)	+= reloc_64.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 6cdfd44d8efe..84856bee33a5 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -1360,6 +1360,12 @@ _INIT_STATIC(__boot_from_prom)
 	 */
 	rldicr	r1,r1,0,59
 
+#ifdef CONFIG_RELOCATABLE
+	/* Relocate code for where we are now */
+	mr	r3,r26
+	bl	.relocate
+#endif
+
 	/* Restore parameters */
 	mr	r3,r31
 	mr	r4,r30
@@ -1368,11 +1374,19 @@ _INIT_STATIC(__boot_from_prom)
 	mr	r7,r27
 
 	/* Do all of the interaction with OF client interface */
+	mr	r8,r26
 	bl	.prom_init
 	/* We never return */
 	trap
 
 _STATIC(__after_prom_start)
+#ifdef CONFIG_RELOCATABLE
+	/* process relocations for the final address of the kernel */
+	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
+	sldi	r25,r25,32
+	mr	r3,r25
+	bl	.relocate
+#endif
 
 	/*
 	 * We need to run with _stext at physical address PHYSICAL_START.
@@ -1381,10 +1395,9 @@ _STATIC(__after_prom_start)
 	 *
 	 * Note: This process overwrites the OF exception vectors.
 	 */
-	LOAD_REG_IMMEDIATE(r3, PHYSICAL_START)	/* target addr */
-	cmpd	r3,r26			/* In some cases the loader may */
+	li	r3,0			/* target addr */
+	mr.	r4,r26			/* In some cases the loader may */
 	beq	9f			/* have already put us at zero */
-	mr	r4,r26			/* source address */
 	lis	r5,(copy_to_here - _stext)@ha
 	addi	r5,r5,(copy_to_here - _stext)@l	/* # bytes of memory to copy */
 	li	r6,0x100		/* Start offset, the first 0x100 */
@@ -1617,6 +1630,13 @@ _INIT_STATIC(start_here_multiplatform)
 	ori	r6,r6,MSR_RI
 	mtmsrd	r6			/* RI on */
 
+#ifdef CONFIG_RELOCATABLE
+	/* Save the physical address we're running at in kernstart_addr */
+	LOAD_REG_ADDR(r4, kernstart_addr)
+	clrldi	r0,r25,2
+	std	r0,0(r4)
+#endif
+
 	/* The following gets the stack set up with the regs */
 	/* pointing to the real addr of the kernel stack.  This is */
 	/* all done to support the C function call below which sets */
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 623e8c3c57f9..48a347133f41 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -12,6 +12,7 @@
 
 #include <asm/lppaca.h>
 #include <asm/paca.h>
+#include <asm/sections.h>
 
 /* This symbol is provided by the linker - let it fill in the paca
  * field correctly */
@@ -79,7 +80,7 @@ void __init initialise_pacas(void)
 		new_paca->lock_token = 0x8000;
 		new_paca->paca_index = cpu;
 		new_paca->kernel_toc = kernel_toc;
-		new_paca->kernelbase = KERNELBASE;
+		new_paca->kernelbase = (unsigned long) _stext;
 		new_paca->kernel_msr = MSR_KERNEL;
 		new_paca->hw_cpu_id = 0xffff;
 		new_paca->slb_shadow_ptr = &slb_shadow[cpu];
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 09455e1c27c5..3a2dc7e6586a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1192,6 +1192,9 @@ void __init early_init_devtree(void *params)
 
 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
 	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+	/* If relocatable, reserve first 32k for interrupt vectors etc. */
+	if (PHYSICAL_START > MEMORY_START)
+		lmb_reserve(MEMORY_START, 0x8000);
 	reserve_kdump_trampoline();
 	reserve_crashkernel();
 	early_reserve_mem();
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1f8988585054..7cf274a2b334 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2309,13 +2309,14 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
 
 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 			       unsigned long pp,
-			       unsigned long r6, unsigned long r7)
+			       unsigned long r6, unsigned long r7,
+			       unsigned long kbase)
 {
 	struct prom_t *_prom;
 	unsigned long hdr;
-	unsigned long offset = reloc_offset();
 
 #ifdef CONFIG_PPC32
+	unsigned long offset = reloc_offset();
 	reloc_got2(offset);
 #endif
 
@@ -2349,9 +2350,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 	 */
 	RELOC(of_platform) = prom_find_machine_type();
 
+#ifndef CONFIG_RELOCATABLE
 	/* Bail if this is a kdump kernel. */
 	if (PHYSICAL_START > 0)
 		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
+#endif
 
 	/*
 	 * Check for an initrd
@@ -2371,7 +2374,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 	 * Copy the CPU hold code
 	 */
 	if (RELOC(of_platform) != PLATFORM_POWERMAC)
-		copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
+		copy_and_flush(0, kbase, 0x100, 0);
 
 	/*
 	 * Do early parsing of command line
@@ -2474,7 +2477,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 	reloc_got2(-offset);
 #endif
 
-	__start(hdr, KERNELBASE + offset, 0);
+	__start(hdr, kbase, 0);
 
 	return 0;
 }
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
new file mode 100644
index 000000000000..b47a0e1ab001
--- /dev/null
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -0,0 +1,87 @@
+/*
+ * Code to process dynamic relocations in the kernel.
+ *
+ * Copyright 2008 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ppc_asm.h>
+
+RELA = 7
+RELACOUNT = 0x6ffffff9
+R_PPC64_RELATIVE = 22
+
+/*
+ * r3 = desired final address of kernel
+ */
+_GLOBAL(relocate)
+	mflr	r0
+	bcl	20,31,$+4
+0:	mflr	r12		/* r12 has runtime addr of label 0 */
+	mtlr	r0
+	ld	r11,(p_dyn - 0b)(r12)
+	add	r11,r11,r12	/* r11 has runtime addr of .dynamic section */
+	ld	r9,(p_rela - 0b)(r12)
+	add	r9,r9,r12	/* r9 has runtime addr of .rela.dyn section */
+	ld	r10,(p_st - 0b)(r12)
+	add	r10,r10,r12	/* r10 has runtime addr of _stext */
+
+	/*
+	 * Scan the dynamic section for the RELA and RELACOUNT entries.
+	 */
+	li	r7,0
+	li	r8,0
+1:	ld	r6,0(r11)	/* get tag */
+	cmpdi	r6,0
+	beq	4f		/* end of list */
+	cmpdi	r6,RELA
+	bne	2f
+	ld	r7,8(r11)	/* get RELA pointer in r7 */
+	b	3f
+2:	addis	r6,r6,(-RELACOUNT)@ha
+	cmpdi	r6,RELACOUNT@l
+	bne	3f
+	ld	r8,8(r11)	/* get RELACOUNT value in r8 */
+3:	addi	r11,r11,16
+	b	1b
+4:	cmpdi	r7,0		/* check we have both RELA and RELACOUNT */
+	cmpdi	cr1,r8,0
+	beq	6f
+	beq	cr1,6f
+
+	/*
+	 * Work out linktime address of _stext and hence the
+	 * relocation offset to be applied.
+	 * cur_offset [r7] = rela.run [r9] - rela.link [r7]
+	 * _stext.link [r10] = _stext.run [r10] - cur_offset [r7]
+	 * final_offset [r3] = _stext.final [r3] - _stext.link [r10]
+	 */
+	subf	r7,r7,r9	/* cur_offset */
+	subf	r10,r7,r10
+	subf	r3,r10,r3	/* final_offset */
+
+	/*
+	 * Run through the list of relocations and process the
+	 * R_PPC64_RELATIVE ones.
+	 */
+	mtctr	r8
+5:	lwz	r0,12(r9)	/* ELF64_R_TYPE(reloc->r_info) */
+	cmpwi	r0,R_PPC64_RELATIVE
+	bne	6f
+	ld	r6,0(r9)	/* reloc->r_offset */
+	ld	r0,16(r9)	/* reloc->r_addend */
+	add	r0,r0,r3
+	stdx	r0,r7,r6
+	addi	r9,r9,24
+	bdnz	5b
+
+6:	blr
+
+p_dyn:	.llong	__dynamic_start - 0b
+p_rela:	.llong	__rela_dyn_start - 0b
+p_st:	.llong	_stext - 0b
+
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 9f6c1ca1739e..e6927fb2e655 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -187,6 +187,21 @@ SECTIONS
 		*(.machine.desc)
 		__machine_desc_end = . ;
 	}
+	. = ALIGN(8);
+	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { *(.dynsym) }
+	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
+	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
+	{
+		__dynamic_start = .;
+		*(.dynamic)
+	}
+	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
+	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
+	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
+	{
+		__rela_dyn_start = .;
+		*(.rela*)
+	}
 
 	/* freed after init ends here */
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d666f71f1f30..09db4efe1921 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -194,7 +194,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		unsigned long tprot = prot;
 
 		/* Make kernel text executable */
-		if (in_kernel_text(vaddr))
+		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
 
 		hash = hpt_hash(va, shift, ssize);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 4ae3d00e0bdd..40f72c2a4699 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -787,7 +787,7 @@ static void __devinit smp_core99_kick_cpu(int nr)
 {
 	unsigned int save_vector;
 	unsigned long target, flags;
-	unsigned int *vector = (unsigned int *)(KERNELBASE+0x100);
+	unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100);
 
 	if (nr < 0 || nr > 3)
 		return;
@@ -801,7 +801,7 @@ static void __devinit smp_core99_kick_cpu(int nr)
 	save_vector = *vector;
 
 	/* Setup fake reset vector that does
-	 *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
+	 *   b __secondary_start_pmac_0 + nr*8
 	 */
 	target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
 	patch_branch(vector, target, BRANCH_SET_LINK);