author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2016-08-19 04:52:38 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-09-22 17:54:18 -0400
commit	b970b41ea68ace17f389c8387c1df4a86aa039a0 (patch)
tree	9e999c56f825bd1971131679b9ce06d10b2106a6
parent	be34d300597a7a4fb38c6e3f9929af2f1faa23b8 (diff)
powerpc/64/kexec: Copy image with MMU off when possible
Currently we turn the MMU off after copying the image, and we make sure there is no overlap between the hash table and the target pages in that case.

That doesn't work for Radix however. In that case, the page tables are scattered and we can't really enforce that the target of the image isn't overlapping one of them.

So instead, let's turn the MMU off before copying the image in radix mode. Thankfully, in radix mode, even under a hypervisor, we know we don't have the same kind of RMA limitations that hash mode has.

While at it, also turn the MMU off early when using hash in non-LPAR mode; that way we can get rid of the collision check completely.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
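As a rough, standalone sketch of the resulting policy (a condensed model of the new logic added to default_machine_kexec(), not the kernel code itself; the boolean parameters stand in for CONFIG_PPC_BOOK3E, radix_enabled() and the firmware_has_feature() checks):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative only: when should the kexec image copy run with the MMU off? */
	static bool copy_with_mmu_off(bool book3e, bool radix, bool lpar_or_ps3)
	{
		if (book3e)
			return false;		/* no real MMU-off mode; rely on bolted TLB entries */
		return radix || !lpar_or_ps3;	/* radix always; hash only outside an LPAR/PS3 */
	}

	int main(void)
	{
		printf("hash under a hypervisor : %d\n", copy_with_mmu_off(false, false, true));  /* 0: keep MMU on for RMA access */
		printf("radix under a hypervisor: %d\n", copy_with_mmu_off(false, true,  true));  /* 1: no RMA limitation, copy in real mode */
		printf("hash, bare metal        : %d\n", copy_with_mmu_off(false, false, false)); /* 1: real-mode copy, collision check gone */
		return 0;
	}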
-rw-r--r--	arch/powerpc/kernel/machine_kexec_64.c	| 49
-rw-r--r--	arch/powerpc/kernel/misc_64.S	| 18
2 files changed, 36 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 7a7793211ae7..7e12ef70808f 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -23,6 +23,7 @@
 #include <asm/current.h>
 #include <asm/machdep.h>
 #include <asm/cacheflush.h>
+#include <asm/firmware.h>
 #include <asm/paca.h>
 #include <asm/mmu.h>
 #include <asm/sections.h>	/* _end */
@@ -64,31 +65,6 @@ int default_machine_kexec_prepare(struct kimage *image)
 		if (image->segment[i].mem < __pa(_end))
 			return -ETXTBSY;
 
-	/*
-	 * For non-LPAR, we absolutely can not overwrite the mmu hash
-	 * table, since we are still using the bolted entries in it to
-	 * do the copy.  Check that here.
-	 *
-	 * It is safe if the end is below the start of the blocked
-	 * region (end <= low), or if the beginning is after the
-	 * end of the blocked region (begin >= high).  Use the
-	 * boolean identity !(a || b)  === (!a && !b).
-	 */
-#ifdef CONFIG_PPC_STD_MMU_64
-	if (htab_address) {
-		low = __pa(htab_address);
-		high = low + htab_size_bytes;
-
-		for (i = 0; i < image->nr_segments; i++) {
-			begin = image->segment[i].mem;
-			end = begin + image->segment[i].memsz;
-
-			if ((begin < high) && (end > low))
-				return -ETXTBSY;
-		}
-	}
-#endif /* CONFIG_PPC_STD_MMU_64 */
-
 	/* We also should not overwrite the tce tables */
 	for_each_node_by_type(node, "pci") {
 		basep = of_get_property(node, "linux,tce-base", NULL);
@@ -329,11 +305,14 @@ struct paca_struct kexec_paca;
 /* Our assembly helper, in misc_64.S */
 extern void kexec_sequence(void *newstack, unsigned long start,
 			   void *image, void *control,
-			   void (*clear_all)(void)) __noreturn;
+			   void (*clear_all)(void),
+			   bool copy_with_mmu_off) __noreturn;
 
 /* too late to fail here */
 void default_machine_kexec(struct kimage *image)
 {
+	bool copy_with_mmu_off;
+
 	/* prepare control code if any */
 
 	/*
@@ -371,13 +350,29 @@ void default_machine_kexec(struct kimage *image)
 	/* XXX: If anyone does 'dynamic lppacas' this will also need to be
 	 * switched to a static version!
 	 */
+	/*
+	 * On Book3S, the copy must happen with the MMU off if we are either
+	 * using Radix page tables or we are not in an LPAR since we can
+	 * overwrite the page tables while copying.
+	 *
+	 * In an LPAR, we keep the MMU on otherwise we can't access beyond
+	 * the RMA. On BookE there is no real MMU off mode, so we have to
+	 * keep it enabled as well (but then we have bolted TLB entries).
+	 */
+#ifdef CONFIG_PPC_BOOK3E
+	copy_with_mmu_off = false;
+#else
+	copy_with_mmu_off = radix_enabled() ||
+		!(firmware_has_feature(FW_FEATURE_LPAR) ||
+		  firmware_has_feature(FW_FEATURE_PS3_LV1));
+#endif
 
 	/* Some things are best done in assembly.  Finding globals with
 	 * a toc is easier in C, so pass in what we can.
 	 */
 	kexec_sequence(&kexec_stack, image->start, image,
 			page_address(image->control_code_page),
-			mmu_cleanup_all);
+		       mmu_cleanup_all, copy_with_mmu_off);
 	/* NOTREACHED */
 }
 
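For reference, the check removed above was a plain interval-overlap test between each kexec segment and the hash table; a minimal standalone version of the same test (names here are illustrative, not kernel identifiers) looks like this:

	#include <assert.h>
	#include <stdbool.h>

	/*
	 * Two half-open ranges [begin, end) and [low, high) intersect exactly
	 * when begin < high && end > low, i.e. the negation of the "safe"
	 * condition (end <= low || begin >= high) cited in the removed comment.
	 */
	static bool ranges_overlap(unsigned long begin, unsigned long end,
				   unsigned long low, unsigned long high)
	{
		return begin < high && end > low;
	}

	int main(void)
	{
		assert(!ranges_overlap(0x0000, 0x1000, 0x1000, 0x2000)); /* segment ends where the table starts: safe */
		assert( ranges_overlap(0x0800, 0x1800, 0x1000, 0x2000)); /* segment straddles the table start: rejected */
		return 0;
	}

With the copy now done in real mode on non-LPAR hash, the bolted hash entries are not in use during the copy, which is why the patch can drop this check entirely.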
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 939e3f50a345..9f0bed214bcb 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -592,7 +592,8 @@ real_mode: /* assume normal blr return */
 #endif
 
 /*
- * kexec_sequence(newstack, start, image, control, clear_all())
+ * kexec_sequence(newstack, start, image, control, clear_all(),
+		  copy_with_mmu_off)
  *
  * does the grungy work with stack switching and real mode switches
  * also does simple calls to other code
@@ -628,7 +629,7 @@ _GLOBAL(kexec_sequence)
 	mr	r29,r5			/* image (virt) */
 	mr	r28,r6			/* control, unused */
 	mr	r27,r7			/* clear_all() fn desc */
-	mr	r26,r8			/* spare */
+	mr	r26,r8			/* copy_with_mmu_off */
 	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */
 
 	/* disable interrupts, we are overwriting kernel data next */
@@ -640,15 +641,24 @@ _GLOBAL(kexec_sequence)
 	mtmsrd	r3,1
 #endif
 
+	/* We need to turn the MMU off unless we are in hash mode
+	 * under a hypervisor
+	 */
+	cmpdi	r26,0
+	beq	1f
+	bl	real_mode
+1:
 	/* copy dest pages, flush whole dest image */
 	mr	r3,r29
 	bl	kexec_copy_flush	/* (image) */
 
-	/* turn off mmu */
+	/* turn off mmu now if not done earlier */
+	cmpdi	r26,0
+	bne	1f
 	bl	real_mode
 
 	/* copy	0x100 bytes starting at start to 0 */
-	li	r3,0
+1:	li	r3,0
 	mr	r4,r30	/* start, aka phys mem offset */
 	li	r5,0x100
 	li	r6,0
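To make the reordering explicit, here is a small standalone C sketch of the control flow kexec_sequence now implements (the helpers are stand-ins for the assembly labels and calls above, not kernel APIs; they only record the order of operations):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stubs standing in for the assembly labels/calls in misc_64.S. */
	static void real_mode(void)        { puts("  switch to real mode (MMU off)"); }
	static void kexec_copy_flush(void) { puts("  copy + flush destination image"); }

	/* C-flavoured paraphrase of the modified kexec_sequence() flow. */
	static void kexec_sequence_sketch(bool copy_with_mmu_off)
	{
		if (copy_with_mmu_off)		/* radix, or hash without a hypervisor */
			real_mode();		/* drop to real mode before touching target pages */

		kexec_copy_flush();

		if (!copy_with_mmu_off)		/* hash under a hypervisor: MMU stayed on for the copy */
			real_mode();		/* ...so only drop to real mode now */

		puts("  copy 0x100 bytes of entry code to address 0");
	}

	int main(void)
	{
		puts("copy_with_mmu_off = true:");
		kexec_sequence_sketch(true);
		puts("copy_with_mmu_off = false:");
		kexec_sequence_sketch(false);
		return 0;
	}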