Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/entry_64.S           | 41
-rw-r--r--  arch/powerpc/kernel/pci_64.c             |  2
-rw-r--r--  arch/powerpc/kernel/perf_event.c         | 17
-rw-r--r--  arch/powerpc/kernel/process.c            |  2
-rw-r--r--  arch/powerpc/kernel/setup_64.c           |  1
-rw-r--r--  arch/powerpc/kernel/vdso.c               | 11
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S  |  4
7 files changed, 46 insertions, 32 deletions
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f9fd54bfcc84..9763267e38b4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -658,42 +658,43 @@ do_work:
 	cmpdi	r0,0
 	crandc	eq,cr1*4+eq,eq
 	bne	restore
-	/* here we are preempting the current task */
-1:
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	.trace_hardirqs_on
-	/* Note: we just clobbered r10 which used to contain the previous
-	 * MSR before the hard-disabling done by the caller of do_work.
-	 * We don't have that value anymore, but it doesn't matter as
-	 * we will hard-enable unconditionally, we can just reload the
-	 * current MSR into r10
+
+	/* Here we are preempting the current task.
+	 *
+	 * Ensure interrupts are soft-disabled. We also properly mark
+	 * the PACA to reflect the fact that they are hard-disabled
+	 * and trace the change
 	 */
-	mfmsr	r10
-#endif /* CONFIG_TRACE_IRQFLAGS */
-	li	r0,1
+	li	r0,0
 	stb	r0,PACASOFTIRQEN(r13)
 	stb	r0,PACAHARDIRQEN(r13)
+	TRACE_DISABLE_INTS
+
+	/* Call the scheduler with soft IRQs off */
+1:	bl	.preempt_schedule_irq
+
+	/* Hard-disable interrupts again (and update PACA) */
 #ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-	bl	.preempt_schedule
 	wrteei	0
 #else
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1		/* reenable interrupts */
-	bl	.preempt_schedule
 	mfmsr	r10
-	clrrdi	r9,r1,THREAD_SHIFT
-	rldicl	r10,r10,48,1	/* disable interrupts again */
+	rldicl	r10,r10,48,1
 	rotldi	r10,r10,16
 	mtmsrd	r10,1
 #endif /* CONFIG_PPC_BOOK3E */
+	li	r0,0
+	stb	r0,PACAHARDIRQEN(r13)
+
+	/* Re-test flags and eventually loop */
+	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	bne	1b
 	b	restore
 
 user_work:
-#endif
+#endif /* CONFIG_PREEMPT */
+
 	/* Enable interrupts */
 #ifdef CONFIG_PPC_BOOK3E
 	wrteei	1
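The rldicl/rotldi pair in the non-BOOK3E path above clears MSR_EE (bit 15, 0x8000) using only rotate-and-mask instructions: rotating the 64-bit MSR left by 48 parks the EE bit in the most significant position, the rldicl mask (MB=1) drops that bit, and a further rotate left by 16 restores the original layout. Below is a minimal C sketch of the same bit manipulation; the helper name msr_clear_ee is illustrative and not part of the kernel or this patch.

#include <stdint.h>

/* Mimics "rldicl r10,r10,48,1; rotldi r10,r10,16":
 * rotate left 48 so bit 15 lands at bit 63, clear bit 63, rotate left 16 back. */
static uint64_t msr_clear_ee(uint64_t msr)
{
	uint64_t r = (msr << 48) | (msr >> 16);	/* rotate left by 48 */
	r &= ~(UINT64_C(1) << 63);		/* rldicl mask MB=1: drop the MSB */
	return (r << 16) | (r >> 48);		/* rotate left by 16 */
}

The net effect is simply msr & ~0x8000; the rotate form lets the assembly clear the bit without loading a separate 64-bit mask constant.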
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index ba949a2c93ac..ccf56ac92de5 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -97,7 +97,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
 	 * to do an appropriate TLB flush here too
 	 */
 	if (bus->self) {
+#ifdef CONFIG_PPC_STD_MMU_64
 		struct resource *res = bus->resource[0];
+#endif
 
 		pr_debug("IO unmapping for PCI-PCI bridge %s\n",
 			 pci_name(bus->self));
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index bbcbae183e92..87f1663584b0 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -116,20 +116,23 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 {
 	unsigned long mmcra = regs->dsisr;
+	unsigned long sihv = MMCRA_SIHV;
+	unsigned long sipr = MMCRA_SIPR;
 
 	if (TRAP(regs) != 0xf00)
 		return 0;	/* not a PMU interrupt */
 
 	if (ppmu->flags & PPMU_ALT_SIPR) {
-		if (mmcra & POWER6_MMCRA_SIHV)
-			return PERF_RECORD_MISC_HYPERVISOR;
-		return (mmcra & POWER6_MMCRA_SIPR) ?
-			PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL;
+		sihv = POWER6_MMCRA_SIHV;
+		sipr = POWER6_MMCRA_SIPR;
 	}
-	if (mmcra & MMCRA_SIHV)
+
+	/* PR has priority over HV, so order below is important */
+	if (mmcra & sipr)
+		return PERF_RECORD_MISC_USER;
+	if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV))
 		return PERF_RECORD_MISC_HYPERVISOR;
-	return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER :
-		PERF_RECORD_MISC_KERNEL;
+	return PERF_RECORD_MISC_KERNEL;
 }
 
 /*
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 2ec1eaed19ca..c930ac38e59f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1172,7 +1172,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	unsigned long base = mm->brk;
 	unsigned long ret;
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
 	/*
 	 * If we are using 1TB segments and we are allowed to randomise
 	 * the heap, we can put it above 1TB so it is backed by a 1TB
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 797ea95aae2e..04f638d82fb3 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -57,7 +57,6 @@
 #include <asm/cache.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/mmu-hash64.h>
 #include <asm/firmware.h>
 #include <asm/xmon.h>
 #include <asm/udbg.h>
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 94e2df3cae07..137dc22afa42 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -50,6 +50,9 @@
 /* Max supported size for symbol names */
 #define MAX_SYMNAME	64
 
+/* The alignment of the vDSO */
+#define VDSO_ALIGNMENT	(1 << 16)
+
 extern char vdso32_start, vdso32_end;
 static void *vdso32_kbase = &vdso32_start;
 static unsigned int vdso32_pages;
@@ -231,15 +234,21 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * pick a base address for the vDSO in process space. We try to put it
 	 * at vdso_base which is the "natural" base for it, but we might fail
 	 * and end up putting it elsewhere.
+	 * Add enough to the size so that the result can be aligned.
 	 */
 	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, vdso_base,
-				      vdso_pages << PAGE_SHIFT, 0, 0);
+				      (vdso_pages << PAGE_SHIFT) +
+				      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+				      0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		rc = vdso_base;
 		goto fail_mmapsem;
 	}
 
+	/* Add required alignment. */
+	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
+
 	/*
 	 * Put vDSO base into mm struct. We need to do this before calling
 	 * install_special_mapping or the perf counter mmap tracking code
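The second hunk above reserves VDSO_ALIGNMENT - 1 extra bytes so that a 64 KiB aligned base is guaranteed to exist inside the region get_unmapped_area() returns, then rounds up with ALIGN(). Below is a minimal user-space sketch of the same over-allocate-then-round-up pattern using mmap(); map_aligned is an illustrative name and not part of the patch.

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/* Reserve size + align - 1 bytes, then round the start up to the next
 * multiple of align (align must be a power of two); the aligned block
 * is guaranteed to fit inside the mapping that was reserved. */
static void *map_aligned(size_t size, size_t align)
{
	void *p = mmap(NULL, size + align - 1, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return NULL;
	return (void *)(((uintptr_t)p + align - 1) & ~((uintptr_t)align - 1));
}

The kernel variant masks the slack with PAGE_MASK because get_unmapped_area() only ever hands out whole pages, so sub-page slack would be wasted.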
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 904ef1360dd7..0546bcd49cd0 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -25,7 +25,7 @@ SECTIONS
 	. = ALIGN(16);
 	.text : {
 		*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
-	}
+	} :text
 	PROVIDE(__etext = .);
 	PROVIDE(_etext = .);
 	PROVIDE(etext = .);
@@ -56,7 +56,7 @@ SECTIONS
 	.fixup		: { *(.fixup) }
 
 	.dynamic	: { *(.dynamic) }	:text	:dynamic
-	.got		: { *(.got) }
+	.got		: { *(.got) }		:text
 	.plt		: { *(.plt) }
 
 	_end = .;
