diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-20 11:23:30 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-20 11:23:30 -0400 |
| commit | d08de37b8c3ec9d650a346b50cf3698c5b00a6bc (patch) | |
| tree | 9db6e38541c6663d4f279448a9f5e711d5a166cb | |
| parent | c2d94c5214905fd67ddfd7ad21729ca129e4e02d (diff) | |
| parent | 56376c5864f8ff4ba7c78a80ae857eee3b1d23d8 (diff) | |
Merge tag 'powerpc-4.17-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:
- Fix an off-by-one bug in our alternative asm patching which leads to
incorrectly patched code. This bug lay dormant for nearly 10 years
but we finally hit it due to a recent change.
- Fix lockups when running KVM guests on Power8 due to a missing check
when a thread that's running KVM comes out of idle.
- Fix an out-of-spec behaviour in the XIVE code (P9 interrupt
controller).
- Fix EEH handling of bridge MMIO windows.
- Prevent crashes in our RFI fallback flush handler if firmware didn't
tell us the size of the L1 cache (only seen on simulators).
Thanks to: Benjamin Herrenschmidt, Madhavan Srinivasan, Michael Neuling.
* tag 'powerpc-4.17-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/kvm: Fix lockups when running KVM guests on Power8
powerpc/eeh: Fix enabling bridge MMIO windows
powerpc/xive: Fix trying to "push" an already active pool VP
powerpc/64s: Default l1d_size to 64K in RFI fallback flush
powerpc/lib: Fix off-by-one in alternate feature patching
| -rw-r--r-- | arch/powerpc/kernel/eeh_pe.c | 3 | ||||
| -rw-r--r-- | arch/powerpc/kernel/idle_book3s.S | 4 | ||||
| -rw-r--r-- | arch/powerpc/kernel/setup_64.c | 11 | ||||
| -rw-r--r-- | arch/powerpc/lib/feature-fixups.c | 2 | ||||
| -rw-r--r-- | arch/powerpc/sysdev/xive/native.c | 4 |
5 files changed, 20 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 2d4956e97aa9..ee5a67d57aab 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c | |||
| @@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev) | |||
| 807 | eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); | 807 | eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); |
| 808 | 808 | ||
| 809 | /* PCI Command: 0x4 */ | 809 | /* PCI Command: 0x4 */ |
| 810 | eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); | 810 | eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] | |
| 811 | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); | ||
| 811 | 812 | ||
| 812 | /* Check the PCIe link is ready */ | 813 | /* Check the PCIe link is ready */ |
| 813 | eeh_bridge_check_link(edev); | 814 | eeh_bridge_check_link(edev); |
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 79d005445c6c..e734f6e45abc 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S | |||
| @@ -553,12 +553,12 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) | |||
| 553 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | 553 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
| 554 | lbz r0,HSTATE_HWTHREAD_STATE(r13) | 554 | lbz r0,HSTATE_HWTHREAD_STATE(r13) |
| 555 | cmpwi r0,KVM_HWTHREAD_IN_KERNEL | 555 | cmpwi r0,KVM_HWTHREAD_IN_KERNEL |
| 556 | beq 1f | 556 | beq 0f |
| 557 | li r0,KVM_HWTHREAD_IN_KERNEL | 557 | li r0,KVM_HWTHREAD_IN_KERNEL |
| 558 | stb r0,HSTATE_HWTHREAD_STATE(r13) | 558 | stb r0,HSTATE_HWTHREAD_STATE(r13) |
| 559 | /* Order setting hwthread_state vs. testing hwthread_req */ | 559 | /* Order setting hwthread_state vs. testing hwthread_req */ |
| 560 | sync | 560 | sync |
| 561 | lbz r0,HSTATE_HWTHREAD_REQ(r13) | 561 | 0: lbz r0,HSTATE_HWTHREAD_REQ(r13) |
| 562 | cmpwi r0,0 | 562 | cmpwi r0,0 |
| 563 | beq 1f | 563 | beq 1f |
| 564 | b kvm_start_guest | 564 | b kvm_start_guest |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 44c30dd38067..b78f142a4148 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
| @@ -890,6 +890,17 @@ static void __ref init_fallback_flush(void) | |||
| 890 | return; | 890 | return; |
| 891 | 891 | ||
| 892 | l1d_size = ppc64_caches.l1d.size; | 892 | l1d_size = ppc64_caches.l1d.size; |
| 893 | |||
| 894 | /* | ||
| 895 | * If there is no d-cache-size property in the device tree, l1d_size | ||
| 896 | * could be zero. That leads to the loop in the asm wrapping around to | ||
| 897 | * 2^64-1, and then walking off the end of the fallback area and | ||
| 898 | * eventually causing a page fault which is fatal. Just default to | ||
| 899 | * something vaguely sane. | ||
| 900 | */ | ||
| 901 | if (!l1d_size) | ||
| 902 | l1d_size = (64 * 1024); | ||
| 903 | |||
| 893 | limit = min(ppc64_bolted_size(), ppc64_rma_size); | 904 | limit = min(ppc64_bolted_size(), ppc64_rma_size); |
| 894 | 905 | ||
| 895 | /* | 906 | /* |
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 35f80ab7cbd8..288fe4f0db4e 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c | |||
| @@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest, | |||
| 55 | unsigned int *target = (unsigned int *)branch_target(src); | 55 | unsigned int *target = (unsigned int *)branch_target(src); |
| 56 | 56 | ||
| 57 | /* Branch within the section doesn't need translating */ | 57 | /* Branch within the section doesn't need translating */ |
| 58 | if (target < alt_start || target >= alt_end) { | 58 | if (target < alt_start || target > alt_end) { |
| 59 | instr = translate_branch(dest, src); | 59 | instr = translate_branch(dest, src); |
| 60 | if (!instr) | 60 | if (!instr) |
| 61 | return 1; | 61 | return 1; |
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index d22aeb0b69e1..b48454be5b98 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c | |||
| @@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc) | |||
| 389 | if (xive_pool_vps == XIVE_INVALID_VP) | 389 | if (xive_pool_vps == XIVE_INVALID_VP) |
| 390 | return; | 390 | return; |
| 391 | 391 | ||
| 392 | /* Check if pool VP already active, if it is, pull it */ | ||
| 393 | if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP) | ||
| 394 | in_be64(xive_tima + TM_SPC_PULL_POOL_CTX); | ||
| 395 | |||
| 392 | /* Enable the pool VP */ | 396 | /* Enable the pool VP */ |
| 393 | vp = xive_pool_vps + cpu; | 397 | vp = xive_pool_vps + cpu; |
| 394 | pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp); | 398 | pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp); |
