author	Linus Torvalds <torvalds@linux-foundation.org>	2018-11-16 11:14:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-11-16 11:14:54 -0500
commit	ef268de19756e6bc78cd3e6d9b15545f7df97ef2 (patch)
tree	593c6ce06142726b120383c584dc7436abead9f3
parent	50d25bdc64318a8a367c4433fa1e030c38331f05 (diff)
parent	b2fed34a628df6118b5d4e13f49a33e15f704fa9 (diff)
Merge tag 'powerpc-4.20-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:
 "Two weeks worth of fixes since rc1.

   - I broke 16-byte alignment of the stack when we moved PPR into
     pt_regs. Despite being required by the ABI this broke almost
     nothing, we eventually hit it in code where GCC does arithmetic on
     the stack pointer assuming the bottom 4 bits are clear. Fix it by
     padding the in-kernel pt_regs by 8 bytes.

   - A couple of commits fixing minor bugs in the recent SLB rewrite.

   - A build fix related to tracepoints in KVM in some configurations.

   - Our old "IO workarounds" code written for Cell couldn't coexist in
     a kernel that runs on Power9 with the Radix MMU, fix that.

   - Remove the NPU DMA ops, these just printed a warning and should
     never have been called.

   - Suppress an overly chatty message triggered by CPU hotplug in some
     configs.

   - Two small selftest fixes.

  Thanks to: Alistair Popple, Gustavo Romero, Nicholas Piggin, Satheesh
  Rajendran, Scott Wood"

* tag 'powerpc-4.20-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  selftests/powerpc: Adjust wild_bctr to build with old binutils
  powerpc/64: Fix kernel stack 16-byte alignment
  powerpc/numa: Suppress "VPHN is not supported" messages
  selftests/powerpc: Fix wild_bctr test to work on ppc64
  powerpc/io: Fix the IO workarounds code to work with Radix
  powerpc/mm/64s: Fix preempt warning in slb_allocate_kernel()
  KVM: PPC: Move and undef TRACE_INCLUDE_PATH/FILE
  powerpc/mm/64s: Only use slbfee on CPUs that support it
  powerpc/mm/64s: Use PPC_SLBFEE macro
  powerpc/mm/64s: Consolidate SLB assertions
  powerpc/powernv/npu: Remove NPU DMA ops
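Background on the alignment fix: the 64-bit ELF ABI keeps the stack pointer 16-byte aligned, so GCC is entitled to assume the bottom 4 bits of r1 are zero when it does arithmetic on it. The sketch below is illustrative only (fake_regs is a stand-in, not the kernel's full pt_regs), but it shows the invariant that the new pad field and the BUILD_BUG_ON added below enforce:

#include <stddef.h>

/*
 * Illustrative stand-in for the 64-bit pt_regs layout: adding the lone
 * "ppr" doubleword left the frame at an odd multiple of 8 bytes, so an
 * explicit pad is needed to keep every interrupt frame 16-byte aligned.
 */
struct fake_regs {
	unsigned long gpr[32];
	unsigned long ppr;
	unsigned long __pad;	/* drop this and the assert fires */
};

/* Compile-time check in the spirit of the BUILD_BUG_ON() added below. */
_Static_assert(sizeof(struct fake_regs) % 16 == 0,
	       "interrupt frame must stay 16-byte aligned");

int main(void)
{
	return 0;
}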
-rw-r--r--	arch/powerpc/include/asm/io.h	20
-rw-r--r--	arch/powerpc/include/asm/ppc-opcode.h	2
-rw-r--r--	arch/powerpc/include/asm/ptrace.h	1
-rw-r--r--	arch/powerpc/kernel/setup_64.c	2
-rw-r--r--	arch/powerpc/kvm/trace.h	8
-rw-r--r--	arch/powerpc/kvm/trace_booke.h	9
-rw-r--r--	arch/powerpc/kvm/trace_hv.h	9
-rw-r--r--	arch/powerpc/kvm/trace_pr.h	9
-rw-r--r--	arch/powerpc/mm/numa.c	2
-rw-r--r--	arch/powerpc/mm/slb.c	35
-rw-r--r--	arch/powerpc/platforms/powernv/npu-dma.c	64
-rw-r--r--	tools/testing/selftests/powerpc/mm/wild_bctr.c	21
12 files changed, 76 insertions(+), 106 deletions(-)
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 3ef40b703c4a..e746becd9d6f 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -268,19 +268,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  * their hooks, a bitfield is reserved for use by the platform near the
  * top of MMIO addresses (not PIO, those have to cope the hard way).
  *
- * This bit field is 12 bits and is at the top of the IO virtual
- * addresses PCI_IO_INDIRECT_TOKEN_MASK.
+ * The highest address in the kernel virtual space are:
  *
- * The kernel virtual space is thus:
+ * d0003fffffffffff		# with Hash MMU
+ * c00fffffffffffff		# with Radix MMU
  *
- * 0xD000000000000000		: vmalloc
- * 0xD000080000000000		: PCI PHB IO space
- * 0xD000080080000000		: ioremap
- * 0xD0000fffffffffff		: end of ioremap region
- *
- * Since the top 4 bits are reserved as the region ID, we use thus
- * the next 12 bits and keep 4 bits available for the future if the
- * virtual address space is ever to be extended.
+ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
+ * that can be used for the field.
  *
  * The direct IO mapping operations will then mask off those bits
  * before doing the actual access, though that only happen when
@@ -292,8 +286,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  */
 
 #ifdef CONFIG_PPC_INDIRECT_MMIO
-#define PCI_IO_IND_TOKEN_MASK	0x0fff000000000000ul
-#define PCI_IO_IND_TOKEN_SHIFT	48
+#define PCI_IO_IND_TOKEN_SHIFT	52
+#define PCI_IO_IND_TOKEN_MASK	(0xfful << PCI_IO_IND_TOKEN_SHIFT)
 #define PCI_FIX_ADDR(addr) \
 	((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
 #define PCI_GET_ADDR_TOKEN(addr) \
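The net effect of the io.h change is that the indirect-IO token now lives in bits 52-59 (the 8 bits below the 4-bit region ID) instead of bits 48-59. A stand-alone user-space sketch of the same mask arithmetic; the helper names are illustrative, only the two PCI_IO_IND_TOKEN_* constants come from the patch:

#include <assert.h>
#include <stdio.h>

#define PCI_IO_IND_TOKEN_SHIFT	52
#define PCI_IO_IND_TOKEN_MASK	(0xfful << PCI_IO_IND_TOKEN_SHIFT)

/* Illustrative: stash an 8-bit platform token in the reserved bits. */
static unsigned long set_token(unsigned long addr, unsigned long token)
{
	addr &= ~PCI_IO_IND_TOKEN_MASK;
	return addr | ((token << PCI_IO_IND_TOKEN_SHIFT) & PCI_IO_IND_TOKEN_MASK);
}

int main(void)
{
	unsigned long va = 0xc000000080001000ul;	/* example MMIO address */
	unsigned long tagged = set_token(va, 0x2a);

	/* What PCI_FIX_ADDR() does: mask the token off before the access. */
	assert((tagged & ~PCI_IO_IND_TOKEN_MASK) == va);
	/* What PCI_GET_ADDR_TOKEN() does: recover the token. */
	assert(((tagged & PCI_IO_IND_TOKEN_MASK) >> PCI_IO_IND_TOKEN_SHIFT) == 0x2a);
	printf("tagged address: 0x%lx\n", tagged);
	return 0;
}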
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 6093bc8f74e5..a6e9e314c707 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -493,6 +493,8 @@
 					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
 #define PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
 					__PPC_RT(t) | __PPC_RB(b))
+#define __PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
+					___PPC_RT(t) | ___PPC_RB(b))
 #define PPC_ICBT(c,a,b)		stringify_in_c(.long PPC_INST_ICBT | \
 				       __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
 /* PASemi instructions */
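The new __PPC_SLBFEE_DOT() variant uses the raw ___PPC_RT/___PPC_RB field encoders, which accept inline-asm operands such as %0 rather than symbolic register names; that is what lets slb.c below emit slbfee. through the macro. An illustrative encoding check (the PPC_INST_SLBFEE value and the helper definitions are assumptions based on the usual ppc-opcode.h pattern, not quoted from this patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed values, following the usual ppc-opcode.h pattern: slbfee. is
 * primary opcode 31, extended opcode 979, with the record bit set; the
 * RT field sits at bit 21 and RB at bit 11 of the instruction word.
 */
#define PPC_INST_SLBFEE	0x7c0007a7
#define ___PPC_RT(t)	((((uint32_t)(t)) & 0x1f) << 21)
#define ___PPC_RB(b)	((((uint32_t)(b)) & 0x1f) << 11)

int main(void)
{
	/* The word __PPC_SLBFEE_DOT(9, 10) would assemble: slbfee. r9, r10 */
	uint32_t insn = PPC_INST_SLBFEE | ___PPC_RT(9) | ___PPC_RB(10);

	printf("slbfee. r9, r10 = 0x%08x\n", insn);
	return 0;
}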
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index f73886a1a7f5..0b8a735b6d85 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -54,6 +54,7 @@ struct pt_regs
 
 #ifdef CONFIG_PPC64
 	unsigned long ppr;
+	unsigned long __pad;	/* Maintain 16 byte interrupt stack alignment */
 #endif
 };
 #endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2a51e4cc8246..236c1151a3a7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -636,6 +636,8 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
 {
 	unsigned long pa;
 
+	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
+
 	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
 					early_cpu_to_node(cpu), MEMBLOCK_NONE);
 	if (!pa) {
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 491b0f715d6b..ea1d7c808319 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -6,8 +6,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
 
 /*
  * Tracepoint for guest mode entry.
@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests,
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
 #include <trace/define_trace.h>
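The same restructuring is applied to all four KVM trace headers. The layout it converges on is the standard define_trace.h idiom: TRACE_INCLUDE_PATH/FILE must be set immediately before including trace/define_trace.h, outside the multi-read guard, and undefined first so values left behind by another trace header included earlier cannot leak in. A skeletal header showing that shape (every name here is a placeholder):

/* trace_foo.h -- skeletal tracepoint header; all names are placeholders */
#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

/* TRACE_EVENT() definitions live here, inside the guarded section. */

#endif /* _TRACE_FOO_H */

/* This part must be outside protection: undef first, so values left
 * behind by a trace header included earlier cannot leak in, then point
 * define_trace.h back at this file. */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE

#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_foo

#include <trace/define_trace.h>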
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
index ac640e81fdc5..3837842986aa 100644
--- a/arch/powerpc/kvm/trace_booke.h
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -6,8 +6,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_booke
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_booke
 
 #define kvm_trace_symbol_exit \
 	{0, "CRITICAL"}, \
@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
 #endif
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
 #include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index bcfe8a987f6a..8a1e3b0047f1 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -9,8 +9,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_hv
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_hv
 
 #define kvm_trace_symbol_hcall \
 	{H_REMOVE, "H_REMOVE"}, \
@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
 #endif /* _TRACE_KVM_HV_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
 #include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index 2f9a8829552b..46a46d328fbf 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -8,8 +8,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_pr
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_pr
 
 TRACE_EVENT(kvm_book3s_reenter,
 	TP_PROTO(int r, struct kvm_vcpu *vcpu),
@@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit,
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
 #include <trace/define_trace.h>
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3a048e98a132..ce28ae5ca080 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1178,7 +1178,7 @@ static long vphn_get_associativity(unsigned long cpu,
 
 	switch (rc) {
 	case H_FUNCTION:
-		printk(KERN_INFO
+		printk_once(KERN_INFO
 			"VPHN is not supported. Disabling polling...\n");
 		stop_topology_update();
 		break;
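printk_once() latches after its first call, which is what stops the message repeating every time CPU hotplug re-runs this path on a hypervisor without VPHN. A simplified user-space sketch of the idea (not the kernel's exact macro):

#include <stdio.h>

/*
 * Simplified user-space model of printk_once(): a static flag latches
 * after the first call, so a path that runs repeatedly can only log
 * the message once per boot. (Not the kernel's exact macro.)
 */
#define printf_once(...)				\
do {							\
	static int warned;				\
	if (!warned) {					\
		warned = 1;				\
		printf(__VA_ARGS__);			\
	}						\
} while (0)

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf_once("VPHN is not supported. Disabling polling...\n");
	return 0;	/* the message appears exactly once */
}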
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index c3fdf2969d9f..bc3914d54e26 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -19,6 +19,7 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
+#include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
@@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
 }
 
-static void assert_slb_exists(unsigned long ea)
+static void assert_slb_presence(bool present, unsigned long ea)
 {
 #ifdef CONFIG_DEBUG_VM
 	unsigned long tmp;
 
 	WARN_ON_ONCE(mfmsr() & MSR_EE);
 
-	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-	WARN_ON(tmp == 0);
-#endif
-}
-
-static void assert_slb_notexists(unsigned long ea)
-{
-#ifdef CONFIG_DEBUG_VM
-	unsigned long tmp;
+	if (!cpu_has_feature(CPU_FTR_ARCH_206))
+		return;
 
-	WARN_ON_ONCE(mfmsr() & MSR_EE);
+	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
 
-	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-	WARN_ON(tmp != 0);
+	WARN_ON(present == (tmp == 0));
 #endif
 }
 
@@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 	 */
 	slb_shadow_update(ea, ssize, flags, index);
 
-	assert_slb_notexists(ea);
+	assert_slb_presence(false, ea);
 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, ssize, flags)),
 		       "r" (mk_esid_data(ea, ssize, index))
@@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void)
137 "r" (be64_to_cpu(p->save_area[index].esid))); 130 "r" (be64_to_cpu(p->save_area[index].esid)));
138 } 131 }
139 132
140 assert_slb_exists(local_paca->kstack); 133 assert_slb_presence(true, local_paca->kstack);
141} 134}
142 135
143/* 136/*
@@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void)
 		     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
 			"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
 		     : "memory");
-	assert_slb_exists(get_paca()->kstack);
+	assert_slb_presence(true, get_paca()->kstack);
 
 	get_paca()->slb_cache_ptr = 0;
 
@@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 			ea = (unsigned long)
 				get_paca()->slb_cache[i] << SID_SHIFT;
 			/*
-			 * Could assert_slb_exists here, but hypervisor
-			 * or machine check could have come in and
-			 * removed the entry at this point.
+			 * Could assert_slb_presence(true) here, but
+			 * hypervisor or machine check could have come
+			 * in and removed the entry at this point.
 			 */
 
 			slbie_data = ea;
@@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
 	 * User preloads should add isync afterwards in case the kernel
 	 * accesses user memory before it returns to userspace with rfid.
 	 */
-	assert_slb_notexists(ea);
+	assert_slb_presence(false, ea);
 	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
 
 	barrier();
@@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
 			return -EFAULT;
 
 		if (ea < H_VMALLOC_END)
-			flags = get_paca()->vmalloc_sllp;
+			flags = local_paca->vmalloc_sllp;
 		else
 			flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
 	} else {
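The consolidated assert_slb_presence() replaces the old exists/notexists pair with one predicate: slbfee. writes a non-zero entry into tmp when the address hits in the SLB, so `present == (tmp == 0)` is true exactly when expectation and reality disagree. A host-side check of just that predicate (plain C, no SLB access involved):

#include <assert.h>
#include <stdbool.h>

/* tmp models the slbfee. result: non-zero when an SLB entry was found. */
static bool should_warn(bool present, unsigned long tmp)
{
	return present == (tmp == 0);
}

int main(void)
{
	assert(!should_warn(true, 0x400));	/* expected present, found   */
	assert( should_warn(true, 0));		/* expected present, missing */
	assert(!should_warn(false, 0));		/* expected absent, missing  */
	assert( should_warn(false, 0x400));	/* expected absent, found    */
	return 0;
}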
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 6f60e0931922..75b935252981 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -102,63 +102,6 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 }
 EXPORT_SYMBOL(pnv_pci_get_npu_dev);
 
-#define NPU_DMA_OP_UNSUPPORTED()	\
-	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
-		__func__)
-
-static void *dma_npu_alloc(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flag,
-			   unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return NULL;
-}
-
-static void dma_npu_free(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle,
-			 unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-}
-
-static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
-				   unsigned long offset, size_t size,
-				   enum dma_data_direction direction,
-				   unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
-			  int nelems, enum dma_data_direction direction,
-			  unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static int dma_npu_dma_supported(struct device *dev, u64 mask)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static u64 dma_npu_get_required_mask(struct device *dev)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static const struct dma_map_ops dma_npu_ops = {
-	.map_page = dma_npu_map_page,
-	.map_sg = dma_npu_map_sg,
-	.alloc = dma_npu_alloc,
-	.free = dma_npu_free,
-	.dma_supported = dma_npu_dma_supported,
-	.get_required_mask = dma_npu_get_required_mask,
-};
-
 /*
  * Returns the PE assoicated with the PCI device of the given
  * NPU. Returns the linked pci device if pci_dev != NULL.
@@ -270,10 +213,11 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
 	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
 
 	/*
-	 * We don't initialise npu_pe->tce32_table as we always use
-	 * dma_npu_ops which are nops.
+	 * NVLink devices use the same TCE table configuration as
+	 * their parent device so drivers shouldn't be doing DMA
+	 * operations directly on these devices.
 	 */
-	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
+	set_dma_ops(&npe->pdev->dev, NULL);
 }
 
 /*
diff --git a/tools/testing/selftests/powerpc/mm/wild_bctr.c b/tools/testing/selftests/powerpc/mm/wild_bctr.c
index 1b0e9e9a2ddc..f2fa101c5a6a 100644
--- a/tools/testing/selftests/powerpc/mm/wild_bctr.c
+++ b/tools/testing/selftests/powerpc/mm/wild_bctr.c
@@ -47,8 +47,9 @@ static int ok(void)
 	return 0;
 }
 
-#define REG_POISON	0x5a5aUL
-#define POISONED_REG(n)	((REG_POISON << 48) | ((n) << 32) | (REG_POISON << 16) | (n))
+#define REG_POISON	0x5a5a
+#define POISONED_REG(n)	((((unsigned long)REG_POISON) << 48) | ((n) << 32) | \
+			 (((unsigned long)REG_POISON) << 16) | (n))
 
 static inline void poison_regs(void)
 {
@@ -105,6 +106,20 @@ static void dump_regs(void)
 	}
 }
 
+#ifdef _CALL_AIXDESC
+struct opd {
+	unsigned long ip;
+	unsigned long toc;
+	unsigned long env;
+};
+static struct opd bad_opd = {
+	.ip = BAD_NIP,
+};
+#define BAD_FUNC (&bad_opd)
+#else
+#define BAD_FUNC BAD_NIP
+#endif
+
 int test_wild_bctr(void)
 {
 	int (*func_ptr)(void);
@@ -133,7 +148,7 @@ int test_wild_bctr(void)
 
 	poison_regs();
 
-	func_ptr = (int (*)(void))BAD_NIP;
+	func_ptr = (int (*)(void))BAD_FUNC;
 	func_ptr();
 
 	FAIL_IF(1);	/* we didn't segv? */
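For background on BAD_FUNC: under the big-endian ELFv1 ("AIX descriptor") ABI a function pointer addresses a three-doubleword descriptor rather than code, so casting BAD_NIP directly to a function pointer would make the call sequence load its branch target through memory at BAD_NIP instead of branching to BAD_NIP itself; hence the bad_opd wrapper. An illustrative program that inspects a real function's descriptor (behaviour is ABI-specific, and the #ifdef mirrors the test's):

#include <stdio.h>

struct opd {
	unsigned long ip;	/* entry point a bctrl actually reaches */
	unsigned long toc;	/* TOC pointer loaded into r2 */
	unsigned long env;	/* environment pointer (unused from C) */
};

static int target(void)
{
	return 42;
}

int main(void)
{
#ifdef _CALL_AIXDESC
	/* On ELFv1 a function pointer really points at its descriptor. */
	const struct opd *desc = (const struct opd *)&target;

	printf("entry=0x%lx toc=0x%lx\n", desc->ip, desc->toc);
#else
	/* On ELFv2 and most other ABIs it points straight at the code. */
	printf("entry=0x%lx\n", (unsigned long)&target);
#endif
	return 0;
}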