Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/io.h             | 20
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h     |  2
-rw-r--r--  arch/powerpc/include/asm/ptrace.h         |  1
-rw-r--r--  arch/powerpc/kernel/setup_64.c            |  2
-rw-r--r--  arch/powerpc/kernel/trace/ftrace.c        | 15
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c              |  1
-rw-r--r--  arch/powerpc/kvm/trace.h                  |  8
-rw-r--r--  arch/powerpc/kvm/trace_booke.h            |  9
-rw-r--r--  arch/powerpc/kvm/trace_hv.h               |  9
-rw-r--r--  arch/powerpc/kvm/trace_pr.h               |  9
-rw-r--r--  arch/powerpc/mm/numa.c                    |  2
-rw-r--r--  arch/powerpc/mm/slb.c                     | 35
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c         | 57
-rw-r--r--  arch/powerpc/platforms/powernv/npu-dma.c  | 64
14 files changed, 99 insertions(+), 135 deletions(-)
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 3ef40b703c4a..e746becd9d6f 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -268,19 +268,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  * their hooks, a bitfield is reserved for use by the platform near the
  * top of MMIO addresses (not PIO, those have to cope the hard way).
  *
- * This bit field is 12 bits and is at the top of the IO virtual
- * addresses PCI_IO_INDIRECT_TOKEN_MASK.
+ * The highest address in the kernel virtual space are:
  *
- * The kernel virtual space is thus:
+ *  d0003fffffffffff	# with Hash MMU
+ *  c00fffffffffffff	# with Radix MMU
  *
- * 0xD000000000000000	: vmalloc
- * 0xD000080000000000	: PCI PHB IO space
- * 0xD000080080000000	: ioremap
- * 0xD0000fffffffffff	: end of ioremap region
- *
- * Since the top 4 bits are reserved as the region ID, we use thus
- * the next 12 bits and keep 4 bits available for the future if the
- * virtual address space is ever to be extended.
+ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
+ * that can be used for the field.
  *
  * The direct IO mapping operations will then mask off those bits
  * before doing the actual access, though that only happen when
@@ -292,8 +286,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  */

 #ifdef CONFIG_PPC_INDIRECT_MMIO
-#define PCI_IO_IND_TOKEN_MASK	0x0fff000000000000ul
-#define PCI_IO_IND_TOKEN_SHIFT	48
+#define PCI_IO_IND_TOKEN_SHIFT	52
+#define PCI_IO_IND_TOKEN_MASK	(0xfful << PCI_IO_IND_TOKEN_SHIFT)
 #define PCI_FIX_ADDR(addr)						\
 	((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
 #define PCI_GET_ADDR_TOKEN(addr)					\
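
The new scheme keeps the indirect token in bits 52-59, leaving the top 4 bits
free for the hash region ID. As a quick sanity check of the mask arithmetic,
here is a self-contained sketch; the address and token values are made up for
illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define PCI_IO_IND_TOKEN_SHIFT	52
	#define PCI_IO_IND_TOKEN_MASK	(0xfful << PCI_IO_IND_TOKEN_SHIFT)

	int main(void)
	{
		uint64_t addr  = 0xc000000012345678ull;	/* made-up MMIO address */
		uint64_t token = 0x2a;			/* made-up platform token */

		/* tag the address, as a platform hook would */
		uint64_t tagged = (addr & ~PCI_IO_IND_TOKEN_MASK) |
				  (token << PCI_IO_IND_TOKEN_SHIFT);

		/* recover both halves, mirroring PCI_GET_ADDR_TOKEN/PCI_FIX_ADDR */
		printf("token %#llx addr %#llx\n",
		       (unsigned long long)((tagged & PCI_IO_IND_TOKEN_MASK)
						>> PCI_IO_IND_TOKEN_SHIFT),
		       (unsigned long long)(tagged & ~PCI_IO_IND_TOKEN_MASK));
		return 0;
	}
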
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 6093bc8f74e5..a6e9e314c707 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -493,6 +493,8 @@
 					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
 #define PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
 					__PPC_RT(t) | __PPC_RB(b))
+#define __PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE |	\
+					___PPC_RT(t) | ___PPC_RB(b))
 #define PPC_ICBT(c,a,b)		stringify_in_c(.long PPC_INST_ICBT | \
 					__PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
 /* PASemi instructions */
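
Following ppc-opcode.h's usual layering, PPC_SLBFEE_DOT() wraps its operands in
__PPC_RT()/__PPC_RB(), which paste a __REG_ prefix and therefore only accept
named register tokens; the new triple-underscore variant passes its arguments
straight to ___PPC_RT()/___PPC_RB(), so it also works with inline-asm operand
placeholders. That is what the slb.c hunk further down relies on:

	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
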
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index f73886a1a7f5..0b8a735b6d85 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -54,6 +54,7 @@ struct pt_regs
 
 #ifdef CONFIG_PPC64
 	unsigned long ppr;
+	unsigned long __pad;	/* Maintain 16 byte interrupt stack alignment */
 #endif
 };
 #endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2a51e4cc8246..236c1151a3a7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -636,6 +636,8 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
 {
 	unsigned long pa;
 
+	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
+
 	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
 				     early_cpu_to_node(cpu), MEMBLOCK_NONE);
 	if (!pa) {
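
These two hunks appear to be halves of one fix: adding ppr to pt_regs left the
64-bit interrupt frame at an odd multiple of 8 bytes, so the __pad above
restores the ABI-required 16-byte stack alignment, and the BUILD_BUG_ON makes
the build fail if the frame size ever drifts again. A user-space analogue of
the check; the struct here is a hypothetical stand-in, not the real frame
layout:

	#include <assert.h>

	struct frame {				/* hypothetical stand-in */
		unsigned long gprs[32];
		unsigned long nip, msr, ppr;	/* odd number of 8-byte words... */
		unsigned long __pad;		/* ...so pad back to a 16-byte multiple */
	};

	static_assert(sizeof(struct frame) % 16 == 0,
		      "interrupt stack frame must stay 16-byte aligned");

	int main(void) { return 0; }
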
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 4bf051d3e21e..b65c8a34ad6e 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -950,7 +950,6 @@ int ftrace_disable_ftrace_graph_caller(void)
  */
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
 {
-	struct ftrace_graph_ent trace;
 	unsigned long return_hooker;
 
 	if (unlikely(ftrace_graph_is_dead()))
@@ -961,18 +960,8 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
 
 	return_hooker = ppc_function_entry(return_to_handler);
 
-	trace.func = ip;
-	trace.depth = current->curr_ret_stack + 1;
-
-	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace))
-		goto out;
-
-	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
-				     NULL) == -EBUSY)
-		goto out;
-
-	parent = return_hooker;
+	if (!function_graph_enter(parent, ip, 0, NULL))
+		parent = return_hooker;
 out:
 	return parent;
 }
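
This follows the cross-architecture consolidation of the graph-tracer entry
path into one generic helper; in v4.20-era kernels its signature is:

	int function_graph_enter(unsigned long ret, unsigned long func,
				 unsigned long frame_pointer, unsigned long *retp);

It returns 0 once the return address has been pushed onto the shadow stack and
the entry callback has accepted the function, which is why parent is redirected
to return_hooker only on a zero return.
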
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index d65b961661fb..a56f8413758a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -983,6 +983,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		ret = kvmhv_enter_nested_guest(vcpu);
 		if (ret == H_INTERRUPT) {
 			kvmppc_set_gpr(vcpu, 3, 0);
+			vcpu->arch.hcall_needed = 0;
 			return -EINTR;
 		}
 		break;
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 491b0f715d6b..ea1d7c808319 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -6,8 +6,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
 
 /*
  * Tracepoint for guest mode entry.
@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests,
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
 #include <trace/define_trace.h>
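
<trace/define_trace.h> consumes whatever TRACE_INCLUDE_PATH and
TRACE_INCLUDE_FILE are defined at the point it is included, and stale values
left behind by a previously included trace header would otherwise leak into the
next one when several of these headers end up in the same compilation unit.
Hence the move next to the include plus the #undef pair, a pattern the three
trace headers that follow repeat. Its general shape, sketched for a
hypothetical trace_foo.h:

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM foo

	/* ... TRACE_EVENT() definitions, inside the usual #ifdef guard ... */

	/* This part must be outside protection */
	#undef TRACE_INCLUDE_PATH
	#undef TRACE_INCLUDE_FILE

	#define TRACE_INCLUDE_PATH .
	#define TRACE_INCLUDE_FILE trace_foo

	#include <trace/define_trace.h>
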
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
index ac640e81fdc5..3837842986aa 100644
--- a/arch/powerpc/kvm/trace_booke.h
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -6,8 +6,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_booke
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_booke
 
 #define kvm_trace_symbol_exit \
 	{0, "CRITICAL"}, \
@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
 #endif
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
 #include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index bcfe8a987f6a..8a1e3b0047f1 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -9,8 +9,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_hv
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_hv
 
 #define kvm_trace_symbol_hcall \
 	{H_REMOVE, "H_REMOVE"}, \
@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
 #endif /* _TRACE_KVM_HV_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
 #include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index 2f9a8829552b..46a46d328fbf 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -8,8 +8,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_pr
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_pr
 
 TRACE_EVENT(kvm_book3s_reenter,
 	TP_PROTO(int r, struct kvm_vcpu *vcpu),
@@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit,
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
 #include <trace/define_trace.h>
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3a048e98a132..ce28ae5ca080 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1178,7 +1178,7 @@ static long vphn_get_associativity(unsigned long cpu,
 
 	switch (rc) {
 	case H_FUNCTION:
-		printk(KERN_INFO
+		printk_once(KERN_INFO
 			"VPHN is not supported. Disabling polling...\n");
 		stop_topology_update();
 		break;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index c3fdf2969d9f..bc3914d54e26 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -19,6 +19,7 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
+#include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
@@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
 }
 
-static void assert_slb_exists(unsigned long ea)
+static void assert_slb_presence(bool present, unsigned long ea)
 {
 #ifdef CONFIG_DEBUG_VM
 	unsigned long tmp;
 
 	WARN_ON_ONCE(mfmsr() & MSR_EE);
 
-	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-	WARN_ON(tmp == 0);
-#endif
-}
-
-static void assert_slb_notexists(unsigned long ea)
-{
-#ifdef CONFIG_DEBUG_VM
-	unsigned long tmp;
+	if (!cpu_has_feature(CPU_FTR_ARCH_206))
+		return;
 
-	WARN_ON_ONCE(mfmsr() & MSR_EE);
+	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
 
-	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-	WARN_ON(tmp != 0);
+	WARN_ON(present == (tmp == 0));
 #endif
 }
 
@@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 	 */
 	slb_shadow_update(ea, ssize, flags, index);
 
-	assert_slb_notexists(ea);
+	assert_slb_presence(false, ea);
 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, ssize, flags)),
 		       "r" (mk_esid_data(ea, ssize, index))
@@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void)
137 "r" (be64_to_cpu(p->save_area[index].esid))); 130 "r" (be64_to_cpu(p->save_area[index].esid)));
138 } 131 }
139 132
140 assert_slb_exists(local_paca->kstack); 133 assert_slb_presence(true, local_paca->kstack);
141} 134}
142 135
143/* 136/*
@@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void)
185 :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)), 178 :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
186 "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid)) 179 "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
187 : "memory"); 180 : "memory");
188 assert_slb_exists(get_paca()->kstack); 181 assert_slb_presence(true, get_paca()->kstack);
189 182
190 get_paca()->slb_cache_ptr = 0; 183 get_paca()->slb_cache_ptr = 0;
191 184
@@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 			ea = (unsigned long)
 				get_paca()->slb_cache[i] << SID_SHIFT;
 			/*
-			 * Could assert_slb_exists here, but hypervisor
-			 * or machine check could have come in and
-			 * removed the entry at this point.
+			 * Could assert_slb_presence(true) here, but
+			 * hypervisor or machine check could have come
+			 * in and removed the entry at this point.
 			 */
 
 			slbie_data = ea;
@@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
 	 * User preloads should add isync afterwards in case the kernel
 	 * accesses user memory before it returns to userspace with rfid.
 	 */
-	assert_slb_notexists(ea);
+	assert_slb_presence(false, ea);
 	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
 
 	barrier();
@@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
 			return -EFAULT;
 
 		if (ea < H_VMALLOC_END)
-			flags = get_paca()->vmalloc_sllp;
+			flags = local_paca->vmalloc_sllp;
 		else
 			flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
 	} else {
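
The two old helpers differed only in the sense of the final WARN_ON, so the new
assert_slb_presence() folds the expectation into one predicate: slbfee. leaves
tmp == 0 when no SLB entry matches the EA (as the old asserts' use of it
implies), and WARN_ON(present == (tmp == 0)) fires exactly when expectation and
lookup disagree. A truth-table check of that predicate, as a self-contained
sketch:

	#include <assert.h>
	#include <stdbool.h>

	/* Fires when the expectation and the (simplified) slbfee. result
	 * disagree; tmp == 0 models "no SLB entry matched". */
	static bool warns(bool present, unsigned long tmp)
	{
		return present == (tmp == 0);
	}

	int main(void)
	{
		assert(!warns(true, 0x400));	/* expected present, found: ok */
		assert(warns(true, 0));		/* expected present, missing: warn */
		assert(!warns(false, 0));	/* expected absent, missing: ok */
		assert(warns(false, 0x400));	/* expected absent, found: warn */
		return 0;
	}
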
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 50b129785aee..17482f5de3e2 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -166,7 +166,33 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	PPC_BLR();
 }
 
-static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
+static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
+				       u64 func)
+{
+#ifdef PPC64_ELF_ABI_v1
+	/* func points to the function descriptor */
+	PPC_LI64(b2p[TMP_REG_2], func);
+	/* Load actual entry point from function descriptor */
+	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
+	/* ... and move it to LR */
+	PPC_MTLR(b2p[TMP_REG_1]);
+	/*
+	 * Load TOC from function descriptor at offset 8.
+	 * We can clobber r2 since we get called through a
+	 * function pointer (so caller will save/restore r2)
+	 * and since we don't use a TOC ourself.
+	 */
+	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
+#else
+	/* We can clobber r12 */
+	PPC_FUNC_ADDR(12, func);
+	PPC_MTLR(12);
+#endif
+	PPC_BLRL();
+}
+
+static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
+				       u64 func)
 {
 	unsigned int i, ctx_idx = ctx->idx;
 
@@ -273,7 +299,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 {
 	const struct bpf_insn *insn = fp->insnsi;
 	int flen = fp->len;
-	int i;
+	int i, ret;
 
 	/* Start of epilogue code - will only be valid 2nd pass onwards */
 	u32 exit_addr = addrs[flen];
@@ -284,8 +310,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		u32 src_reg = b2p[insn[i].src_reg];
 		s16 off = insn[i].off;
 		s32 imm = insn[i].imm;
+		bool func_addr_fixed;
+		u64 func_addr;
 		u64 imm64;
-		u8 *func;
 		u32 true_cond;
 		u32 tmp_idx;
 
@@ -711,23 +738,15 @@ emit_clear:
 		case BPF_JMP | BPF_CALL:
 			ctx->seen |= SEEN_FUNC;
 
-			/* bpf function call */
-			if (insn[i].src_reg == BPF_PSEUDO_CALL)
-				if (!extra_pass)
-					func = NULL;
-				else if (fp->aux->func && off < fp->aux->func_cnt)
-					/* use the subprog id from the off
-					 * field to lookup the callee address
-					 */
-					func = (u8 *) fp->aux->func[off]->bpf_func;
-				else
-					return -EINVAL;
-			/* kernel helper call */
-			else
-				func = (u8 *) __bpf_call_base + imm;
-
-			bpf_jit_emit_func_call(image, ctx, (u64)func);
+			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
+						    &func_addr, &func_addr_fixed);
+			if (ret < 0)
+				return ret;
 
+			if (func_addr_fixed)
+				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+			else
+				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
 			/* move return value from r3 to BPF_REG_0 */
 			PPC_MR(b2p[BPF_REG_0], 3);
 			break;
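
The split mirrors the two kinds of call targets. Kernel helper addresses are
fixed when the JIT runs, so the _hlp variant can dereference an ELF ABI v1
function descriptor on big-endian builds, whose layout is roughly:

	struct func_desc {		/* simplified sketch of an ABIv1 descriptor */
		unsigned long entry;	/* code address, moved into LR above */
		unsigned long toc;	/* TOC pointer, loaded into r2 from offset 8 */
		unsigned long env;
	};

BPF-to-BPF subprog addresses are only known on the extra pass, so the body now
goes through bpf_jit_get_func_addr(), the generic helper added alongside this
change, which resolves either __bpf_call_base + imm or the subprog entry and
reports via func_addr_fixed which emitter applies.
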
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 6f60e0931922..75b935252981 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -102,63 +102,6 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 }
 EXPORT_SYMBOL(pnv_pci_get_npu_dev);
 
-#define NPU_DMA_OP_UNSUPPORTED()	\
-	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
-		__func__)
-
-static void *dma_npu_alloc(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flag,
-			   unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return NULL;
-}
-
-static void dma_npu_free(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle,
-			 unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-}
-
-static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
-				   unsigned long offset, size_t size,
-				   enum dma_data_direction direction,
-				   unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
-			  int nelems, enum dma_data_direction direction,
-			  unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static int dma_npu_dma_supported(struct device *dev, u64 mask)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static u64 dma_npu_get_required_mask(struct device *dev)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static const struct dma_map_ops dma_npu_ops = {
-	.map_page		= dma_npu_map_page,
-	.map_sg			= dma_npu_map_sg,
-	.alloc			= dma_npu_alloc,
-	.free			= dma_npu_free,
-	.dma_supported		= dma_npu_dma_supported,
-	.get_required_mask	= dma_npu_get_required_mask,
-};
-
 /*
  * Returns the PE assoicated with the PCI device of the given
  * NPU. Returns the linked pci device if pci_dev != NULL.
@@ -270,10 +213,11 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
 	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
 
 	/*
-	 * We don't initialise npu_pe->tce32_table as we always use
-	 * dma_npu_ops which are nops.
+	 * NVLink devices use the same TCE table configuration as
+	 * their parent device so drivers shouldn't be doing DMA
+	 * operations directly on these devices.
 	 */
-	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
+	set_dma_ops(&npe->pdev->dev, NULL);
 }
 
 /*