Diffstat (limited to 'arch')
 arch/powerpc/Kconfig                              |  2
 arch/powerpc/include/asm/asm-compat.h             |  4
 arch/powerpc/include/asm/ppc-opcode.h             |  2
 arch/powerpc/net/Makefile                         |  2
 arch/powerpc/net/bpf_jit.h                        | 64
 arch/powerpc/net/{bpf_jit_64.S => bpf_jit_asm.S}  | 70
 arch/powerpc/net/bpf_jit_comp.c                   | 46
 arch/s390/kernel/irq.c                            |  1
 8 files changed, 124 insertions(+), 67 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 22b0940494bb..5084bdcc6046 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -126,7 +126,7 @@ config PPC
 	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_BPF_JIT if PPC64
+	select HAVE_BPF_JIT
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 21be8ae8f809..dc85dcb891cf 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -23,6 +23,8 @@
 #define PPC_STL		stringify_in_c(std)
 #define PPC_STLU	stringify_in_c(stdu)
 #define PPC_LCMPI	stringify_in_c(cmpdi)
+#define PPC_LCMPLI	stringify_in_c(cmpldi)
+#define PPC_LCMP	stringify_in_c(cmpd)
 #define PPC_LONG	stringify_in_c(.llong)
 #define PPC_LONG_ALIGN	stringify_in_c(.balign 8)
 #define PPC_TLNEI	stringify_in_c(tdnei)
@@ -52,6 +54,8 @@
 #define PPC_STL		stringify_in_c(stw)
 #define PPC_STLU	stringify_in_c(stwu)
 #define PPC_LCMPI	stringify_in_c(cmpwi)
+#define PPC_LCMPLI	stringify_in_c(cmplwi)
+#define PPC_LCMP	stringify_in_c(cmpw)
 #define PPC_LONG	stringify_in_c(.long)
 #define PPC_LONG_ALIGN	stringify_in_c(.balign 4)
 #define PPC_TLNEI	stringify_in_c(twnei)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 03cd858a401c..2eadde0b98fb 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -212,6 +212,8 @@
 #define PPC_INST_LWZ			0x80000000
 #define PPC_INST_STD			0xf8000000
 #define PPC_INST_STDU			0xf8000001
+#define PPC_INST_STW			0x90000000
+#define PPC_INST_STWU			0x94000000
 #define PPC_INST_MFLR			0x7c0802a6
 #define PPC_INST_MTLR			0x7c0803a6
 #define PPC_INST_CMPWI			0x2c000000
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
index 266b3950c3ac..1306a58ac541 100644
--- a/arch/powerpc/net/Makefile
+++ b/arch/powerpc/net/Makefile
@@ -1,4 +1,4 @@
 #
 # Arch-specific network modules
 #
-obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index c406aa95b2bc..889fd199a821 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -10,12 +10,25 @@
 #ifndef _BPF_JIT_H
 #define _BPF_JIT_H
 
+#ifdef CONFIG_PPC64
+#define BPF_PPC_STACK_R3_OFF	48
 #define BPF_PPC_STACK_LOCALS	32
 #define BPF_PPC_STACK_BASIC	(48+64)
 #define BPF_PPC_STACK_SAVE	(18*8)
 #define BPF_PPC_STACKFRAME	(BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
 				 BPF_PPC_STACK_SAVE)
 #define BPF_PPC_SLOWPATH_FRAME	(48+64)
+#else
+#define BPF_PPC_STACK_R3_OFF	24
+#define BPF_PPC_STACK_LOCALS	16
+#define BPF_PPC_STACK_BASIC	(24+32)
+#define BPF_PPC_STACK_SAVE	(18*4)
+#define BPF_PPC_STACKFRAME	(BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
+				 BPF_PPC_STACK_SAVE)
+#define BPF_PPC_SLOWPATH_FRAME	(24+32)
+#endif
+
+#define REG_SZ		(BITS_PER_LONG/8)
 
 /*
  * Generated code register usage:
@@ -57,7 +70,11 @@ DECLARE_LOAD_FUNC(sk_load_half);
 DECLARE_LOAD_FUNC(sk_load_byte);
 DECLARE_LOAD_FUNC(sk_load_byte_msh);
 
+#ifdef CONFIG_PPC64
 #define FUNCTION_DESCR_SIZE	24
+#else
+#define FUNCTION_DESCR_SIZE	0
+#endif
 
 /*
  * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
@@ -86,7 +103,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 #define PPC_LIS(r, i)		PPC_ADDIS(r, 0, i)
 #define PPC_STD(r, base, i)	EMIT(PPC_INST_STD | ___PPC_RS(r) |	      \
 				     ___PPC_RA(base) | ((i) & 0xfffc))
-
+#define PPC_STDU(r, base, i)	EMIT(PPC_INST_STDU | ___PPC_RS(r) |	      \
+				     ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STW(r, base, i)	EMIT(PPC_INST_STW | ___PPC_RS(r) |	      \
+				     ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STWU(r, base, i)	EMIT(PPC_INST_STWU | ___PPC_RS(r) |	      \
+				     ___PPC_RA(base) | ((i) & 0xfffc))
 
 #define PPC_LBZ(r, base, i)	EMIT(PPC_INST_LBZ | ___PPC_RT(r) |	      \
 				     ___PPC_RA(base) | IMM_L(i))
@@ -98,6 +120,17 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 				     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LHBRX(r, base, b)	EMIT(PPC_INST_LHBRX | ___PPC_RT(r) |	      \
 				     ___PPC_RA(base) | ___PPC_RB(b))
+
+#ifdef CONFIG_PPC64
+#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
+#else
+#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
+#endif
+
 /* Convenience helpers for the above with 'far' offsets: */
 #define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i);   \
 		else {	PPC_ADDIS(r, base, IMM_HA(i));			      \
@@ -115,6 +148,29 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 		else {	PPC_ADDIS(r, base, IMM_HA(i));			      \
 			PPC_LHZ(r, r, IMM_L(i)); } } while(0)
 
+#ifdef CONFIG_PPC64
+#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
+#else
+#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
+#endif
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_PPC64
+#define PPC_BPF_LOAD_CPU(r)						\
+	do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2);	\
+		PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index));	\
+	} while (0)
+#else
+#define PPC_BPF_LOAD_CPU(r)						\
+	do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);	\
+		PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)),		\
+				offsetof(struct thread_info, cpu));	\
+	} while(0)
+#endif
+#else
+#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
+#endif
+
 #define PPC_CMPWI(a, i)		EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPDI(a, i)		EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPLWI(a, i)	EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
@@ -196,6 +252,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 		PPC_ORI(d, d, (uintptr_t)(i) & 0xffff);			      \
 	} } while (0);
 
+#ifdef CONFIG_PPC64
+#define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0)
+#else
+#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
+#endif
+
 #define PPC_LHBRX_OFFS(r, base, i) \
 		do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
 #ifdef __LITTLE_ENDIAN__
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_asm.S
index 8f87d9217122..8ff5a3b5d1c3 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_asm.S
@@ -34,13 +34,13 @@
  */
 	.globl	sk_load_word
 sk_load_word:
-	cmpdi	r_addr, 0
+	PPC_LCMPI	r_addr, 0
 	blt	bpf_slow_path_word_neg
 	.globl	sk_load_word_positive_offset
 sk_load_word_positive_offset:
 	/* Are we accessing past headlen? */
 	subi	r_scratch1, r_HL, 4
-	cmpd	r_scratch1, r_addr
+	PPC_LCMP	r_scratch1, r_addr
 	blt	bpf_slow_path_word
 	/* Nope, just hitting the header.  cr0 here is eq or gt! */
 #ifdef __LITTLE_ENDIAN__
@@ -52,12 +52,12 @@ sk_load_word_positive_offset:
 
 	.globl	sk_load_half
 sk_load_half:
-	cmpdi	r_addr, 0
+	PPC_LCMPI	r_addr, 0
 	blt	bpf_slow_path_half_neg
 	.globl	sk_load_half_positive_offset
 sk_load_half_positive_offset:
 	subi	r_scratch1, r_HL, 2
-	cmpd	r_scratch1, r_addr
+	PPC_LCMP	r_scratch1, r_addr
 	blt	bpf_slow_path_half
 #ifdef __LITTLE_ENDIAN__
 	lhbrx	r_A, r_D, r_addr
@@ -68,11 +68,11 @@ sk_load_half_positive_offset:
 
 	.globl	sk_load_byte
 sk_load_byte:
-	cmpdi	r_addr, 0
+	PPC_LCMPI	r_addr, 0
 	blt	bpf_slow_path_byte_neg
 	.globl	sk_load_byte_positive_offset
 sk_load_byte_positive_offset:
-	cmpd	r_HL, r_addr
+	PPC_LCMP	r_HL, r_addr
 	ble	bpf_slow_path_byte
 	lbzx	r_A, r_D, r_addr
 	blr
@@ -83,11 +83,11 @@ sk_load_byte_positive_offset:
  */
 	.globl	sk_load_byte_msh
 sk_load_byte_msh:
-	cmpdi	r_addr, 0
+	PPC_LCMPI	r_addr, 0
 	blt	bpf_slow_path_byte_msh_neg
 	.globl	sk_load_byte_msh_positive_offset
 sk_load_byte_msh_positive_offset:
-	cmpd	r_HL, r_addr
+	PPC_LCMP	r_HL, r_addr
 	ble	bpf_slow_path_byte_msh
 	lbzx	r_X, r_D, r_addr
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
@@ -101,13 +101,13 @@ sk_load_byte_msh_positive_offset:
  */
 #define bpf_slow_path_common(SIZE)				\
 	mflr	r0;						\
-	std	r0, 16(r1);					\
+	PPC_STL	r0, PPC_LR_STKOFF(r1);				\
 	/* R3 goes in parameter space of caller's frame */	\
-	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
-	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
-	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
-	addi	r5, r1, BPF_PPC_STACK_BASIC+(2*8);		\
-	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1);		\
+	PPC_STL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
+	PPC_STL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
+	PPC_STL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
+	addi	r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ);		\
+	PPC_STLU	r1, -BPF_PPC_SLOWPATH_FRAME(r1);	\
 	/* R3 = r_skb, as passed */				\
 	mr	r4, r_addr;					\
 	li	r6, SIZE;					\
@@ -115,19 +115,19 @@ sk_load_byte_msh_positive_offset:
 	nop;							\
 	/* R3 = 0 on success */					\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
-	ld	r0, 16(r1);					\
-	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
-	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	PPC_LL	r0, PPC_LR_STKOFF(r1);				\
+	PPC_LL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
+	PPC_LL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
 	mtlr	r0;						\
-	cmpdi	r3, 0;						\
+	PPC_LCMPI	r3, 0;					\
 	blt	bpf_error;	/* cr0 = LT */			\
-	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	PPC_LL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
 	/* Great success! */
 
 bpf_slow_path_word:
 	bpf_slow_path_common(4)
 	/* Data value is on stack, and cr0 != LT */
-	lwz	r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
+	lwz	r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
 	blr
 
 bpf_slow_path_half:
@@ -154,12 +154,12 @@ bpf_slow_path_byte_msh:
  */
 #define sk_negative_common(SIZE)				\
 	mflr	r0;						\
-	std	r0, 16(r1);					\
+	PPC_STL	r0, PPC_LR_STKOFF(r1);				\
 	/* R3 goes in parameter space of caller's frame */	\
-	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
-	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
-	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
-	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1);		\
+	PPC_STL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
+	PPC_STL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
+	PPC_STL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
+	PPC_STLU	r1, -BPF_PPC_SLOWPATH_FRAME(r1);	\
 	/* R3 = r_skb, as passed */				\
 	mr	r4, r_addr;					\
 	li	r5, SIZE;					\
@@ -167,19 +167,19 @@ bpf_slow_path_byte_msh:
 	nop;							\
 	/* R3 != 0 on success */				\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
-	ld	r0, 16(r1);					\
-	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
-	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	PPC_LL	r0, PPC_LR_STKOFF(r1);				\
+	PPC_LL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
+	PPC_LL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
 	mtlr	r0;						\
-	cmpldi	r3, 0;						\
+	PPC_LCMPLI	r3, 0;					\
 	beq	bpf_error_slow;	/* cr0 = EQ */			\
 	mr	r_addr, r3;					\
-	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	PPC_LL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
 	/* Great success! */
 
 bpf_slow_path_word_neg:
 	lis     r_scratch1,-32	/* SKF_LL_OFF */
-	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
 	blt	bpf_error	/* cr0 = LT */
 	.globl	sk_load_word_negative_offset
 sk_load_word_negative_offset:
@@ -189,7 +189,7 @@ sk_load_word_negative_offset:
 
 bpf_slow_path_half_neg:
 	lis     r_scratch1,-32	/* SKF_LL_OFF */
-	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
 	blt	bpf_error	/* cr0 = LT */
 	.globl	sk_load_half_negative_offset
 sk_load_half_negative_offset:
@@ -199,7 +199,7 @@ sk_load_half_negative_offset:
 
 bpf_slow_path_byte_neg:
 	lis     r_scratch1,-32	/* SKF_LL_OFF */
-	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
 	blt	bpf_error	/* cr0 = LT */
 	.globl	sk_load_byte_negative_offset
 sk_load_byte_negative_offset:
@@ -209,7 +209,7 @@ sk_load_byte_negative_offset:
 
 bpf_slow_path_byte_msh_neg:
 	lis     r_scratch1,-32	/* SKF_LL_OFF */
-	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
 	blt	bpf_error	/* cr0 = LT */
 	.globl	sk_load_byte_msh_negative_offset
 sk_load_byte_msh_negative_offset:
@@ -221,7 +221,7 @@ sk_load_byte_msh_negative_offset:
 bpf_error_slow:
 	/* fabricate a cr0 = lt */
 	li	r_scratch1, -1
-	cmpdi	r_scratch1, 0
+	PPC_LCMPI	r_scratch1, 0
 bpf_error:
 	/* Entered with cr0 = lt */
 	li	r3, 0
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index d1916b577f2c..17cea18a09d3 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -1,8 +1,9 @@
-/* bpf_jit_comp.c: BPF JIT compiler for PPC64
+/* bpf_jit_comp.c: BPF JIT compiler
  *
  * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
  *
  * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
+ * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -36,11 +37,11 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
 		if (ctx->seen & SEEN_DATAREF) {
 			/* If we call any helpers (for loads), save LR */
 			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
-			PPC_STD(0, 1, 16);
+			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
 
 			/* Back up non-volatile regs. */
-			PPC_STD(r_D, 1, -(8*(32-r_D)));
-			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
+			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
+			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
 		}
 		if (ctx->seen & SEEN_MEM) {
 			/*
@@ -49,11 +50,10 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
 			 */
 			for (i = r_M; i < (r_M+16); i++) {
 				if (ctx->seen & (1 << (i-r_M)))
-					PPC_STD(i, 1, -(8*(32-i)));
+					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
 			}
 		}
-		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
-		     (-BPF_PPC_STACKFRAME & 0xfffc));
+		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
 	}
 
 	if (ctx->seen & SEEN_DATAREF) {
@@ -67,7 +67,7 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
 							 data_len));
 		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
 		PPC_SUB(r_HL, r_HL, r_scratch1);
-		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
+		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
 	}
 
 	if (ctx->seen & SEEN_XREG) {
@@ -99,16 +99,16 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
 		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
 		if (ctx->seen & SEEN_DATAREF) {
-			PPC_LD(0, 1, 16);
+			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
 			PPC_MTLR(0);
-			PPC_LD(r_D, 1, -(8*(32-r_D)));
-			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
+			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
+			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
 		}
 		if (ctx->seen & SEEN_MEM) {
 			/* Restore any saved non-vol registers */
 			for (i = r_M; i < (r_M+16); i++) {
 				if (ctx->seen & (1 << (i-r_M)))
-					PPC_LD(i, 1, -(8*(32-i)));
+					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
 			}
 		}
 	}
@@ -355,7 +355,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 								ifindex) != 4);
 			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
 						  type) != 2);
-			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
+			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
 								dev));
 			PPC_CMPDI(r_scratch1, 0);
 			if (ctx->pc_ret0 != -1) {
@@ -411,20 +411,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			PPC_SRWI(r_A, r_A, 5);
 			break;
 		case BPF_ANC | SKF_AD_CPU:
-#ifdef CONFIG_SMP
-			/*
-			 * PACA ptr is r13:
-			 * raw_smp_processor_id() = local_paca->paca_index
-			 */
-			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
-						  paca_index) != 2);
-			PPC_LHZ_OFFS(r_A, 13,
-				  offsetof(struct paca_struct, paca_index));
-#else
-			PPC_LI(r_A, 0);
-#endif
+			PPC_BPF_LOAD_CPU(r_A);
 			break;
-
 		/*** Absolute loads from packet header/data ***/
 		case BPF_LD | BPF_W | BPF_ABS:
 			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
@@ -437,7 +425,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		common_load:
 			/* Load from [K]. */
 			ctx->seen |= SEEN_DATAREF;
-			PPC_LI64(r_scratch1, func);
+			PPC_FUNC_ADDR(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_LI32(r_addr, K);
 			PPC_BLRL();
@@ -463,7 +451,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			 * in the helper functions.
 			 */
 			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
-			PPC_LI64(r_scratch1, func);
+			PPC_FUNC_ADDR(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_ADDI(r_addr, r_X, IMM_L(K));
 			if (K >= 32768)
@@ -685,9 +673,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
 
 	if (image) {
 		bpf_flush_icache(code_base, code_base + (proglen/4));
+#ifdef CONFIG_PPC64
 		/* Function descriptor nastiness: Address + TOC */
 		((u64 *)image)[0] = (u64)code_base;
 		((u64 *)image)[1] = local_paca->kernel_toc;
+#endif
 		fp->bpf_func = (void *)image;
 		fp->jited = true;
 	}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f238720690f3..0220e7d3c629 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -79,7 +79,6 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
 	{.irq = IRQIO_TAP,  .name = "TAP", .desc = "[I/O] Tape"},
 	{.irq = IRQIO_VMR,  .name = "VMR", .desc = "[I/O] Unit Record Devices"},
 	{.irq = IRQIO_LCS,  .name = "LCS", .desc = "[I/O] LCS"},
-	{.irq = IRQIO_CLW,  .name = "CLW", .desc = "[I/O] CLAW"},
 	{.irq = IRQIO_CTC,  .name = "CTC", .desc = "[I/O] CTC"},
 	{.irq = IRQIO_APB,  .name = "APB", .desc = "[I/O] AP Bus"},
 	{.irq = IRQIO_ADM,  .name = "ADM", .desc = "[I/O] EADM Subchannel"},