author      Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>   2016-06-22 12:25:05 -0400
committer   Michael Ellerman <mpe@ellerman.id.au>             2016-06-24 01:15:37 -0400
commit      cef1e8cdcdb50513e7d3351f536e7e1e3e347827
tree        742ed4f5516d3327980a8d91782588d4d91aef21
parent      277285b854c666308cf6cb92a696748f976d6f64
powerpc/bpf/jit: A few cleanups
1. Per the ISA, ADDIS actually uses RT, rather than RS. Though
the result is the same, make the usage clear.
2. The multiply instruction used is a 32-bit multiply. Rename PPC_MUL()
to PPC_MULW() to make this clear.
3. PPC_STW[U] take the entire 16-bit immediate value and do not require
word-alignment, per the ISA. Change the macros to use IMM_L().
4. A few white-space cleanups to satisfy checkpatch.pl.
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
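Points 1 and 3 above are about instruction encoding rather than behaviour. In the Power ISA, RT and RS both occupy bits 6-10 of the instruction word, which is why emitting addis with ___PPC_RS(d) happened to produce the correct bits, and stw/stwu are D-form stores with a full 16-bit displacement, whereas std/stdu are DS-form stores whose low two displacement bits belong to the opcode extension and therefore need the (i) & 0xfffc mask. The stand-alone sketch below illustrates that field layout; it is not kernel code, and the helper names (RT_RS, IMM_L16, DS14, stw, std_) are invented for this illustration.

/*
 * Stand-alone sketch (not kernel code): how a D-form store (stw) and a
 * DS-form store (std) pack their operands.  Field positions follow the
 * Power ISA; the helper names are invented for this illustration.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define OP_STW		(36u << 26)	/* primary opcode 36, D-form  */
#define OP_STD		(62u << 26)	/* primary opcode 62, DS-form */

#define RT_RS(r)	(((uint32_t)(r) & 0x1f) << 21)	/* bits 6-10: RT and RS share this slot */
#define RA(r)		(((uint32_t)(r) & 0x1f) << 16)	/* bits 11-15 */
#define IMM_L16(i)	((uint32_t)(i) & 0xffff)	/* full 16-bit D field */
#define DS14(i)		((uint32_t)(i) & 0xfffc)	/* 14-bit DS field; low 2 bits belong to the opcode */

static uint32_t stw(int rs, int ra, int16_t d)  { return OP_STW | RT_RS(rs) | RA(ra) | IMM_L16(d); }
static uint32_t std_(int rs, int ra, int16_t d) { return OP_STD | RT_RS(rs) | RA(ra) | DS14(d); }

int main(void)
{
	/* stw encodes any 16-bit displacement; std silently rounds it down to a multiple of 4. */
	printf("stw r3,-2(r1) -> 0x%08" PRIx32 "\n", stw(3, 1, -2));
	printf("std r3,-2(r1) -> 0x%08" PRIx32 "\n", std_(3, 1, -2));
	return 0;
}

Running this shows the stw encoding keeping the 0xfffe displacement while the std encoding drops it to 0xfffc, which is exactly the difference between IMM_L() and the (i) & 0xfffc mask in the macros changed below.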
-rw-r--r--   arch/powerpc/net/bpf_jit.h        13
-rw-r--r--   arch/powerpc/net/bpf_jit_comp.c    8
2 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 95d0e3809c9e..9041d3fb9231 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -83,7 +83,7 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
  */
 #define IMM_H(i)		((uintptr_t)(i)>>16)
 #define IMM_HA(i)		(((uintptr_t)(i)>>16) +			      \
-					(((uintptr_t)(i) & 0x8000) >> 15))
+				 (((uintptr_t)(i) & 0x8000) >> 15))
 #define IMM_L(i)		((uintptr_t)(i) & 0xffff)
 
 #define PLANT_INSTR(d, idx, instr)					      \
@@ -99,16 +99,16 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 #define PPC_MR(d, a)		PPC_OR(d, a, a)
 #define PPC_LI(r, i)		PPC_ADDI(r, 0, i)
 #define PPC_ADDIS(d, a, i)	EMIT(PPC_INST_ADDIS |			      \
-					___PPC_RS(d) | ___PPC_RA(a) | IMM_L(i))
+					___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
 #define PPC_LIS(r, i)		PPC_ADDIS(r, 0, i)
 #define PPC_STD(r, base, i)	EMIT(PPC_INST_STD | ___PPC_RS(r) |	      \
 					___PPC_RA(base) | ((i) & 0xfffc))
 #define PPC_STDU(r, base, i)	EMIT(PPC_INST_STDU | ___PPC_RS(r) |	      \
 					___PPC_RA(base) | ((i) & 0xfffc))
 #define PPC_STW(r, base, i)	EMIT(PPC_INST_STW | ___PPC_RS(r) |	      \
-					___PPC_RA(base) | ((i) & 0xfffc))
+					___PPC_RA(base) | IMM_L(i))
 #define PPC_STWU(r, base, i)	EMIT(PPC_INST_STWU | ___PPC_RS(r) |	      \
-					___PPC_RA(base) | ((i) & 0xfffc))
+					___PPC_RA(base) | IMM_L(i))
 
 #define PPC_LBZ(r, base, i)	EMIT(PPC_INST_LBZ | ___PPC_RT(r) |	      \
 					___PPC_RA(base) | IMM_L(i))
@@ -174,13 +174,14 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 #define PPC_CMPWI(a, i)		EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPDI(a, i)		EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPLWI(a, i)	EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
-#define PPC_CMPLW(a, b)		EMIT(PPC_INST_CMPLW | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_CMPLW(a, b)		EMIT(PPC_INST_CMPLW | ___PPC_RA(a) |	      \
+					___PPC_RB(b))
 
 #define PPC_SUB(d, a, b)	EMIT(PPC_INST_SUB | ___PPC_RT(d) |	      \
 					___PPC_RB(a) | ___PPC_RA(b))
 #define PPC_ADD(d, a, b)	EMIT(PPC_INST_ADD | ___PPC_RT(d) |	      \
 					___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_MUL(d, a, b)	EMIT(PPC_INST_MULLW | ___PPC_RT(d) |	      \
+#define PPC_MULW(d, a, b)	EMIT(PPC_INST_MULLW | ___PPC_RT(d) |	      \
 					___PPC_RA(a) | ___PPC_RB(b))
 #define PPC_MULHWU(d, a, b)	EMIT(PPC_INST_MULHWU | ___PPC_RT(d) |	      \
 					___PPC_RA(a) | ___PPC_RB(b))
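A note on the IMM_* helpers visible in the first hunk: a 32-bit constant is split into a high and a low half, and because the low half is sign-extended when it is added back (li/addi), IMM_HA() bumps the high half by one whenever bit 15 of the constant is set so that the two halves still sum to the original value. The following self-contained check of that identity assumes plain two's-complement arithmetic; the function names are invented here, only the arithmetic mirrors the macros.

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as IMM_H/IMM_HA/IMM_L in bpf_jit.h, written as functions. */
static uint32_t imm_h(uint32_t i)  { return i >> 16; }
static uint32_t imm_ha(uint32_t i) { return imm_h(i) + ((i & 0x8000) >> 15); }
static uint32_t imm_l(uint32_t i)  { return i & 0xffff; }

int main(void)
{
	/* Sample constants, including ones with bit 15 set (the carry case). */
	for (uint64_t v = 0; v <= 0xffffffff; v += 0x10001) {
		uint32_t i = (uint32_t)v;
		int32_t lo = (int16_t)imm_l(i);	/* the low half is sign-extended when added */
		uint32_t hi = imm_ha(i) << 16;	/* the adjusted high half */

		assert(hi + (uint32_t)lo == i);
	}
	return 0;
}

The assert holds for every sampled value, including those whose low half looks negative, which is the reason IMM_HA() exists alongside IMM_H().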
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2d66a8446198..6012aac70e2f 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -161,14 +161,14 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			break;
 		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
 			ctx->seen |= SEEN_XREG;
-			PPC_MUL(r_A, r_A, r_X);
+			PPC_MULW(r_A, r_A, r_X);
 			break;
 		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
 			if (K < 32768)
 				PPC_MULI(r_A, r_A, K);
 			else {
 				PPC_LI32(r_scratch1, K);
-				PPC_MUL(r_A, r_A, r_scratch1);
+				PPC_MULW(r_A, r_A, r_scratch1);
 			}
 			break;
 		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
@@ -184,7 +184,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			}
 			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
 				PPC_DIVWU(r_scratch1, r_A, r_X);
-				PPC_MUL(r_scratch1, r_X, r_scratch1);
+				PPC_MULW(r_scratch1, r_X, r_scratch1);
 				PPC_SUB(r_A, r_A, r_scratch1);
 			} else {
 				PPC_DIVWU(r_A, r_A, r_X);
@@ -193,7 +193,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
 			PPC_LI32(r_scratch2, K);
 			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
-			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
+			PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
 			PPC_SUB(r_A, r_A, r_scratch1);
 			break;
 		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
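On the bpf_jit_comp.c hunks: the JIT computes the remainder for A %= X from a divide, a multiply and a subtract, i.e. A - (A / X) * X on 32-bit unsigned values, which is exactly where the renamed PPC_MULW() appears. A plain C restatement of that sequence, assuming X != 0 (the JIT's divide-by-zero handling sits outside the hunks shown); the function name below is invented for this note.

#include <assert.h>
#include <stdint.h>

/* The three-instruction sequence emitted for BPF_ALU | BPF_MOD | BPF_X,
 * written out in C: divwu, mullw (32-bit multiply), subf. */
static uint32_t bpf_mod_lowering(uint32_t a, uint32_t x)
{
	uint32_t scratch;

	scratch = a / x;		/* PPC_DIVWU(r_scratch1, r_A, r_X)       */
	scratch = x * scratch;		/* PPC_MULW(r_scratch1, r_X, r_scratch1) */
	return a - scratch;		/* PPC_SUB(r_A, r_A, r_scratch1)         */
}

int main(void)
{
	assert(bpf_mod_lowering(17, 5) == 17 % 5);
	assert(bpf_mod_lowering(0xdeadbeef, 0x1234) == 0xdeadbeefu % 0x1234);
	return 0;
}

The BPF_MOD | BPF_K case in the last hunk uses the same pattern, with the constant first loaded into a scratch register via PPC_LI32().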