diff options
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/net/bpf_jit_64.S   |   2 +-
 arch/powerpc/net/bpf_jit_comp.c | 157 ++++++++++++++++-----------------
 arch/powerpc/sysdev/fsl_soc.c   |  32 --------
 3 files changed, 76 insertions(+), 115 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index e76eba74d9da..8f87d9217122 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -78,7 +78,7 @@ sk_load_byte_positive_offset: | |||
78 | blr | 78 | blr |
79 | 79 | ||
80 | /* | 80 | /* |
81 | * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) | 81 | * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf) |
82 | * r_addr is the offset value | 82 | * r_addr is the offset value |
83 | */ | 83 | */ |
84 | .globl sk_load_byte_msh | 84 | .globl sk_load_byte_msh |
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 808ce1cae21a..6dcdadefd8d0 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -79,19 +79,11 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, | |||
79 | } | 79 | } |
80 | 80 | ||
81 | switch (filter[0].code) { | 81 | switch (filter[0].code) { |
82 | case BPF_S_RET_K: | 82 | case BPF_RET | BPF_K: |
83 | case BPF_S_LD_W_LEN: | 83 | case BPF_LD | BPF_W | BPF_LEN: |
84 | case BPF_S_ANC_PROTOCOL: | 84 | case BPF_LD | BPF_W | BPF_ABS: |
85 | case BPF_S_ANC_IFINDEX: | 85 | case BPF_LD | BPF_H | BPF_ABS: |
86 | case BPF_S_ANC_MARK: | 86 | case BPF_LD | BPF_B | BPF_ABS: |
87 | case BPF_S_ANC_RXHASH: | ||
88 | case BPF_S_ANC_VLAN_TAG: | ||
89 | case BPF_S_ANC_VLAN_TAG_PRESENT: | ||
90 | case BPF_S_ANC_CPU: | ||
91 | case BPF_S_ANC_QUEUE: | ||
92 | case BPF_S_LD_W_ABS: | ||
93 | case BPF_S_LD_H_ABS: | ||
94 | case BPF_S_LD_B_ABS: | ||
95 | /* first instruction sets A register (or is RET 'constant') */ | 87 | /* first instruction sets A register (or is RET 'constant') */ |
96 | break; | 88 | break; |
97 | default: | 89 | default: |
@@ -144,6 +136,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
144 | 136 | ||
145 | for (i = 0; i < flen; i++) { | 137 | for (i = 0; i < flen; i++) { |
146 | unsigned int K = filter[i].k; | 138 | unsigned int K = filter[i].k; |
139 | u16 code = bpf_anc_helper(&filter[i]); | ||
147 | 140 | ||
148 | /* | 141 | /* |
149 | * addrs[] maps a BPF bytecode address into a real offset from | 142 | * addrs[] maps a BPF bytecode address into a real offset from |
@@ -151,35 +144,35 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
151 | */ | 144 | */ |
152 | addrs[i] = ctx->idx * 4; | 145 | addrs[i] = ctx->idx * 4; |
153 | 146 | ||
154 | switch (filter[i].code) { | 147 | switch (code) { |
155 | /*** ALU ops ***/ | 148 | /*** ALU ops ***/ |
156 | case BPF_S_ALU_ADD_X: /* A += X; */ | 149 | case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */ |
157 | ctx->seen |= SEEN_XREG; | 150 | ctx->seen |= SEEN_XREG; |
158 | PPC_ADD(r_A, r_A, r_X); | 151 | PPC_ADD(r_A, r_A, r_X); |
159 | break; | 152 | break; |
160 | case BPF_S_ALU_ADD_K: /* A += K; */ | 153 | case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */ |
161 | if (!K) | 154 | if (!K) |
162 | break; | 155 | break; |
163 | PPC_ADDI(r_A, r_A, IMM_L(K)); | 156 | PPC_ADDI(r_A, r_A, IMM_L(K)); |
164 | if (K >= 32768) | 157 | if (K >= 32768) |
165 | PPC_ADDIS(r_A, r_A, IMM_HA(K)); | 158 | PPC_ADDIS(r_A, r_A, IMM_HA(K)); |
166 | break; | 159 | break; |
167 | case BPF_S_ALU_SUB_X: /* A -= X; */ | 160 | case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */ |
168 | ctx->seen |= SEEN_XREG; | 161 | ctx->seen |= SEEN_XREG; |
169 | PPC_SUB(r_A, r_A, r_X); | 162 | PPC_SUB(r_A, r_A, r_X); |
170 | break; | 163 | break; |
171 | case BPF_S_ALU_SUB_K: /* A -= K */ | 164 | case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ |
172 | if (!K) | 165 | if (!K) |
173 | break; | 166 | break; |
174 | PPC_ADDI(r_A, r_A, IMM_L(-K)); | 167 | PPC_ADDI(r_A, r_A, IMM_L(-K)); |
175 | if (K >= 32768) | 168 | if (K >= 32768) |
176 | PPC_ADDIS(r_A, r_A, IMM_HA(-K)); | 169 | PPC_ADDIS(r_A, r_A, IMM_HA(-K)); |
177 | break; | 170 | break; |
178 | case BPF_S_ALU_MUL_X: /* A *= X; */ | 171 | case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */ |
179 | ctx->seen |= SEEN_XREG; | 172 | ctx->seen |= SEEN_XREG; |
180 | PPC_MUL(r_A, r_A, r_X); | 173 | PPC_MUL(r_A, r_A, r_X); |
181 | break; | 174 | break; |
182 | case BPF_S_ALU_MUL_K: /* A *= K */ | 175 | case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ |
183 | if (K < 32768) | 176 | if (K < 32768) |
184 | PPC_MULI(r_A, r_A, K); | 177 | PPC_MULI(r_A, r_A, K); |
185 | else { | 178 | else { |
@@ -187,7 +180,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
187 | PPC_MUL(r_A, r_A, r_scratch1); | 180 | PPC_MUL(r_A, r_A, r_scratch1); |
188 | } | 181 | } |
189 | break; | 182 | break; |
190 | case BPF_S_ALU_MOD_X: /* A %= X; */ | 183 | case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */ |
191 | ctx->seen |= SEEN_XREG; | 184 | ctx->seen |= SEEN_XREG; |
192 | PPC_CMPWI(r_X, 0); | 185 | PPC_CMPWI(r_X, 0); |
193 | if (ctx->pc_ret0 != -1) { | 186 | if (ctx->pc_ret0 != -1) { |
@@ -201,13 +194,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
201 | PPC_MUL(r_scratch1, r_X, r_scratch1); | 194 | PPC_MUL(r_scratch1, r_X, r_scratch1); |
202 | PPC_SUB(r_A, r_A, r_scratch1); | 195 | PPC_SUB(r_A, r_A, r_scratch1); |
203 | break; | 196 | break; |
204 | case BPF_S_ALU_MOD_K: /* A %= K; */ | 197 | case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */ |
205 | PPC_LI32(r_scratch2, K); | 198 | PPC_LI32(r_scratch2, K); |
206 | PPC_DIVWU(r_scratch1, r_A, r_scratch2); | 199 | PPC_DIVWU(r_scratch1, r_A, r_scratch2); |
207 | PPC_MUL(r_scratch1, r_scratch2, r_scratch1); | 200 | PPC_MUL(r_scratch1, r_scratch2, r_scratch1); |
208 | PPC_SUB(r_A, r_A, r_scratch1); | 201 | PPC_SUB(r_A, r_A, r_scratch1); |
209 | break; | 202 | break; |
210 | case BPF_S_ALU_DIV_X: /* A /= X; */ | 203 | case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ |
211 | ctx->seen |= SEEN_XREG; | 204 | ctx->seen |= SEEN_XREG; |
212 | PPC_CMPWI(r_X, 0); | 205 | PPC_CMPWI(r_X, 0); |
213 | if (ctx->pc_ret0 != -1) { | 206 | if (ctx->pc_ret0 != -1) { |
@@ -223,17 +216,17 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
223 | } | 216 | } |
224 | PPC_DIVWU(r_A, r_A, r_X); | 217 | PPC_DIVWU(r_A, r_A, r_X); |
225 | break; | 218 | break; |
226 | case BPF_S_ALU_DIV_K: /* A /= K */ | 219 | case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */ |
227 | if (K == 1) | 220 | if (K == 1) |
228 | break; | 221 | break; |
229 | PPC_LI32(r_scratch1, K); | 222 | PPC_LI32(r_scratch1, K); |
230 | PPC_DIVWU(r_A, r_A, r_scratch1); | 223 | PPC_DIVWU(r_A, r_A, r_scratch1); |
231 | break; | 224 | break; |
232 | case BPF_S_ALU_AND_X: | 225 | case BPF_ALU | BPF_AND | BPF_X: |
233 | ctx->seen |= SEEN_XREG; | 226 | ctx->seen |= SEEN_XREG; |
234 | PPC_AND(r_A, r_A, r_X); | 227 | PPC_AND(r_A, r_A, r_X); |
235 | break; | 228 | break; |
236 | case BPF_S_ALU_AND_K: | 229 | case BPF_ALU | BPF_AND | BPF_K: |
237 | if (!IMM_H(K)) | 230 | if (!IMM_H(K)) |
238 | PPC_ANDI(r_A, r_A, K); | 231 | PPC_ANDI(r_A, r_A, K); |
239 | else { | 232 | else { |
@@ -241,51 +234,51 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
241 | PPC_AND(r_A, r_A, r_scratch1); | 234 | PPC_AND(r_A, r_A, r_scratch1); |
242 | } | 235 | } |
243 | break; | 236 | break; |
244 | case BPF_S_ALU_OR_X: | 237 | case BPF_ALU | BPF_OR | BPF_X: |
245 | ctx->seen |= SEEN_XREG; | 238 | ctx->seen |= SEEN_XREG; |
246 | PPC_OR(r_A, r_A, r_X); | 239 | PPC_OR(r_A, r_A, r_X); |
247 | break; | 240 | break; |
248 | case BPF_S_ALU_OR_K: | 241 | case BPF_ALU | BPF_OR | BPF_K: |
249 | if (IMM_L(K)) | 242 | if (IMM_L(K)) |
250 | PPC_ORI(r_A, r_A, IMM_L(K)); | 243 | PPC_ORI(r_A, r_A, IMM_L(K)); |
251 | if (K >= 65536) | 244 | if (K >= 65536) |
252 | PPC_ORIS(r_A, r_A, IMM_H(K)); | 245 | PPC_ORIS(r_A, r_A, IMM_H(K)); |
253 | break; | 246 | break; |
254 | case BPF_S_ANC_ALU_XOR_X: | 247 | case BPF_ANC | SKF_AD_ALU_XOR_X: |
255 | case BPF_S_ALU_XOR_X: /* A ^= X */ | 248 | case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */ |
256 | ctx->seen |= SEEN_XREG; | 249 | ctx->seen |= SEEN_XREG; |
257 | PPC_XOR(r_A, r_A, r_X); | 250 | PPC_XOR(r_A, r_A, r_X); |
258 | break; | 251 | break; |
259 | case BPF_S_ALU_XOR_K: /* A ^= K */ | 252 | case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */ |
260 | if (IMM_L(K)) | 253 | if (IMM_L(K)) |
261 | PPC_XORI(r_A, r_A, IMM_L(K)); | 254 | PPC_XORI(r_A, r_A, IMM_L(K)); |
262 | if (K >= 65536) | 255 | if (K >= 65536) |
263 | PPC_XORIS(r_A, r_A, IMM_H(K)); | 256 | PPC_XORIS(r_A, r_A, IMM_H(K)); |
264 | break; | 257 | break; |
265 | case BPF_S_ALU_LSH_X: /* A <<= X; */ | 258 | case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */ |
266 | ctx->seen |= SEEN_XREG; | 259 | ctx->seen |= SEEN_XREG; |
267 | PPC_SLW(r_A, r_A, r_X); | 260 | PPC_SLW(r_A, r_A, r_X); |
268 | break; | 261 | break; |
269 | case BPF_S_ALU_LSH_K: | 262 | case BPF_ALU | BPF_LSH | BPF_K: |
270 | if (K == 0) | 263 | if (K == 0) |
271 | break; | 264 | break; |
272 | else | 265 | else |
273 | PPC_SLWI(r_A, r_A, K); | 266 | PPC_SLWI(r_A, r_A, K); |
274 | break; | 267 | break; |
275 | case BPF_S_ALU_RSH_X: /* A >>= X; */ | 268 | case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */ |
276 | ctx->seen |= SEEN_XREG; | 269 | ctx->seen |= SEEN_XREG; |
277 | PPC_SRW(r_A, r_A, r_X); | 270 | PPC_SRW(r_A, r_A, r_X); |
278 | break; | 271 | break; |
279 | case BPF_S_ALU_RSH_K: /* A >>= K; */ | 272 | case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */ |
280 | if (K == 0) | 273 | if (K == 0) |
281 | break; | 274 | break; |
282 | else | 275 | else |
283 | PPC_SRWI(r_A, r_A, K); | 276 | PPC_SRWI(r_A, r_A, K); |
284 | break; | 277 | break; |
285 | case BPF_S_ALU_NEG: | 278 | case BPF_ALU | BPF_NEG: |
286 | PPC_NEG(r_A, r_A); | 279 | PPC_NEG(r_A, r_A); |
287 | break; | 280 | break; |
288 | case BPF_S_RET_K: | 281 | case BPF_RET | BPF_K: |
289 | PPC_LI32(r_ret, K); | 282 | PPC_LI32(r_ret, K); |
290 | if (!K) { | 283 | if (!K) { |
291 | if (ctx->pc_ret0 == -1) | 284 | if (ctx->pc_ret0 == -1) |
@@ -312,7 +305,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
312 | PPC_BLR(); | 305 | PPC_BLR(); |
313 | } | 306 | } |
314 | break; | 307 | break; |
315 | case BPF_S_RET_A: | 308 | case BPF_RET | BPF_A: |
316 | PPC_MR(r_ret, r_A); | 309 | PPC_MR(r_ret, r_A); |
317 | if (i != flen - 1) { | 310 | if (i != flen - 1) { |
318 | if (ctx->seen) | 311 | if (ctx->seen) |
@@ -321,53 +314,53 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
321 | PPC_BLR(); | 314 | PPC_BLR(); |
322 | } | 315 | } |
323 | break; | 316 | break; |
324 | case BPF_S_MISC_TAX: /* X = A */ | 317 | case BPF_MISC | BPF_TAX: /* X = A */ |
325 | PPC_MR(r_X, r_A); | 318 | PPC_MR(r_X, r_A); |
326 | break; | 319 | break; |
327 | case BPF_S_MISC_TXA: /* A = X */ | 320 | case BPF_MISC | BPF_TXA: /* A = X */ |
328 | ctx->seen |= SEEN_XREG; | 321 | ctx->seen |= SEEN_XREG; |
329 | PPC_MR(r_A, r_X); | 322 | PPC_MR(r_A, r_X); |
330 | break; | 323 | break; |
331 | 324 | ||
332 | /*** Constant loads/M[] access ***/ | 325 | /*** Constant loads/M[] access ***/ |
333 | case BPF_S_LD_IMM: /* A = K */ | 326 | case BPF_LD | BPF_IMM: /* A = K */ |
334 | PPC_LI32(r_A, K); | 327 | PPC_LI32(r_A, K); |
335 | break; | 328 | break; |
336 | case BPF_S_LDX_IMM: /* X = K */ | 329 | case BPF_LDX | BPF_IMM: /* X = K */ |
337 | PPC_LI32(r_X, K); | 330 | PPC_LI32(r_X, K); |
338 | break; | 331 | break; |
339 | case BPF_S_LD_MEM: /* A = mem[K] */ | 332 | case BPF_LD | BPF_MEM: /* A = mem[K] */ |
340 | PPC_MR(r_A, r_M + (K & 0xf)); | 333 | PPC_MR(r_A, r_M + (K & 0xf)); |
341 | ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); | 334 | ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); |
342 | break; | 335 | break; |
343 | case BPF_S_LDX_MEM: /* X = mem[K] */ | 336 | case BPF_LDX | BPF_MEM: /* X = mem[K] */ |
344 | PPC_MR(r_X, r_M + (K & 0xf)); | 337 | PPC_MR(r_X, r_M + (K & 0xf)); |
345 | ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); | 338 | ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); |
346 | break; | 339 | break; |
347 | case BPF_S_ST: /* mem[K] = A */ | 340 | case BPF_ST: /* mem[K] = A */ |
348 | PPC_MR(r_M + (K & 0xf), r_A); | 341 | PPC_MR(r_M + (K & 0xf), r_A); |
349 | ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); | 342 | ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); |
350 | break; | 343 | break; |
351 | case BPF_S_STX: /* mem[K] = X */ | 344 | case BPF_STX: /* mem[K] = X */ |
352 | PPC_MR(r_M + (K & 0xf), r_X); | 345 | PPC_MR(r_M + (K & 0xf), r_X); |
353 | ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); | 346 | ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); |
354 | break; | 347 | break; |
355 | case BPF_S_LD_W_LEN: /* A = skb->len; */ | 348 | case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */ |
356 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); | 349 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); |
357 | PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); | 350 | PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); |
358 | break; | 351 | break; |
359 | case BPF_S_LDX_W_LEN: /* X = skb->len; */ | 352 | case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ |
360 | PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); | 353 | PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); |
361 | break; | 354 | break; |
362 | 355 | ||
363 | /*** Ancillary info loads ***/ | 356 | /*** Ancillary info loads ***/ |
364 | case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ | 357 | case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */ |
365 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, | 358 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, |
366 | protocol) != 2); | 359 | protocol) != 2); |
367 | PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff, | 360 | PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff, |
368 | protocol)); | 361 | protocol)); |
369 | break; | 362 | break; |
370 | case BPF_S_ANC_IFINDEX: | 363 | case BPF_ANC | SKF_AD_IFINDEX: |
371 | PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, | 364 | PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, |
372 | dev)); | 365 | dev)); |
373 | PPC_CMPDI(r_scratch1, 0); | 366 | PPC_CMPDI(r_scratch1, 0); |
@@ -384,33 +377,33 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
384 | PPC_LWZ_OFFS(r_A, r_scratch1, | 377 | PPC_LWZ_OFFS(r_A, r_scratch1, |
385 | offsetof(struct net_device, ifindex)); | 378 | offsetof(struct net_device, ifindex)); |
386 | break; | 379 | break; |
387 | case BPF_S_ANC_MARK: | 380 | case BPF_ANC | SKF_AD_MARK: |
388 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); | 381 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); |
389 | PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, | 382 | PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, |
390 | mark)); | 383 | mark)); |
391 | break; | 384 | break; |
392 | case BPF_S_ANC_RXHASH: | 385 | case BPF_ANC | SKF_AD_RXHASH: |
393 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); | 386 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); |
394 | PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, | 387 | PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, |
395 | hash)); | 388 | hash)); |
396 | break; | 389 | break; |
397 | case BPF_S_ANC_VLAN_TAG: | 390 | case BPF_ANC | SKF_AD_VLAN_TAG: |
398 | case BPF_S_ANC_VLAN_TAG_PRESENT: | 391 | case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: |
399 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); | 392 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); |
400 | PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, | 393 | PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, |
401 | vlan_tci)); | 394 | vlan_tci)); |
402 | if (filter[i].code == BPF_S_ANC_VLAN_TAG) | 395 | if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) |
403 | PPC_ANDI(r_A, r_A, VLAN_VID_MASK); | 396 | PPC_ANDI(r_A, r_A, VLAN_VID_MASK); |
404 | else | 397 | else |
405 | PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); | 398 | PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); |
406 | break; | 399 | break; |
407 | case BPF_S_ANC_QUEUE: | 400 | case BPF_ANC | SKF_AD_QUEUE: |
408 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, | 401 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, |
409 | queue_mapping) != 2); | 402 | queue_mapping) != 2); |
410 | PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, | 403 | PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, |
411 | queue_mapping)); | 404 | queue_mapping)); |
412 | break; | 405 | break; |
413 | case BPF_S_ANC_CPU: | 406 | case BPF_ANC | SKF_AD_CPU: |
414 | #ifdef CONFIG_SMP | 407 | #ifdef CONFIG_SMP |
415 | /* | 408 | /* |
416 | * PACA ptr is r13: | 409 | * PACA ptr is r13: |
@@ -426,13 +419,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
426 | break; | 419 | break; |
427 | 420 | ||
428 | /*** Absolute loads from packet header/data ***/ | 421 | /*** Absolute loads from packet header/data ***/ |
429 | case BPF_S_LD_W_ABS: | 422 | case BPF_LD | BPF_W | BPF_ABS: |
430 | func = CHOOSE_LOAD_FUNC(K, sk_load_word); | 423 | func = CHOOSE_LOAD_FUNC(K, sk_load_word); |
431 | goto common_load; | 424 | goto common_load; |
432 | case BPF_S_LD_H_ABS: | 425 | case BPF_LD | BPF_H | BPF_ABS: |
433 | func = CHOOSE_LOAD_FUNC(K, sk_load_half); | 426 | func = CHOOSE_LOAD_FUNC(K, sk_load_half); |
434 | goto common_load; | 427 | goto common_load; |
435 | case BPF_S_LD_B_ABS: | 428 | case BPF_LD | BPF_B | BPF_ABS: |
436 | func = CHOOSE_LOAD_FUNC(K, sk_load_byte); | 429 | func = CHOOSE_LOAD_FUNC(K, sk_load_byte); |
437 | common_load: | 430 | common_load: |
438 | /* Load from [K]. */ | 431 | /* Load from [K]. */ |
@@ -449,13 +442,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
449 | break; | 442 | break; |
450 | 443 | ||
451 | /*** Indirect loads from packet header/data ***/ | 444 | /*** Indirect loads from packet header/data ***/ |
452 | case BPF_S_LD_W_IND: | 445 | case BPF_LD | BPF_W | BPF_IND: |
453 | func = sk_load_word; | 446 | func = sk_load_word; |
454 | goto common_load_ind; | 447 | goto common_load_ind; |
455 | case BPF_S_LD_H_IND: | 448 | case BPF_LD | BPF_H | BPF_IND: |
456 | func = sk_load_half; | 449 | func = sk_load_half; |
457 | goto common_load_ind; | 450 | goto common_load_ind; |
458 | case BPF_S_LD_B_IND: | 451 | case BPF_LD | BPF_B | BPF_IND: |
459 | func = sk_load_byte; | 452 | func = sk_load_byte; |
460 | common_load_ind: | 453 | common_load_ind: |
461 | /* | 454 | /* |
@@ -473,31 +466,31 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
473 | PPC_BCC(COND_LT, exit_addr); | 466 | PPC_BCC(COND_LT, exit_addr); |
474 | break; | 467 | break; |
475 | 468 | ||
476 | case BPF_S_LDX_B_MSH: | 469 | case BPF_LDX | BPF_B | BPF_MSH: |
477 | func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); | 470 | func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); |
478 | goto common_load; | 471 | goto common_load; |
479 | break; | 472 | break; |
480 | 473 | ||
481 | /*** Jump and branches ***/ | 474 | /*** Jump and branches ***/ |
482 | case BPF_S_JMP_JA: | 475 | case BPF_JMP | BPF_JA: |
483 | if (K != 0) | 476 | if (K != 0) |
484 | PPC_JMP(addrs[i + 1 + K]); | 477 | PPC_JMP(addrs[i + 1 + K]); |
485 | break; | 478 | break; |
486 | 479 | ||
487 | case BPF_S_JMP_JGT_K: | 480 | case BPF_JMP | BPF_JGT | BPF_K: |
488 | case BPF_S_JMP_JGT_X: | 481 | case BPF_JMP | BPF_JGT | BPF_X: |
489 | true_cond = COND_GT; | 482 | true_cond = COND_GT; |
490 | goto cond_branch; | 483 | goto cond_branch; |
491 | case BPF_S_JMP_JGE_K: | 484 | case BPF_JMP | BPF_JGE | BPF_K: |
492 | case BPF_S_JMP_JGE_X: | 485 | case BPF_JMP | BPF_JGE | BPF_X: |
493 | true_cond = COND_GE; | 486 | true_cond = COND_GE; |
494 | goto cond_branch; | 487 | goto cond_branch; |
495 | case BPF_S_JMP_JEQ_K: | 488 | case BPF_JMP | BPF_JEQ | BPF_K: |
496 | case BPF_S_JMP_JEQ_X: | 489 | case BPF_JMP | BPF_JEQ | BPF_X: |
497 | true_cond = COND_EQ; | 490 | true_cond = COND_EQ; |
498 | goto cond_branch; | 491 | goto cond_branch; |
499 | case BPF_S_JMP_JSET_K: | 492 | case BPF_JMP | BPF_JSET | BPF_K: |
500 | case BPF_S_JMP_JSET_X: | 493 | case BPF_JMP | BPF_JSET | BPF_X: |
501 | true_cond = COND_NE; | 494 | true_cond = COND_NE; |
502 | /* Fall through */ | 495 | /* Fall through */ |
503 | cond_branch: | 496 | cond_branch: |
@@ -508,20 +501,20 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
508 | break; | 501 | break; |
509 | } | 502 | } |
510 | 503 | ||
511 | switch (filter[i].code) { | 504 | switch (code) { |
512 | case BPF_S_JMP_JGT_X: | 505 | case BPF_JMP | BPF_JGT | BPF_X: |
513 | case BPF_S_JMP_JGE_X: | 506 | case BPF_JMP | BPF_JGE | BPF_X: |
514 | case BPF_S_JMP_JEQ_X: | 507 | case BPF_JMP | BPF_JEQ | BPF_X: |
515 | ctx->seen |= SEEN_XREG; | 508 | ctx->seen |= SEEN_XREG; |
516 | PPC_CMPLW(r_A, r_X); | 509 | PPC_CMPLW(r_A, r_X); |
517 | break; | 510 | break; |
518 | case BPF_S_JMP_JSET_X: | 511 | case BPF_JMP | BPF_JSET | BPF_X: |
519 | ctx->seen |= SEEN_XREG; | 512 | ctx->seen |= SEEN_XREG; |
520 | PPC_AND_DOT(r_scratch1, r_A, r_X); | 513 | PPC_AND_DOT(r_scratch1, r_A, r_X); |
521 | break; | 514 | break; |
522 | case BPF_S_JMP_JEQ_K: | 515 | case BPF_JMP | BPF_JEQ | BPF_K: |
523 | case BPF_S_JMP_JGT_K: | 516 | case BPF_JMP | BPF_JGT | BPF_K: |
524 | case BPF_S_JMP_JGE_K: | 517 | case BPF_JMP | BPF_JGE | BPF_K: |
525 | if (K < 32768) | 518 | if (K < 32768) |
526 | PPC_CMPLWI(r_A, K); | 519 | PPC_CMPLWI(r_A, K); |
527 | else { | 520 | else { |
@@ -529,7 +522,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
529 | PPC_CMPLW(r_A, r_scratch1); | 522 | PPC_CMPLW(r_A, r_scratch1); |
530 | } | 523 | } |
531 | break; | 524 | break; |
532 | case BPF_S_JMP_JSET_K: | 525 | case BPF_JMP | BPF_JSET | BPF_K: |
533 | if (K < 32768) | 526 | if (K < 32768) |
534 | /* PPC_ANDI is /only/ dot-form */ | 527 | /* PPC_ANDI is /only/ dot-form */ |
535 | PPC_ANDI(r_scratch1, r_A, K); | 528 | PPC_ANDI(r_scratch1, r_A, K); |
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 228cf91b91c1..ffd1169ebaab 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/of.h> | 25 | #include <linux/of.h> |
26 | #include <linux/of_platform.h> | 26 | #include <linux/of_platform.h> |
27 | #include <linux/phy.h> | 27 | #include <linux/phy.h> |
28 | #include <linux/phy_fixed.h> | ||
29 | #include <linux/spi/spi.h> | 28 | #include <linux/spi/spi.h> |
30 | #include <linux/fsl_devices.h> | 29 | #include <linux/fsl_devices.h> |
31 | #include <linux/fs_enet_pd.h> | 30 | #include <linux/fs_enet_pd.h> |
@@ -178,37 +177,6 @@ u32 get_baudrate(void) | |||
178 | EXPORT_SYMBOL(get_baudrate); | 177 | EXPORT_SYMBOL(get_baudrate); |
179 | #endif /* CONFIG_CPM2 */ | 178 | #endif /* CONFIG_CPM2 */ |
180 | 179 | ||
181 | #ifdef CONFIG_FIXED_PHY | ||
182 | static int __init of_add_fixed_phys(void) | ||
183 | { | ||
184 | int ret; | ||
185 | struct device_node *np; | ||
186 | u32 *fixed_link; | ||
187 | struct fixed_phy_status status = {}; | ||
188 | |||
189 | for_each_node_by_name(np, "ethernet") { | ||
190 | fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL); | ||
191 | if (!fixed_link) | ||
192 | continue; | ||
193 | |||
194 | status.link = 1; | ||
195 | status.duplex = fixed_link[1]; | ||
196 | status.speed = fixed_link[2]; | ||
197 | status.pause = fixed_link[3]; | ||
198 | status.asym_pause = fixed_link[4]; | ||
199 | |||
200 | ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status); | ||
201 | if (ret) { | ||
202 | of_node_put(np); | ||
203 | return ret; | ||
204 | } | ||
205 | } | ||
206 | |||
207 | return 0; | ||
208 | } | ||
209 | arch_initcall(of_add_fixed_phys); | ||
210 | #endif /* CONFIG_FIXED_PHY */ | ||
211 | |||
212 | #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) | 180 | #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) |
213 | static __be32 __iomem *rstcr; | 181 | static __be32 __iomem *rstcr; |
214 | 182 | ||