-rw-r--r--  arch/mips/net/ebpf_jit.c  1950
-rw-r--r--  arch/s390/net/bpf_jit_comp.c  3
-rw-r--r--  drivers/isdn/hysdn/hysdn_proclog.c  28
-rw-r--r--  drivers/net/dsa/mt7530.c  38
-rw-r--r--  drivers/net/dsa/mt7530.h  1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c  6
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c  1
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c  15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c  29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c  37
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c  2
-rw-r--r--  drivers/net/ethernet/ti/cpts.c  111
-rw-r--r--  drivers/net/ethernet/ti/cpts.h  2
-rw-r--r--  drivers/net/gtp.c  2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h  3
-rw-r--r--  drivers/net/hyperv/netvsc.c  3
-rw-r--r--  drivers/net/hyperv/rndis_filter.c  14
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c  2
-rw-r--r--  drivers/net/ppp/ppp_generic.c  18
-rw-r--r--  drivers/net/usb/asix.h  1
-rw-r--r--  drivers/net/usb/asix_common.c  53
-rw-r--r--  drivers/net/usb/asix_devices.c  1
-rw-r--r--  drivers/net/usb/lan78xx.c  18
-rw-r--r--  drivers/net/usb/qmi_wwan.c  7
-rw-r--r--  drivers/net/vxlan.c  1
-rw-r--r--  drivers/ptp/ptp_clock.c  42
-rw-r--r--  drivers/ptp/ptp_private.h  3
-rw-r--r--  drivers/s390/net/qeth_l3_main.c  4
-rw-r--r--  include/linux/mlx4/device.h  1
-rw-r--r--  include/linux/ptp_clock_kernel.h  20
-rw-r--r--  include/net/tcp.h  10
-rw-r--r--  net/batman-adv/translation-table.c  60
-rw-r--r--  net/batman-adv/types.h  2
-rw-r--r--  net/core/dev.c  2
-rw-r--r--  net/ipv4/cipso_ipv4.c  12
-rw-r--r--  net/ipv4/fou.c  1
-rw-r--r--  net/ipv4/tcp_input.c  34
-rw-r--r--  net/ipv4/tcp_output.c  27
-rw-r--r--  net/ipv4/tcp_timer.c  3
-rw-r--r--  net/ipv4/udp_offload.c  2
-rw-r--r--  net/ipv6/route.c  11
-rw-r--r--  net/ipv6/udp_offload.c  2
-rw-r--r--  net/rds/ib_recv.c  5
-rw-r--r--  net/sched/act_ipt.c  20
-rw-r--r--  tools/build/feature/test-bpf.c  2
-rw-r--r--  tools/lib/bpf/bpf.c  2
-rw-r--r--  tools/testing/selftests/bpf/test_pkt_md_access.c  11
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c  19
55 files changed, 2490 insertions, 186 deletions
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
new file mode 100644
index 000000000000..3f87b96da5c4
--- /dev/null
+++ b/arch/mips/net/ebpf_jit.c
@@ -0,0 +1,1950 @@
1/*
2 * Just-In-Time compiler for eBPF filters on MIPS
3 *
4 * Copyright (c) 2017 Cavium, Inc.
5 *
6 * Based on code from:
7 *
8 * Copyright (c) 2014 Imagination Technologies Ltd.
9 * Author: Markos Chandras <markos.chandras@imgtec.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; version 2 of the License.
14 */
15
16#include <linux/bitops.h>
17#include <linux/errno.h>
18#include <linux/filter.h>
19#include <linux/bpf.h>
20#include <linux/slab.h>
21#include <asm/bitops.h>
22#include <asm/byteorder.h>
23#include <asm/cacheflush.h>
24#include <asm/cpu-features.h>
25#include <asm/uasm.h>
26
27/* Registers used by JIT */
28#define MIPS_R_ZERO 0
29#define MIPS_R_AT 1
30#define MIPS_R_V0 2 /* BPF_R0 */
31#define MIPS_R_V1 3
32#define MIPS_R_A0 4 /* BPF_R1 */
33#define MIPS_R_A1 5 /* BPF_R2 */
34#define MIPS_R_A2 6 /* BPF_R3 */
35#define MIPS_R_A3 7 /* BPF_R4 */
36#define MIPS_R_A4 8 /* BPF_R5 */
37#define MIPS_R_T4 12 /* BPF_AX */
38#define MIPS_R_T5 13
39#define MIPS_R_T6 14
40#define MIPS_R_T7 15
41#define MIPS_R_S0 16 /* BPF_R6 */
42#define MIPS_R_S1 17 /* BPF_R7 */
43#define MIPS_R_S2 18 /* BPF_R8 */
44#define MIPS_R_S3 19 /* BPF_R9 */
45#define MIPS_R_S4 20 /* BPF_TCC */
46#define MIPS_R_S5 21
47#define MIPS_R_S6 22
48#define MIPS_R_S7 23
49#define MIPS_R_T8 24
50#define MIPS_R_T9 25
51#define MIPS_R_SP 29
52#define MIPS_R_RA 31
53
54/* eBPF flags */
55#define EBPF_SAVE_S0 BIT(0)
56#define EBPF_SAVE_S1 BIT(1)
57#define EBPF_SAVE_S2 BIT(2)
58#define EBPF_SAVE_S3 BIT(3)
59#define EBPF_SAVE_S4 BIT(4)
60#define EBPF_SAVE_RA BIT(5)
61#define EBPF_SEEN_FP BIT(6)
62#define EBPF_SEEN_TC BIT(7)
63#define EBPF_TCC_IN_V1 BIT(8)
64
65/*
66 * For the mips64 ISA, we need to track the value range or type for
67 * each JIT register. The BPF machine requires zero extended 32-bit
68 * values, but the mips64 ISA requires sign extended 32-bit values.
69 * At each point in the BPF program we track the state of every
70 * register so that we can zero extend or sign extend as the BPF
71 * semantics require.
72 */
73enum reg_val_type {
74 /* uninitialized */
75 REG_UNKNOWN,
76 /* not known to be 32-bit compatible. */
77 REG_64BIT,
78 /* 32-bit compatible, no truncation needed for 64-bit ops. */
79 REG_64BIT_32BIT,
80 /* 32-bit compatible, need truncation for 64-bit ops. */
81 REG_32BIT,
82 /* 32-bit zero extended. */
83 REG_32BIT_ZERO_EX,
84 /* 32-bit no sign/zero extension needed. */
85 REG_32BIT_POS
86};
87
88/*
 89 * The high bit of an offsets[] entry indicates that a long-branch
 90 * conversion was done at this insn.
91 */
92#define OFFSETS_B_CONV BIT(31)
93
94/**
95 * struct jit_ctx - JIT context
96 * @skf: The sk_filter
97 * @stack_size: eBPF stack size
98 * @tmp_offset: eBPF $sp offset to 8-byte temporary memory
99 * @idx: Instruction index
100 * @flags: JIT flags
101 * @offsets: Instruction offsets
102 * @target: Memory location for the compiled filter
 103 * @reg_val_types: Packed enum reg_val_type for each register.
104 */
105struct jit_ctx {
106 const struct bpf_prog *skf;
107 int stack_size;
108 int tmp_offset;
109 u32 idx;
110 u32 flags;
111 u32 *offsets;
112 u32 *target;
113 u64 *reg_val_types;
114 unsigned int long_b_conversion:1;
115 unsigned int gen_b_offsets:1;
116};
117
118static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
119{
120 *rvt &= ~(7ull << (reg * 3));
121 *rvt |= ((u64)type << (reg * 3));
122}
123
124static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
125 int index, int reg)
126{
127 return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
128}
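/*
 * A worked example of the packing above (illustrative): eleven BPF
 * registers at 3 bits each occupy bits 0..32 of the u64, leaving the
 * top bits free for the RVT_* visitation flags defined below.
 *
 *	u64 rvt = 0;
 *	set_reg_val_type(&rvt, BPF_REG_2, REG_32BIT);
 *	// rvt == 3ull << (2 * 3) == 0xc0
 */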
129
130/* Simply emit the instruction if the JIT memory space has been allocated */
131#define emit_instr(ctx, func, ...) \
132do { \
133 if ((ctx)->target != NULL) { \
134 u32 *p = &(ctx)->target[ctx->idx]; \
135 uasm_i_##func(&p, ##__VA_ARGS__); \
136 } \
137 (ctx)->idx++; \
138} while (0)
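/*
 * For example, emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -16)
 * calls uasm_i_daddiu(&p, 29, 29, -16) once ctx->target has been
 * allocated; during the earlier sizing passes only ctx->idx is
 * advanced, so the same code path both measures and emits.
 */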
139
140static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
141{
142 unsigned long target_va, base_va;
143 unsigned int r;
144
145 if (!ctx->target)
146 return 0;
147
148 base_va = (unsigned long)ctx->target;
149 target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);
150
151 if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
152 return (unsigned int)-1;
153 r = target_va & 0x0ffffffful;
154 return r;
155}
156
157/* Compute the immediate value for PC-relative branches. */
158static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
159{
160 if (!ctx->gen_b_offsets)
161 return 0;
162
163 /*
164 * We want a pc-relative branch. tgt is the instruction offset
165 * we want to jump to.
 166 *
167 * Branch on MIPS:
168 * I: target_offset <- sign_extend(offset)
169 * I+1: PC += target_offset (delay slot)
170 *
171 * ctx->idx currently points to the branch instruction
172 * but the offset is added to the delay slot so we need
173 * to subtract 4.
174 */
175 return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
176 (ctx->idx * 4) - 4;
177}
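/*
 * Worked example: if this branch is at ctx->idx == 10 (byte offset
 * 40) and the target insn was placed at byte offset 20, b_imm()
 * returns 20 - 40 - 4 = -24; the displacement is applied from the
 * delay slot, hence the extra -4.
 */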
178
179int bpf_jit_enable __read_mostly;
180
181enum which_ebpf_reg {
182 src_reg,
183 src_reg_no_fp,
184 dst_reg,
185 dst_reg_fp_ok
186};
187
188/*
189 * For eBPF, the register mapping naturally falls out of the
190 * requirements of eBPF and the MIPS n64 ABI. We don't maintain a
191 * separate frame pointer, so BPF_REG_10 relative accesses are
192 * adjusted to be $sp relative.
193 */
194int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
195 enum which_ebpf_reg w)
196{
197 int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
198 insn->src_reg : insn->dst_reg;
199
200 switch (ebpf_reg) {
201 case BPF_REG_0:
202 return MIPS_R_V0;
203 case BPF_REG_1:
204 return MIPS_R_A0;
205 case BPF_REG_2:
206 return MIPS_R_A1;
207 case BPF_REG_3:
208 return MIPS_R_A2;
209 case BPF_REG_4:
210 return MIPS_R_A3;
211 case BPF_REG_5:
212 return MIPS_R_A4;
213 case BPF_REG_6:
214 ctx->flags |= EBPF_SAVE_S0;
215 return MIPS_R_S0;
216 case BPF_REG_7:
217 ctx->flags |= EBPF_SAVE_S1;
218 return MIPS_R_S1;
219 case BPF_REG_8:
220 ctx->flags |= EBPF_SAVE_S2;
221 return MIPS_R_S2;
222 case BPF_REG_9:
223 ctx->flags |= EBPF_SAVE_S3;
224 return MIPS_R_S3;
225 case BPF_REG_10:
226 if (w == dst_reg || w == src_reg_no_fp)
227 goto bad_reg;
228 ctx->flags |= EBPF_SEEN_FP;
229 /*
230 * Needs special handling, return something that
231 * cannot be clobbered just in case.
232 */
233 return MIPS_R_ZERO;
234 case BPF_REG_AX:
235 return MIPS_R_T4;
236 default:
237bad_reg:
238 WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
239 return -EINVAL;
240 }
241}
242/*
243 * eBPF stack frame will be something like:
244 *
245 * Entry $sp ------> +--------------------------------+
246 * | $ra (optional) |
247 * +--------------------------------+
248 * | $s0 (optional) |
249 * +--------------------------------+
250 * | $s1 (optional) |
251 * +--------------------------------+
252 * | $s2 (optional) |
253 * +--------------------------------+
254 * | $s3 (optional) |
255 * +--------------------------------+
256 * | $s4 (optional) |
257 * +--------------------------------+
258 * | tmp-storage (if $ra saved) |
259 * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
260 * | BPF_REG_10 relative storage |
261 * | MAX_BPF_STACK (optional) |
262 * | . |
263 * | . |
264 * | . |
265 * $sp --------> +--------------------------------+
266 *
267 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
268 * area is not allocated.
269 */
270static int gen_int_prologue(struct jit_ctx *ctx)
271{
272 int stack_adjust = 0;
273 int store_offset;
274 int locals_size;
275
276 if (ctx->flags & EBPF_SAVE_RA)
277 /*
 278 * If $ra is saved, we are making a function call and may
 279 * need an extra 8-byte tmp area.
280 */
281 stack_adjust += 16;
282 if (ctx->flags & EBPF_SAVE_S0)
283 stack_adjust += 8;
284 if (ctx->flags & EBPF_SAVE_S1)
285 stack_adjust += 8;
286 if (ctx->flags & EBPF_SAVE_S2)
287 stack_adjust += 8;
288 if (ctx->flags & EBPF_SAVE_S3)
289 stack_adjust += 8;
290 if (ctx->flags & EBPF_SAVE_S4)
291 stack_adjust += 8;
292
293 BUILD_BUG_ON(MAX_BPF_STACK & 7);
294 locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
295
296 stack_adjust += locals_size;
297 ctx->tmp_offset = locals_size;
298
299 ctx->stack_size = stack_adjust;
300
301 /*
302 * First instruction initializes the tail call count (TCC).
303 * On tail call we skip this instruction, and the TCC is
304 * passed in $v1 from the caller.
305 */
306 emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
307 if (stack_adjust)
308 emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
309 else
310 return 0;
311
312 store_offset = stack_adjust - 8;
313
314 if (ctx->flags & EBPF_SAVE_RA) {
315 emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
316 store_offset -= 8;
317 }
318 if (ctx->flags & EBPF_SAVE_S0) {
319 emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
320 store_offset -= 8;
321 }
322 if (ctx->flags & EBPF_SAVE_S1) {
323 emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
324 store_offset -= 8;
325 }
326 if (ctx->flags & EBPF_SAVE_S2) {
327 emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
328 store_offset -= 8;
329 }
330 if (ctx->flags & EBPF_SAVE_S3) {
331 emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
332 store_offset -= 8;
333 }
334 if (ctx->flags & EBPF_SAVE_S4) {
335 emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
336 store_offset -= 8;
337 }
338
339 if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
340 emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
341
342 return 0;
343}
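/*
 * A concrete layout (illustrative): with EBPF_SAVE_RA and
 * EBPF_SAVE_S0 set and BPF_REG_10 unused, stack_adjust is
 * 16 + 8 = 24 and tmp_offset is 0, so the prologue is roughly:
 *
 *	daddiu	$v1, $zero, MAX_TAIL_CALL_CNT
 *	daddiu	$sp, $sp, -24
 *	sd	$ra, 16($sp)
 *	sd	$s0, 8($sp)
 *
 * leaving the 8-byte tmp area at 0($sp).
 */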
344
345static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
346{
347 const struct bpf_prog *prog = ctx->skf;
348 int stack_adjust = ctx->stack_size;
349 int store_offset = stack_adjust - 8;
350 int r0 = MIPS_R_V0;
351
352 if (dest_reg == MIPS_R_RA &&
353 get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
354 /* Don't let zero extended value escape. */
355 emit_instr(ctx, sll, r0, r0, 0);
356
357 if (ctx->flags & EBPF_SAVE_RA) {
358 emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
359 store_offset -= 8;
360 }
361 if (ctx->flags & EBPF_SAVE_S0) {
362 emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
363 store_offset -= 8;
364 }
365 if (ctx->flags & EBPF_SAVE_S1) {
366 emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
367 store_offset -= 8;
368 }
369 if (ctx->flags & EBPF_SAVE_S2) {
370 emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
371 store_offset -= 8;
372 }
373 if (ctx->flags & EBPF_SAVE_S3) {
374 emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
375 store_offset -= 8;
376 }
377 if (ctx->flags & EBPF_SAVE_S4) {
378 emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
379 store_offset -= 8;
380 }
381 emit_instr(ctx, jr, dest_reg);
382
383 if (stack_adjust)
384 emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
385 else
386 emit_instr(ctx, nop);
387
388 return 0;
389}
390
391static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
392 struct jit_ctx *ctx)
393{
394 if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
395 emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
396 } else {
397 int lower = (s16)(insn->imm & 0xffff);
398 int upper = insn->imm - lower;
399
400 emit_instr(ctx, lui, reg, upper >> 16);
401 emit_instr(ctx, addiu, reg, reg, lower);
402 }
403
404}
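/*
 * The lower/upper split above compensates for addiu sign-extending
 * its 16-bit immediate. E.g. for insn->imm == 0x12348000, lower is
 * (s16)0x8000 == -32768 and upper becomes 0x12350000, so
 * "lui reg, 0x1235; addiu reg, reg, -32768" reproduces 0x12348000.
 */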
405
406static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
407 int idx)
408{
409 int upper_bound, lower_bound;
410 int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
411
412 if (dst < 0)
413 return dst;
414
415 switch (BPF_OP(insn->code)) {
416 case BPF_MOV:
417 case BPF_ADD:
418 upper_bound = S16_MAX;
419 lower_bound = S16_MIN;
420 break;
421 case BPF_SUB:
422 upper_bound = -(int)S16_MIN;
423 lower_bound = -(int)S16_MAX;
424 break;
425 case BPF_AND:
426 case BPF_OR:
427 case BPF_XOR:
428 upper_bound = 0xffff;
429 lower_bound = 0;
430 break;
431 case BPF_RSH:
432 case BPF_LSH:
433 case BPF_ARSH:
434 /* Shift amounts are truncated, no need for bounds */
435 upper_bound = S32_MAX;
436 lower_bound = S32_MIN;
437 break;
438 default:
439 return -EINVAL;
440 }
441
442 /*
443 * Immediate move clobbers the register, so no sign/zero
444 * extension needed.
445 */
446 if (BPF_CLASS(insn->code) == BPF_ALU64 &&
447 BPF_OP(insn->code) != BPF_MOV &&
448 get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
449 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
450 /* BPF_ALU | BPF_LSH doesn't need separate sign extension */
451 if (BPF_CLASS(insn->code) == BPF_ALU &&
452 BPF_OP(insn->code) != BPF_LSH &&
453 BPF_OP(insn->code) != BPF_MOV &&
454 get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
455 emit_instr(ctx, sll, dst, dst, 0);
456
457 if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
458 /* single insn immediate case */
459 switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
460 case BPF_ALU64 | BPF_MOV:
461 emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
462 break;
463 case BPF_ALU64 | BPF_AND:
464 case BPF_ALU | BPF_AND:
465 emit_instr(ctx, andi, dst, dst, insn->imm);
466 break;
467 case BPF_ALU64 | BPF_OR:
468 case BPF_ALU | BPF_OR:
469 emit_instr(ctx, ori, dst, dst, insn->imm);
470 break;
471 case BPF_ALU64 | BPF_XOR:
472 case BPF_ALU | BPF_XOR:
473 emit_instr(ctx, xori, dst, dst, insn->imm);
474 break;
475 case BPF_ALU64 | BPF_ADD:
476 emit_instr(ctx, daddiu, dst, dst, insn->imm);
477 break;
478 case BPF_ALU64 | BPF_SUB:
479 emit_instr(ctx, daddiu, dst, dst, -insn->imm);
480 break;
481 case BPF_ALU64 | BPF_RSH:
482 emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
483 break;
484 case BPF_ALU | BPF_RSH:
485 emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
486 break;
487 case BPF_ALU64 | BPF_LSH:
488 emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
489 break;
490 case BPF_ALU | BPF_LSH:
491 emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
492 break;
493 case BPF_ALU64 | BPF_ARSH:
494 emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
495 break;
496 case BPF_ALU | BPF_ARSH:
497 emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
498 break;
499 case BPF_ALU | BPF_MOV:
500 emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
501 break;
502 case BPF_ALU | BPF_ADD:
503 emit_instr(ctx, addiu, dst, dst, insn->imm);
504 break;
505 case BPF_ALU | BPF_SUB:
506 emit_instr(ctx, addiu, dst, dst, -insn->imm);
507 break;
508 default:
509 return -EINVAL;
510 }
511 } else {
512 /* multi insn immediate case */
513 if (BPF_OP(insn->code) == BPF_MOV) {
514 gen_imm_to_reg(insn, dst, ctx);
515 } else {
516 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
517 switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
518 case BPF_ALU64 | BPF_AND:
519 case BPF_ALU | BPF_AND:
520 emit_instr(ctx, and, dst, dst, MIPS_R_AT);
521 break;
522 case BPF_ALU64 | BPF_OR:
523 case BPF_ALU | BPF_OR:
524 emit_instr(ctx, or, dst, dst, MIPS_R_AT);
525 break;
526 case BPF_ALU64 | BPF_XOR:
527 case BPF_ALU | BPF_XOR:
528 emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
529 break;
530 case BPF_ALU64 | BPF_ADD:
531 emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
532 break;
533 case BPF_ALU64 | BPF_SUB:
534 emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
535 break;
536 case BPF_ALU | BPF_ADD:
537 emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
538 break;
539 case BPF_ALU | BPF_SUB:
540 emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
541 break;
542 default:
543 return -EINVAL;
544 }
545 }
546 }
547
548 return 0;
549}
550
551static void * __must_check
552ool_skb_header_pointer(const struct sk_buff *skb, int offset,
553 int len, void *buffer)
554{
555 return skb_header_pointer(skb, offset, len, buffer);
556}
557
558static int size_to_len(const struct bpf_insn *insn)
559{
560 switch (BPF_SIZE(insn->code)) {
561 case BPF_B:
562 return 1;
563 case BPF_H:
564 return 2;
565 case BPF_W:
566 return 4;
567 case BPF_DW:
568 return 8;
569 }
570 return 0;
571}
572
573static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
574{
575 if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
576 emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
577 } else if (value >= 0xffffffff80000000ull ||
578 (value < 0x80000000 && value > 0xffff)) {
579 emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
580 emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
581 } else {
582 int i;
583 bool seen_part = false;
584 int needed_shift = 0;
585
586 for (i = 0; i < 4; i++) {
587 u64 part = (value >> (16 * (3 - i))) & 0xffff;
588
589 if (seen_part && needed_shift > 0 && (part || i == 3)) {
590 emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
591 needed_shift = 0;
592 }
593 if (part) {
594 if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
595 emit_instr(ctx, lui, dst, (s32)(s16)part);
596 needed_shift = -16;
597 } else {
598 emit_instr(ctx, ori, dst,
599 seen_part ? dst : MIPS_R_ZERO,
600 (unsigned int)part);
601 }
602 seen_part = true;
603 }
604 if (seen_part)
605 needed_shift += 16;
606 }
607 }
608}
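/*
 * Example of the general case above (illustrative): for
 * value == 0x123456789abc the 16-bit parts yield
 *
 *	lui	dst, 0x1234
 *	ori	dst, dst, 0x5678
 *	dsll	dst, dst, 16
 *	ori	dst, dst, 0x9abc
 *
 * with all-zero parts skipped and pending shifts accumulated in
 * needed_shift.
 */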
609
610static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
611{
612 int off, b_off;
613
614 ctx->flags |= EBPF_SEEN_TC;
615 /*
616 * if (index >= array->map.max_entries)
617 * goto out;
618 */
619 off = offsetof(struct bpf_array, map.max_entries);
620 emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
621 emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
622 b_off = b_imm(this_idx + 1, ctx);
623 emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
624 /*
625 * if (--TCC < 0)
626 * goto out;
627 */
628 /* Delay slot */
629 emit_instr(ctx, daddiu, MIPS_R_T5,
630 (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
631 b_off = b_imm(this_idx + 1, ctx);
632 emit_instr(ctx, bltz, MIPS_R_T5, b_off);
633 /*
634 * prog = array->ptrs[index];
635 * if (prog == NULL)
636 * goto out;
637 */
638 /* Delay slot */
639 emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
640 emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
641 off = offsetof(struct bpf_array, ptrs);
642 emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
643 b_off = b_imm(this_idx + 1, ctx);
644 emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
645 /* Delay slot */
646 emit_instr(ctx, nop);
647
648 /* goto *(prog->bpf_func + 4); */
649 off = offsetof(struct bpf_prog, bpf_func);
650 emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
651 /* All systems are go... propagate TCC */
652 emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
653 /* Skip first instruction (TCC initialization) */
654 emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
655 return build_int_epilogue(ctx, MIPS_R_T9);
656}
657
658static bool use_bbit_insns(void)
659{
660 switch (current_cpu_type()) {
661 case CPU_CAVIUM_OCTEON:
662 case CPU_CAVIUM_OCTEON_PLUS:
663 case CPU_CAVIUM_OCTEON2:
664 case CPU_CAVIUM_OCTEON3:
665 return true;
666 default:
667 return false;
668 }
669}
670
671static bool is_bad_offset(int b_off)
672{
673 return b_off > 0x1ffff || b_off < -0x20000;
674}
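/*
 * The bounds above follow from the MIPS branch encoding: the 16-bit
 * immediate is a signed word offset, i.e. an 18-bit byte
 * displacement in the range -0x20000..0x1fffc after the << 2.
 */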
675
676/* Returns the number of insn slots consumed. */
677static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
678 int this_idx, int exit_idx)
679{
680 int src, dst, r, td, ts, mem_off, b_off;
681 bool need_swap, did_move, cmp_eq;
682 unsigned int target;
683 u64 t64;
684 s64 t64s;
685
686 switch (insn->code) {
687 case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
688 case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
689 case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
690 case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
691 case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
692 case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
693 case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
694 case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
695 case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
696 case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
697 case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
698 case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
699 case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */
700 case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */
701 case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */
702 case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */
703 case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */
704 case BPF_ALU | BPF_ARSH | BPF_K: /* ALU64_IMM */
705 r = gen_imm_insn(insn, ctx, this_idx);
706 if (r < 0)
707 return r;
708 break;
709 case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
710 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
711 if (dst < 0)
712 return dst;
713 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
714 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
715 if (insn->imm == 1) /* Mult by 1 is a nop */
716 break;
717 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
718 emit_instr(ctx, dmultu, MIPS_R_AT, dst);
719 emit_instr(ctx, mflo, dst);
720 break;
721 case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
722 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
723 if (dst < 0)
724 return dst;
725 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
726 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
727 emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
728 break;
729 case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
730 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
731 if (dst < 0)
732 return dst;
733 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
734 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
735 /* sign extend */
736 emit_instr(ctx, sll, dst, dst, 0);
737 }
738 if (insn->imm == 1) /* Mult by 1 is a nop */
739 break;
740 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
741 emit_instr(ctx, multu, dst, MIPS_R_AT);
742 emit_instr(ctx, mflo, dst);
743 break;
744 case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
745 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
746 if (dst < 0)
747 return dst;
748 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
749 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
750 /* sign extend */
751 emit_instr(ctx, sll, dst, dst, 0);
752 }
753 emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
754 break;
755 case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
756 case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
757 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
758 if (dst < 0)
759 return dst;
760 if (insn->imm == 0) { /* Div by zero */
761 b_off = b_imm(exit_idx, ctx);
762 if (is_bad_offset(b_off))
763 return -E2BIG;
764 emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
765 emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
766 }
767 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
768 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
769 /* sign extend */
770 emit_instr(ctx, sll, dst, dst, 0);
771 if (insn->imm == 1) {
772 /* div by 1 is a nop, mod by 1 is zero */
773 if (BPF_OP(insn->code) == BPF_MOD)
774 emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
775 break;
776 }
777 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
778 emit_instr(ctx, divu, dst, MIPS_R_AT);
779 if (BPF_OP(insn->code) == BPF_DIV)
780 emit_instr(ctx, mflo, dst);
781 else
782 emit_instr(ctx, mfhi, dst);
783 break;
784 case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */
785 case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */
786 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
787 if (dst < 0)
788 return dst;
789 if (insn->imm == 0) { /* Div by zero */
790 b_off = b_imm(exit_idx, ctx);
791 if (is_bad_offset(b_off))
792 return -E2BIG;
793 emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
794 emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
795 }
796 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
797 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
798
799 if (insn->imm == 1) {
800 /* div by 1 is a nop, mod by 1 is zero */
801 if (BPF_OP(insn->code) == BPF_MOD)
802 emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
803 break;
804 }
805 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
806 emit_instr(ctx, ddivu, dst, MIPS_R_AT);
807 if (BPF_OP(insn->code) == BPF_DIV)
808 emit_instr(ctx, mflo, dst);
809 else
810 emit_instr(ctx, mfhi, dst);
811 break;
812 case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
813 case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
814 case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
815 case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
816 case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
817 case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
818 case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
819 case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
820 case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
821 case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
822 case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
823 case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
824 src = ebpf_to_mips_reg(ctx, insn, src_reg);
825 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
826 if (src < 0 || dst < 0)
827 return -EINVAL;
828 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
829 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
830 did_move = false;
831 if (insn->src_reg == BPF_REG_10) {
832 if (BPF_OP(insn->code) == BPF_MOV) {
833 emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
834 did_move = true;
835 } else {
836 emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
837 src = MIPS_R_AT;
838 }
839 } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
840 int tmp_reg = MIPS_R_AT;
841
842 if (BPF_OP(insn->code) == BPF_MOV) {
843 tmp_reg = dst;
844 did_move = true;
845 }
846 emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
847 emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
848 src = MIPS_R_AT;
849 }
850 switch (BPF_OP(insn->code)) {
851 case BPF_MOV:
852 if (!did_move)
853 emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
854 break;
855 case BPF_ADD:
856 emit_instr(ctx, daddu, dst, dst, src);
857 break;
858 case BPF_SUB:
859 emit_instr(ctx, dsubu, dst, dst, src);
860 break;
861 case BPF_XOR:
862 emit_instr(ctx, xor, dst, dst, src);
863 break;
864 case BPF_OR:
865 emit_instr(ctx, or, dst, dst, src);
866 break;
867 case BPF_AND:
868 emit_instr(ctx, and, dst, dst, src);
869 break;
870 case BPF_MUL:
871 emit_instr(ctx, dmultu, dst, src);
872 emit_instr(ctx, mflo, dst);
873 break;
874 case BPF_DIV:
875 case BPF_MOD:
876 b_off = b_imm(exit_idx, ctx);
877 if (is_bad_offset(b_off))
878 return -E2BIG;
879 emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
880 emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
881 emit_instr(ctx, ddivu, dst, src);
882 if (BPF_OP(insn->code) == BPF_DIV)
883 emit_instr(ctx, mflo, dst);
884 else
885 emit_instr(ctx, mfhi, dst);
886 break;
887 case BPF_LSH:
888 emit_instr(ctx, dsllv, dst, dst, src);
889 break;
890 case BPF_RSH:
891 emit_instr(ctx, dsrlv, dst, dst, src);
892 break;
893 case BPF_ARSH:
894 emit_instr(ctx, dsrav, dst, dst, src);
895 break;
896 default:
897 pr_err("ALU64_REG NOT HANDLED\n");
898 return -EINVAL;
899 }
900 break;
901 case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
902 case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
903 case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
904 case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
905 case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
906 case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
907 case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
908 case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
909 case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
910 case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
911 case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
912 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
913 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
914 if (src < 0 || dst < 0)
915 return -EINVAL;
916 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
917 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
918 /* sign extend */
919 emit_instr(ctx, sll, dst, dst, 0);
920 }
921 did_move = false;
922 ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
923 if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
924 int tmp_reg = MIPS_R_AT;
925
926 if (BPF_OP(insn->code) == BPF_MOV) {
927 tmp_reg = dst;
928 did_move = true;
929 }
930 /* sign extend */
931 emit_instr(ctx, sll, tmp_reg, src, 0);
932 src = MIPS_R_AT;
933 }
934 switch (BPF_OP(insn->code)) {
935 case BPF_MOV:
936 if (!did_move)
937 emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
938 break;
939 case BPF_ADD:
940 emit_instr(ctx, addu, dst, dst, src);
941 break;
942 case BPF_SUB:
943 emit_instr(ctx, subu, dst, dst, src);
944 break;
945 case BPF_XOR:
946 emit_instr(ctx, xor, dst, dst, src);
947 break;
948 case BPF_OR:
949 emit_instr(ctx, or, dst, dst, src);
950 break;
951 case BPF_AND:
952 emit_instr(ctx, and, dst, dst, src);
953 break;
954 case BPF_MUL:
955 emit_instr(ctx, mul, dst, dst, src);
956 break;
957 case BPF_DIV:
958 case BPF_MOD:
959 b_off = b_imm(exit_idx, ctx);
960 if (is_bad_offset(b_off))
961 return -E2BIG;
962 emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
963 emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
964 emit_instr(ctx, divu, dst, src);
965 if (BPF_OP(insn->code) == BPF_DIV)
966 emit_instr(ctx, mflo, dst);
967 else
968 emit_instr(ctx, mfhi, dst);
969 break;
970 case BPF_LSH:
971 emit_instr(ctx, sllv, dst, dst, src);
972 break;
973 case BPF_RSH:
974 emit_instr(ctx, srlv, dst, dst, src);
975 break;
976 default:
977 pr_err("ALU_REG NOT HANDLED\n");
978 return -EINVAL;
979 }
980 break;
981 case BPF_JMP | BPF_EXIT:
982 if (this_idx + 1 < exit_idx) {
983 b_off = b_imm(exit_idx, ctx);
984 if (is_bad_offset(b_off))
985 return -E2BIG;
986 emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
987 emit_instr(ctx, nop);
988 }
989 break;
990 case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
991 case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
992 cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
993 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
994 if (dst < 0)
995 return dst;
996 if (insn->imm == 0) {
997 src = MIPS_R_ZERO;
998 } else {
999 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
1000 src = MIPS_R_AT;
1001 }
1002 goto jeq_common;
1003 case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
1004 case BPF_JMP | BPF_JNE | BPF_X:
1005 case BPF_JMP | BPF_JSGT | BPF_X:
1006 case BPF_JMP | BPF_JSGE | BPF_X:
1007 case BPF_JMP | BPF_JGT | BPF_X:
1008 case BPF_JMP | BPF_JGE | BPF_X:
1009 case BPF_JMP | BPF_JSET | BPF_X:
1010 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
1011 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1012 if (src < 0 || dst < 0)
1013 return -EINVAL;
1014 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
1015 ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
1016 if (td == REG_32BIT && ts != REG_32BIT) {
1017 emit_instr(ctx, sll, MIPS_R_AT, src, 0);
1018 src = MIPS_R_AT;
1019 } else if (ts == REG_32BIT && td != REG_32BIT) {
1020 emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
1021 dst = MIPS_R_AT;
1022 }
1023 if (BPF_OP(insn->code) == BPF_JSET) {
1024 emit_instr(ctx, and, MIPS_R_AT, dst, src);
1025 cmp_eq = false;
1026 dst = MIPS_R_AT;
1027 src = MIPS_R_ZERO;
1028 } else if (BPF_OP(insn->code) == BPF_JSGT) {
1029 emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
1030 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1031 b_off = b_imm(exit_idx, ctx);
1032 if (is_bad_offset(b_off))
1033 return -E2BIG;
1034 emit_instr(ctx, blez, MIPS_R_AT, b_off);
1035 emit_instr(ctx, nop);
1036 return 2; /* We consumed the exit. */
1037 }
1038 b_off = b_imm(this_idx + insn->off + 1, ctx);
1039 if (is_bad_offset(b_off))
1040 return -E2BIG;
1041 emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
1042 emit_instr(ctx, nop);
1043 break;
1044 } else if (BPF_OP(insn->code) == BPF_JSGE) {
1045 emit_instr(ctx, slt, MIPS_R_AT, dst, src);
1046 cmp_eq = true;
1047 dst = MIPS_R_AT;
1048 src = MIPS_R_ZERO;
1049 } else if (BPF_OP(insn->code) == BPF_JGT) {
1050 /* dst or src could be AT */
1051 emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
1052 emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
1053 /* SP known to be non-zero, movz becomes boolean not */
1054 emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
1055 emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
1056 emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
1057 cmp_eq = true;
1058 dst = MIPS_R_AT;
1059 src = MIPS_R_ZERO;
1060 } else if (BPF_OP(insn->code) == BPF_JGE) {
1061 emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
1062 cmp_eq = true;
1063 dst = MIPS_R_AT;
1064 src = MIPS_R_ZERO;
1065 } else { /* JNE/JEQ case */
1066 cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
1067 }
1068jeq_common:
1069 /*
 1070 * If the next insn is EXIT and we are jumping around
1071 * only it, invert the sense of the compare and
1072 * conditionally jump to the exit. Poor man's branch
1073 * chaining.
1074 */
1075 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1076 b_off = b_imm(exit_idx, ctx);
1077 if (is_bad_offset(b_off)) {
1078 target = j_target(ctx, exit_idx);
1079 if (target == (unsigned int)-1)
1080 return -E2BIG;
1081 cmp_eq = !cmp_eq;
1082 b_off = 4 * 3;
1083 if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
1084 ctx->offsets[this_idx] |= OFFSETS_B_CONV;
1085 ctx->long_b_conversion = 1;
1086 }
1087 }
1088
1089 if (cmp_eq)
1090 emit_instr(ctx, bne, dst, src, b_off);
1091 else
1092 emit_instr(ctx, beq, dst, src, b_off);
1093 emit_instr(ctx, nop);
1094 if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
1095 emit_instr(ctx, j, target);
1096 emit_instr(ctx, nop);
1097 }
1098 return 2; /* We consumed the exit. */
1099 }
1100 b_off = b_imm(this_idx + insn->off + 1, ctx);
1101 if (is_bad_offset(b_off)) {
1102 target = j_target(ctx, this_idx + insn->off + 1);
1103 if (target == (unsigned int)-1)
1104 return -E2BIG;
1105 cmp_eq = !cmp_eq;
1106 b_off = 4 * 3;
1107 if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
1108 ctx->offsets[this_idx] |= OFFSETS_B_CONV;
1109 ctx->long_b_conversion = 1;
1110 }
1111 }
1112
1113 if (cmp_eq)
1114 emit_instr(ctx, beq, dst, src, b_off);
1115 else
1116 emit_instr(ctx, bne, dst, src, b_off);
1117 emit_instr(ctx, nop);
1118 if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
1119 emit_instr(ctx, j, target);
1120 emit_instr(ctx, nop);
1121 }
1122 break;
1123 case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
1124 case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
1125 cmp_eq = (BPF_OP(insn->code) == BPF_JSGE);
1126 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
1127 if (dst < 0)
1128 return dst;
1129
1130 if (insn->imm == 0) {
1131 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1132 b_off = b_imm(exit_idx, ctx);
1133 if (is_bad_offset(b_off))
1134 return -E2BIG;
1135 if (cmp_eq)
1136 emit_instr(ctx, bltz, dst, b_off);
1137 else
1138 emit_instr(ctx, blez, dst, b_off);
1139 emit_instr(ctx, nop);
1140 return 2; /* We consumed the exit. */
1141 }
1142 b_off = b_imm(this_idx + insn->off + 1, ctx);
1143 if (is_bad_offset(b_off))
1144 return -E2BIG;
1145 if (cmp_eq)
1146 emit_instr(ctx, bgez, dst, b_off);
1147 else
1148 emit_instr(ctx, bgtz, dst, b_off);
1149 emit_instr(ctx, nop);
1150 break;
1151 }
1152 /*
 1153 * Only an "LT" compare is available, so we must use imm + 1
 1154 * to generate "GT".
1155 */
1156 t64s = insn->imm + (cmp_eq ? 0 : 1);
1157 if (t64s >= S16_MIN && t64s <= S16_MAX) {
1158 emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
1159 src = MIPS_R_AT;
1160 dst = MIPS_R_ZERO;
1161 cmp_eq = true;
1162 goto jeq_common;
1163 }
1164 emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
1165 emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
1166 src = MIPS_R_AT;
1167 dst = MIPS_R_ZERO;
1168 cmp_eq = true;
1169 goto jeq_common;
1170
1171 case BPF_JMP | BPF_JGT | BPF_K:
1172 case BPF_JMP | BPF_JGE | BPF_K:
1173 cmp_eq = (BPF_OP(insn->code) == BPF_JGE);
1174 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
1175 if (dst < 0)
1176 return dst;
1177 /*
 1178 * Only an "LT" compare is available, so we must use imm + 1
 1179 * to generate "GT".
1180 */
1181 t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1);
1182 if (t64s >= 0 && t64s <= S16_MAX) {
1183 emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s);
1184 src = MIPS_R_AT;
1185 dst = MIPS_R_ZERO;
1186 cmp_eq = true;
1187 goto jeq_common;
1188 }
1189 emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
1190 emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
1191 src = MIPS_R_AT;
1192 dst = MIPS_R_ZERO;
1193 cmp_eq = true;
1194 goto jeq_common;
1195
1196 case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
1197 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
1198 if (dst < 0)
1199 return dst;
1200
1201 if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) {
1202 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1203 b_off = b_imm(exit_idx, ctx);
1204 if (is_bad_offset(b_off))
1205 return -E2BIG;
1206 emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
1207 emit_instr(ctx, nop);
1208 return 2; /* We consumed the exit. */
1209 }
1210 b_off = b_imm(this_idx + insn->off + 1, ctx);
1211 if (is_bad_offset(b_off))
1212 return -E2BIG;
1213 emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
1214 emit_instr(ctx, nop);
1215 break;
1216 }
1217 t64 = (u32)insn->imm;
1218 emit_const_to_reg(ctx, MIPS_R_AT, t64);
1219 emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
1220 src = MIPS_R_AT;
1221 dst = MIPS_R_ZERO;
1222 cmp_eq = false;
1223 goto jeq_common;
1224
1225 case BPF_JMP | BPF_JA:
1226 /*
1227 * Prefer relative branch for easier debugging, but
1228 * fall back if needed.
1229 */
1230 b_off = b_imm(this_idx + insn->off + 1, ctx);
1231 if (is_bad_offset(b_off)) {
1232 target = j_target(ctx, this_idx + insn->off + 1);
1233 if (target == (unsigned int)-1)
1234 return -E2BIG;
1235 emit_instr(ctx, j, target);
1236 } else {
1237 emit_instr(ctx, b, b_off);
1238 }
1239 emit_instr(ctx, nop);
1240 break;
1241 case BPF_LD | BPF_DW | BPF_IMM:
1242 if (insn->src_reg != 0)
1243 return -EINVAL;
1244 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1245 if (dst < 0)
1246 return dst;
1247 t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
1248 emit_const_to_reg(ctx, dst, t64);
1249 return 2; /* Double slot insn */
1250
1251 case BPF_JMP | BPF_CALL:
1252 ctx->flags |= EBPF_SAVE_RA;
1253 t64s = (s64)insn->imm + (s64)__bpf_call_base;
1254 emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
1255 emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
1256 /* delay slot */
1257 emit_instr(ctx, nop);
1258 break;
1259
1260 case BPF_JMP | BPF_TAIL_CALL:
1261 if (emit_bpf_tail_call(ctx, this_idx))
1262 return -EINVAL;
1263 break;
1264
1265 case BPF_LD | BPF_B | BPF_ABS:
1266 case BPF_LD | BPF_H | BPF_ABS:
1267 case BPF_LD | BPF_W | BPF_ABS:
1268 case BPF_LD | BPF_DW | BPF_ABS:
1269 ctx->flags |= EBPF_SAVE_RA;
1270
1271 gen_imm_to_reg(insn, MIPS_R_A1, ctx);
1272 emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
1273
1274 if (insn->imm < 0) {
1275 emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
1276 } else {
1277 emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
1278 emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
1279 }
1280 goto ld_skb_common;
1281
1282 case BPF_LD | BPF_B | BPF_IND:
1283 case BPF_LD | BPF_H | BPF_IND:
1284 case BPF_LD | BPF_W | BPF_IND:
1285 case BPF_LD | BPF_DW | BPF_IND:
1286 ctx->flags |= EBPF_SAVE_RA;
1287 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
1288 if (src < 0)
1289 return src;
1290 ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
1291 if (ts == REG_32BIT_ZERO_EX) {
1292 /* sign extend */
1293 emit_instr(ctx, sll, MIPS_R_A1, src, 0);
1294 src = MIPS_R_A1;
1295 }
1296 if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
1297 emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
1298 } else {
1299 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
1300 emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
1301 }
1302 /* truncate to 32-bit int */
1303 emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
1304 emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
1305 emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);
1306
1307 emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
1308 emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
1309 emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
1310 emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);
1311
1312ld_skb_common:
1313 emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
1314 /* delay slot move */
1315 emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);
1316
1317 /* Check the error value */
1318 b_off = b_imm(exit_idx, ctx);
1319 if (is_bad_offset(b_off)) {
1320 target = j_target(ctx, exit_idx);
1321 if (target == (unsigned int)-1)
1322 return -E2BIG;
1323
1324 if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
1325 ctx->offsets[this_idx] |= OFFSETS_B_CONV;
1326 ctx->long_b_conversion = 1;
1327 }
1328 emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
1329 emit_instr(ctx, nop);
1330 emit_instr(ctx, j, target);
1331 emit_instr(ctx, nop);
1332 } else {
1333 emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
1334 emit_instr(ctx, nop);
1335 }
1336
1337#ifdef __BIG_ENDIAN
1338 need_swap = false;
1339#else
1340 need_swap = true;
1341#endif
1342 dst = MIPS_R_V0;
1343 switch (BPF_SIZE(insn->code)) {
1344 case BPF_B:
1345 emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
1346 break;
1347 case BPF_H:
1348 emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
1349 if (need_swap)
1350 emit_instr(ctx, wsbh, dst, dst);
1351 break;
1352 case BPF_W:
1353 emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
1354 if (need_swap) {
1355 emit_instr(ctx, wsbh, dst, dst);
1356 emit_instr(ctx, rotr, dst, dst, 16);
1357 }
1358 break;
1359 case BPF_DW:
1360 emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
1361 if (need_swap) {
1362 emit_instr(ctx, dsbh, dst, dst);
1363 emit_instr(ctx, dshd, dst, dst);
1364 }
1365 break;
1366 }
1367
1368 break;
1369 case BPF_ALU | BPF_END | BPF_FROM_BE:
1370 case BPF_ALU | BPF_END | BPF_FROM_LE:
1371 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1372 if (dst < 0)
1373 return dst;
1374 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
1375 if (insn->imm == 64 && td == REG_32BIT)
1376 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
1377
1378 if (insn->imm != 64 &&
1379 (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
1380 /* sign extend */
1381 emit_instr(ctx, sll, dst, dst, 0);
1382 }
1383
1384#ifdef __BIG_ENDIAN
1385 need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
1386#else
1387 need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
1388#endif
1389 if (insn->imm == 16) {
1390 if (need_swap)
1391 emit_instr(ctx, wsbh, dst, dst);
1392 emit_instr(ctx, andi, dst, dst, 0xffff);
1393 } else if (insn->imm == 32) {
1394 if (need_swap) {
1395 emit_instr(ctx, wsbh, dst, dst);
1396 emit_instr(ctx, rotr, dst, dst, 16);
1397 }
 1398 } else { /* 64-bit */
1399 if (need_swap) {
1400 emit_instr(ctx, dsbh, dst, dst);
1401 emit_instr(ctx, dshd, dst, dst);
1402 }
1403 }
1404 break;
1405
1406 case BPF_ST | BPF_B | BPF_MEM:
1407 case BPF_ST | BPF_H | BPF_MEM:
1408 case BPF_ST | BPF_W | BPF_MEM:
1409 case BPF_ST | BPF_DW | BPF_MEM:
1410 if (insn->dst_reg == BPF_REG_10) {
1411 ctx->flags |= EBPF_SEEN_FP;
1412 dst = MIPS_R_SP;
1413 mem_off = insn->off + MAX_BPF_STACK;
1414 } else {
1415 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1416 if (dst < 0)
1417 return dst;
1418 mem_off = insn->off;
1419 }
1420 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
1421 switch (BPF_SIZE(insn->code)) {
1422 case BPF_B:
1423 emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
1424 break;
1425 case BPF_H:
1426 emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
1427 break;
1428 case BPF_W:
1429 emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
1430 break;
1431 case BPF_DW:
1432 emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
1433 break;
1434 }
1435 break;
1436
1437 case BPF_LDX | BPF_B | BPF_MEM:
1438 case BPF_LDX | BPF_H | BPF_MEM:
1439 case BPF_LDX | BPF_W | BPF_MEM:
1440 case BPF_LDX | BPF_DW | BPF_MEM:
1441 if (insn->src_reg == BPF_REG_10) {
1442 ctx->flags |= EBPF_SEEN_FP;
1443 src = MIPS_R_SP;
1444 mem_off = insn->off + MAX_BPF_STACK;
1445 } else {
1446 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
1447 if (src < 0)
1448 return src;
1449 mem_off = insn->off;
1450 }
1451 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1452 if (dst < 0)
1453 return dst;
1454 switch (BPF_SIZE(insn->code)) {
1455 case BPF_B:
1456 emit_instr(ctx, lbu, dst, mem_off, src);
1457 break;
1458 case BPF_H:
1459 emit_instr(ctx, lhu, dst, mem_off, src);
1460 break;
1461 case BPF_W:
1462 emit_instr(ctx, lw, dst, mem_off, src);
1463 break;
1464 case BPF_DW:
1465 emit_instr(ctx, ld, dst, mem_off, src);
1466 break;
1467 }
1468 break;
1469
1470 case BPF_STX | BPF_B | BPF_MEM:
1471 case BPF_STX | BPF_H | BPF_MEM:
1472 case BPF_STX | BPF_W | BPF_MEM:
1473 case BPF_STX | BPF_DW | BPF_MEM:
1474 case BPF_STX | BPF_W | BPF_XADD:
1475 case BPF_STX | BPF_DW | BPF_XADD:
1476 if (insn->dst_reg == BPF_REG_10) {
1477 ctx->flags |= EBPF_SEEN_FP;
1478 dst = MIPS_R_SP;
1479 mem_off = insn->off + MAX_BPF_STACK;
1480 } else {
1481 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1482 if (dst < 0)
1483 return dst;
1484 mem_off = insn->off;
1485 }
1486 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
1487 if (src < 0)
 1488 return src;
1489 if (BPF_MODE(insn->code) == BPF_XADD) {
1490 switch (BPF_SIZE(insn->code)) {
1491 case BPF_W:
1492 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
1493 emit_instr(ctx, sll, MIPS_R_AT, src, 0);
1494 src = MIPS_R_AT;
1495 }
1496 emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
1497 emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
1498 emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
1499 /*
 1500 * On failure, branch back to the ll (-4
 1501 * instructions of 4 bytes each).
1502 */
1503 emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
1504 emit_instr(ctx, nop);
1505 break;
1506 case BPF_DW:
1507 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
1508 emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
1509 emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
1510 src = MIPS_R_AT;
1511 }
1512 emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
1513 emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
1514 emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
1515 emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
1516 emit_instr(ctx, nop);
1517 break;
1518 }
1519 } else { /* BPF_MEM */
1520 switch (BPF_SIZE(insn->code)) {
1521 case BPF_B:
1522 emit_instr(ctx, sb, src, mem_off, dst);
1523 break;
1524 case BPF_H:
1525 emit_instr(ctx, sh, src, mem_off, dst);
1526 break;
1527 case BPF_W:
1528 emit_instr(ctx, sw, src, mem_off, dst);
1529 break;
1530 case BPF_DW:
1531 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
1532 emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
1533 emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
1534 src = MIPS_R_AT;
1535 }
1536 emit_instr(ctx, sd, src, mem_off, dst);
1537 break;
1538 }
1539 }
1540 break;
1541
1542 default:
1543 pr_err("NOT HANDLED %d - (%02x)\n",
1544 this_idx, (unsigned int)insn->code);
1545 return -EINVAL;
1546 }
1547 return 1;
1548}
1549
1550#define RVT_VISITED_MASK 0xc000000000000000ull
1551#define RVT_FALL_THROUGH 0x4000000000000000ull
1552#define RVT_BRANCH_TAKEN 0x8000000000000000ull
1553#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
1554
1555static int build_int_body(struct jit_ctx *ctx)
1556{
1557 const struct bpf_prog *prog = ctx->skf;
1558 const struct bpf_insn *insn;
1559 int i, r;
1560
1561 for (i = 0; i < prog->len; ) {
1562 insn = prog->insnsi + i;
1563 if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
1564 /* dead instruction, don't emit it. */
1565 i++;
1566 continue;
1567 }
1568
1569 if (ctx->target == NULL)
1570 ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);
1571
1572 r = build_one_insn(insn, ctx, i, prog->len);
1573 if (r < 0)
1574 return r;
1575 i += r;
1576 }
1577 /* epilogue offset */
1578 if (ctx->target == NULL)
1579 ctx->offsets[i] = ctx->idx * 4;
1580
1581 /*
1582 * All exits have an offset of the epilogue, some offsets may
 1583 * not have been set due to branch-around threading, so set
1584 * them now.
1585 */
1586 if (ctx->target == NULL)
1587 for (i = 0; i < prog->len; i++) {
1588 insn = prog->insnsi + i;
1589 if (insn->code == (BPF_JMP | BPF_EXIT))
1590 ctx->offsets[i] = ctx->idx * 4;
1591 }
1592 return 0;
1593}
1594
1595/* return the last idx processed, or negative for error */
1596static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
1597 int start_idx, bool follow_taken)
1598{
1599 const struct bpf_prog *prog = ctx->skf;
1600 const struct bpf_insn *insn;
1601 u64 exit_rvt = initial_rvt;
1602 u64 *rvt = ctx->reg_val_types;
1603 int idx;
1604 int reg;
1605
1606 for (idx = start_idx; idx < prog->len; idx++) {
1607 rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
1608 insn = prog->insnsi + idx;
1609 switch (BPF_CLASS(insn->code)) {
1610 case BPF_ALU:
1611 switch (BPF_OP(insn->code)) {
1612 case BPF_ADD:
1613 case BPF_SUB:
1614 case BPF_MUL:
1615 case BPF_DIV:
1616 case BPF_OR:
1617 case BPF_AND:
1618 case BPF_LSH:
1619 case BPF_RSH:
1620 case BPF_NEG:
1621 case BPF_MOD:
1622 case BPF_XOR:
1623 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1624 break;
1625 case BPF_MOV:
1626 if (BPF_SRC(insn->code)) {
1627 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1628 } else {
 1629 /* IMM to REG move */
1630 if (insn->imm >= 0)
1631 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1632 else
1633 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1634 }
1635 break;
1636 case BPF_END:
1637 if (insn->imm == 64)
1638 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1639 else if (insn->imm == 32)
1640 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1641 else /* insn->imm == 16 */
1642 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1643 break;
1644 }
1645 rvt[idx] |= RVT_DONE;
1646 break;
1647 case BPF_ALU64:
1648 switch (BPF_OP(insn->code)) {
1649 case BPF_MOV:
1650 if (BPF_SRC(insn->code)) {
 1651 /* REG to REG move */
1652 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1653 } else {
 1654 /* IMM to REG move */
1655 if (insn->imm >= 0)
1656 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1657 else
1658 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
1659 }
1660 break;
1661 default:
1662 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1663 }
1664 rvt[idx] |= RVT_DONE;
1665 break;
1666 case BPF_LD:
1667 switch (BPF_SIZE(insn->code)) {
1668 case BPF_DW:
1669 if (BPF_MODE(insn->code) == BPF_IMM) {
1670 s64 val;
1671
1672 val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
1673 if (val > 0 && val <= S32_MAX)
1674 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1675 else if (val >= S32_MIN && val <= S32_MAX)
1676 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
1677 else
1678 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1679 rvt[idx] |= RVT_DONE;
1680 idx++;
1681 } else {
1682 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1683 }
1684 break;
1685 case BPF_B:
1686 case BPF_H:
1687 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1688 break;
1689 case BPF_W:
1690 if (BPF_MODE(insn->code) == BPF_IMM)
1691 set_reg_val_type(&exit_rvt, insn->dst_reg,
1692 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
1693 else
1694 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1695 break;
1696 }
1697 rvt[idx] |= RVT_DONE;
1698 break;
1699 case BPF_LDX:
1700 switch (BPF_SIZE(insn->code)) {
1701 case BPF_DW:
1702 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1703 break;
1704 case BPF_B:
1705 case BPF_H:
1706 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1707 break;
1708 case BPF_W:
1709 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1710 break;
1711 }
1712 rvt[idx] |= RVT_DONE;
1713 break;
1714 case BPF_JMP:
1715 switch (BPF_OP(insn->code)) {
1716 case BPF_EXIT:
1717 rvt[idx] = RVT_DONE | exit_rvt;
1718 rvt[prog->len] = exit_rvt;
1719 return idx;
1720 case BPF_JA:
1721 rvt[idx] |= RVT_DONE;
1722 idx += insn->off;
1723 break;
1724 case BPF_JEQ:
1725 case BPF_JGT:
1726 case BPF_JGE:
1727 case BPF_JSET:
1728 case BPF_JNE:
1729 case BPF_JSGT:
1730 case BPF_JSGE:
1731 if (follow_taken) {
1732 rvt[idx] |= RVT_BRANCH_TAKEN;
1733 idx += insn->off;
1734 follow_taken = false;
1735 } else {
1736 rvt[idx] |= RVT_FALL_THROUGH;
1737 }
1738 break;
1739 case BPF_CALL:
1740 set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
1741 /* Upon call return, argument registers are clobbered. */
1742 for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
1743 set_reg_val_type(&exit_rvt, reg, REG_64BIT);
1744
1745 rvt[idx] |= RVT_DONE;
1746 break;
1747 default:
1748 WARN(1, "Unhandled BPF_JMP case.\n");
1749 rvt[idx] |= RVT_DONE;
1750 break;
1751 }
1752 break;
1753 default:
1754 rvt[idx] |= RVT_DONE;
1755 break;
1756 }
1757 }
1758 return idx;
1759}
1760
1761/*
1762 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
1763 * each eBPF insn. This allows unneeded sign and zero extension
1764 * operations to be omitted.
1765 *
 1766 * It does not yet handle confluence of control paths with
 1767 * conflicting ranges, but it is good enough for most sane code.
1768 */
1769static int reg_val_propagate(struct jit_ctx *ctx)
1770{
1771 const struct bpf_prog *prog = ctx->skf;
1772 u64 exit_rvt;
1773 int reg;
1774 int i;
1775
1776 /*
1777 * 11 registers * 3 bits/reg leaves top bits free for other
 1778 * uses. Bits 62..63 are used to see if we have visited an insn.
1779 */
1780 exit_rvt = 0;
1781
1782 /* Upon entry, argument registers are 64-bit. */
1783 for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
1784 set_reg_val_type(&exit_rvt, reg, REG_64BIT);
1785
1786 /*
1787 * First follow all conditional branches on the fall-through
 1788 * edge of control flow.
1789 */
1790 reg_val_propagate_range(ctx, exit_rvt, 0, false);
1791restart_search:
1792 /*
1793 * Then repeatedly find the first conditional branch where
1794 * both edges of control flow have not been taken, and follow
1795 * the branch taken edge. We will end up restarting the
1796 * search once per conditional branch insn.
1797 */
1798 for (i = 0; i < prog->len; i++) {
1799 u64 rvt = ctx->reg_val_types[i];
1800
1801 if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
1802 (rvt & RVT_VISITED_MASK) == 0)
1803 continue;
1804 if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
1805 reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
1806 } else { /* RVT_BRANCH_TAKEN */
1807 WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
1808 reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
1809 }
1810 goto restart_search;
1811 }
1812 /*
1813 * Eventually all conditional branches have been followed on
1814 * both branches and we are done. Any insn that has not been
1815 * visited at this point is dead.
1816 */
1817
1818 return 0;
1819}
1820
1821static void jit_fill_hole(void *area, unsigned int size)
1822{
1823 u32 *p;
1824
1825 /* We are guaranteed to have aligned memory. */
1826 for (p = area; size >= sizeof(u32); size -= sizeof(u32))
1827 uasm_i_break(&p, BRK_BUG); /* Increments p */
1828}
1829
1830struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1831{
1832 struct bpf_prog *orig_prog = prog;
1833 bool tmp_blinded = false;
1834 struct bpf_prog *tmp;
1835 struct bpf_binary_header *header = NULL;
1836 struct jit_ctx ctx;
1837 unsigned int image_size;
1838 u8 *image_ptr;
1839
1840 if (!bpf_jit_enable || !cpu_has_mips64r2)
1841 return prog;
1842
1843 tmp = bpf_jit_blind_constants(prog);
1844 /* If blinding was requested and we failed during blinding,
1845 * we must fall back to the interpreter.
1846 */
1847 if (IS_ERR(tmp))
1848 return orig_prog;
1849 if (tmp != prog) {
1850 tmp_blinded = true;
1851 prog = tmp;
1852 }
1853
1854 memset(&ctx, 0, sizeof(ctx));
1855
1856 ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
1857 if (ctx.offsets == NULL)
1858 goto out_err;
1859
1860 ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
1861 if (ctx.reg_val_types == NULL)
1862 goto out_err;
1863
1864 ctx.skf = prog;
1865
1866 if (reg_val_propagate(&ctx))
1867 goto out_err;
1868
1869 /*
1870 * First pass discovers used resources and instruction offsets
1871 * assuming short branches are used.
1872 */
1873 if (build_int_body(&ctx))
1874 goto out_err;
1875
1876 /*
 1877 * If no calls are made (EBPF_SAVE_RA not set), the tail call
 1878 * count is kept in $v1, else we must save it in $s4.
1879 */
1880 if (ctx.flags & EBPF_SEEN_TC) {
1881 if (ctx.flags & EBPF_SAVE_RA)
1882 ctx.flags |= EBPF_SAVE_S4;
1883 else
1884 ctx.flags |= EBPF_TCC_IN_V1;
1885 }
1886
1887 /*
 1888 * Second pass generates offsets; if any branches are out of
 1889 * range, a jump-around long sequence is generated, and we have
1890 * to try again from the beginning to generate the new
1891 * offsets. This is done until no additional conversions are
1892 * necessary.
1893 */
1894 do {
1895 ctx.idx = 0;
1896 ctx.gen_b_offsets = 1;
1897 ctx.long_b_conversion = 0;
1898 if (gen_int_prologue(&ctx))
1899 goto out_err;
1900 if (build_int_body(&ctx))
1901 goto out_err;
1902 if (build_int_epilogue(&ctx, MIPS_R_RA))
1903 goto out_err;
1904 } while (ctx.long_b_conversion);
1905
1906 image_size = 4 * ctx.idx;
1907
1908 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1909 sizeof(u32), jit_fill_hole);
1910 if (header == NULL)
1911 goto out_err;
1912
1913 ctx.target = (u32 *)image_ptr;
1914
1915 /* Third pass generates the code */
1916 ctx.idx = 0;
1917 if (gen_int_prologue(&ctx))
1918 goto out_err;
1919 if (build_int_body(&ctx))
1920 goto out_err;
1921 if (build_int_epilogue(&ctx, MIPS_R_RA))
1922 goto out_err;
1923
1924 /* Update the icache */
1925 flush_icache_range((unsigned long)ctx.target,
1926 (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
1927
1928 if (bpf_jit_enable > 1)
1929 /* Dump JIT code */
1930 bpf_jit_dump(prog->len, image_size, 2, ctx.target);
1931
1932 bpf_jit_binary_lock_ro(header);
1933 prog->bpf_func = (void *)ctx.target;
1934 prog->jited = 1;
1935 prog->jited_len = image_size;
1936out_normal:
1937 if (tmp_blinded)
1938 bpf_jit_prog_release_other(prog, prog == orig_prog ?
1939 tmp : orig_prog);
1940 kfree(ctx.offsets);
1941 kfree(ctx.reg_val_types);
1942
1943 return prog;
1944
1945out_err:
1946 prog = orig_prog;
1947 if (header)
1948 bpf_jit_binary_free(header);
1949 goto out_normal;
1950}
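
The reg_val_propagate() pass above packs one 3-bit value type per eBPF register into a single u64, per the "11 registers * 3 bits/reg" comment, with the top bits reserved for the visited state. A minimal sketch of that packing, with illustrative names and enum values (the patch's own helper is set_reg_val_type(), used above; this is not the patch's code):

    #include <stdint.h>

    enum reg_val_type { REG_64BIT, REG_32BIT, REG_32BIT_POS }; /* illustrative values */

    /* Register 'reg' owns bits [reg*3, reg*3+2]; 11 regs use 33 bits,
     * leaving the top of the u64 free for RVT_VISITED_MASK state. */
    static inline void set_rvt(uint64_t *rvt, int reg, enum reg_val_type t)
    {
            *rvt &= ~(7ULL << (reg * 3));           /* clear the 3-bit slot */
            *rvt |= (uint64_t)t << (reg * 3);       /* store the new type   */
    }

    static inline enum reg_val_type get_rvt(uint64_t rvt, int reg)
    {
            return (enum reg_val_type)((rvt >> (reg * 3)) & 7);
    }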
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 01c6fbc3e85b..1803797fc885 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
1253 insn_count = bpf_jit_insn(jit, fp, i); 1253 insn_count = bpf_jit_insn(jit, fp, i);
1254 if (insn_count < 0) 1254 if (insn_count < 0)
1255 return -1; 1255 return -1;
1256 jit->addrs[i + 1] = jit->prg; /* Next instruction address */ 1256 /* Next instruction address */
1257 jit->addrs[i + insn_count] = jit->prg;
1257 } 1258 }
1258 bpf_jit_epilogue(jit); 1259 bpf_jit_epilogue(jit);
1259 1260
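
The one-line s390 change above is about eBPF's only two-slot instruction: BPF_LD | BPF_DW | BPF_IMM (the 64-bit immediate load) occupies two struct bpf_insn entries, so bpf_jit_insn() returns insn_count == 2 for it. Indexing the address table with a constant 1 therefore left the entry for the following real instruction stale, and branches targeting it resolved to a wrong offset. Roughly (illustrative framing of the loop body):

    /* eBPF slot indices:
     *   0: r1 = 0x1122334455667788   <- LD_IMM64, occupies slots 0 and 1
     *   2: if r1 == r2 goto +N       <- resolved through jit->addrs[2]
     * The old code wrote only addrs[0 + 1]; addrs[2] stayed stale.
     */
    insn_count = bpf_jit_insn(jit, fp, i);  /* 2 for LD_IMM64, else 1 */
    jit->addrs[i + insn_count] = jit->prg;  /* start of the next insn */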
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 7b5fd8fb1761..aaca0b3d662e 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -44,7 +44,6 @@ struct procdata {
44 char log_name[15]; /* log filename */ 44 char log_name[15]; /* log filename */
45 struct log_data *log_head, *log_tail; /* head and tail for queue */ 45 struct log_data *log_head, *log_tail; /* head and tail for queue */
46 int if_used; /* open count for interface */ 46 int if_used; /* open count for interface */
47 int volatile del_lock; /* lock for delete operations */
48 unsigned char logtmp[LOG_MAX_LINELEN]; 47 unsigned char logtmp[LOG_MAX_LINELEN];
49 wait_queue_head_t rd_queue; 48 wait_queue_head_t rd_queue;
50}; 49};
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
102{ 101{
103 struct log_data *ib; 102 struct log_data *ib;
104 struct procdata *pd = card->proclog; 103 struct procdata *pd = card->proclog;
105 int i;
106 unsigned long flags; 104 unsigned long flags;
107 105
108 if (!pd) 106 if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
126 else 124 else
127 pd->log_tail->next = ib; /* follows existing messages */ 125 pd->log_tail->next = ib; /* follows existing messages */
128 pd->log_tail = ib; /* new tail */ 126 pd->log_tail = ib; /* new tail */
129 i = pd->del_lock++; /* get lock state */
130 spin_unlock_irqrestore(&card->hysdn_lock, flags);
131 127
132 /* delete old entries */ 128 /* delete old entries */
133 if (!i) 129 while (pd->log_head->next) {
134 while (pd->log_head->next) { 130 if ((pd->log_head->usage_cnt <= 0) &&
135 if ((pd->log_head->usage_cnt <= 0) && 131 (pd->log_head->next->usage_cnt <= 0)) {
136 (pd->log_head->next->usage_cnt <= 0)) { 132 ib = pd->log_head;
137 ib = pd->log_head; 133 pd->log_head = pd->log_head->next;
138 pd->log_head = pd->log_head->next; 134 kfree(ib);
139 kfree(ib); 135 } else {
140 } else 136 break;
141 break; 137 }
142 } /* pd->log_head->next */ 138 } /* pd->log_head->next */
143 pd->del_lock--; /* release lock level */ 139
140 spin_unlock_irqrestore(&card->hysdn_lock, flags);
141
144 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ 142 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
145} /* put_log_buffer */ 143} /* put_log_buffer */
146 144
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1e46418a3b74..264b281eb86b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
625 * all finished. 625 * all finished.
626 */ 626 */
627 mt7623_pad_clk_setup(ds); 627 mt7623_pad_clk_setup(ds);
628 } else {
629 u16 lcl_adv = 0, rmt_adv = 0;
630 u8 flowctrl;
631 u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;
632
633 switch (phydev->speed) {
634 case SPEED_1000:
635 mcr |= PMCR_FORCE_SPEED_1000;
636 break;
637 case SPEED_100:
638 mcr |= PMCR_FORCE_SPEED_100;
639 break;
640 };
641
642 if (phydev->link)
643 mcr |= PMCR_FORCE_LNK;
644
645 if (phydev->duplex) {
646 mcr |= PMCR_FORCE_FDX;
647
648 if (phydev->pause)
649 rmt_adv = LPA_PAUSE_CAP;
650 if (phydev->asym_pause)
651 rmt_adv |= LPA_PAUSE_ASYM;
652
653 if (phydev->advertising & ADVERTISED_Pause)
654 lcl_adv |= ADVERTISE_PAUSE_CAP;
655 if (phydev->advertising & ADVERTISED_Asym_Pause)
656 lcl_adv |= ADVERTISE_PAUSE_ASYM;
657
658 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
659
660 if (flowctrl & FLOW_CTRL_TX)
661 mcr |= PMCR_TX_FC_EN;
662 if (flowctrl & FLOW_CTRL_RX)
663 mcr |= PMCR_RX_FC_EN;
664 }
665 mt7530_write(priv, MT7530_PMCR_P(port), mcr);
628 } 666 }
629} 667}
630 668
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index b83d76b99802..74db9822eb40 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -151,6 +151,7 @@ enum mt7530_stp_state {
151#define PMCR_TX_FC_EN BIT(5) 151#define PMCR_TX_FC_EN BIT(5)
152#define PMCR_RX_FC_EN BIT(4) 152#define PMCR_RX_FC_EN BIT(4)
153#define PMCR_FORCE_SPEED_1000 BIT(3) 153#define PMCR_FORCE_SPEED_1000 BIT(3)
154#define PMCR_FORCE_SPEED_100 BIT(2)
154#define PMCR_FORCE_FDX BIT(1) 155#define PMCR_FORCE_FDX BIT(1)
155#define PMCR_FORCE_LNK BIT(0) 156#define PMCR_FORCE_LNK BIT(0)
156#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ 157#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 86058a9f3417..1d307f2def2d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1785 1785
1786 xgene_enet_gpiod_get(pdata); 1786 xgene_enet_gpiod_get(pdata);
1787 1787
1788 if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { 1788 pdata->clk = devm_clk_get(&pdev->dev, NULL);
1789 pdata->clk = devm_clk_get(&pdev->dev, NULL); 1789 if (IS_ERR(pdata->clk)) {
1790 if (IS_ERR(pdata->clk)) { 1790 if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
1791 /* Abort if the clock is defined but couldn't be 1791 /* Abort if the clock is defined but couldn't be
1792 * retrived. Always abort if the clock is missing on 1792 * retrived. Always abort if the clock is missing on
1793 * DT system as the driver can't cope with this case. 1793 * DT system as the driver can't cope with this case.
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f411936b744c..a1125d10c825 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
2368 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); 2368 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2369 2369
2370 spin_lock_init(&bp->lock); 2370 spin_lock_init(&bp->lock);
2371 u64_stats_init(&bp->hw_stats.syncp);
2371 2372
2372 bp->rx_pending = B44_DEF_RX_RING_PENDING; 2373 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2373 bp->tx_pending = B44_DEF_TX_RING_PENDING; 2374 bp->tx_pending = B44_DEF_TX_RING_PENDING;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a3e694679635..c45e8e3b82d3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
111static void send_request_unmap(struct ibmvnic_adapter *, u8); 111static void send_request_unmap(struct ibmvnic_adapter *, u8);
112static void send_login(struct ibmvnic_adapter *adapter); 112static void send_login(struct ibmvnic_adapter *adapter);
113static void send_cap_queries(struct ibmvnic_adapter *adapter); 113static void send_cap_queries(struct ibmvnic_adapter *adapter);
114static int init_sub_crqs(struct ibmvnic_adapter *);
114static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); 115static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
115static int ibmvnic_init(struct ibmvnic_adapter *); 116static int ibmvnic_init(struct ibmvnic_adapter *);
116static void release_crq_queue(struct ibmvnic_adapter *); 117static void release_crq_queue(struct ibmvnic_adapter *);
@@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev)
651 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 652 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
652 unsigned long timeout = msecs_to_jiffies(30000); 653 unsigned long timeout = msecs_to_jiffies(30000);
653 struct device *dev = &adapter->vdev->dev; 654 struct device *dev = &adapter->vdev->dev;
655 int rc;
654 656
655 do { 657 do {
656 if (adapter->renegotiate) { 658 if (adapter->renegotiate) {
@@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev)
664 dev_err(dev, "Capabilities query timeout\n"); 666 dev_err(dev, "Capabilities query timeout\n");
665 return -1; 667 return -1;
666 } 668 }
669 rc = init_sub_crqs(adapter);
670 if (rc) {
671 dev_err(dev,
672 "Initialization of SCRQ's failed\n");
673 return -1;
674 }
675 rc = init_sub_crq_irqs(adapter);
676 if (rc) {
677 dev_err(dev,
678 "Initialization of SCRQ's irqs failed\n");
679 return -1;
680 }
667 } 681 }
668 682
669 reinit_completion(&adapter->init_done); 683 reinit_completion(&adapter->init_done);
@@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3004 *req_value, 3018 *req_value,
3005 (long int)be64_to_cpu(crq->request_capability_rsp. 3019 (long int)be64_to_cpu(crq->request_capability_rsp.
3006 number), name); 3020 number), name);
3007 release_sub_crqs(adapter);
3008 *req_value = be64_to_cpu(crq->request_capability_rsp.number); 3021 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
3009 ibmvnic_send_req_caps(adapter, 1); 3022 ibmvnic_send_req_caps(adapter, 1);
3010 return; 3023 return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b936febc315a..2194960d5855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1113 if (!tx_ring->tx_bi) 1113 if (!tx_ring->tx_bi)
1114 goto err; 1114 goto err;
1115 1115
1116 u64_stats_init(&tx_ring->syncp);
1117
1116 /* round up to nearest 4K */ 1118 /* round up to nearest 4K */
1117 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 1119 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1118 /* add u32 for head writeback, align after this takes care of 1120 /* add u32 for head writeback, align after this takes care of
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 084c53582793..032f8ac06357 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2988 if (!tx_ring->tx_buffer_info) 2988 if (!tx_ring->tx_buffer_info)
2989 goto err; 2989 goto err;
2990 2990
2991 u64_stats_init(&tx_ring->syncp);
2992
2991 /* round up to nearest 4K */ 2993 /* round up to nearest 4K */
2992 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2994 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2993 tx_ring->size = ALIGN(tx_ring->size, 4096); 2995 tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
3046 if (!rx_ring->rx_buffer_info) 3048 if (!rx_ring->rx_buffer_info)
3047 goto err; 3049 goto err;
3048 3050
3051 u64_stats_init(&rx_ring->syncp);
3052
3049 /* Round up to nearest 4K */ 3053 /* Round up to nearest 4K */
3050 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 3054 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3051 rx_ring->size = ALIGN(rx_ring->size, 4096); 3055 rx_ring->size = ALIGN(rx_ring->size, 4096);
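
The u64_stats_init() calls added here, and the matching ones in the b44, i40e, nfp and netvsc hunks of this series, initialize the seqcount behind 64-bit stats on 32-bit SMP builds, where an uninitialized syncp trips lockdep. For context, a reader of such counters spins on that seqcount roughly like this (field names are illustrative):

    unsigned int start;
    u64 packets, bytes;

    do {
            start = u64_stats_fetch_begin_irq(&ring->syncp);
            packets = ring->stats.packets;
            bytes   = ring->stats.bytes;
    } while (u64_stats_fetch_retry_irq(&ring->syncp, start));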
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c751a1d434ad..3d4e4a5d00d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
223 struct ethtool_wolinfo *wol) 223 struct ethtool_wolinfo *wol)
224{ 224{
225 struct mlx4_en_priv *priv = netdev_priv(netdev); 225 struct mlx4_en_priv *priv = netdev_priv(netdev);
226 struct mlx4_caps *caps = &priv->mdev->dev->caps;
226 int err = 0; 227 int err = 0;
227 u64 config = 0; 228 u64 config = 0;
228 u64 mask; 229 u64 mask;
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
235 mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : 236 mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
236 MLX4_DEV_CAP_FLAG_WOL_PORT2; 237 MLX4_DEV_CAP_FLAG_WOL_PORT2;
237 238
238 if (!(priv->mdev->dev->caps.flags & mask)) { 239 if (!(caps->flags & mask)) {
239 wol->supported = 0; 240 wol->supported = 0;
240 wol->wolopts = 0; 241 wol->wolopts = 0;
241 return; 242 return;
242 } 243 }
243 244
245 if (caps->wol_port[priv->port])
246 wol->supported = WAKE_MAGIC;
247 else
248 wol->supported = 0;
249
244 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); 250 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
245 if (err) { 251 if (err) {
246 en_err(priv, "Failed to get WoL information\n"); 252 en_err(priv, "Failed to get WoL information\n");
247 return; 253 return;
248 } 254 }
249 255
250 if (config & MLX4_EN_WOL_MAGIC) 256 if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
251 wol->supported = WAKE_MAGIC;
252 else
253 wol->supported = 0;
254
255 if (config & MLX4_EN_WOL_ENABLED)
256 wol->wolopts = WAKE_MAGIC; 257 wol->wolopts = WAKE_MAGIC;
257 else 258 else
258 wol->wolopts = 0; 259 wol->wolopts = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 436f7689a032..bf1638044a7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
574 * header, the HW adds it. To address that, we are subtracting the pseudo 574 * header, the HW adds it. To address that, we are subtracting the pseudo
575 * header checksum from the checksum value provided by the HW. 575 * header checksum from the checksum value provided by the HW.
576 */ 576 */
577static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, 577static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
578 struct iphdr *iph) 578 struct iphdr *iph)
579{ 579{
580 __u16 length_for_csum = 0; 580 __u16 length_for_csum = 0;
581 __wsum csum_pseudo_header = 0; 581 __wsum csum_pseudo_header = 0;
582 __u8 ipproto = iph->protocol;
583
584 if (unlikely(ipproto == IPPROTO_SCTP))
585 return -1;
582 586
583 length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); 587 length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
584 csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, 588 csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
585 length_for_csum, iph->protocol, 0); 589 length_for_csum, ipproto, 0);
586 skb->csum = csum_sub(hw_checksum, csum_pseudo_header); 590 skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
591 return 0;
587} 592}
588 593
589#if IS_ENABLED(CONFIG_IPV6) 594#if IS_ENABLED(CONFIG_IPV6)
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
594static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, 599static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
595 struct ipv6hdr *ipv6h) 600 struct ipv6hdr *ipv6h)
596{ 601{
602 __u8 nexthdr = ipv6h->nexthdr;
597 __wsum csum_pseudo_hdr = 0; 603 __wsum csum_pseudo_hdr = 0;
598 604
599 if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT || 605 if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
600 ipv6h->nexthdr == IPPROTO_HOPOPTS)) 606 nexthdr == IPPROTO_HOPOPTS ||
607 nexthdr == IPPROTO_SCTP))
601 return -1; 608 return -1;
602 hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); 609 hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
603 610
604 csum_pseudo_hdr = csum_partial(&ipv6h->saddr, 611 csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
605 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); 612 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
606 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); 613 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
607 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); 614 csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
615 (__force __wsum)htons(nexthdr));
608 616
609 skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); 617 skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
610 skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); 618 skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
627 } 635 }
628 636
629 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) 637 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
630 get_fixed_ipv4_csum(hw_checksum, skb, hdr); 638 return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
631#if IS_ENABLED(CONFIG_IPV6) 639#if IS_ENABLED(CONFIG_IPV6)
632 else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) 640 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
633 if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr))) 641 return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
634 return -1;
635#endif 642#endif
636 return 0; 643 return 0;
637} 644}
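
The reworked helpers above rely on ones-complement checksum algebra: the hardware's CHECKSUM_COMPLETE value covers pseudo-header plus L4 payload, so subtracting a recomputed pseudo-header sum leaves exactly the payload sum the stack expects. SCTP is bailed on because it uses CRC32c, which this algebra cannot correct. Schematically:

    /* hw_checksum = csum(pseudo_header) + csum(l4_payload)   (one's complement)
     * skb->csum   = hw_checksum - csum(pseudo_header)
     *             = csum(l4_payload)
     */
    skb->csum = csum_sub(hw_checksum, csum_pseudo_header);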
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 37e84a59e751..041c0ed65929 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
159 [32] = "Loopback source checks support", 159 [32] = "Loopback source checks support",
160 [33] = "RoCEv2 support", 160 [33] = "RoCEv2 support",
161 [34] = "DMFS Sniffer support (UC & MC)", 161 [34] = "DMFS Sniffer support (UC & MC)",
162 [35] = "QinQ VST mode support", 162 [35] = "Diag counters per port",
163 [36] = "sl to vl mapping table change event support" 163 [36] = "QinQ VST mode support",
164 [37] = "sl to vl mapping table change event support",
164 }; 165 };
165 int i; 166 int i;
166 167
@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
764#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e 765#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
765#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 766#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
766#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 767#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
768#define QUERY_DEV_CAP_WOL_OFFSET 0x43
767#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 769#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
768#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 770#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
769#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 771#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
920 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 922 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
921 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 923 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
922 dev_cap->flags = flags | (u64)ext_flags << 32; 924 dev_cap->flags = flags | (u64)ext_flags << 32;
925 MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
926 dev_cap->wol_port[1] = !!(field & 0x20);
927 dev_cap->wol_port[2] = !!(field & 0x40);
923 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 928 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
924 dev_cap->reserved_uars = field >> 4; 929 dev_cap->reserved_uars = field >> 4;
925 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); 930 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 5343a0599253..b52ba01aa486 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -129,6 +129,7 @@ struct mlx4_dev_cap {
129 u32 dmfs_high_rate_qpn_range; 129 u32 dmfs_high_rate_qpn_range;
130 struct mlx4_rate_limit_caps rl_caps; 130 struct mlx4_rate_limit_caps rl_caps;
131 struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; 131 struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
132 bool wol_port[MLX4_MAX_PORTS + 1];
132}; 133};
133 134
134struct mlx4_func_cap { 135struct mlx4_func_cap {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a27c9c13a36e..09b9bc17bce9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
424 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 424 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
425 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 425 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
426 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 426 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
427 dev->caps.wol_port[1] = dev_cap->wol_port[1];
428 dev->caps.wol_port[2] = dev_cap->wol_port[2];
427 429
428 /* Save uar page shift */ 430 /* Save uar page shift */
429 if (!mlx4_is_slave(dev)) { 431 if (!mlx4_is_slave(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 656b2d3f1bee..5eb1606765c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
626 626
627 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 627 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
628 orig_dev); 628 orig_dev);
629 if (WARN_ON(!bridge_port)) 629 if (!bridge_port)
630 return -EINVAL; 630 return 0;
631 631
632 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, 632 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
633 MLXSW_SP_FLOOD_TYPE_UC, 633 MLXSW_SP_FLOOD_TYPE_UC,
@@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
711 711
712 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 712 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
713 orig_dev); 713 orig_dev);
714 if (WARN_ON(!bridge_port)) 714 if (!bridge_port)
715 return -EINVAL; 715 return 0;
716 716
717 if (!bridge_port->bridge_device->multicast_enabled) 717 if (!bridge_port->bridge_device->multicast_enabled)
718 return 0; 718 return 0;
@@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1283 return 0; 1283 return 0;
1284 1284
1285 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1285 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1286 if (WARN_ON(!bridge_port)) 1286 if (!bridge_port)
1287 return -EINVAL; 1287 return 0;
1288 1288
1289 bridge_device = bridge_port->bridge_device; 1289 bridge_device = bridge_port->bridge_device;
1290 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1290 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1291 bridge_device, 1291 bridge_device,
1292 mdb->vid); 1292 mdb->vid);
1293 if (WARN_ON(!mlxsw_sp_port_vlan)) 1293 if (!mlxsw_sp_port_vlan)
1294 return -EINVAL; 1294 return 0;
1295 1295
1296 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1296 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1297 1297
@@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1407 int err = 0; 1407 int err = 0;
1408 1408
1409 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1409 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1410 if (WARN_ON(!bridge_port)) 1410 if (!bridge_port)
1411 return -EINVAL; 1411 return 0;
1412 1412
1413 bridge_device = bridge_port->bridge_device; 1413 bridge_device = bridge_port->bridge_device;
1414 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1414 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1415 bridge_device, 1415 bridge_device,
1416 mdb->vid); 1416 mdb->vid);
1417 if (WARN_ON(!mlxsw_sp_port_vlan)) 1417 if (!mlxsw_sp_port_vlan)
1418 return -EINVAL; 1418 return 0;
1419 1419
1420 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1420 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1421 1421
@@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
1974 1974
1975} 1975}
1976 1976
1977static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
1978{
1979 struct mlxsw_sp_mid *mid, *tmp;
1980
1981 list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
1982 list_del(&mid->list);
1983 clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1984 kfree(mid);
1985 }
1986}
1987
1977int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 1988int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
1978{ 1989{
1979 struct mlxsw_sp_bridge *bridge; 1990 struct mlxsw_sp_bridge *bridge;
@@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
1996void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 2007void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
1997{ 2008{
1998 mlxsw_sp_fdb_fini(mlxsw_sp); 2009 mlxsw_sp_fdb_fini(mlxsw_sp);
1999 WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list)); 2010 mlxsw_sp_mids_fini(mlxsw_sp);
2000 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); 2011 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
2001 kfree(mlxsw_sp->bridge); 2012 kfree(mlxsw_sp->bridge);
2002} 2013}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 18750ff0ede6..4631ca8b8eb2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
513 tx_ring->idx = idx; 513 tx_ring->idx = idx;
514 tx_ring->r_vec = r_vec; 514 tx_ring->r_vec = r_vec;
515 tx_ring->is_xdp = is_xdp; 515 tx_ring->is_xdp = is_xdp;
516 u64_stats_init(&tx_ring->r_vec->tx_sync);
516 517
517 tx_ring->qcidx = tx_ring->idx * nn->stride_tx; 518 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
518 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); 519 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
532 533
533 rx_ring->idx = idx; 534 rx_ring->idx = idx;
534 rx_ring->r_vec = r_vec; 535 rx_ring->r_vec = r_vec;
536 u64_stats_init(&rx_ring->r_vec->rx_sync);
535 537
536 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; 538 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
537 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); 539 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9da91045d167..3eb241657368 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
253 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); 253 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
254 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); 254 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
255 p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); 255 p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
256 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) 256 if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
257 goto err; 257 goto err;
258 258
259 return 0; 259 return 0;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 32279d21c836..c2121d214f08 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -31,9 +31,18 @@
31 31
32#include "cpts.h" 32#include "cpts.h"
33 33
34#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
35
36struct cpts_skb_cb_data {
37 unsigned long tmo;
38};
39
34#define cpts_read32(c, r) readl_relaxed(&c->reg->r) 40#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
35#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) 41#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
36 42
43static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
44 u16 ts_seqid, u8 ts_msgtype);
45
37static int event_expired(struct cpts_event *event) 46static int event_expired(struct cpts_event *event)
38{ 47{
39 return time_after(jiffies, event->tmo); 48 return time_after(jiffies, event->tmo);
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
77 return removed ? 0 : -1; 86 return removed ? 0 : -1;
78} 87}
79 88
89static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
90{
91 struct sk_buff *skb, *tmp;
92 u16 seqid;
93 u8 mtype;
94 bool found = false;
95
96 mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
97 seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
98
99 /* no need to grab txq.lock as access is always done under cpts->lock */
100 skb_queue_walk_safe(&cpts->txq, skb, tmp) {
101 struct skb_shared_hwtstamps ssh;
102 unsigned int class = ptp_classify_raw(skb);
103 struct cpts_skb_cb_data *skb_cb =
104 (struct cpts_skb_cb_data *)skb->cb;
105
106 if (cpts_match(skb, class, seqid, mtype)) {
107 u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
108
109 memset(&ssh, 0, sizeof(ssh));
110 ssh.hwtstamp = ns_to_ktime(ns);
111 skb_tstamp_tx(skb, &ssh);
112 found = true;
113 __skb_unlink(skb, &cpts->txq);
114 dev_consume_skb_any(skb);
115 dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
116 mtype, seqid);
117 } else if (time_after(jiffies, skb_cb->tmo)) {
118 /* timeout any expired skbs over 1s */
119 dev_dbg(cpts->dev,
120 "expiring tx timestamp mtype %u seqid %04x\n",
121 mtype, seqid);
122 __skb_unlink(skb, &cpts->txq);
123 dev_consume_skb_any(skb);
124 }
125 }
126
127 return found;
128}
129
80/* 130/*
81 * Returns zero if matching event type was found. 131 * Returns zero if matching event type was found.
82 */ 132 */
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
101 event->low = lo; 151 event->low = lo;
102 type = event_type(event); 152 type = event_type(event);
103 switch (type) { 153 switch (type) {
154 case CPTS_EV_TX:
155 if (cpts_match_tx_ts(cpts, event)) {
156 /* if the new event matches an existing skb,
157 * then don't queue it
158 */
159 break;
160 }
104 case CPTS_EV_PUSH: 161 case CPTS_EV_PUSH:
105 case CPTS_EV_RX: 162 case CPTS_EV_RX:
106 case CPTS_EV_TX:
107 list_del_init(&event->list); 163 list_del_init(&event->list);
108 list_add_tail(&event->list, &cpts->events); 164 list_add_tail(&event->list, &cpts->events);
109 break; 165 break;
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
224 return -EOPNOTSUPP; 280 return -EOPNOTSUPP;
225} 281}
226 282
283static long cpts_overflow_check(struct ptp_clock_info *ptp)
284{
285 struct cpts *cpts = container_of(ptp, struct cpts, info);
286 unsigned long delay = cpts->ov_check_period;
287 struct timespec64 ts;
288 unsigned long flags;
289
290 spin_lock_irqsave(&cpts->lock, flags);
291 ts = ns_to_timespec64(timecounter_read(&cpts->tc));
292
293 if (!skb_queue_empty(&cpts->txq))
294 delay = CPTS_SKB_TX_WORK_TIMEOUT;
295 spin_unlock_irqrestore(&cpts->lock, flags);
296
297 pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
298 return (long)delay;
299}
300
227static struct ptp_clock_info cpts_info = { 301static struct ptp_clock_info cpts_info = {
228 .owner = THIS_MODULE, 302 .owner = THIS_MODULE,
229 .name = "CPTS timer", 303 .name = "CPTS timer",
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
236 .gettime64 = cpts_ptp_gettime, 310 .gettime64 = cpts_ptp_gettime,
237 .settime64 = cpts_ptp_settime, 311 .settime64 = cpts_ptp_settime,
238 .enable = cpts_ptp_enable, 312 .enable = cpts_ptp_enable,
313 .do_aux_work = cpts_overflow_check,
239}; 314};
240 315
241static void cpts_overflow_check(struct work_struct *work)
242{
243 struct timespec64 ts;
244 struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
245
246 cpts_ptp_gettime(&cpts->info, &ts);
247 pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
248 schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
249}
250
251static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, 316static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
252 u16 ts_seqid, u8 ts_msgtype) 317 u16 ts_seqid, u8 ts_msgtype)
253{ 318{
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
299 return 0; 364 return 0;
300 365
301 spin_lock_irqsave(&cpts->lock, flags); 366 spin_lock_irqsave(&cpts->lock, flags);
302 cpts_fifo_read(cpts, CPTS_EV_PUSH); 367 cpts_fifo_read(cpts, -1);
303 list_for_each_safe(this, next, &cpts->events) { 368 list_for_each_safe(this, next, &cpts->events) {
304 event = list_entry(this, struct cpts_event, list); 369 event = list_entry(this, struct cpts_event, list);
305 if (event_expired(event)) { 370 if (event_expired(event)) {
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
317 break; 382 break;
318 } 383 }
319 } 384 }
385
386 if (ev_type == CPTS_EV_TX && !ns) {
387 struct cpts_skb_cb_data *skb_cb =
388 (struct cpts_skb_cb_data *)skb->cb;
389 /* Not found, add frame to queue for processing later.
390 * The periodic FIFO check will handle this.
391 */
392 skb_get(skb);
393 /* get the timestamp for timeouts */
394 skb_cb->tmo = jiffies + msecs_to_jiffies(100);
395 __skb_queue_tail(&cpts->txq, skb);
396 ptp_schedule_worker(cpts->clock, 0);
397 }
320 spin_unlock_irqrestore(&cpts->lock, flags); 398 spin_unlock_irqrestore(&cpts->lock, flags);
321 399
322 return ns; 400 return ns;
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
358{ 436{
359 int err, i; 437 int err, i;
360 438
439 skb_queue_head_init(&cpts->txq);
361 INIT_LIST_HEAD(&cpts->events); 440 INIT_LIST_HEAD(&cpts->events);
362 INIT_LIST_HEAD(&cpts->pool); 441 INIT_LIST_HEAD(&cpts->pool);
363 for (i = 0; i < CPTS_MAX_EVENTS; i++) 442 for (i = 0; i < CPTS_MAX_EVENTS; i++)
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
378 } 457 }
379 cpts->phc_index = ptp_clock_index(cpts->clock); 458 cpts->phc_index = ptp_clock_index(cpts->clock);
380 459
381 schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); 460 ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
382 return 0; 461 return 0;
383 462
384err_ptp: 463err_ptp:
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
392 if (WARN_ON(!cpts->clock)) 471 if (WARN_ON(!cpts->clock))
393 return; 472 return;
394 473
395 cancel_delayed_work_sync(&cpts->overflow_work);
396
397 ptp_clock_unregister(cpts->clock); 474 ptp_clock_unregister(cpts->clock);
398 cpts->clock = NULL; 475 cpts->clock = NULL;
399 476
400 cpts_write32(cpts, 0, int_enable); 477 cpts_write32(cpts, 0, int_enable);
401 cpts_write32(cpts, 0, control); 478 cpts_write32(cpts, 0, control);
402 479
480 /* Drop all packets */
481 skb_queue_purge(&cpts->txq);
482
403 clk_disable(cpts->refclk); 483 clk_disable(cpts->refclk);
404} 484}
405EXPORT_SYMBOL_GPL(cpts_unregister); 485EXPORT_SYMBOL_GPL(cpts_unregister);
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
476 cpts->dev = dev; 556 cpts->dev = dev;
477 cpts->reg = (struct cpsw_cpts __iomem *)regs; 557 cpts->reg = (struct cpsw_cpts __iomem *)regs;
478 spin_lock_init(&cpts->lock); 558 spin_lock_init(&cpts->lock);
479 INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
480 559
481 ret = cpts_of_parse(cpts, node); 560 ret = cpts_of_parse(cpts, node);
482 if (ret) 561 if (ret)
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 01ea82ba9cdc..73d73faf0f38 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -119,13 +119,13 @@ struct cpts {
119 u32 cc_mult; /* for the nominal frequency */ 119 u32 cc_mult; /* for the nominal frequency */
120 struct cyclecounter cc; 120 struct cyclecounter cc;
121 struct timecounter tc; 121 struct timecounter tc;
122 struct delayed_work overflow_work;
123 int phc_index; 122 int phc_index;
124 struct clk *refclk; 123 struct clk *refclk;
125 struct list_head events; 124 struct list_head events;
126 struct list_head pool; 125 struct list_head pool;
127 struct cpts_event pool_data[CPTS_MAX_EVENTS]; 126 struct cpts_event pool_data[CPTS_MAX_EVENTS];
128 unsigned long ov_check_period; 127 unsigned long ov_check_period;
128 struct sk_buff_head txq;
129}; 129};
130 130
131void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); 131void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
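
The cpts conversion above is built on the new PTP auxiliary worker added later in this patch (see the ptp_clock.c and ptp_clock_kernel.h hunks): the driver drops its private delayed_work and instead supplies a do_aux_work callback whose return value drives rescheduling. A minimal sketch of the contract, with illustrative names:

    static long my_aux_work(struct ptp_clock_info *info)
    {
            /* poll hardware, flush pending timestamps, ... */
            return HZ;      /* >= 0: run again after this many jiffies;
                             * a negative return means do not reschedule */
    }

    static struct ptp_clock_info my_info = {
            .owner       = THIS_MODULE,
            .do_aux_work = my_aux_work,
            /* ... */
    };

    /* after ptp_clock_register() succeeds, kick the worker at once: */
    ptp_schedule_worker(clock, 0);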
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 1542e837fdfa..f38e32a7ec9c 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev)
364 364
365 gtp->dev = dev; 365 gtp->dev = dev;
366 366
367 dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 367 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
368 if (!dev->tstats) 368 if (!dev->tstats)
369 return -ENOMEM; 369 return -ENOMEM;
370 370
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d6c25580f8dd..12cc64bfcff8 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -765,7 +765,8 @@ struct netvsc_device {
765 u32 max_chn; 765 u32 max_chn;
766 u32 num_chn; 766 u32 num_chn;
767 767
768 refcount_t sc_offered; 768 atomic_t open_chn;
769 wait_queue_head_t subchan_open;
769 770
770 struct rndis_device *extension; 771 struct rndis_device *extension;
771 772
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0a9167dd72fb..d18c3326a1f7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void)
78 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 78 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
79 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 79 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
80 init_completion(&net_device->channel_init_wait); 80 init_completion(&net_device->channel_init_wait);
81 init_waitqueue_head(&net_device->subchan_open);
81 82
82 return net_device; 83 return net_device;
83} 84}
@@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device,
1302 struct netvsc_channel *nvchan = &net_device->chan_table[i]; 1303 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1303 1304
1304 nvchan->channel = device->channel; 1305 nvchan->channel = device->channel;
1306 u64_stats_init(&nvchan->tx_stats.syncp);
1307 u64_stats_init(&nvchan->rx_stats.syncp);
1305 } 1308 }
1306 1309
1307 /* Enable NAPI handler before init callbacks */ 1310 /* Enable NAPI handler before init callbacks */
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 85c00e1c52b6..d6308ffda53e 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
1048 else 1048 else
1049 netif_napi_del(&nvchan->napi); 1049 netif_napi_del(&nvchan->napi);
1050 1050
1051 if (refcount_dec_and_test(&nvscdev->sc_offered)) 1051 atomic_inc(&nvscdev->open_chn);
1052 complete(&nvscdev->channel_init_wait); 1052 wake_up(&nvscdev->subchan_open);
1053} 1053}
1054 1054
1055int rndis_filter_device_add(struct hv_device *dev, 1055int rndis_filter_device_add(struct hv_device *dev,
@@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev,
1090 net_device->max_chn = 1; 1090 net_device->max_chn = 1;
1091 net_device->num_chn = 1; 1091 net_device->num_chn = 1;
1092 1092
1093 refcount_set(&net_device->sc_offered, 0);
1094
1095 net_device->extension = rndis_device; 1093 net_device->extension = rndis_device;
1096 rndis_device->ndev = net; 1094 rndis_device->ndev = net;
1097 1095
@@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev,
1221 rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, 1219 rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
1222 net_device->num_chn); 1220 net_device->num_chn);
1223 1221
1222 atomic_set(&net_device->open_chn, 1);
1224 num_rss_qs = net_device->num_chn - 1; 1223 num_rss_qs = net_device->num_chn - 1;
1225 if (num_rss_qs == 0) 1224 if (num_rss_qs == 0)
1226 return 0; 1225 return 0;
1227 1226
1228 refcount_set(&net_device->sc_offered, num_rss_qs);
1229 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); 1227 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1230 1228
1231 init_packet = &net_device->channel_init_pkt; 1229 init_packet = &net_device->channel_init_pkt;
@@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev,
1242 if (ret) 1240 if (ret)
1243 goto out; 1241 goto out;
1244 1242
1243 wait_for_completion(&net_device->channel_init_wait);
1245 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { 1244 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1246 ret = -ENODEV; 1245 ret = -ENODEV;
1247 goto out; 1246 goto out;
1248 } 1247 }
1249 wait_for_completion(&net_device->channel_init_wait);
1250 1248
1251 net_device->num_chn = 1 + 1249 net_device->num_chn = 1 +
1252 init_packet->msg.v5_msg.subchn_comp.num_subchannels; 1250 init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1253 1251
1252 /* wait for all sub channels to open */
1253 wait_event(net_device->subchan_open,
1254 atomic_read(&net_device->open_chn) == net_device->num_chn);
1255
1254 /* ignore failures from setting rss parameters, still have channels */ 1256 /* ignore failures from setting rss parameters, still have channels */
1255 rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, 1257 rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
1256 net_device->num_chn); 1258 net_device->num_chn);
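
The move above from refcount_t plus a completion to atomic_t plus a waitqueue closes a startup race: the final channel count is only known after the host's subchannel response, so the driver now counts channels as they actually open and sleeps until the count reaches the negotiated num_chn. The pattern in isolation (illustrative):

    /* per-subchannel open callback (producer): */
    atomic_inc(&nvdev->open_chn);
    wake_up(&nvdev->subchan_open);

    /* device add path (consumer), once num_chn is known: */
    wait_event(nvdev->subchan_open,
               atomic_read(&nvdev->open_chn) == nvdev->num_chn);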
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f37e3c1fd4e7..8dab74a81303 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev)
192 192
193 netdev_lockdep_set_classes(dev); 193 netdev_lockdep_set_classes(dev);
194 194
195 ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); 195 ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
196 if (!ipvlan->pcpu_stats) 196 if (!ipvlan->pcpu_stats)
197 return -ENOMEM; 197 return -ENOMEM;
198 198
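
netdev_alloc_pcpu_stats(), swapped in for raw alloc_percpu() in the gtp and ipvlan hunks, also runs u64_stats_init() on every CPU's syncp, the same class of fix as the explicit calls elsewhere in this series. It expands to roughly the following (simplified sketch):

    struct pcpu_sw_netstats __percpu *stats;
    int cpu;

    stats = alloc_percpu(struct pcpu_sw_netstats);
    if (stats)
            for_each_possible_cpu(cpu)
                    u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);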
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index bd4303944e44..a404552555d4 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch)
1915 spin_unlock(&pch->downl); 1915 spin_unlock(&pch->downl);
1916 /* see if there is anything from the attached unit to be sent */ 1916 /* see if there is anything from the attached unit to be sent */
1917 if (skb_queue_empty(&pch->file.xq)) { 1917 if (skb_queue_empty(&pch->file.xq)) {
1918 read_lock(&pch->upl);
1919 ppp = pch->ppp; 1918 ppp = pch->ppp;
1920 if (ppp) 1919 if (ppp)
1921 ppp_xmit_process(ppp); 1920 __ppp_xmit_process(ppp);
1922 read_unlock(&pch->upl);
1923 } 1921 }
1924} 1922}
1925 1923
1926static void ppp_channel_push(struct channel *pch) 1924static void ppp_channel_push(struct channel *pch)
1927{ 1925{
1928 local_bh_disable(); 1926 read_lock_bh(&pch->upl);
1929 1927 if (pch->ppp) {
1930 __ppp_channel_push(pch); 1928 (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
1931 1929 __ppp_channel_push(pch);
1932 local_bh_enable(); 1930 (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
1931 } else {
1932 __ppp_channel_push(pch);
1933 }
1934 read_unlock_bh(&pch->upl);
1933} 1935}
1934 1936
1935/* 1937/*
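
The increment/decrement bracket around __ppp_channel_push() above feeds ppp's per-cpu xmit recursion detection. As a generic, self-contained sketch of that pattern, with a name and limit that are illustrative rather than ppp's actual code:

    static DEFINE_PER_CPU(int, xmit_recursion);
    #define XMIT_RECURSION_LIMIT 8          /* assumed value, for illustration */

    static void xmit_one(struct sk_buff *skb)
    {
            if (__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT) {
                    kfree_skb(skb);         /* refuse to recurse any deeper */
                    return;
            }
            __this_cpu_inc(xmit_recursion);
            /* ... transmit; a stacked device may re-enter xmit_one() ... */
            __this_cpu_dec(xmit_recursion);
    }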
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index d1092421aaa7..9a4171b90947 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
209int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, 209int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
210 struct asix_rx_fixup_info *rx); 210 struct asix_rx_fixup_info *rx);
211int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); 211int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
212void asix_rx_fixup_common_free(struct asix_common_private *dp);
212 213
213struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 214struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
214 gfp_t flags); 215 gfp_t flags);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7847436c441e..522d2900cd1d 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
75 value, index, data, size); 75 value, index, data, size);
76} 76}
77 77
78static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
79{
80 /* Reset the variables that have a lifetime outside of
81 * asix_rx_fixup_internal() so that future processing starts from a
82 * known set of initial conditions.
83 */
84
85 if (rx->ax_skb) {
86 /* Discard any incomplete Ethernet frame in the netdev buffer */
87 kfree_skb(rx->ax_skb);
88 rx->ax_skb = NULL;
89 }
90
91 /* Assume the Data header 32-bit word is at the start of the current
92 * or next URB socket buffer so reset all the state variables.
93 */
94 rx->remaining = 0;
95 rx->split_head = false;
96 rx->header = 0;
97}
98
78int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, 99int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
79 struct asix_rx_fixup_info *rx) 100 struct asix_rx_fixup_info *rx)
80{ 101{
@@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
99 if (size != ((~rx->header >> 16) & 0x7ff)) { 120 if (size != ((~rx->header >> 16) & 0x7ff)) {
100 netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", 121 netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
101 rx->remaining); 122 rx->remaining);
102 if (rx->ax_skb) { 123 reset_asix_rx_fixup_info(rx);
103 kfree_skb(rx->ax_skb);
104 rx->ax_skb = NULL;
105 /* Discard the incomplete netdev Ethernet frame
106 * and assume the Data header is at the start of
107 * the current URB socket buffer.
108 */
109 }
110 rx->remaining = 0;
111 } 124 }
112 } 125 }
113 126
@@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
139 if (size != ((~rx->header >> 16) & 0x7ff)) { 152 if (size != ((~rx->header >> 16) & 0x7ff)) {
140 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", 153 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
141 rx->header, offset); 154 rx->header, offset);
155 reset_asix_rx_fixup_info(rx);
142 return 0; 156 return 0;
143 } 157 }
144 if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { 158 if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
145 netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", 159 netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
146 size); 160 size);
161 reset_asix_rx_fixup_info(rx);
147 return 0; 162 return 0;
148 } 163 }
149 164
@@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
168 if (rx->ax_skb) { 183 if (rx->ax_skb) {
169 skb_put_data(rx->ax_skb, skb->data + offset, 184 skb_put_data(rx->ax_skb, skb->data + offset,
170 copy_length); 185 copy_length);
171 if (!rx->remaining) 186 if (!rx->remaining) {
172 usbnet_skb_return(dev, rx->ax_skb); 187 usbnet_skb_return(dev, rx->ax_skb);
188 rx->ax_skb = NULL;
189 }
173 } 190 }
174 191
175 offset += (copy_length + 1) & 0xfffe; 192 offset += (copy_length + 1) & 0xfffe;
@@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
178 if (skb->len != offset) { 195 if (skb->len != offset) {
179 netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", 196 netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
180 skb->len, offset); 197 skb->len, offset);
198 reset_asix_rx_fixup_info(rx);
181 return 0; 199 return 0;
182 } 200 }
183 201
@@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
192 return asix_rx_fixup_internal(dev, skb, rx); 210 return asix_rx_fixup_internal(dev, skb, rx);
193} 211}
194 212
213void asix_rx_fixup_common_free(struct asix_common_private *dp)
214{
215 struct asix_rx_fixup_info *rx;
216
217 if (!dp)
218 return;
219
220 rx = &dp->rx_fixup_info;
221
222 if (rx->ax_skb) {
223 kfree_skb(rx->ax_skb);
224 rx->ax_skb = NULL;
225 }
226}
227
195struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 228struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
196 gfp_t flags) 229 gfp_t flags)
197{ 230{
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index a3aa0a27dfe5..b2ff88e69a81 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
764 764
765static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) 765static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
766{ 766{
767 asix_rx_fixup_common_free(dev->driver_priv);
767 kfree(dev->driver_priv); 768 kfree(dev->driver_priv);
768} 769}
769 770
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 5833f7e2a127..b99a7fb09f8e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
2367 /* Init LTM */ 2367 /* Init LTM */
2368 lan78xx_init_ltm(dev); 2368 lan78xx_init_ltm(dev);
2369 2369
2370 dev->net->hard_header_len += TX_OVERHEAD;
2371 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2372
2373 if (dev->udev->speed == USB_SPEED_SUPER) { 2370 if (dev->udev->speed == USB_SPEED_SUPER) {
2374 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; 2371 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2375 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; 2372 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
@@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2855 return ret; 2852 return ret;
2856 } 2853 }
2857 2854
2855 dev->net->hard_header_len += TX_OVERHEAD;
2856 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2857
2858 /* Init all registers */ 2858 /* Init all registers */
2859 ret = lan78xx_reset(dev); 2859 ret = lan78xx_reset(dev);
2860 2860
2861 lan78xx_mdio_init(dev); 2861 ret = lan78xx_mdio_init(dev);
2862 2862
2863 dev->net->flags |= IFF_MULTICAST; 2863 dev->net->flags |= IFF_MULTICAST;
2864 2864
2865 pdata->wol = WAKE_MAGIC; 2865 pdata->wol = WAKE_MAGIC;
2866 2866
2867 return 0; 2867 return ret;
2868} 2868}
2869 2869
2870static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) 2870static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
@@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf,
3525 udev = interface_to_usbdev(intf); 3525 udev = interface_to_usbdev(intf);
3526 udev = usb_get_dev(udev); 3526 udev = usb_get_dev(udev);
3527 3527
3528 ret = -ENOMEM;
3529 netdev = alloc_etherdev(sizeof(struct lan78xx_net)); 3528 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3530 if (!netdev) { 3529 if (!netdev) {
3531 dev_err(&intf->dev, "Error: OOM\n"); 3530 dev_err(&intf->dev, "Error: OOM\n");
3532 goto out1; 3531 ret = -ENOMEM;
3532 goto out1;
3533 } 3533 }
3534 3534
3535 /* netdev_printk() needs this */ 3535 /* netdev_printk() needs this */
@@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf,
3610 ret = register_netdev(netdev); 3610 ret = register_netdev(netdev);
3611 if (ret != 0) { 3611 if (ret != 0) {
3612 netif_err(dev, probe, netdev, "couldn't register the device\n"); 3612 netif_err(dev, probe, netdev, "couldn't register the device\n");
3613 goto out2; 3613 goto out3;
3614 } 3614 }
3615 3615
3616 usb_set_intfdata(intf, dev); 3616 usb_set_intfdata(intf, dev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5894e3c9468f..8c3733608271 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
 	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
+	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},	/* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},	/* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
@@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
 static void qmi_wwan_disconnect(struct usb_interface *intf)
 {
 	struct usbnet *dev = usb_get_intfdata(intf);
-	struct qmi_wwan_state *info = (void *)&dev->data;
+	struct qmi_wwan_state *info;
 	struct list_head *iter;
 	struct net_device *ldev;
 
+	/* called twice if separate control and data intf */
+	if (!dev)
+		return;
+	info = (void *)&dev->data;
 	if (info->flags & QMI_WWAN_FLAG_MUX) {
 		if (!rtnl_trylock()) {
 			restart_syscall();
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 96aa7e6cf214..e17baac70f43 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
 
 out:
 	skb_gro_remcsum_cleanup(skb, &grc);
+	skb->remcsum_offload = 0;
 	NAPI_GRO_CB(skb)->flush |= flush;
 
 	return pp;
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index b77435783ef3..7eacc1c4b3b1 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <uapi/linux/sched/types.h>
 
 #include "ptp_private.h"
 
@@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc)
 	kfree(ptp);
 }
 
+static void ptp_aux_kworker(struct kthread_work *work)
+{
+	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
+					     aux_work.work);
+	struct ptp_clock_info *info = ptp->info;
+	long delay;
+
+	delay = info->do_aux_work(info);
+
+	if (delay >= 0)
+		kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+
 /* public interface */
 
 struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 	mutex_init(&ptp->pincfg_mux);
 	init_waitqueue_head(&ptp->tsev_wq);
 
+	if (ptp->info->do_aux_work) {
+		char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
+
+		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
+		ptp->kworker = kthread_create_worker(0, worker_name ?
+						     worker_name : info->name);
+		kfree(worker_name);
+		if (IS_ERR(ptp->kworker)) {
+			err = PTR_ERR(ptp->kworker);
+			pr_err("failed to create ptp aux_worker %d\n", err);
+			goto kworker_err;
+		}
+	}
+
 	err = ptp_populate_pin_groups(ptp);
 	if (err)
 		goto no_pin_groups;
@@ -259,6 +287,9 @@ no_pps:
 no_device:
 	ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
+	if (ptp->kworker)
+		kthread_destroy_worker(ptp->kworker);
+kworker_err:
 	mutex_destroy(&ptp->tsevq_mux);
 	mutex_destroy(&ptp->pincfg_mux);
 	ida_simple_remove(&ptp_clocks_map, index);
@@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
 	ptp->defunct = 1;
 	wake_up_interruptible(&ptp->tsev_wq);
 
+	if (ptp->kworker) {
+		kthread_cancel_delayed_work_sync(&ptp->aux_work);
+		kthread_destroy_worker(ptp->kworker);
+	}
+
 	/* Release the clock's resources. */
 	if (ptp->pps_source)
 		pps_unregister_source(ptp->pps_source);
@@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp,
 }
 EXPORT_SYMBOL(ptp_find_pin);
 
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
+{
+	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+EXPORT_SYMBOL(ptp_schedule_worker);
+
 /* module operations */
 
 static void __exit ptp_exit(void)
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index d95888974d0c..b86f1bfecd6f 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -22,6 +22,7 @@
 
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/posix-clock.h>
 #include <linux/ptp_clock.h>
@@ -56,6 +57,8 @@ struct ptp_clock {
 	struct attribute_group pin_attr_group;
 	/* 1st entry is a pointer to the real group, 2nd is NULL terminator */
 	const struct attribute_group *pin_attr_groups[2];
+	struct kthread_worker *kworker;
+	struct kthread_delayed_work aux_work;
 };
 
 /*
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8975cd321390..d42e758518ed 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		struct rtable *rt = (struct rtable *) dst;
 		__be32 *pkey = &ip_hdr(skb)->daddr;
 
-		if (rt->rt_gateway)
+		if (rt && rt->rt_gateway)
 			pkey = &rt->rt_gateway;
 
 		/* IPv4 */
@@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		struct rt6_info *rt = (struct rt6_info *) dst;
 		struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
 
-		if (!ipv6_addr_any(&rt->rt6i_gateway))
+		if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
 			pkey = &rt->rt6i_gateway;
 
 		/* IPv6 */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index aad5d81dfb44..b54517c05e9a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -620,6 +620,7 @@ struct mlx4_caps {
 	u32 dmfs_high_rate_qpn_base;
 	u32 dmfs_high_rate_qpn_range;
 	u32 vf_caps;
+	bool wol_port[MLX4_MAX_PORTS + 1];
 	struct mlx4_rate_limit_caps rl_caps;
 };
 
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index a026bfd089db..51349d124ee5 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -99,6 +99,11 @@ struct system_device_crosststamp;
  * parameter func: the desired function to use.
  * parameter chan: the function channel index to use.
  *
+ * @do_aux_work:  Request driver to perform auxiliary (periodic) operations
+ *                Driver should return delay of the next auxiliary work
+ *                scheduling time (>=0) or negative value in case further
+ *                scheduling is not required.
+ *
  * Drivers should embed their ptp_clock_info within a private
  * structure, obtaining a reference to it using container_of().
  *
@@ -126,6 +131,7 @@ struct ptp_clock_info {
 		      struct ptp_clock_request *request, int on);
 	int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
 		      enum ptp_pin_function func, unsigned int chan);
+	long (*do_aux_work)(struct ptp_clock_info *ptp);
 };
 
 struct ptp_clock;
@@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
 int ptp_find_pin(struct ptp_clock *ptp,
 		 enum ptp_pin_function func, unsigned int chan);
 
+/**
+ * ptp_schedule_worker() - schedule ptp auxiliary work
+ *
+ * @ptp:    The clock obtained from ptp_clock_register().
+ * @delay:  number of jiffies to wait before queuing
+ *          See kthread_queue_delayed_work() for more info.
+ */
+
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
+
 #else
 static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 						   struct device *parent)
@@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
 static inline int ptp_find_pin(struct ptp_clock *ptp,
 			       enum ptp_pin_function func, unsigned int chan)
 { return -1; }
+static inline int ptp_schedule_worker(struct ptp_clock *ptp,
+				      unsigned long delay)
+{ return -EOPNOTSUPP; }
+
 #endif
 
 #endif
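
For context on the interface introduced above: a driver that needs periodic housekeeping (for example, draining a hardware timestamp FIFO) sets do_aux_work in its ptp_clock_info and re-arms itself through the returned delay; from interrupt context it can kick the worker immediately with ptp_schedule_worker(). A minimal sketch of how a hypothetical driver might wire this up (my_priv, my_do_aux_work and the one-second period are illustrative, not part of this patch):

    /* hedged sketch of a hypothetical consumer of do_aux_work */
    struct my_priv {
    	struct ptp_clock_info caps;
    	struct ptp_clock *clock;
    };

    static long my_do_aux_work(struct ptp_clock_info *info)
    {
    	struct my_priv *priv = container_of(info, struct my_priv, caps);

    	/* driver-specific periodic work would go here */
    	pr_debug("%s: aux work ran\n", priv->caps.name);

    	return HZ;	/* run again in one second; a negative value stops rescheduling */
    }

    /* at probe:  priv->caps.do_aux_work = my_do_aux_work;  then ptp_clock_register().
     * from an IRQ handler, to run the worker right away:
     *	ptp_schedule_worker(priv->clock, 0);
     */
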
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70483296157f..ada65e767b28 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1916,6 +1916,16 @@ extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
 			     u64 xmit_time);
 extern void tcp_rack_reo_timeout(struct sock *sk);
 
+/* At how many usecs into the future should the RTO fire? */
+static inline s64 tcp_rto_delta_us(const struct sock *sk)
+{
+	const struct sk_buff *skb = tcp_write_queue_head(sk);
+	u32 rto = inet_csk(sk)->icsk_rto;
+	u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+
+	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+}
+
 /*
  * Save and compile IPv4 options, return a pointer to it
  */
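
The helper added above answers "how far in the future should the RTO fire?", measured from when the head of the write queue was sent; a non-positive result means the timer is already overdue. A standalone sketch of the same arithmetic with plain integers (the parameter names are simplified stand-ins for skb->skb_mstamp, icsk_rto and tp->tcp_mstamp):

    #include <stdint.h>
    #include <stdio.h>

    /* delta = (time head was sent + RTO) - now, all in microseconds */
    static int64_t rto_delta_us(uint64_t head_sent_us, uint64_t rto_us,
    			    uint64_t now_us)
    {
    	return (int64_t)(head_sent_us + rto_us - now_us);
    }

    int main(void)
    {
    	/* head transmitted at t=1000000us, RTO of 200ms, now t=1150000us */
    	int64_t d = rto_delta_us(1000000, 200000, 1150000);

    	printf("delta = %lld us\n", (long long)d);	/* 50000us remain */
    	return 0;
    }
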
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index e1133bc634b5..8a3ce79b1307 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1549,9 +1549,41 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
 	return found;
 }
 
+/**
+ * batadv_tt_global_sync_flags - update TT sync flags
+ * @tt_global: the TT global entry to update sync flags in
+ *
+ * Updates the sync flag bits in the tt_global flag attribute with a logical
+ * OR of all sync flags from any of its TT orig entries.
+ */
+static void
+batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
+{
+	struct batadv_tt_orig_list_entry *orig_entry;
+	const struct hlist_head *head;
+	u16 flags = BATADV_NO_FLAGS;
+
+	rcu_read_lock();
+	head = &tt_global->orig_list;
+	hlist_for_each_entry_rcu(orig_entry, head, list)
+		flags |= orig_entry->flags;
+	rcu_read_unlock();
+
+	flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
+	tt_global->common.flags = flags;
+}
+
+/**
+ * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * @tt_global: the TT global entry to add an orig entry in
+ * @orig_node: the originator to add an orig entry for
+ * @ttvn: translation table version number of this changeset
+ * @flags: TT sync flags
+ */
 static void
 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
-				struct batadv_orig_node *orig_node, int ttvn)
+				struct batadv_orig_node *orig_node, int ttvn,
+				u8 flags)
 {
 	struct batadv_tt_orig_list_entry *orig_entry;
 
@@ -1561,7 +1593,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 		 * was added during a "temporary client detection"
 		 */
 		orig_entry->ttvn = ttvn;
-		goto out;
+		orig_entry->flags = flags;
+		goto sync_flags;
 	}
 
 	orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
@@ -1573,6 +1606,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 	batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
 	orig_entry->orig_node = orig_node;
 	orig_entry->ttvn = ttvn;
+	orig_entry->flags = flags;
 	kref_init(&orig_entry->refcount);
 
 	spin_lock_bh(&tt_global->list_lock);
@@ -1582,6 +1616,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 	spin_unlock_bh(&tt_global->list_lock);
 	atomic_inc(&tt_global->orig_list_count);
 
+sync_flags:
+	batadv_tt_global_sync_flags(tt_global);
 out:
 	if (orig_entry)
 		batadv_tt_orig_list_entry_put(orig_entry);
@@ -1703,10 +1739,10 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
 	}
 
 		/* the change can carry possible "attribute" flags like the
-		 * TT_CLIENT_WIFI, therefore they have to be copied in the
+		 * TT_CLIENT_TEMP, therefore they have to be copied in the
 		 * client entry
 		 */
-		common->flags |= flags;
+		common->flags |= flags & (~BATADV_TT_SYNC_MASK);
 
 		/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
 		 * one originator left in the list and we previously received a
@@ -1723,7 +1759,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
 	}
 add_orig_entry:
 	/* add the new orig_entry (if needed) or update it */
-	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
+	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
+					flags & BATADV_TT_SYNC_MASK);
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
@@ -1946,6 +1983,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 			       struct batadv_tt_orig_list_entry *orig,
 			       bool best)
 {
+	u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
 	void *hdr;
 	struct batadv_orig_node_vlan *vlan;
 	u8 last_ttvn;
@@ -1975,7 +2013,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 	    nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
 	    nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
 	    nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
-	    nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
+	    nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
 		goto nla_put_failure;
 
 	if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
@@ -2589,6 +2627,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
 				unsigned short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
+	struct batadv_tt_orig_list_entry *tt_orig;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_global_entry *tt_global;
 	struct hlist_head *head;
@@ -2627,8 +2666,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
 			/* find out if this global entry is announced by this
 			 * originator
 			 */
-			if (!batadv_tt_global_entry_has_orig(tt_global,
-							     orig_node))
+			tt_orig = batadv_tt_global_orig_entry_find(tt_global,
+								   orig_node);
+			if (!tt_orig)
 				continue;
 
 			/* use network order to read the VID: this ensures that
@@ -2640,10 +2680,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
 			/* compute the CRC on flags that have to be kept in sync
 			 * among nodes
 			 */
-			flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+			flags = tt_orig->flags;
 			crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
 
 			crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
+
+			batadv_tt_orig_list_entry_put(tt_orig);
 		}
 		rcu_read_unlock();
 	}
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ea43a6449247..a62795868794 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1260,6 +1260,7 @@ struct batadv_tt_global_entry {
  * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
  * @orig_node: pointer to orig node announcing this non-mesh client
  * @ttvn: translation table version number which added the non-mesh client
+ * @flags: per orig entry TT sync flags
  * @list: list node for batadv_tt_global_entry::orig_list
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
@@ -1267,6 +1268,7 @@ struct batadv_tt_global_entry {
 struct batadv_tt_orig_list_entry {
 	struct batadv_orig_node *orig_node;
 	u8 ttvn;
+	u8 flags;
 	struct hlist_node list;
 	struct kref refcount;
 	struct rcu_head rcu;
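
The idea behind the per-originator flags field added above: an entry's synchronized flag bits become the logical OR of what every announcing originator reported, while the non-sync bits stay on the common entry. A small userspace model of that aggregation (the mask value, bit values and three-originator layout are illustrative, not batman-adv's real constants):

    #include <stdint.h>
    #include <stdio.h>

    #define TT_SYNC_MASK 0x00F0	/* illustrative mask for the synced flag bits */

    int main(void)
    {
    	/* sync flags reported by three originators for the same client */
    	uint16_t orig_flags[] = { 0x0010, 0x0040, 0x0000 };
    	uint16_t common_flags = 0x0301;	/* includes non-sync bits */
    	uint16_t flags = 0;
    	unsigned int i;

    	for (i = 0; i < 3; i++)		/* OR of all per-orig sync flags */
    		flags |= orig_flags[i];

    	flags |= common_flags & ~TT_SYNC_MASK;	/* keep the non-sync bits */

    	printf("merged flags = 0x%04x\n", flags);	/* prints 0x0351 */
    	return 0;
    }
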
diff --git a/net/core/dev.c b/net/core/dev.c
index 8515f8fe0460..ce15a06d5558 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2739,7 +2739,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
 	if (tx_path)
 		return skb->ip_summed != CHECKSUM_PARTIAL &&
-		       skb->ip_summed != CHECKSUM_NONE;
+		       skb->ip_summed != CHECKSUM_UNNECESSARY;
 
 	return skb->ip_summed == CHECKSUM_NONE;
 }
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index c4c6e1969ed0..2ae8f54cb321 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
 	int taglen;
 
 	for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
-		if (optptr[0] == IPOPT_CIPSO)
+		switch (optptr[0]) {
+		case IPOPT_CIPSO:
 			return optptr;
-		taglen = optptr[1];
+		case IPOPT_END:
+			return NULL;
+		case IPOPT_NOOP:
+			taglen = 1;
+			break;
+		default:
+			taglen = optptr[1];
+		}
 		optlen -= taglen;
 		optptr += taglen;
 	}
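
The switch added above matters because IPOPT_END and IPOPT_NOOP are single-byte IPv4 options with no length field: reading optptr[1] for them mis-steps the walk, and for IPOPT_END reads past the end of the option list. A userspace sketch of the corrected walk over a raw options buffer (constants inlined so the example is self-contained):

    #include <stddef.h>
    #include <stdio.h>

    #define IPOPT_END   0	/* single byte, terminates the option list */
    #define IPOPT_NOOP  1	/* single byte, no length field */
    #define IPOPT_CIPSO 134

    static const unsigned char *find_cipso(const unsigned char *optptr, int optlen)
    {
    	int taglen;

    	while (optlen > 0) {
    		switch (optptr[0]) {
    		case IPOPT_CIPSO:
    			return optptr;
    		case IPOPT_END:
    			return NULL;
    		case IPOPT_NOOP:
    			taglen = 1;
    			break;
    		default:
    			taglen = optptr[1];	/* TLV options carry a length byte */
    		}
    		optlen -= taglen;
    		optptr += taglen;
    	}
    	return NULL;
    }

    int main(void)
    {
    	/* two NOOP padding bytes followed by a 6-byte CIPSO option */
    	unsigned char opts[] = { IPOPT_NOOP, IPOPT_NOOP,
    				 IPOPT_CIPSO, 6, 0, 0, 0, 1 };
    	const unsigned char *p = find_cipso(opts, sizeof(opts));

    	if (p)
    		printf("cipso at offset %td\n", p - opts);	/* prints 2 */
    	return 0;
    }
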
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 8e0257d01200..1540db65241a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -450,6 +450,7 @@ out_unlock:
 out:
 	NAPI_GRO_CB(skb)->flush |= flush;
 	skb_gro_remcsum_cleanup(skb, &grc);
+	skb->remcsum_offload = 0;
 
 	return pp;
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2920e0cb09f8..53de1424c13c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -107,6 +107,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked */
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 #define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
+#define FLAG_SET_XMIT_TIMER	0x1000 /* Set TLP or RTO timer */
 #define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
 #define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
 #define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack() */
@@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 		return;
 
 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
 		tp->snd_cwnd = tp->snd_ssthresh;
 		tp->snd_cwnd_stamp = tcp_jiffies32;
 	}
@@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk)
 		/* Offset the time elapsed after installing regular RTO */
 		if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
 		    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
-			struct sk_buff *skb = tcp_write_queue_head(sk);
-			u64 rto_time_stamp = skb->skb_mstamp +
-					     jiffies_to_usecs(rto);
-			s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
+			s64 delta_us = tcp_rto_delta_us(sk);
 			/* delta_us may not be positive if the socket is locked
 			 * when the retrans timer fires and is rescheduled.
 			 */
@@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk)
 	}
 }
 
+/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
+static void tcp_set_xmit_timer(struct sock *sk)
+{
+	if (!tcp_schedule_loss_probe(sk))
+		tcp_rearm_rto(sk);
+}
+
 /* If we get here, the whole TSO packet has not been acked. */
 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 {
@@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 					ca_rtt_us, sack->rate);
 
 	if (flag & FLAG_ACKED) {
-		tcp_rearm_rto(sk);
+		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
 		if (unlikely(icsk->icsk_mtup.probe_size &&
 			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
 			tcp_mtup_probe_success(sk);
@@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		 * after when the head was last (re)transmitted. Otherwise the
 		 * timeout may continue to extend in loss recovery.
 		 */
-		tcp_rearm_rto(sk);
+		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
 	}
 
 	if (icsk->icsk_ca_ops->pkts_acked) {
@@ -3580,9 +3585,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (after(ack, tp->snd_nxt))
 		goto invalid_ack;
 
-	if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
-		tcp_rearm_rto(sk);
-
 	if (after(ack, prior_snd_una)) {
 		flag |= FLAG_SND_UNA_ADVANCED;
 		icsk->icsk_retransmits = 0;
@@ -3647,18 +3649,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
 				    &sack_state);
 
+	if (tp->tlp_high_seq)
+		tcp_process_tlp_ack(sk, ack, flag);
+	/* If needed, reset TLP/RTO timer; RACK may later override this. */
+	if (flag & FLAG_SET_XMIT_TIMER)
+		tcp_set_xmit_timer(sk);
+
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
 	}
-	if (tp->tlp_high_seq)
-		tcp_process_tlp_ack(sk, ack, flag);
 
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
 		sk_dst_confirm(sk);
 
-	if (icsk->icsk_pending == ICSK_TIME_RETRANS)
-		tcp_schedule_loss_probe(sk);
 	delivered = tp->delivered - delivered;	/* freshly ACKed or SACKed */
 	lost = tp->lost - lost;			/* freshly marked lost */
 	tcp_rate_gen(sk, delivered, lost, sack_state.rate);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2f1588bf73da..b7661a68d498 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2377,24 +2377,15 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 timeout, tlp_time_stamp, rto_time_stamp;
 	u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
+	u32 timeout, rto_delta_us;
 
-	/* No consecutive loss probes. */
-	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
-		tcp_rearm_rto(sk);
-		return false;
-	}
 	/* Don't do any loss probe on a Fast Open connection before 3WHS
 	 * finishes.
 	 */
 	if (tp->fastopen_rsk)
 		return false;
 
-	/* TLP is only scheduled when next timer event is RTO. */
-	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
-		return false;
-
 	/* Schedule a loss probe in 2*RTT for SACK capable connections
 	 * in Open state, that are either limited by cwnd or application.
 	 */
@@ -2417,14 +2408,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 		       (rtt + (rtt >> 1) + TCP_DELACK_MAX));
 	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
 
-	/* If RTO is shorter, just schedule TLP in its place. */
-	tlp_time_stamp = tcp_jiffies32 + timeout;
-	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
-	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
-		s32 delta = rto_time_stamp - tcp_jiffies32;
-		if (delta > 0)
-			timeout = delta;
-	}
+	/* If the RTO formula yields an earlier time, then use that time. */
+	rto_delta_us = tcp_rto_delta_us(sk);  /* How far in future is RTO? */
+	if (rto_delta_us > 0)
+		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
 				  TCP_RTO_MAX);
@@ -3449,6 +3436,10 @@ int tcp_connect(struct sock *sk)
 	int err;
 
 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
+
+	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+		return -EHOSTUNREACH; /* Routing failure or similar. */
+
 	tcp_connect_init(sk);
 
 	if (unlikely(tp->repair)) {
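
The rewritten scheduling logic above computes the TLP timeout from the smoothed RTT plus a delayed-ACK allowance, then caps it so the probe never fires later than the pending RTO would have. A small numeric sketch of that clamping (values in milliseconds, purely illustrative; the kernel's exact formula differs in detail):

    #include <stdio.h>

    static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
    static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

    int main(void)
    {
    	unsigned int rtt_ms = 40;	/* smoothed RTT */
    	unsigned int delack_ms = 200;	/* worst-case delayed-ACK allowance */
    	unsigned int rto_delta_ms = 90;	/* time left until the RTO would fire */

    	/* roughly 2*RTT plus the delayed-ACK allowance, floored at 10ms */
    	unsigned int timeout = max_u(2 * rtt_ms + delack_ms, 10);

    	/* never schedule the probe past the pending RTO */
    	if (rto_delta_ms > 0)
    		timeout = min_u(timeout, rto_delta_ms);

    	printf("TLP timeout = %u ms\n", timeout);	/* prints 90 ms */
    	return 0;
    }
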
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c0feeeef962a..e906014890b6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -652,7 +652,8 @@ static void tcp_keepalive_timer (unsigned long data)
 		goto death;
 	}
 
-	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
 		goto out;
 
 	elapsed = keepalive_time_when(tp);
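
The fix above swaps an equality test for the `(1 << state) & mask` idiom, which lets a single branch test membership in any set of TCP states; here SYN_SENT joins CLOSE so a keepalive cannot arm before the handshake completes. A tiny demonstration of the idiom (the state numbering mirrors the kernel's, but treat it as illustrative):

    #include <stdio.h>

    enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2, TCP_CLOSE = 7 };

    #define TCPF_SYN_SENT	(1 << TCP_SYN_SENT)
    #define TCPF_CLOSE	(1 << TCP_CLOSE)

    int main(void)
    {
    	int state;

    	for (state = 1; state <= 7; state++)	/* membership test per state */
    		printf("state %d: keepalive %s\n", state,
    		       ((1 << state) & (TCPF_CLOSE | TCPF_SYN_SENT)) ?
    		       "skipped" : "allowed");
    	return 0;
    }
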
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 781250151d40..0932c85b42af 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -235,7 +235,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	if (uh->check == 0)
 		uh->check = CSUM_MANGLED_0;
 
-	skb->ip_summed = CHECKSUM_NONE;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	/* If there is no outer header we can fake a checksum offload
 	 * due to the fact that we have already done the checksum in
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4d30c96a819d..a640fbcba15d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2351,6 +2351,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 	if (on_link)
 		nrt->rt6i_flags &= ~RTF_GATEWAY;
 
+	nrt->rt6i_protocol = RTPROT_REDIRECT;
 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
 
 	if (ip6_ins_rt(nrt))
@@ -2461,6 +2462,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
 		.fc_dst_len = prefixlen,
 		.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
 			    RTF_UP | RTF_PREF(pref),
+		.fc_protocol = RTPROT_RA,
 		.fc_nlinfo.portid = 0,
 		.fc_nlinfo.nlh = NULL,
 		.fc_nlinfo.nl_net = net,
@@ -2513,6 +2515,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
 		.fc_ifindex = dev->ifindex,
 		.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
 			    RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
+		.fc_protocol = RTPROT_RA,
 		.fc_nlinfo.portid = 0,
 		.fc_nlinfo.nlh = NULL,
 		.fc_nlinfo.nl_net = dev_net(dev),
@@ -3424,14 +3427,6 @@ static int rt6_fill_node(struct net *net,
 	rtm->rtm_flags = 0;
 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
 	rtm->rtm_protocol = rt->rt6i_protocol;
-	if (rt->rt6i_flags & RTF_DYNAMIC)
-		rtm->rtm_protocol = RTPROT_REDIRECT;
-	else if (rt->rt6i_flags & RTF_ADDRCONF) {
-		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
-			rtm->rtm_protocol = RTPROT_RA;
-		else
-			rtm->rtm_protocol = RTPROT_KERNEL;
-	}
 
 	if (rt->rt6i_flags & RTF_CACHE)
 		rtm->rtm_flags |= RTM_F_CLONED;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a2267f80febb..e7d378c032cb 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -72,7 +72,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 	if (uh->check == 0)
 		uh->check = CSUM_MANGLED_0;
 
-	skb->ip_summed = CHECKSUM_NONE;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	/* If there is no outer header we can fake a checksum offload
 	 * due to the fact that we have already done the checksum in
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index e10624aa6959..9722bf839d9d 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -1015,8 +1015,10 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
 	if (rds_ib_ring_empty(&ic->i_recv_ring))
 		rds_ib_stats_inc(s_ib_rx_ring_empty);
 
-	if (rds_ib_ring_low(&ic->i_recv_ring))
+	if (rds_ib_ring_low(&ic->i_recv_ring)) {
 		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
+		rds_ib_stats_inc(s_ib_rx_refill_from_cq);
+	}
 }
 
 int rds_ib_recv_path(struct rds_conn_path *cp)
@@ -1029,6 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
 	if (rds_conn_up(conn)) {
 		rds_ib_attempt_ack(ic);
 		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
+		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
 	}
 
 	return ret;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 36f0ced9e60c..94ba5cfab860 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -36,8 +36,8 @@ static struct tc_action_ops act_ipt_ops;
 static unsigned int xt_net_id;
 static struct tc_action_ops act_xt_ops;
 
-static int ipt_init_target(struct xt_entry_target *t, char *table,
-			   unsigned int hook)
+static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+			   char *table, unsigned int hook)
 {
 	struct xt_tgchk_param par;
 	struct xt_target *target;
@@ -49,6 +49,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
 		return PTR_ERR(target);
 
 	t->u.kernel.target = target;
+	par.net = net;
 	par.table = table;
 	par.entryinfo = NULL;
 	par.target = target;
@@ -91,10 +92,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
 	[TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },
 };
 
-static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
+static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
 			  struct nlattr *est, struct tc_action **a,
 			  const struct tc_action_ops *ops, int ovr, int bind)
 {
+	struct tc_action_net *tn = net_generic(net, id);
 	struct nlattr *tb[TCA_IPT_MAX + 1];
 	struct tcf_ipt *ipt;
 	struct xt_entry_target *td, *t;
@@ -159,7 +161,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
 	if (unlikely(!t))
 		goto err2;
 
-	err = ipt_init_target(t, tname, hook);
+	err = ipt_init_target(net, t, tname, hook);
 	if (err < 0)
 		goto err3;
 
@@ -193,18 +195,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
 			struct nlattr *est, struct tc_action **a, int ovr,
 			int bind)
 {
-	struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-	return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);
+	return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
+			      bind);
 }
 
 static int tcf_xt_init(struct net *net, struct nlattr *nla,
 		       struct nlattr *est, struct tc_action **a, int ovr,
 		       int bind)
 {
-	struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-	return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);
+	return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
+			      bind);
 }
 
 static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
index 7598361ef1f1..da2172ff9662 100644
--- a/tools/build/feature/test-bpf.c
+++ b/tools/build/feature/test-bpf.c
@@ -11,6 +11,8 @@
 # define __NR_bpf 280
 # elif defined(__sparc__)
 # define __NR_bpf 349
+# elif defined(__s390__)
+# define __NR_bpf 351
 # else
 # error __NR_bpf not defined. libbpf does not support your arch.
 # endif
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 256f571f2ab5..e5bbb090bf88 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -39,6 +39,8 @@
 # define __NR_bpf 280
 # elif defined(__sparc__)
 # define __NR_bpf 349
+# elif defined(__s390__)
+# define __NR_bpf 351
 # else
 # error __NR_bpf not defined. libbpf does not support your arch.
 # endif
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/test_pkt_md_access.c
index 71729d47eb85..7956302ecdf2 100644
--- a/tools/testing/selftests/bpf/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/test_pkt_md_access.c
@@ -12,12 +12,23 @@
 
 int _version SEC("version") = 1;
 
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define TEST_FIELD(TYPE, FIELD, MASK) \
 	{ \
 		TYPE tmp = *(volatile TYPE *)&skb->FIELD; \
 		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
 			return TC_ACT_SHOT; \
 	}
+#else
+#define TEST_FIELD_OFFSET(a, b) ((sizeof(a) - sizeof(b)) / sizeof(b))
+#define TEST_FIELD(TYPE, FIELD, MASK) \
+	{ \
+		TYPE tmp = *((volatile TYPE *)&skb->FIELD + \
+			     TEST_FIELD_OFFSET(skb->FIELD, TYPE)); \
+		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
+			return TC_ACT_SHOT; \
+	}
+#endif
 
 SEC("test1")
 int process(struct __sk_buff *skb)
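
The big-endian variant above exists because a narrow load of a 32-bit field's low-order bits sits at byte offset 0 on little-endian machines but at the end of the word on big-endian ones; the TEST_FIELD_OFFSET arithmetic computes how many TYPE-sized steps to skip. A host-side demonstration of the same offset math:

    #include <stdint.h>
    #include <stdio.h>

    /* how many TYPE-sized elements to skip to reach the low-order part */
    #define FIELD_OFFSET(field, type) \
    	((sizeof(field) - sizeof(type)) / sizeof(type))

    int main(void)
    {
    	uint32_t field = 0x11223344;
    	uint8_t direct = *(uint8_t *)&field;
    	uint8_t offset = *((uint8_t *)&field + FIELD_OFFSET(field, uint8_t));

    	/* little-endian: direct=0x44 (low byte), offset=0x11 (high byte);
    	 * big-endian: the two results swap, so the offset form is the one
    	 * that reaches the low-order byte 0x44
    	 */
    	printf("direct=0x%02x offset=0x%02x\n", direct, offset);
    	return 0;
    }
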
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index addea82f76c9..d3ed7324105e 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,6 +8,7 @@
  * License as published by the Free Software Foundation.
  */
 
+#include <endian.h>
 #include <asm/types.h>
 #include <linux/types.h>
 #include <stdint.h>
@@ -1098,7 +1099,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash byte load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash)),
 #else
@@ -1135,7 +1136,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash byte load not permitted 3",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash) + 3),
 #else
@@ -1244,7 +1245,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash half load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash)),
 #else
@@ -1259,7 +1260,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash half load not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash) + 2),
 #else
@@ -5422,7 +5423,7 @@ static struct bpf_test tests[] = {
 		"check bpf_perf_event_data->sample_period byte load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5438,7 +5439,7 @@ static struct bpf_test tests[] = {
 		"check bpf_perf_event_data->sample_period half load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5454,7 +5455,7 @@ static struct bpf_test tests[] = {
 		"check bpf_perf_event_data->sample_period word load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5481,7 +5482,7 @@ static struct bpf_test tests[] = {
 		"check skb->data half load not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, data)),
 #else
@@ -5497,7 +5498,7 @@ static struct bpf_test tests[] = {
 		"check skb->tc_classid half load not permitted for lwt prog",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, tc_classid)),
 #else