diff options
author | David S. Miller <davem@davemloft.net> | 2017-08-09 19:28:45 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-08-09 19:28:45 -0400 |
commit | 3118e6e19da7b8d76b2456b880c74a9aa3a2268b (patch) | |
tree | 3060d11297c1195ef2d1f120d9c2247b4b1de4ae | |
parent | feca7d8c135bc1527b244fe817b8b6498066ccec (diff) | |
parent | 48fb6f4db940e92cfb16cd878cddd59ea6120d06 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The UDP offload conflict is dealt with by simply taking what is
in net-next where we have removed all of the UFO handling code
entirely.
The TCP conflict was a case of local variables in a function
being removed from both net and net-next.
In netvsc we had an assignment right next to where a missing
set of u64 stats sync object inits were added.
Signed-off-by: David S. Miller <davem@davemloft.net>
103 files changed, 2992 insertions, 621 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index b3a8ca6aa3ed..7cb7f4c3ad3f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1161,7 +1161,7 @@ M: Brendan Higgins <brendanhiggins@google.com> | |||
1161 | R: Benjamin Herrenschmidt <benh@kernel.crashing.org> | 1161 | R: Benjamin Herrenschmidt <benh@kernel.crashing.org> |
1162 | R: Joel Stanley <joel@jms.id.au> | 1162 | R: Joel Stanley <joel@jms.id.au> |
1163 | L: linux-i2c@vger.kernel.org | 1163 | L: linux-i2c@vger.kernel.org |
1164 | L: openbmc@lists.ozlabs.org | 1164 | L: openbmc@lists.ozlabs.org (moderated for non-subscribers) |
1165 | S: Maintained | 1165 | S: Maintained |
1166 | F: drivers/irqchip/irq-aspeed-i2c-ic.c | 1166 | F: drivers/irqchip/irq-aspeed-i2c-ic.c |
1167 | F: drivers/i2c/busses/i2c-aspeed.c | 1167 | F: drivers/i2c/busses/i2c-aspeed.c |
@@ -5835,7 +5835,7 @@ F: drivers/staging/greybus/spi.c | |||
5835 | F: drivers/staging/greybus/spilib.c | 5835 | F: drivers/staging/greybus/spilib.c |
5836 | F: drivers/staging/greybus/spilib.h | 5836 | F: drivers/staging/greybus/spilib.h |
5837 | 5837 | ||
5838 | GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS | 5838 | GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS |
5839 | M: Bryan O'Donoghue <pure.logic@nexus-software.ie> | 5839 | M: Bryan O'Donoghue <pure.logic@nexus-software.ie> |
5840 | S: Maintained | 5840 | S: Maintained |
5841 | F: drivers/staging/greybus/loopback.c | 5841 | F: drivers/staging/greybus/loopback.c |
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c new file mode 100644 index 000000000000..3f87b96da5c4 --- /dev/null +++ b/arch/mips/net/ebpf_jit.c | |||
@@ -0,0 +1,1950 @@ | |||
1 | /* | ||
2 | * Just-In-Time compiler for eBPF filters on MIPS | ||
3 | * | ||
4 | * Copyright (c) 2017 Cavium, Inc. | ||
5 | * | ||
6 | * Based on code from: | ||
7 | * | ||
8 | * Copyright (c) 2014 Imagination Technologies Ltd. | ||
9 | * Author: Markos Chandras <markos.chandras@imgtec.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the | ||
13 | * Free Software Foundation; version 2 of the License. | ||
14 | */ | ||
15 | |||
16 | #include <linux/bitops.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/filter.h> | ||
19 | #include <linux/bpf.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <asm/bitops.h> | ||
22 | #include <asm/byteorder.h> | ||
23 | #include <asm/cacheflush.h> | ||
24 | #include <asm/cpu-features.h> | ||
25 | #include <asm/uasm.h> | ||
26 | |||
27 | /* Registers used by JIT */ | ||
28 | #define MIPS_R_ZERO 0 | ||
29 | #define MIPS_R_AT 1 | ||
30 | #define MIPS_R_V0 2 /* BPF_R0 */ | ||
31 | #define MIPS_R_V1 3 | ||
32 | #define MIPS_R_A0 4 /* BPF_R1 */ | ||
33 | #define MIPS_R_A1 5 /* BPF_R2 */ | ||
34 | #define MIPS_R_A2 6 /* BPF_R3 */ | ||
35 | #define MIPS_R_A3 7 /* BPF_R4 */ | ||
36 | #define MIPS_R_A4 8 /* BPF_R5 */ | ||
37 | #define MIPS_R_T4 12 /* BPF_AX */ | ||
38 | #define MIPS_R_T5 13 | ||
39 | #define MIPS_R_T6 14 | ||
40 | #define MIPS_R_T7 15 | ||
41 | #define MIPS_R_S0 16 /* BPF_R6 */ | ||
42 | #define MIPS_R_S1 17 /* BPF_R7 */ | ||
43 | #define MIPS_R_S2 18 /* BPF_R8 */ | ||
44 | #define MIPS_R_S3 19 /* BPF_R9 */ | ||
45 | #define MIPS_R_S4 20 /* BPF_TCC */ | ||
46 | #define MIPS_R_S5 21 | ||
47 | #define MIPS_R_S6 22 | ||
48 | #define MIPS_R_S7 23 | ||
49 | #define MIPS_R_T8 24 | ||
50 | #define MIPS_R_T9 25 | ||
51 | #define MIPS_R_SP 29 | ||
52 | #define MIPS_R_RA 31 | ||
53 | |||
54 | /* eBPF flags */ | ||
55 | #define EBPF_SAVE_S0 BIT(0) | ||
56 | #define EBPF_SAVE_S1 BIT(1) | ||
57 | #define EBPF_SAVE_S2 BIT(2) | ||
58 | #define EBPF_SAVE_S3 BIT(3) | ||
59 | #define EBPF_SAVE_S4 BIT(4) | ||
60 | #define EBPF_SAVE_RA BIT(5) | ||
61 | #define EBPF_SEEN_FP BIT(6) | ||
62 | #define EBPF_SEEN_TC BIT(7) | ||
63 | #define EBPF_TCC_IN_V1 BIT(8) | ||
64 | |||
/*
 * For the mips64 ISA, we need to track the value range or type for
 * each JIT register. The BPF machine requires zero extended 32-bit
 * values, but the mips64 ISA requires sign extended 32-bit values.
 * At each point in the BPF program we track the state of every
 * register so that we can zero extend or sign extend as the BPF
 * semantics require.
 *
 * States are packed three bits per register (see set_reg_val_type()),
 * so all values below must fit in 3 bits.
 */
enum reg_val_type {
	/* uninitialized */
	REG_UNKNOWN,
	/* not known to be 32-bit compatible. */
	REG_64BIT,
	/* 32-bit compatible, no truncation needed for 64-bit ops. */
	REG_64BIT_32BIT,
	/* 32-bit compatible, need truncation for 64-bit ops. */
	REG_32BIT,
	/* 32-bit zero extended. */
	REG_32BIT_ZERO_EX,
	/* 32-bit no sign/zero extension needed. */
	REG_32BIT_POS
};
87 | |||
88 | /* | ||
89 | * high bit of offsets indicates if long branch conversion done at | ||
90 | * this insn. | ||
91 | */ | ||
92 | #define OFFSETS_B_CONV BIT(31) | ||
93 | |||
/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @stack_size:		eBPF stack size
 * @tmp_offset:		eBPF $sp offset to 8-byte temporary memory
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 * @reg_val_types:	Packed enum reg_val_type for each register
 * @long_b_conversion:	Long branch conversion flag (setter not in this chunk)
 * @gen_b_offsets:	Set when b_imm() should produce real branch offsets
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	int stack_size;
	int tmp_offset;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u64 *reg_val_types;
	unsigned int long_b_conversion:1;
	unsigned int gen_b_offsets:1;
};
117 | |||
118 | static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type) | ||
119 | { | ||
120 | *rvt &= ~(7ull << (reg * 3)); | ||
121 | *rvt |= ((u64)type << (reg * 3)); | ||
122 | } | ||
123 | |||
124 | static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx, | ||
125 | int index, int reg) | ||
126 | { | ||
127 | return (ctx->reg_val_types[index] >> (reg * 3)) & 7; | ||
128 | } | ||
129 | |||
/*
 * Simply emit the instruction if the JIT memory space has been
 * allocated.  On the first (sizing) pass ctx->target is NULL, so only
 * the instruction count is advanced.
 *
 * Macro hygiene fix: the original expansion used a bare ctx->idx for
 * the array index; every use of the macro argument must be
 * parenthesized as (ctx) so expression arguments expand correctly.
 */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[(ctx)->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
139 | |||
140 | static unsigned int j_target(struct jit_ctx *ctx, int target_idx) | ||
141 | { | ||
142 | unsigned long target_va, base_va; | ||
143 | unsigned int r; | ||
144 | |||
145 | if (!ctx->target) | ||
146 | return 0; | ||
147 | |||
148 | base_va = (unsigned long)ctx->target; | ||
149 | target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV); | ||
150 | |||
151 | if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful)) | ||
152 | return (unsigned int)-1; | ||
153 | r = target_va & 0x0ffffffful; | ||
154 | return r; | ||
155 | } | ||
156 | |||
/* Compute the immediate value for PC-relative branches. */
static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	/* Sizing pass: offsets are not yet valid, emit a placeholder. */
	if (!ctx->gen_b_offsets)
		return 0;

	/*
	 * We want a pc-relative branch. tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
		(ctx->idx * 4) - 4;
}
178 | |||
179 | int bpf_jit_enable __read_mostly; | ||
180 | |||
/*
 * Which operand of a bpf_insn to map, and whether the frame pointer
 * (BPF_REG_10) is legal in that position — ebpf_to_mips_reg() rejects
 * it for dst_reg and src_reg_no_fp.
 */
enum which_ebpf_reg {
	src_reg,
	src_reg_no_fp,
	dst_reg,
	dst_reg_fp_ok
};
187 | |||
/*
 * For eBPF, the register mapping naturally falls out of the
 * requirements of eBPF and the MIPS n64 ABI. We don't maintain a
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 *
 * Returns the MIPS register number, or -EINVAL for an illegal
 * register/position combination.  Mapping a callee-saved register
 * also records the corresponding EBPF_SAVE_S* flag so the prologue
 * knows to spill it.
 */
int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
		     enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
		insn->src_reg : insn->dst_reg;

	switch (ebpf_reg) {
	case BPF_REG_0:
		return MIPS_R_V0;
	case BPF_REG_1:
		return MIPS_R_A0;
	case BPF_REG_2:
		return MIPS_R_A1;
	case BPF_REG_3:
		return MIPS_R_A2;
	case BPF_REG_4:
		return MIPS_R_A3;
	case BPF_REG_5:
		return MIPS_R_A4;
	case BPF_REG_6:
		/* Callee-saved: mark for save/restore in pro/epilogue. */
		ctx->flags |= EBPF_SAVE_S0;
		return MIPS_R_S0;
	case BPF_REG_7:
		ctx->flags |= EBPF_SAVE_S1;
		return MIPS_R_S1;
	case BPF_REG_8:
		ctx->flags |= EBPF_SAVE_S2;
		return MIPS_R_S2;
	case BPF_REG_9:
		ctx->flags |= EBPF_SAVE_S3;
		return MIPS_R_S3;
	case BPF_REG_10:
		/* The frame pointer is never a writable destination. */
		if (w == dst_reg || w == src_reg_no_fp)
			goto bad_reg;
		ctx->flags |= EBPF_SEEN_FP;
		/*
		 * Needs special handling, return something that
		 * cannot be clobbered just in case.
		 */
		return MIPS_R_ZERO;
	case BPF_REG_AX:
		return MIPS_R_T4;
	default:
bad_reg:
		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
		return -EINVAL;
	}
}
242 | /* | ||
243 | * eBPF stack frame will be something like: | ||
244 | * | ||
245 | * Entry $sp ------> +--------------------------------+ | ||
246 | * | $ra (optional) | | ||
247 | * +--------------------------------+ | ||
248 | * | $s0 (optional) | | ||
249 | * +--------------------------------+ | ||
250 | * | $s1 (optional) | | ||
251 | * +--------------------------------+ | ||
252 | * | $s2 (optional) | | ||
253 | * +--------------------------------+ | ||
254 | * | $s3 (optional) | | ||
255 | * +--------------------------------+ | ||
256 | * | $s4 (optional) | | ||
257 | * +--------------------------------+ | ||
258 | * | tmp-storage (if $ra saved) | | ||
259 | * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10 | ||
260 | * | BPF_REG_10 relative storage | | ||
261 | * | MAX_BPF_STACK (optional) | | ||
262 | * | . | | ||
263 | * | . | | ||
264 | * | . | | ||
265 | * $sp --------> +--------------------------------+ | ||
266 | * | ||
267 | * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized | ||
268 | * area is not allocated. | ||
269 | */ | ||
/*
 * Emit the function prologue: size the frame from the EBPF_SAVE_*
 * flags, initialize the tail-call count, adjust $sp, and spill the
 * callee-saved registers (see the stack-frame diagram above).
 */
static int gen_int_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0;
	int store_offset;
	int locals_size;

	if (ctx->flags & EBPF_SAVE_RA)
		/*
		 * If RA we are doing a function call: 8 bytes for $ra
		 * plus an extra 8-byte tmp area, hence 16.
		 */
		stack_adjust += 16;
	if (ctx->flags & EBPF_SAVE_S0)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S1)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S2)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S3)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S4)
		stack_adjust += 8;

	/* The locals area must keep the frame 8-byte aligned. */
	BUILD_BUG_ON(MAX_BPF_STACK & 7);
	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;

	stack_adjust += locals_size;
	ctx->tmp_offset = locals_size;

	ctx->stack_size = stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in $v1 from the caller.
	 */
	emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
	else
		return 0;

	/* Spill top-down from the entry $sp, matching the diagram. */
	store_offset = stack_adjust - 8;

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}

	/* If the TCC must survive helper calls, park it in $s4. */
	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
		emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);

	return 0;
}
344 | |||
/*
 * Emit the epilogue: restore callee-saved registers, jump to
 * @dest_reg ($ra for a normal return, $t9 for a tail call), and
 * deallocate the frame.  The $sp adjustment is deliberately placed in
 * the jr delay slot.
 */
static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
	const struct bpf_prog *prog = ctx->skf;
	int stack_adjust = ctx->stack_size;
	int store_offset = stack_adjust - 8;
	int r0 = MIPS_R_V0;

	if (dest_reg == MIPS_R_RA &&
	    get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
		/* Don't let zero extended value escape. */
		emit_instr(ctx, sll, r0, r0, 0);

	/* Restore in the same top-down order the prologue spilled. */
	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	emit_instr(ctx, jr, dest_reg);

	/* Delay slot: pop the frame, or nop if there was no frame. */
	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
	else
		emit_instr(ctx, nop);

	return 0;
}
390 | |||
391 | static void gen_imm_to_reg(const struct bpf_insn *insn, int reg, | ||
392 | struct jit_ctx *ctx) | ||
393 | { | ||
394 | if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) { | ||
395 | emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm); | ||
396 | } else { | ||
397 | int lower = (s16)(insn->imm & 0xffff); | ||
398 | int upper = insn->imm - lower; | ||
399 | |||
400 | emit_instr(ctx, lui, reg, upper >> 16); | ||
401 | emit_instr(ctx, addiu, reg, reg, lower); | ||
402 | } | ||
403 | |||
404 | } | ||
405 | |||
/*
 * Emit code for an ALU/ALU64 operation with an immediate operand.
 * Chooses the single-instruction encoding when the immediate fits the
 * per-op bounds, otherwise materializes it first.  Returns 0 on
 * success or a negative errno for an unsupported opcode.
 */
static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			int idx)
{
	int upper_bound, lower_bound;
	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);

	if (dst < 0)
		return dst;

	/* Per-op bounds for the single-instruction immediate form. */
	switch (BPF_OP(insn->code)) {
	case BPF_MOV:
	case BPF_ADD:
		upper_bound = S16_MAX;
		lower_bound = S16_MIN;
		break;
	case BPF_SUB:
		/* SUB is emitted as add of -imm, so bounds are negated. */
		upper_bound = -(int)S16_MIN;
		lower_bound = -(int)S16_MAX;
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* andi/ori/xori zero-extend their 16-bit immediate. */
		upper_bound = 0xffff;
		lower_bound = 0;
		break;
	case BPF_RSH:
	case BPF_LSH:
	case BPF_ARSH:
		/* Shift amounts are truncated, no need for bounds */
		upper_bound = S32_MAX;
		lower_bound = S32_MIN;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Immediate move clobbers the register, so no sign/zero
	 * extension needed.
	 */
	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
	if (BPF_CLASS(insn->code) == BPF_ALU &&
	    BPF_OP(insn->code) != BPF_LSH &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
		emit_instr(ctx, sll, dst, dst, 0);

	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
		/* single insn immediate case */
		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
		case BPF_ALU64 | BPF_MOV:
			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU64 | BPF_AND:
		case BPF_ALU | BPF_AND:
			emit_instr(ctx, andi, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_OR:
		case BPF_ALU | BPF_OR:
			emit_instr(ctx, ori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_XOR:
		case BPF_ALU | BPF_XOR:
			emit_instr(ctx, xori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_ADD:
			emit_instr(ctx, daddiu, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_SUB:
			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
			break;
		case BPF_ALU64 | BPF_RSH:
			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_RSH:
			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_LSH:
			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_LSH:
			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_ARSH:
			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_ARSH:
			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU | BPF_MOV:
			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU | BPF_ADD:
			emit_instr(ctx, addiu, dst, dst, insn->imm);
			break;
		case BPF_ALU | BPF_SUB:
			emit_instr(ctx, addiu, dst, dst, -insn->imm);
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* multi insn immediate case */
		if (BPF_OP(insn->code) == BPF_MOV) {
			gen_imm_to_reg(insn, dst, ctx);
		} else {
			/* Materialize the immediate in $at, then reg-reg op. */
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
			case BPF_ALU64 | BPF_AND:
			case BPF_ALU | BPF_AND:
				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_OR:
			case BPF_ALU | BPF_OR:
				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_XOR:
			case BPF_ALU | BPF_XOR:
				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_ADD:
				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_SUB:
				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_ADD:
				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_SUB:
				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}
550 | |||
/*
 * Out-of-line wrapper around skb_header_pointer() so the JIT has a
 * concrete function address to emit a call to (NOTE(review): presumably
 * because skb_header_pointer may be inline — confirm against skbuff.h).
 */
static void * __must_check
ool_skb_header_pointer(const struct sk_buff *skb, int offset,
		       int len, void *buffer)
{
	return skb_header_pointer(skb, offset, len, buffer);
}
557 | |||
558 | static int size_to_len(const struct bpf_insn *insn) | ||
559 | { | ||
560 | switch (BPF_SIZE(insn->code)) { | ||
561 | case BPF_B: | ||
562 | return 1; | ||
563 | case BPF_H: | ||
564 | return 2; | ||
565 | case BPF_W: | ||
566 | return 4; | ||
567 | case BPF_DW: | ||
568 | return 8; | ||
569 | } | ||
570 | return 0; | ||
571 | } | ||
572 | |||
/*
 * Load an arbitrary 64-bit constant into @dst using the shortest of
 * three strategies: one daddiu, a lui/ori pair, or a general sequence
 * built from the four 16-bit slices of the value.
 */
static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
{
	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
		/* Fits a single sign-extended 16-bit immediate. */
		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
	} else if (value >= 0xffffffff80000000ull ||
		   (value < 0x80000000 && value > 0xffff)) {
		/* Fits a sign-extended 32-bit lui/ori pair. */
		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
	} else {
		int i;
		bool seen_part = false;
		int needed_shift = 0;

		/*
		 * Walk the four 16-bit slices, most significant first.
		 * Zero slices are skipped and folded into one pending
		 * shift (needed_shift), applied only when the next
		 * nonzero slice (or the final slice) is reached.
		 */
		for (i = 0; i < 4; i++) {
			u64 part = (value >> (16 * (3 - i))) & 0xffff;

			if (seen_part && needed_shift > 0 && (part || i == 3)) {
				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
				needed_shift = 0;
			}
			if (part) {
				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
					/*
					 * lui shifts by 16 itself, so start
					 * the running shift at -16 to
					 * compensate.
					 */
					emit_instr(ctx, lui, dst, (s32)(s16)part);
					needed_shift = -16;
				} else {
					emit_instr(ctx, ori, dst,
						   seen_part ? dst : MIPS_R_ZERO,
						   (unsigned int)part);
				}
				seen_part = true;
			}
			if (seen_part)
				needed_shift += 16;
		}
	}
}
609 | |||
/*
 * Emit the bpf_tail_call() sequence: bounds-check the index against
 * the prog array, decrement the tail-call count, load the target
 * program and jump past its TCC-initialization instruction.  Each
 * out-of-bounds check branches to the following BPF insn (this_idx + 1).
 */
static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;

	ctx->flags |= EBPF_SEEN_TC;
	/*
	 * if (index >= array->map.max_entries)
	 * goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (--TCC < 0)
	 * goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, daddiu, MIPS_R_T5,
		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
	 * goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
	off = offsetof(struct bpf_array, ptrs);
	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/* Delay slot */
	emit_instr(ctx, nop);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
	/* All systems are go... propagate TCC */
	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
	/* Skip first instruction (TCC initialization) */
	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
	return build_int_epilogue(ctx, MIPS_R_T9);
}
657 | |||
658 | static bool use_bbit_insns(void) | ||
659 | { | ||
660 | switch (current_cpu_type()) { | ||
661 | case CPU_CAVIUM_OCTEON: | ||
662 | case CPU_CAVIUM_OCTEON_PLUS: | ||
663 | case CPU_CAVIUM_OCTEON2: | ||
664 | case CPU_CAVIUM_OCTEON3: | ||
665 | return true; | ||
666 | default: | ||
667 | return false; | ||
668 | } | ||
669 | } | ||
670 | |||
/* True when @b_off cannot be encoded as a signed 18-bit branch offset. */
static bool is_bad_offset(int b_off)
{
	return !(b_off >= -0x20000 && b_off <= 0x1ffff);
}
675 | |||
676 | /* Returns the number of insn slots consumed. */ | ||
677 | static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | ||
678 | int this_idx, int exit_idx) | ||
679 | { | ||
680 | int src, dst, r, td, ts, mem_off, b_off; | ||
681 | bool need_swap, did_move, cmp_eq; | ||
682 | unsigned int target; | ||
683 | u64 t64; | ||
684 | s64 t64s; | ||
685 | |||
686 | switch (insn->code) { | ||
687 | case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ | ||
688 | case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */ | ||
689 | case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */ | ||
690 | case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */ | ||
691 | case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */ | ||
692 | case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */ | ||
693 | case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */ | ||
694 | case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */ | ||
695 | case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */ | ||
696 | case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */ | ||
697 | case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */ | ||
698 | case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */ | ||
699 | case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */ | ||
700 | case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */ | ||
701 | case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */ | ||
702 | case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */ | ||
703 | case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */ | ||
704 | case BPF_ALU | BPF_ARSH | BPF_K: /* ALU64_IMM */ | ||
705 | r = gen_imm_insn(insn, ctx, this_idx); | ||
706 | if (r < 0) | ||
707 | return r; | ||
708 | break; | ||
709 | case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */ | ||
710 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
711 | if (dst < 0) | ||
712 | return dst; | ||
713 | if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) | ||
714 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); | ||
715 | if (insn->imm == 1) /* Mult by 1 is a nop */ | ||
716 | break; | ||
717 | gen_imm_to_reg(insn, MIPS_R_AT, ctx); | ||
718 | emit_instr(ctx, dmultu, MIPS_R_AT, dst); | ||
719 | emit_instr(ctx, mflo, dst); | ||
720 | break; | ||
721 | case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */ | ||
722 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
723 | if (dst < 0) | ||
724 | return dst; | ||
725 | if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) | ||
726 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); | ||
727 | emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst); | ||
728 | break; | ||
729 | case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */ | ||
730 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
731 | if (dst < 0) | ||
732 | return dst; | ||
733 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | ||
734 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | ||
735 | /* sign extend */ | ||
736 | emit_instr(ctx, sll, dst, dst, 0); | ||
737 | } | ||
738 | if (insn->imm == 1) /* Mult by 1 is a nop */ | ||
739 | break; | ||
740 | gen_imm_to_reg(insn, MIPS_R_AT, ctx); | ||
741 | emit_instr(ctx, multu, dst, MIPS_R_AT); | ||
742 | emit_instr(ctx, mflo, dst); | ||
743 | break; | ||
744 | case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */ | ||
745 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
746 | if (dst < 0) | ||
747 | return dst; | ||
748 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | ||
749 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | ||
750 | /* sign extend */ | ||
751 | emit_instr(ctx, sll, dst, dst, 0); | ||
752 | } | ||
753 | emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst); | ||
754 | break; | ||
755 | case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */ | ||
756 | case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */ | ||
757 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
758 | if (dst < 0) | ||
759 | return dst; | ||
760 | if (insn->imm == 0) { /* Div by zero */ | ||
761 | b_off = b_imm(exit_idx, ctx); | ||
762 | if (is_bad_offset(b_off)) | ||
763 | return -E2BIG; | ||
764 | emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); | ||
765 | emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO); | ||
766 | } | ||
767 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | ||
768 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) | ||
769 | /* sign extend */ | ||
770 | emit_instr(ctx, sll, dst, dst, 0); | ||
771 | if (insn->imm == 1) { | ||
772 | /* div by 1 is a nop, mod by 1 is zero */ | ||
773 | if (BPF_OP(insn->code) == BPF_MOD) | ||
774 | emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); | ||
775 | break; | ||
776 | } | ||
777 | gen_imm_to_reg(insn, MIPS_R_AT, ctx); | ||
778 | emit_instr(ctx, divu, dst, MIPS_R_AT); | ||
779 | if (BPF_OP(insn->code) == BPF_DIV) | ||
780 | emit_instr(ctx, mflo, dst); | ||
781 | else | ||
782 | emit_instr(ctx, mfhi, dst); | ||
783 | break; | ||
784 | case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */ | ||
785 | case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */ | ||
786 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
787 | if (dst < 0) | ||
788 | return dst; | ||
789 | if (insn->imm == 0) { /* Div by zero */ | ||
790 | b_off = b_imm(exit_idx, ctx); | ||
791 | if (is_bad_offset(b_off)) | ||
792 | return -E2BIG; | ||
793 | emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); | ||
794 | emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO); | ||
795 | } | ||
796 | if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) | ||
797 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); | ||
798 | |||
799 | if (insn->imm == 1) { | ||
800 | /* div by 1 is a nop, mod by 1 is zero */ | ||
801 | if (BPF_OP(insn->code) == BPF_MOD) | ||
802 | emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); | ||
803 | break; | ||
804 | } | ||
805 | gen_imm_to_reg(insn, MIPS_R_AT, ctx); | ||
806 | emit_instr(ctx, ddivu, dst, MIPS_R_AT); | ||
807 | if (BPF_OP(insn->code) == BPF_DIV) | ||
808 | emit_instr(ctx, mflo, dst); | ||
809 | else | ||
810 | emit_instr(ctx, mfhi, dst); | ||
811 | break; | ||
812 | case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */ | ||
813 | case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */ | ||
814 | case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */ | ||
815 | case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */ | ||
816 | case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */ | ||
817 | case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */ | ||
818 | case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */ | ||
819 | case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */ | ||
820 | case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */ | ||
821 | case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */ | ||
822 | case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */ | ||
823 | case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */ | ||
824 | src = ebpf_to_mips_reg(ctx, insn, src_reg); | ||
825 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
826 | if (src < 0 || dst < 0) | ||
827 | return -EINVAL; | ||
828 | if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) | ||
829 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); | ||
830 | did_move = false; | ||
831 | if (insn->src_reg == BPF_REG_10) { | ||
832 | if (BPF_OP(insn->code) == BPF_MOV) { | ||
833 | emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK); | ||
834 | did_move = true; | ||
835 | } else { | ||
836 | emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK); | ||
837 | src = MIPS_R_AT; | ||
838 | } | ||
839 | } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { | ||
840 | int tmp_reg = MIPS_R_AT; | ||
841 | |||
842 | if (BPF_OP(insn->code) == BPF_MOV) { | ||
843 | tmp_reg = dst; | ||
844 | did_move = true; | ||
845 | } | ||
846 | emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO); | ||
847 | emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32); | ||
848 | src = MIPS_R_AT; | ||
849 | } | ||
850 | switch (BPF_OP(insn->code)) { | ||
851 | case BPF_MOV: | ||
852 | if (!did_move) | ||
853 | emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO); | ||
854 | break; | ||
855 | case BPF_ADD: | ||
856 | emit_instr(ctx, daddu, dst, dst, src); | ||
857 | break; | ||
858 | case BPF_SUB: | ||
859 | emit_instr(ctx, dsubu, dst, dst, src); | ||
860 | break; | ||
861 | case BPF_XOR: | ||
862 | emit_instr(ctx, xor, dst, dst, src); | ||
863 | break; | ||
864 | case BPF_OR: | ||
865 | emit_instr(ctx, or, dst, dst, src); | ||
866 | break; | ||
867 | case BPF_AND: | ||
868 | emit_instr(ctx, and, dst, dst, src); | ||
869 | break; | ||
870 | case BPF_MUL: | ||
871 | emit_instr(ctx, dmultu, dst, src); | ||
872 | emit_instr(ctx, mflo, dst); | ||
873 | break; | ||
874 | case BPF_DIV: | ||
875 | case BPF_MOD: | ||
876 | b_off = b_imm(exit_idx, ctx); | ||
877 | if (is_bad_offset(b_off)) | ||
878 | return -E2BIG; | ||
879 | emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); | ||
880 | emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); | ||
881 | emit_instr(ctx, ddivu, dst, src); | ||
882 | if (BPF_OP(insn->code) == BPF_DIV) | ||
883 | emit_instr(ctx, mflo, dst); | ||
884 | else | ||
885 | emit_instr(ctx, mfhi, dst); | ||
886 | break; | ||
887 | case BPF_LSH: | ||
888 | emit_instr(ctx, dsllv, dst, dst, src); | ||
889 | break; | ||
890 | case BPF_RSH: | ||
891 | emit_instr(ctx, dsrlv, dst, dst, src); | ||
892 | break; | ||
893 | case BPF_ARSH: | ||
894 | emit_instr(ctx, dsrav, dst, dst, src); | ||
895 | break; | ||
896 | default: | ||
897 | pr_err("ALU64_REG NOT HANDLED\n"); | ||
898 | return -EINVAL; | ||
899 | } | ||
900 | break; | ||
901 | case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */ | ||
902 | case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */ | ||
903 | case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */ | ||
904 | case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */ | ||
905 | case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */ | ||
906 | case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */ | ||
907 | case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */ | ||
908 | case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */ | ||
909 | case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */ | ||
910 | case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */ | ||
911 | case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */ | ||
912 | src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); | ||
913 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
914 | if (src < 0 || dst < 0) | ||
915 | return -EINVAL; | ||
916 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | ||
917 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | ||
918 | /* sign extend */ | ||
919 | emit_instr(ctx, sll, dst, dst, 0); | ||
920 | } | ||
921 | did_move = false; | ||
922 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); | ||
923 | if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { | ||
924 | int tmp_reg = MIPS_R_AT; | ||
925 | |||
926 | if (BPF_OP(insn->code) == BPF_MOV) { | ||
927 | tmp_reg = dst; | ||
928 | did_move = true; | ||
929 | } | ||
930 | /* sign extend */ | ||
931 | emit_instr(ctx, sll, tmp_reg, src, 0); | ||
932 | src = MIPS_R_AT; | ||
933 | } | ||
934 | switch (BPF_OP(insn->code)) { | ||
935 | case BPF_MOV: | ||
936 | if (!did_move) | ||
937 | emit_instr(ctx, addu, dst, src, MIPS_R_ZERO); | ||
938 | break; | ||
939 | case BPF_ADD: | ||
940 | emit_instr(ctx, addu, dst, dst, src); | ||
941 | break; | ||
942 | case BPF_SUB: | ||
943 | emit_instr(ctx, subu, dst, dst, src); | ||
944 | break; | ||
945 | case BPF_XOR: | ||
946 | emit_instr(ctx, xor, dst, dst, src); | ||
947 | break; | ||
948 | case BPF_OR: | ||
949 | emit_instr(ctx, or, dst, dst, src); | ||
950 | break; | ||
951 | case BPF_AND: | ||
952 | emit_instr(ctx, and, dst, dst, src); | ||
953 | break; | ||
954 | case BPF_MUL: | ||
955 | emit_instr(ctx, mul, dst, dst, src); | ||
956 | break; | ||
957 | case BPF_DIV: | ||
958 | case BPF_MOD: | ||
959 | b_off = b_imm(exit_idx, ctx); | ||
960 | if (is_bad_offset(b_off)) | ||
961 | return -E2BIG; | ||
962 | emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); | ||
963 | emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); | ||
964 | emit_instr(ctx, divu, dst, src); | ||
965 | if (BPF_OP(insn->code) == BPF_DIV) | ||
966 | emit_instr(ctx, mflo, dst); | ||
967 | else | ||
968 | emit_instr(ctx, mfhi, dst); | ||
969 | break; | ||
970 | case BPF_LSH: | ||
971 | emit_instr(ctx, sllv, dst, dst, src); | ||
972 | break; | ||
973 | case BPF_RSH: | ||
974 | emit_instr(ctx, srlv, dst, dst, src); | ||
975 | break; | ||
976 | default: | ||
977 | pr_err("ALU_REG NOT HANDLED\n"); | ||
978 | return -EINVAL; | ||
979 | } | ||
980 | break; | ||
981 | case BPF_JMP | BPF_EXIT: | ||
982 | if (this_idx + 1 < exit_idx) { | ||
983 | b_off = b_imm(exit_idx, ctx); | ||
984 | if (is_bad_offset(b_off)) | ||
985 | return -E2BIG; | ||
986 | emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); | ||
987 | emit_instr(ctx, nop); | ||
988 | } | ||
989 | break; | ||
990 | case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */ | ||
991 | case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */ | ||
992 | cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); | ||
993 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); | ||
994 | if (dst < 0) | ||
995 | return dst; | ||
996 | if (insn->imm == 0) { | ||
997 | src = MIPS_R_ZERO; | ||
998 | } else { | ||
999 | gen_imm_to_reg(insn, MIPS_R_AT, ctx); | ||
1000 | src = MIPS_R_AT; | ||
1001 | } | ||
1002 | goto jeq_common; | ||
1003 | case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */ | ||
1004 | case BPF_JMP | BPF_JNE | BPF_X: | ||
1005 | case BPF_JMP | BPF_JSGT | BPF_X: | ||
1006 | case BPF_JMP | BPF_JSGE | BPF_X: | ||
1007 | case BPF_JMP | BPF_JGT | BPF_X: | ||
1008 | case BPF_JMP | BPF_JGE | BPF_X: | ||
1009 | case BPF_JMP | BPF_JSET | BPF_X: | ||
1010 | src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); | ||
1011 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
1012 | if (src < 0 || dst < 0) | ||
1013 | return -EINVAL; | ||
1014 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | ||
1015 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); | ||
1016 | if (td == REG_32BIT && ts != REG_32BIT) { | ||
1017 | emit_instr(ctx, sll, MIPS_R_AT, src, 0); | ||
1018 | src = MIPS_R_AT; | ||
1019 | } else if (ts == REG_32BIT && td != REG_32BIT) { | ||
1020 | emit_instr(ctx, sll, MIPS_R_AT, dst, 0); | ||
1021 | dst = MIPS_R_AT; | ||
1022 | } | ||
1023 | if (BPF_OP(insn->code) == BPF_JSET) { | ||
1024 | emit_instr(ctx, and, MIPS_R_AT, dst, src); | ||
1025 | cmp_eq = false; | ||
1026 | dst = MIPS_R_AT; | ||
1027 | src = MIPS_R_ZERO; | ||
1028 | } else if (BPF_OP(insn->code) == BPF_JSGT) { | ||
1029 | emit_instr(ctx, dsubu, MIPS_R_AT, dst, src); | ||
1030 | if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { | ||
1031 | b_off = b_imm(exit_idx, ctx); | ||
1032 | if (is_bad_offset(b_off)) | ||
1033 | return -E2BIG; | ||
1034 | emit_instr(ctx, blez, MIPS_R_AT, b_off); | ||
1035 | emit_instr(ctx, nop); | ||
1036 | return 2; /* We consumed the exit. */ | ||
1037 | } | ||
1038 | b_off = b_imm(this_idx + insn->off + 1, ctx); | ||
1039 | if (is_bad_offset(b_off)) | ||
1040 | return -E2BIG; | ||
1041 | emit_instr(ctx, bgtz, MIPS_R_AT, b_off); | ||
1042 | emit_instr(ctx, nop); | ||
1043 | break; | ||
1044 | } else if (BPF_OP(insn->code) == BPF_JSGE) { | ||
1045 | emit_instr(ctx, slt, MIPS_R_AT, dst, src); | ||
1046 | cmp_eq = true; | ||
1047 | dst = MIPS_R_AT; | ||
1048 | src = MIPS_R_ZERO; | ||
1049 | } else if (BPF_OP(insn->code) == BPF_JGT) { | ||
1050 | /* dst or src could be AT */ | ||
1051 | emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); | ||
1052 | emit_instr(ctx, sltu, MIPS_R_AT, dst, src); | ||
1053 | /* SP known to be non-zero, movz becomes boolean not */ | ||
1054 | emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8); | ||
1055 | emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8); | ||
1056 | emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); | ||
1057 | cmp_eq = true; | ||
1058 | dst = MIPS_R_AT; | ||
1059 | src = MIPS_R_ZERO; | ||
1060 | } else if (BPF_OP(insn->code) == BPF_JGE) { | ||
1061 | emit_instr(ctx, sltu, MIPS_R_AT, dst, src); | ||
1062 | cmp_eq = true; | ||
1063 | dst = MIPS_R_AT; | ||
1064 | src = MIPS_R_ZERO; | ||
1065 | } else { /* JNE/JEQ case */ | ||
1066 | cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); | ||
1067 | } | ||
1068 | jeq_common: | ||
1069 | /* | ||
1070 | * If the next insn is EXIT and we are jumping around | ||
1071 | * only it, invert the sense of the compare and | ||
1072 | * conditionally jump to the exit. Poor man's branch | ||
1073 | * chaining. | ||
1074 | */ | ||
1075 | if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { | ||
1076 | b_off = b_imm(exit_idx, ctx); | ||
1077 | if (is_bad_offset(b_off)) { | ||
1078 | target = j_target(ctx, exit_idx); | ||
1079 | if (target == (unsigned int)-1) | ||
1080 | return -E2BIG; | ||
1081 | cmp_eq = !cmp_eq; | ||
1082 | b_off = 4 * 3; | ||
1083 | if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { | ||
1084 | ctx->offsets[this_idx] |= OFFSETS_B_CONV; | ||
1085 | ctx->long_b_conversion = 1; | ||
1086 | } | ||
1087 | } | ||
1088 | |||
1089 | if (cmp_eq) | ||
1090 | emit_instr(ctx, bne, dst, src, b_off); | ||
1091 | else | ||
1092 | emit_instr(ctx, beq, dst, src, b_off); | ||
1093 | emit_instr(ctx, nop); | ||
1094 | if (ctx->offsets[this_idx] & OFFSETS_B_CONV) { | ||
1095 | emit_instr(ctx, j, target); | ||
1096 | emit_instr(ctx, nop); | ||
1097 | } | ||
1098 | return 2; /* We consumed the exit. */ | ||
1099 | } | ||
1100 | b_off = b_imm(this_idx + insn->off + 1, ctx); | ||
1101 | if (is_bad_offset(b_off)) { | ||
1102 | target = j_target(ctx, this_idx + insn->off + 1); | ||
1103 | if (target == (unsigned int)-1) | ||
1104 | return -E2BIG; | ||
1105 | cmp_eq = !cmp_eq; | ||
1106 | b_off = 4 * 3; | ||
1107 | if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { | ||
1108 | ctx->offsets[this_idx] |= OFFSETS_B_CONV; | ||
1109 | ctx->long_b_conversion = 1; | ||
1110 | } | ||
1111 | } | ||
1112 | |||
1113 | if (cmp_eq) | ||
1114 | emit_instr(ctx, beq, dst, src, b_off); | ||
1115 | else | ||
1116 | emit_instr(ctx, bne, dst, src, b_off); | ||
1117 | emit_instr(ctx, nop); | ||
1118 | if (ctx->offsets[this_idx] & OFFSETS_B_CONV) { | ||
1119 | emit_instr(ctx, j, target); | ||
1120 | emit_instr(ctx, nop); | ||
1121 | } | ||
1122 | break; | ||
1123 | case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */ | ||
1124 | case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */ | ||
1125 | cmp_eq = (BPF_OP(insn->code) == BPF_JSGE); | ||
1126 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); | ||
1127 | if (dst < 0) | ||
1128 | return dst; | ||
1129 | |||
1130 | if (insn->imm == 0) { | ||
1131 | if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { | ||
1132 | b_off = b_imm(exit_idx, ctx); | ||
1133 | if (is_bad_offset(b_off)) | ||
1134 | return -E2BIG; | ||
1135 | if (cmp_eq) | ||
1136 | emit_instr(ctx, bltz, dst, b_off); | ||
1137 | else | ||
1138 | emit_instr(ctx, blez, dst, b_off); | ||
1139 | emit_instr(ctx, nop); | ||
1140 | return 2; /* We consumed the exit. */ | ||
1141 | } | ||
1142 | b_off = b_imm(this_idx + insn->off + 1, ctx); | ||
1143 | if (is_bad_offset(b_off)) | ||
1144 | return -E2BIG; | ||
1145 | if (cmp_eq) | ||
1146 | emit_instr(ctx, bgez, dst, b_off); | ||
1147 | else | ||
1148 | emit_instr(ctx, bgtz, dst, b_off); | ||
1149 | emit_instr(ctx, nop); | ||
1150 | break; | ||
1151 | } | ||
1152 | /* | ||
1153 | * only "LT" compare available, so we must use imm + 1 | ||
1154 | * to generate "GT" | ||
1155 | */ | ||
1156 | t64s = insn->imm + (cmp_eq ? 0 : 1); | ||
1157 | if (t64s >= S16_MIN && t64s <= S16_MAX) { | ||
1158 | emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s); | ||
1159 | src = MIPS_R_AT; | ||
1160 | dst = MIPS_R_ZERO; | ||
1161 | cmp_eq = true; | ||
1162 | goto jeq_common; | ||
1163 | } | ||
1164 | emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); | ||
1165 | emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT); | ||
1166 | src = MIPS_R_AT; | ||
1167 | dst = MIPS_R_ZERO; | ||
1168 | cmp_eq = true; | ||
1169 | goto jeq_common; | ||
1170 | |||
1171 | case BPF_JMP | BPF_JGT | BPF_K: | ||
1172 | case BPF_JMP | BPF_JGE | BPF_K: | ||
1173 | cmp_eq = (BPF_OP(insn->code) == BPF_JGE); | ||
1174 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); | ||
1175 | if (dst < 0) | ||
1176 | return dst; | ||
1177 | /* | ||
1178 | * only "LT" compare available, so we must use imm + 1 | ||
1179 | * to generate "GT" | ||
1180 | */ | ||
1181 | t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1); | ||
1182 | if (t64s >= 0 && t64s <= S16_MAX) { | ||
1183 | emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s); | ||
1184 | src = MIPS_R_AT; | ||
1185 | dst = MIPS_R_ZERO; | ||
1186 | cmp_eq = true; | ||
1187 | goto jeq_common; | ||
1188 | } | ||
1189 | emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); | ||
1190 | emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT); | ||
1191 | src = MIPS_R_AT; | ||
1192 | dst = MIPS_R_ZERO; | ||
1193 | cmp_eq = true; | ||
1194 | goto jeq_common; | ||
1195 | |||
1196 | case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */ | ||
1197 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); | ||
1198 | if (dst < 0) | ||
1199 | return dst; | ||
1200 | |||
1201 | if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) { | ||
1202 | if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { | ||
1203 | b_off = b_imm(exit_idx, ctx); | ||
1204 | if (is_bad_offset(b_off)) | ||
1205 | return -E2BIG; | ||
1206 | emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off); | ||
1207 | emit_instr(ctx, nop); | ||
1208 | return 2; /* We consumed the exit. */ | ||
1209 | } | ||
1210 | b_off = b_imm(this_idx + insn->off + 1, ctx); | ||
1211 | if (is_bad_offset(b_off)) | ||
1212 | return -E2BIG; | ||
1213 | emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off); | ||
1214 | emit_instr(ctx, nop); | ||
1215 | break; | ||
1216 | } | ||
1217 | t64 = (u32)insn->imm; | ||
1218 | emit_const_to_reg(ctx, MIPS_R_AT, t64); | ||
1219 | emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT); | ||
1220 | src = MIPS_R_AT; | ||
1221 | dst = MIPS_R_ZERO; | ||
1222 | cmp_eq = false; | ||
1223 | goto jeq_common; | ||
1224 | |||
1225 | case BPF_JMP | BPF_JA: | ||
1226 | /* | ||
1227 | * Prefer relative branch for easier debugging, but | ||
1228 | * fall back if needed. | ||
1229 | */ | ||
1230 | b_off = b_imm(this_idx + insn->off + 1, ctx); | ||
1231 | if (is_bad_offset(b_off)) { | ||
1232 | target = j_target(ctx, this_idx + insn->off + 1); | ||
1233 | if (target == (unsigned int)-1) | ||
1234 | return -E2BIG; | ||
1235 | emit_instr(ctx, j, target); | ||
1236 | } else { | ||
1237 | emit_instr(ctx, b, b_off); | ||
1238 | } | ||
1239 | emit_instr(ctx, nop); | ||
1240 | break; | ||
1241 | case BPF_LD | BPF_DW | BPF_IMM: | ||
1242 | if (insn->src_reg != 0) | ||
1243 | return -EINVAL; | ||
1244 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
1245 | if (dst < 0) | ||
1246 | return dst; | ||
1247 | t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32); | ||
1248 | emit_const_to_reg(ctx, dst, t64); | ||
1249 | return 2; /* Double slot insn */ | ||
1250 | |||
1251 | case BPF_JMP | BPF_CALL: | ||
1252 | ctx->flags |= EBPF_SAVE_RA; | ||
1253 | t64s = (s64)insn->imm + (s64)__bpf_call_base; | ||
1254 | emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s); | ||
1255 | emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); | ||
1256 | /* delay slot */ | ||
1257 | emit_instr(ctx, nop); | ||
1258 | break; | ||
1259 | |||
1260 | case BPF_JMP | BPF_TAIL_CALL: | ||
1261 | if (emit_bpf_tail_call(ctx, this_idx)) | ||
1262 | return -EINVAL; | ||
1263 | break; | ||
1264 | |||
1265 | case BPF_LD | BPF_B | BPF_ABS: | ||
1266 | case BPF_LD | BPF_H | BPF_ABS: | ||
1267 | case BPF_LD | BPF_W | BPF_ABS: | ||
1268 | case BPF_LD | BPF_DW | BPF_ABS: | ||
1269 | ctx->flags |= EBPF_SAVE_RA; | ||
1270 | |||
1271 | gen_imm_to_reg(insn, MIPS_R_A1, ctx); | ||
1272 | emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn)); | ||
1273 | |||
1274 | if (insn->imm < 0) { | ||
1275 | emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper); | ||
1276 | } else { | ||
1277 | emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer); | ||
1278 | emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset); | ||
1279 | } | ||
1280 | goto ld_skb_common; | ||
1281 | |||
1282 | case BPF_LD | BPF_B | BPF_IND: | ||
1283 | case BPF_LD | BPF_H | BPF_IND: | ||
1284 | case BPF_LD | BPF_W | BPF_IND: | ||
1285 | case BPF_LD | BPF_DW | BPF_IND: | ||
1286 | ctx->flags |= EBPF_SAVE_RA; | ||
1287 | src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); | ||
1288 | if (src < 0) | ||
1289 | return src; | ||
1290 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); | ||
1291 | if (ts == REG_32BIT_ZERO_EX) { | ||
1292 | /* sign extend */ | ||
1293 | emit_instr(ctx, sll, MIPS_R_A1, src, 0); | ||
1294 | src = MIPS_R_A1; | ||
1295 | } | ||
1296 | if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) { | ||
1297 | emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm); | ||
1298 | } else { | ||
1299 | gen_imm_to_reg(insn, MIPS_R_AT, ctx); | ||
1300 | emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src); | ||
1301 | } | ||
1302 | /* truncate to 32-bit int */ | ||
1303 | emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0); | ||
1304 | emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset); | ||
1305 | emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO); | ||
1306 | |||
1307 | emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper); | ||
1308 | emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer); | ||
1309 | emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn)); | ||
1310 | emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT); | ||
1311 | |||
1312 | ld_skb_common: | ||
1313 | emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); | ||
1314 | /* delay slot move */ | ||
1315 | emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO); | ||
1316 | |||
1317 | /* Check the error value */ | ||
1318 | b_off = b_imm(exit_idx, ctx); | ||
1319 | if (is_bad_offset(b_off)) { | ||
1320 | target = j_target(ctx, exit_idx); | ||
1321 | if (target == (unsigned int)-1) | ||
1322 | return -E2BIG; | ||
1323 | |||
1324 | if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { | ||
1325 | ctx->offsets[this_idx] |= OFFSETS_B_CONV; | ||
1326 | ctx->long_b_conversion = 1; | ||
1327 | } | ||
1328 | emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3); | ||
1329 | emit_instr(ctx, nop); | ||
1330 | emit_instr(ctx, j, target); | ||
1331 | emit_instr(ctx, nop); | ||
1332 | } else { | ||
1333 | emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off); | ||
1334 | emit_instr(ctx, nop); | ||
1335 | } | ||
1336 | |||
1337 | #ifdef __BIG_ENDIAN | ||
1338 | need_swap = false; | ||
1339 | #else | ||
1340 | need_swap = true; | ||
1341 | #endif | ||
1342 | dst = MIPS_R_V0; | ||
1343 | switch (BPF_SIZE(insn->code)) { | ||
1344 | case BPF_B: | ||
1345 | emit_instr(ctx, lbu, dst, 0, MIPS_R_V0); | ||
1346 | break; | ||
1347 | case BPF_H: | ||
1348 | emit_instr(ctx, lhu, dst, 0, MIPS_R_V0); | ||
1349 | if (need_swap) | ||
1350 | emit_instr(ctx, wsbh, dst, dst); | ||
1351 | break; | ||
1352 | case BPF_W: | ||
1353 | emit_instr(ctx, lw, dst, 0, MIPS_R_V0); | ||
1354 | if (need_swap) { | ||
1355 | emit_instr(ctx, wsbh, dst, dst); | ||
1356 | emit_instr(ctx, rotr, dst, dst, 16); | ||
1357 | } | ||
1358 | break; | ||
1359 | case BPF_DW: | ||
1360 | emit_instr(ctx, ld, dst, 0, MIPS_R_V0); | ||
1361 | if (need_swap) { | ||
1362 | emit_instr(ctx, dsbh, dst, dst); | ||
1363 | emit_instr(ctx, dshd, dst, dst); | ||
1364 | } | ||
1365 | break; | ||
1366 | } | ||
1367 | |||
1368 | break; | ||
1369 | case BPF_ALU | BPF_END | BPF_FROM_BE: | ||
1370 | case BPF_ALU | BPF_END | BPF_FROM_LE: | ||
1371 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
1372 | if (dst < 0) | ||
1373 | return dst; | ||
1374 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | ||
1375 | if (insn->imm == 64 && td == REG_32BIT) | ||
1376 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); | ||
1377 | |||
1378 | if (insn->imm != 64 && | ||
1379 | (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { | ||
1380 | /* sign extend */ | ||
1381 | emit_instr(ctx, sll, dst, dst, 0); | ||
1382 | } | ||
1383 | |||
1384 | #ifdef __BIG_ENDIAN | ||
1385 | need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE); | ||
1386 | #else | ||
1387 | need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE); | ||
1388 | #endif | ||
1389 | if (insn->imm == 16) { | ||
1390 | if (need_swap) | ||
1391 | emit_instr(ctx, wsbh, dst, dst); | ||
1392 | emit_instr(ctx, andi, dst, dst, 0xffff); | ||
1393 | } else if (insn->imm == 32) { | ||
1394 | if (need_swap) { | ||
1395 | emit_instr(ctx, wsbh, dst, dst); | ||
1396 | emit_instr(ctx, rotr, dst, dst, 16); | ||
1397 | } | ||
1398 | } else { /* 64-bit*/ | ||
1399 | if (need_swap) { | ||
1400 | emit_instr(ctx, dsbh, dst, dst); | ||
1401 | emit_instr(ctx, dshd, dst, dst); | ||
1402 | } | ||
1403 | } | ||
1404 | break; | ||
1405 | |||
1406 | case BPF_ST | BPF_B | BPF_MEM: | ||
1407 | case BPF_ST | BPF_H | BPF_MEM: | ||
1408 | case BPF_ST | BPF_W | BPF_MEM: | ||
1409 | case BPF_ST | BPF_DW | BPF_MEM: | ||
1410 | if (insn->dst_reg == BPF_REG_10) { | ||
1411 | ctx->flags |= EBPF_SEEN_FP; | ||
1412 | dst = MIPS_R_SP; | ||
1413 | mem_off = insn->off + MAX_BPF_STACK; | ||
1414 | } else { | ||
1415 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
1416 | if (dst < 0) | ||
1417 | return dst; | ||
1418 | mem_off = insn->off; | ||
1419 | } | ||
1420 | gen_imm_to_reg(insn, MIPS_R_AT, ctx); | ||
1421 | switch (BPF_SIZE(insn->code)) { | ||
1422 | case BPF_B: | ||
1423 | emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst); | ||
1424 | break; | ||
1425 | case BPF_H: | ||
1426 | emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst); | ||
1427 | break; | ||
1428 | case BPF_W: | ||
1429 | emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst); | ||
1430 | break; | ||
1431 | case BPF_DW: | ||
1432 | emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst); | ||
1433 | break; | ||
1434 | } | ||
1435 | break; | ||
1436 | |||
1437 | case BPF_LDX | BPF_B | BPF_MEM: | ||
1438 | case BPF_LDX | BPF_H | BPF_MEM: | ||
1439 | case BPF_LDX | BPF_W | BPF_MEM: | ||
1440 | case BPF_LDX | BPF_DW | BPF_MEM: | ||
1441 | if (insn->src_reg == BPF_REG_10) { | ||
1442 | ctx->flags |= EBPF_SEEN_FP; | ||
1443 | src = MIPS_R_SP; | ||
1444 | mem_off = insn->off + MAX_BPF_STACK; | ||
1445 | } else { | ||
1446 | src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); | ||
1447 | if (src < 0) | ||
1448 | return src; | ||
1449 | mem_off = insn->off; | ||
1450 | } | ||
1451 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
1452 | if (dst < 0) | ||
1453 | return dst; | ||
1454 | switch (BPF_SIZE(insn->code)) { | ||
1455 | case BPF_B: | ||
1456 | emit_instr(ctx, lbu, dst, mem_off, src); | ||
1457 | break; | ||
1458 | case BPF_H: | ||
1459 | emit_instr(ctx, lhu, dst, mem_off, src); | ||
1460 | break; | ||
1461 | case BPF_W: | ||
1462 | emit_instr(ctx, lw, dst, mem_off, src); | ||
1463 | break; | ||
1464 | case BPF_DW: | ||
1465 | emit_instr(ctx, ld, dst, mem_off, src); | ||
1466 | break; | ||
1467 | } | ||
1468 | break; | ||
1469 | |||
1470 | case BPF_STX | BPF_B | BPF_MEM: | ||
1471 | case BPF_STX | BPF_H | BPF_MEM: | ||
1472 | case BPF_STX | BPF_W | BPF_MEM: | ||
1473 | case BPF_STX | BPF_DW | BPF_MEM: | ||
1474 | case BPF_STX | BPF_W | BPF_XADD: | ||
1475 | case BPF_STX | BPF_DW | BPF_XADD: | ||
1476 | if (insn->dst_reg == BPF_REG_10) { | ||
1477 | ctx->flags |= EBPF_SEEN_FP; | ||
1478 | dst = MIPS_R_SP; | ||
1479 | mem_off = insn->off + MAX_BPF_STACK; | ||
1480 | } else { | ||
1481 | dst = ebpf_to_mips_reg(ctx, insn, dst_reg); | ||
1482 | if (dst < 0) | ||
1483 | return dst; | ||
1484 | mem_off = insn->off; | ||
1485 | } | ||
1486 | src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); | ||
1487 | if (src < 0) | ||
1488 | return dst; | ||
1489 | if (BPF_MODE(insn->code) == BPF_XADD) { | ||
1490 | switch (BPF_SIZE(insn->code)) { | ||
1491 | case BPF_W: | ||
1492 | if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { | ||
1493 | emit_instr(ctx, sll, MIPS_R_AT, src, 0); | ||
1494 | src = MIPS_R_AT; | ||
1495 | } | ||
1496 | emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst); | ||
1497 | emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src); | ||
1498 | emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst); | ||
1499 | /* | ||
1500 | * On failure back up to LL (-4 | ||
1501 | * instructions of 4 bytes each | ||
1502 | */ | ||
1503 | emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4); | ||
1504 | emit_instr(ctx, nop); | ||
1505 | break; | ||
1506 | case BPF_DW: | ||
1507 | if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { | ||
1508 | emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO); | ||
1509 | emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32); | ||
1510 | src = MIPS_R_AT; | ||
1511 | } | ||
1512 | emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst); | ||
1513 | emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src); | ||
1514 | emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst); | ||
1515 | emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4); | ||
1516 | emit_instr(ctx, nop); | ||
1517 | break; | ||
1518 | } | ||
1519 | } else { /* BPF_MEM */ | ||
1520 | switch (BPF_SIZE(insn->code)) { | ||
1521 | case BPF_B: | ||
1522 | emit_instr(ctx, sb, src, mem_off, dst); | ||
1523 | break; | ||
1524 | case BPF_H: | ||
1525 | emit_instr(ctx, sh, src, mem_off, dst); | ||
1526 | break; | ||
1527 | case BPF_W: | ||
1528 | emit_instr(ctx, sw, src, mem_off, dst); | ||
1529 | break; | ||
1530 | case BPF_DW: | ||
1531 | if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { | ||
1532 | emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO); | ||
1533 | emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32); | ||
1534 | src = MIPS_R_AT; | ||
1535 | } | ||
1536 | emit_instr(ctx, sd, src, mem_off, dst); | ||
1537 | break; | ||
1538 | } | ||
1539 | } | ||
1540 | break; | ||
1541 | |||
1542 | default: | ||
1543 | pr_err("NOT HANDLED %d - (%02x)\n", | ||
1544 | this_idx, (unsigned int)insn->code); | ||
1545 | return -EINVAL; | ||
1546 | } | ||
1547 | return 1; | ||
1548 | } | ||
1549 | |||
1550 | #define RVT_VISITED_MASK 0xc000000000000000ull | ||
1551 | #define RVT_FALL_THROUGH 0x4000000000000000ull | ||
1552 | #define RVT_BRANCH_TAKEN 0x8000000000000000ull | ||
1553 | #define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN) | ||
1554 | |||
1555 | static int build_int_body(struct jit_ctx *ctx) | ||
1556 | { | ||
1557 | const struct bpf_prog *prog = ctx->skf; | ||
1558 | const struct bpf_insn *insn; | ||
1559 | int i, r; | ||
1560 | |||
1561 | for (i = 0; i < prog->len; ) { | ||
1562 | insn = prog->insnsi + i; | ||
1563 | if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) { | ||
1564 | /* dead instruction, don't emit it. */ | ||
1565 | i++; | ||
1566 | continue; | ||
1567 | } | ||
1568 | |||
1569 | if (ctx->target == NULL) | ||
1570 | ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4); | ||
1571 | |||
1572 | r = build_one_insn(insn, ctx, i, prog->len); | ||
1573 | if (r < 0) | ||
1574 | return r; | ||
1575 | i += r; | ||
1576 | } | ||
1577 | /* epilogue offset */ | ||
1578 | if (ctx->target == NULL) | ||
1579 | ctx->offsets[i] = ctx->idx * 4; | ||
1580 | |||
1581 | /* | ||
1582 | * All exits have an offset of the epilogue, some offsets may | ||
1583 | * not have been set due to banch-around threading, so set | ||
1584 | * them now. | ||
1585 | */ | ||
1586 | if (ctx->target == NULL) | ||
1587 | for (i = 0; i < prog->len; i++) { | ||
1588 | insn = prog->insnsi + i; | ||
1589 | if (insn->code == (BPF_JMP | BPF_EXIT)) | ||
1590 | ctx->offsets[i] = ctx->idx * 4; | ||
1591 | } | ||
1592 | return 0; | ||
1593 | } | ||
1594 | |||
1595 | /* return the last idx processed, or negative for error */ | ||
1596 | static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt, | ||
1597 | int start_idx, bool follow_taken) | ||
1598 | { | ||
1599 | const struct bpf_prog *prog = ctx->skf; | ||
1600 | const struct bpf_insn *insn; | ||
1601 | u64 exit_rvt = initial_rvt; | ||
1602 | u64 *rvt = ctx->reg_val_types; | ||
1603 | int idx; | ||
1604 | int reg; | ||
1605 | |||
1606 | for (idx = start_idx; idx < prog->len; idx++) { | ||
1607 | rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt; | ||
1608 | insn = prog->insnsi + idx; | ||
1609 | switch (BPF_CLASS(insn->code)) { | ||
1610 | case BPF_ALU: | ||
1611 | switch (BPF_OP(insn->code)) { | ||
1612 | case BPF_ADD: | ||
1613 | case BPF_SUB: | ||
1614 | case BPF_MUL: | ||
1615 | case BPF_DIV: | ||
1616 | case BPF_OR: | ||
1617 | case BPF_AND: | ||
1618 | case BPF_LSH: | ||
1619 | case BPF_RSH: | ||
1620 | case BPF_NEG: | ||
1621 | case BPF_MOD: | ||
1622 | case BPF_XOR: | ||
1623 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); | ||
1624 | break; | ||
1625 | case BPF_MOV: | ||
1626 | if (BPF_SRC(insn->code)) { | ||
1627 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); | ||
1628 | } else { | ||
1629 | /* IMM to REG move*/ | ||
1630 | if (insn->imm >= 0) | ||
1631 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); | ||
1632 | else | ||
1633 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); | ||
1634 | } | ||
1635 | break; | ||
1636 | case BPF_END: | ||
1637 | if (insn->imm == 64) | ||
1638 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); | ||
1639 | else if (insn->imm == 32) | ||
1640 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); | ||
1641 | else /* insn->imm == 16 */ | ||
1642 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); | ||
1643 | break; | ||
1644 | } | ||
1645 | rvt[idx] |= RVT_DONE; | ||
1646 | break; | ||
1647 | case BPF_ALU64: | ||
1648 | switch (BPF_OP(insn->code)) { | ||
1649 | case BPF_MOV: | ||
1650 | if (BPF_SRC(insn->code)) { | ||
1651 | /* REG to REG move*/ | ||
1652 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); | ||
1653 | } else { | ||
1654 | /* IMM to REG move*/ | ||
1655 | if (insn->imm >= 0) | ||
1656 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); | ||
1657 | else | ||
1658 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT); | ||
1659 | } | ||
1660 | break; | ||
1661 | default: | ||
1662 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); | ||
1663 | } | ||
1664 | rvt[idx] |= RVT_DONE; | ||
1665 | break; | ||
1666 | case BPF_LD: | ||
1667 | switch (BPF_SIZE(insn->code)) { | ||
1668 | case BPF_DW: | ||
1669 | if (BPF_MODE(insn->code) == BPF_IMM) { | ||
1670 | s64 val; | ||
1671 | |||
1672 | val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32)); | ||
1673 | if (val > 0 && val <= S32_MAX) | ||
1674 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); | ||
1675 | else if (val >= S32_MIN && val <= S32_MAX) | ||
1676 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT); | ||
1677 | else | ||
1678 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); | ||
1679 | rvt[idx] |= RVT_DONE; | ||
1680 | idx++; | ||
1681 | } else { | ||
1682 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); | ||
1683 | } | ||
1684 | break; | ||
1685 | case BPF_B: | ||
1686 | case BPF_H: | ||
1687 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); | ||
1688 | break; | ||
1689 | case BPF_W: | ||
1690 | if (BPF_MODE(insn->code) == BPF_IMM) | ||
1691 | set_reg_val_type(&exit_rvt, insn->dst_reg, | ||
1692 | insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT); | ||
1693 | else | ||
1694 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); | ||
1695 | break; | ||
1696 | } | ||
1697 | rvt[idx] |= RVT_DONE; | ||
1698 | break; | ||
1699 | case BPF_LDX: | ||
1700 | switch (BPF_SIZE(insn->code)) { | ||
1701 | case BPF_DW: | ||
1702 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); | ||
1703 | break; | ||
1704 | case BPF_B: | ||
1705 | case BPF_H: | ||
1706 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); | ||
1707 | break; | ||
1708 | case BPF_W: | ||
1709 | set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); | ||
1710 | break; | ||
1711 | } | ||
1712 | rvt[idx] |= RVT_DONE; | ||
1713 | break; | ||
1714 | case BPF_JMP: | ||
1715 | switch (BPF_OP(insn->code)) { | ||
1716 | case BPF_EXIT: | ||
1717 | rvt[idx] = RVT_DONE | exit_rvt; | ||
1718 | rvt[prog->len] = exit_rvt; | ||
1719 | return idx; | ||
1720 | case BPF_JA: | ||
1721 | rvt[idx] |= RVT_DONE; | ||
1722 | idx += insn->off; | ||
1723 | break; | ||
1724 | case BPF_JEQ: | ||
1725 | case BPF_JGT: | ||
1726 | case BPF_JGE: | ||
1727 | case BPF_JSET: | ||
1728 | case BPF_JNE: | ||
1729 | case BPF_JSGT: | ||
1730 | case BPF_JSGE: | ||
1731 | if (follow_taken) { | ||
1732 | rvt[idx] |= RVT_BRANCH_TAKEN; | ||
1733 | idx += insn->off; | ||
1734 | follow_taken = false; | ||
1735 | } else { | ||
1736 | rvt[idx] |= RVT_FALL_THROUGH; | ||
1737 | } | ||
1738 | break; | ||
1739 | case BPF_CALL: | ||
1740 | set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT); | ||
1741 | /* Upon call return, argument registers are clobbered. */ | ||
1742 | for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++) | ||
1743 | set_reg_val_type(&exit_rvt, reg, REG_64BIT); | ||
1744 | |||
1745 | rvt[idx] |= RVT_DONE; | ||
1746 | break; | ||
1747 | default: | ||
1748 | WARN(1, "Unhandled BPF_JMP case.\n"); | ||
1749 | rvt[idx] |= RVT_DONE; | ||
1750 | break; | ||
1751 | } | ||
1752 | break; | ||
1753 | default: | ||
1754 | rvt[idx] |= RVT_DONE; | ||
1755 | break; | ||
1756 | } | ||
1757 | } | ||
1758 | return idx; | ||
1759 | } | ||
1760 | |||
1761 | /* | ||
1762 | * Track the value range (i.e. 32-bit vs. 64-bit) of each register at | ||
1763 | * each eBPF insn. This allows unneeded sign and zero extension | ||
1764 | * operations to be omitted. | ||
1765 | * | ||
1766 | * Doesn't handle yet confluence of control paths with conflicting | ||
1767 | * ranges, but it is good enough for most sane code. | ||
1768 | */ | ||
1769 | static int reg_val_propagate(struct jit_ctx *ctx) | ||
1770 | { | ||
1771 | const struct bpf_prog *prog = ctx->skf; | ||
1772 | u64 exit_rvt; | ||
1773 | int reg; | ||
1774 | int i; | ||
1775 | |||
1776 | /* | ||
1777 | * 11 registers * 3 bits/reg leaves top bits free for other | ||
1778 | * uses. Bit-62..63 used to see if we have visited an insn. | ||
1779 | */ | ||
1780 | exit_rvt = 0; | ||
1781 | |||
1782 | /* Upon entry, argument registers are 64-bit. */ | ||
1783 | for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++) | ||
1784 | set_reg_val_type(&exit_rvt, reg, REG_64BIT); | ||
1785 | |||
1786 | /* | ||
1787 | * First follow all conditional branches on the fall-through | ||
1788 | * edge of control flow.. | ||
1789 | */ | ||
1790 | reg_val_propagate_range(ctx, exit_rvt, 0, false); | ||
1791 | restart_search: | ||
1792 | /* | ||
1793 | * Then repeatedly find the first conditional branch where | ||
1794 | * both edges of control flow have not been taken, and follow | ||
1795 | * the branch taken edge. We will end up restarting the | ||
1796 | * search once per conditional branch insn. | ||
1797 | */ | ||
1798 | for (i = 0; i < prog->len; i++) { | ||
1799 | u64 rvt = ctx->reg_val_types[i]; | ||
1800 | |||
1801 | if ((rvt & RVT_VISITED_MASK) == RVT_DONE || | ||
1802 | (rvt & RVT_VISITED_MASK) == 0) | ||
1803 | continue; | ||
1804 | if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) { | ||
1805 | reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true); | ||
1806 | } else { /* RVT_BRANCH_TAKEN */ | ||
1807 | WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n"); | ||
1808 | reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false); | ||
1809 | } | ||
1810 | goto restart_search; | ||
1811 | } | ||
1812 | /* | ||
1813 | * Eventually all conditional branches have been followed on | ||
1814 | * both branches and we are done. Any insn that has not been | ||
1815 | * visited at this point is dead. | ||
1816 | */ | ||
1817 | |||
1818 | return 0; | ||
1819 | } | ||
1820 | |||
1821 | static void jit_fill_hole(void *area, unsigned int size) | ||
1822 | { | ||
1823 | u32 *p; | ||
1824 | |||
1825 | /* We are guaranteed to have aligned memory. */ | ||
1826 | for (p = area; size >= sizeof(u32); size -= sizeof(u32)) | ||
1827 | uasm_i_break(&p, BRK_BUG); /* Increments p */ | ||
1828 | } | ||
1829 | |||
1830 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) | ||
1831 | { | ||
1832 | struct bpf_prog *orig_prog = prog; | ||
1833 | bool tmp_blinded = false; | ||
1834 | struct bpf_prog *tmp; | ||
1835 | struct bpf_binary_header *header = NULL; | ||
1836 | struct jit_ctx ctx; | ||
1837 | unsigned int image_size; | ||
1838 | u8 *image_ptr; | ||
1839 | |||
1840 | if (!bpf_jit_enable || !cpu_has_mips64r2) | ||
1841 | return prog; | ||
1842 | |||
1843 | tmp = bpf_jit_blind_constants(prog); | ||
1844 | /* If blinding was requested and we failed during blinding, | ||
1845 | * we must fall back to the interpreter. | ||
1846 | */ | ||
1847 | if (IS_ERR(tmp)) | ||
1848 | return orig_prog; | ||
1849 | if (tmp != prog) { | ||
1850 | tmp_blinded = true; | ||
1851 | prog = tmp; | ||
1852 | } | ||
1853 | |||
1854 | memset(&ctx, 0, sizeof(ctx)); | ||
1855 | |||
1856 | ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); | ||
1857 | if (ctx.offsets == NULL) | ||
1858 | goto out_err; | ||
1859 | |||
1860 | ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL); | ||
1861 | if (ctx.reg_val_types == NULL) | ||
1862 | goto out_err; | ||
1863 | |||
1864 | ctx.skf = prog; | ||
1865 | |||
1866 | if (reg_val_propagate(&ctx)) | ||
1867 | goto out_err; | ||
1868 | |||
1869 | /* | ||
1870 | * First pass discovers used resources and instruction offsets | ||
1871 | * assuming short branches are used. | ||
1872 | */ | ||
1873 | if (build_int_body(&ctx)) | ||
1874 | goto out_err; | ||
1875 | |||
1876 | /* | ||
1877 | * If no calls are made (EBPF_SAVE_RA), then tail call count | ||
1878 | * in $v1, else we must save in n$s4. | ||
1879 | */ | ||
1880 | if (ctx.flags & EBPF_SEEN_TC) { | ||
1881 | if (ctx.flags & EBPF_SAVE_RA) | ||
1882 | ctx.flags |= EBPF_SAVE_S4; | ||
1883 | else | ||
1884 | ctx.flags |= EBPF_TCC_IN_V1; | ||
1885 | } | ||
1886 | |||
1887 | /* | ||
1888 | * Second pass generates offsets, if any branches are out of | ||
1889 | * range a jump-around long sequence is generated, and we have | ||
1890 | * to try again from the beginning to generate the new | ||
1891 | * offsets. This is done until no additional conversions are | ||
1892 | * necessary. | ||
1893 | */ | ||
1894 | do { | ||
1895 | ctx.idx = 0; | ||
1896 | ctx.gen_b_offsets = 1; | ||
1897 | ctx.long_b_conversion = 0; | ||
1898 | if (gen_int_prologue(&ctx)) | ||
1899 | goto out_err; | ||
1900 | if (build_int_body(&ctx)) | ||
1901 | goto out_err; | ||
1902 | if (build_int_epilogue(&ctx, MIPS_R_RA)) | ||
1903 | goto out_err; | ||
1904 | } while (ctx.long_b_conversion); | ||
1905 | |||
1906 | image_size = 4 * ctx.idx; | ||
1907 | |||
1908 | header = bpf_jit_binary_alloc(image_size, &image_ptr, | ||
1909 | sizeof(u32), jit_fill_hole); | ||
1910 | if (header == NULL) | ||
1911 | goto out_err; | ||
1912 | |||
1913 | ctx.target = (u32 *)image_ptr; | ||
1914 | |||
1915 | /* Third pass generates the code */ | ||
1916 | ctx.idx = 0; | ||
1917 | if (gen_int_prologue(&ctx)) | ||
1918 | goto out_err; | ||
1919 | if (build_int_body(&ctx)) | ||
1920 | goto out_err; | ||
1921 | if (build_int_epilogue(&ctx, MIPS_R_RA)) | ||
1922 | goto out_err; | ||
1923 | |||
1924 | /* Update the icache */ | ||
1925 | flush_icache_range((unsigned long)ctx.target, | ||
1926 | (unsigned long)(ctx.target + ctx.idx * sizeof(u32))); | ||
1927 | |||
1928 | if (bpf_jit_enable > 1) | ||
1929 | /* Dump JIT code */ | ||
1930 | bpf_jit_dump(prog->len, image_size, 2, ctx.target); | ||
1931 | |||
1932 | bpf_jit_binary_lock_ro(header); | ||
1933 | prog->bpf_func = (void *)ctx.target; | ||
1934 | prog->jited = 1; | ||
1935 | prog->jited_len = image_size; | ||
1936 | out_normal: | ||
1937 | if (tmp_blinded) | ||
1938 | bpf_jit_prog_release_other(prog, prog == orig_prog ? | ||
1939 | tmp : orig_prog); | ||
1940 | kfree(ctx.offsets); | ||
1941 | kfree(ctx.reg_val_types); | ||
1942 | |||
1943 | return prog; | ||
1944 | |||
1945 | out_err: | ||
1946 | prog = orig_prog; | ||
1947 | if (header) | ||
1948 | bpf_jit_binary_free(header); | ||
1949 | goto out_normal; | ||
1950 | } | ||
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 01c6fbc3e85b..1803797fc885 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c | |||
@@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp) | |||
1253 | insn_count = bpf_jit_insn(jit, fp, i); | 1253 | insn_count = bpf_jit_insn(jit, fp, i); |
1254 | if (insn_count < 0) | 1254 | if (insn_count < 0) |
1255 | return -1; | 1255 | return -1; |
1256 | jit->addrs[i + 1] = jit->prg; /* Next instruction address */ | 1256 | /* Next instruction address */ |
1257 | jit->addrs[i + insn_count] = jit->prg; | ||
1257 | } | 1258 | } |
1258 | bpf_jit_epilogue(jit); | 1259 | bpf_jit_epilogue(jit); |
1259 | 1260 | ||
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 2d716ebc5a5e..dff7cc39437c 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild | |||
@@ -1,5 +1,6 @@ | |||
1 | generic-y += bug.h | 1 | generic-y += bug.h |
2 | generic-y += clkdev.h | 2 | generic-y += clkdev.h |
3 | generic-y += device.h | ||
3 | generic-y += div64.h | 4 | generic-y += div64.h |
4 | generic-y += dma-contiguous.h | 5 | generic-y += dma-contiguous.h |
5 | generic-y += emergency-restart.h | 6 | generic-y += emergency-restart.h |
@@ -17,6 +18,7 @@ generic-y += local.h | |||
17 | generic-y += local64.h | 18 | generic-y += local64.h |
18 | generic-y += mcs_spinlock.h | 19 | generic-y += mcs_spinlock.h |
19 | generic-y += mm-arch-hooks.h | 20 | generic-y += mm-arch-hooks.h |
21 | generic-y += param.h | ||
20 | generic-y += percpu.h | 22 | generic-y += percpu.h |
21 | generic-y += preempt.h | 23 | generic-y += preempt.h |
22 | generic-y += rwsem.h | 24 | generic-y += rwsem.h |
diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h deleted file mode 100644 index 1deeb8ebbb1b..000000000000 --- a/arch/xtensa/include/asm/device.h +++ /dev/null | |||
@@ -1,15 +0,0 @@ | |||
1 | /* | ||
2 | * Arch specific extensions to struct device | ||
3 | * | ||
4 | * This file is released under the GPLv2 | ||
5 | */ | ||
6 | #ifndef _ASM_XTENSA_DEVICE_H | ||
7 | #define _ASM_XTENSA_DEVICE_H | ||
8 | |||
9 | struct dev_archdata { | ||
10 | }; | ||
11 | |||
12 | struct pdev_archdata { | ||
13 | }; | ||
14 | |||
15 | #endif /* _ASM_XTENSA_DEVICE_H */ | ||
diff --git a/arch/xtensa/include/asm/param.h b/arch/xtensa/include/asm/param.h deleted file mode 100644 index 0a70e780ef2a..000000000000 --- a/arch/xtensa/include/asm/param.h +++ /dev/null | |||
@@ -1,18 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-xtensa/param.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | ||
9 | */ | ||
10 | #ifndef _XTENSA_PARAM_H | ||
11 | #define _XTENSA_PARAM_H | ||
12 | |||
13 | #include <uapi/asm/param.h> | ||
14 | |||
15 | # define HZ CONFIG_HZ /* internal timer frequency */ | ||
16 | # define USER_HZ 100 /* for user interfaces in "ticks" */ | ||
17 | # define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */ | ||
18 | #endif /* _XTENSA_PARAM_H */ | ||
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index d159e9b9c018..672391003e40 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c | |||
@@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v) | |||
94 | } | 94 | } |
95 | EXPORT_SYMBOL(__sync_fetch_and_or_4); | 95 | EXPORT_SYMBOL(__sync_fetch_and_or_4); |
96 | 96 | ||
97 | #ifdef CONFIG_NET | ||
98 | /* | 97 | /* |
99 | * Networking support | 98 | * Networking support |
100 | */ | 99 | */ |
101 | EXPORT_SYMBOL(csum_partial); | 100 | EXPORT_SYMBOL(csum_partial); |
102 | EXPORT_SYMBOL(csum_partial_copy_generic); | 101 | EXPORT_SYMBOL(csum_partial_copy_generic); |
103 | #endif /* CONFIG_NET */ | ||
104 | 102 | ||
105 | /* | 103 | /* |
106 | * Architecture-specific symbols | 104 | * Architecture-specific symbols |
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 1a804a2f9a5b..3c75c4e597da 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c | |||
@@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr) | |||
103 | clear_page_alias(kvaddr, paddr); | 103 | clear_page_alias(kvaddr, paddr); |
104 | preempt_enable(); | 104 | preempt_enable(); |
105 | } | 105 | } |
106 | EXPORT_SYMBOL(clear_user_highpage); | ||
106 | 107 | ||
107 | void copy_user_highpage(struct page *dst, struct page *src, | 108 | void copy_user_highpage(struct page *dst, struct page *src, |
108 | unsigned long vaddr, struct vm_area_struct *vma) | 109 | unsigned long vaddr, struct vm_area_struct *vma) |
@@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src, | |||
119 | copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); | 120 | copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); |
120 | preempt_enable(); | 121 | preempt_enable(); |
121 | } | 122 | } |
122 | 123 | EXPORT_SYMBOL(copy_user_highpage); | |
123 | #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ | ||
124 | |||
125 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK | ||
126 | 124 | ||
127 | /* | 125 | /* |
128 | * Any time the kernel writes to a user page cache page, or it is about to | 126 | * Any time the kernel writes to a user page cache page, or it is about to |
@@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page) | |||
176 | 174 | ||
177 | /* There shouldn't be an entry in the cache for this page anymore. */ | 175 | /* There shouldn't be an entry in the cache for this page anymore. */ |
178 | } | 176 | } |
179 | 177 | EXPORT_SYMBOL(flush_dcache_page); | |
180 | 178 | ||
181 | /* | 179 | /* |
182 | * For now, flush the whole cache. FIXME?? | 180 | * For now, flush the whole cache. FIXME?? |
@@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma, | |||
188 | __flush_invalidate_dcache_all(); | 186 | __flush_invalidate_dcache_all(); |
189 | __invalidate_icache_all(); | 187 | __invalidate_icache_all(); |
190 | } | 188 | } |
189 | EXPORT_SYMBOL(local_flush_cache_range); | ||
191 | 190 | ||
192 | /* | 191 | /* |
193 | * Remove any entry in the cache for this page. | 192 | * Remove any entry in the cache for this page. |
@@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, | |||
207 | __flush_invalidate_dcache_page_alias(virt, phys); | 206 | __flush_invalidate_dcache_page_alias(virt, phys); |
208 | __invalidate_icache_page_alias(virt, phys); | 207 | __invalidate_icache_page_alias(virt, phys); |
209 | } | 208 | } |
209 | EXPORT_SYMBOL(local_flush_cache_page); | ||
210 | 210 | ||
211 | #endif | 211 | #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ |
212 | 212 | ||
213 | void | 213 | void |
214 | update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) | 214 | update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) |
@@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) | |||
225 | 225 | ||
226 | flush_tlb_page(vma, addr); | 226 | flush_tlb_page(vma, addr); |
227 | 227 | ||
228 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK | 228 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) |
229 | 229 | ||
230 | if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { | 230 | if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { |
231 | unsigned long phys = page_to_phys(page); | 231 | unsigned long phys = page_to_phys(page); |
@@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) | |||
256 | * flush_dcache_page() on the page. | 256 | * flush_dcache_page() on the page. |
257 | */ | 257 | */ |
258 | 258 | ||
259 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK | 259 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) |
260 | 260 | ||
261 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | 261 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, |
262 | unsigned long vaddr, void *dst, const void *src, | 262 | unsigned long vaddr, void *dst, const void *src, |
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 63e771ab56d8..859f0a8c97c8 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h | |||
@@ -71,17 +71,29 @@ struct bfq_service_tree { | |||
71 | * | 71 | * |
72 | * bfq_sched_data is the basic scheduler queue. It supports three | 72 | * bfq_sched_data is the basic scheduler queue. It supports three |
73 | * ioprio_classes, and can be used either as a toplevel queue or as an | 73 | * ioprio_classes, and can be used either as a toplevel queue or as an |
74 | * intermediate queue on a hierarchical setup. @next_in_service | 74 | * intermediate queue in a hierarchical setup. |
75 | * points to the active entity of the sched_data service trees that | ||
76 | * will be scheduled next. It is used to reduce the number of steps | ||
77 | * needed for each hierarchical-schedule update. | ||
78 | * | 75 | * |
79 | * The supported ioprio_classes are the same as in CFQ, in descending | 76 | * The supported ioprio_classes are the same as in CFQ, in descending |
80 | * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. | 77 | * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. |
81 | * Requests from higher priority queues are served before all the | 78 | * Requests from higher priority queues are served before all the |
82 | * requests from lower priority queues; among requests of the same | 79 | * requests from lower priority queues; among requests of the same |
83 | * queue requests are served according to B-WF2Q+. | 80 | * queue requests are served according to B-WF2Q+. |
84 | * All the fields are protected by the queue lock of the containing bfqd. | 81 | * |
82 | * The schedule is implemented by the service trees, plus the field | ||
83 | * @next_in_service, which points to the entity on the active trees | ||
84 | * that will be served next, if 1) no changes in the schedule occurs | ||
85 | * before the current in-service entity is expired, 2) the in-service | ||
86 | * queue becomes idle when it expires, and 3) if the entity pointed by | ||
87 | * in_service_entity is not a queue, then the in-service child entity | ||
88 | * of the entity pointed by in_service_entity becomes idle on | ||
89 | * expiration. This peculiar definition allows for the following | ||
90 | * optimization, not yet exploited: while a given entity is still in | ||
91 | * service, we already know which is the best candidate for next | ||
92 | * service among the other active entities in the same parent | ||
93 | * entity. We can then quickly compare the timestamps of the | ||
94 | * in-service entity with those of such best candidate. | ||
95 | * | ||
96 | * All fields are protected by the lock of the containing bfqd. | ||
85 | */ | 97 | */ |
86 | struct bfq_sched_data { | 98 | struct bfq_sched_data { |
87 | /* entity in service */ | 99 | /* entity in service */ |
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 979f8f21b7e2..911aa7431dbe 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c | |||
@@ -188,21 +188,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) | |||
188 | 188 | ||
189 | /* | 189 | /* |
190 | * This function tells whether entity stops being a candidate for next | 190 | * This function tells whether entity stops being a candidate for next |
191 | * service, according to the following logic. | 191 | * service, according to the restrictive definition of the field |
192 | * next_in_service. In particular, this function is invoked for an | ||
193 | * entity that is about to be set in service. | ||
192 | * | 194 | * |
193 | * This function is invoked for an entity that is about to be set in | 195 | * If entity is a queue, then the entity is no longer a candidate for |
194 | * service. If such an entity is a queue, then the entity is no longer | 196 | * next service according to the that definition, because entity is |
195 | * a candidate for next service (i.e, a candidate entity to serve | 197 | * about to become the in-service queue. This function then returns |
196 | * after the in-service entity is expired). The function then returns | 198 | * true if entity is a queue. |
197 | * true. | ||
198 | * | 199 | * |
199 | * In contrast, the entity could stil be a candidate for next service | 200 | * In contrast, entity could still be a candidate for next service if |
200 | * if it is not a queue, and has more than one child. In fact, even if | 201 | * it is not a queue, and has more than one active child. In fact, |
201 | * one of its children is about to be set in service, other children | 202 | * even if one of its children is about to be set in service, other |
202 | * may still be the next to serve. As a consequence, a non-queue | 203 | * active children may still be the next to serve, for the parent |
203 | * entity is not a candidate for next-service only if it has only one | 204 | * entity, even according to the above definition. As a consequence, a |
204 | * child. And only if this condition holds, then the function returns | 205 | * non-queue entity is not a candidate for next-service only if it has |
205 | * true for a non-queue entity. | 206 | * only one active child. And only if this condition holds, then this |
207 | * function returns true for a non-queue entity. | ||
206 | */ | 208 | */ |
207 | static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) | 209 | static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) |
208 | { | 210 | { |
@@ -213,6 +215,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) | |||
213 | 215 | ||
214 | bfqg = container_of(entity, struct bfq_group, entity); | 216 | bfqg = container_of(entity, struct bfq_group, entity); |
215 | 217 | ||
218 | /* | ||
219 | * The field active_entities does not always contain the | ||
220 | * actual number of active children entities: it happens to | ||
221 | * not account for the in-service entity in case the latter is | ||
222 | * removed from its active tree (which may get done after | ||
223 | * invoking the function bfq_no_longer_next_in_service in | ||
224 | * bfq_get_next_queue). Fortunately, here, i.e., while | ||
225 | * bfq_no_longer_next_in_service is not yet completed in | ||
226 | * bfq_get_next_queue, bfq_active_extract has not yet been | ||
227 | * invoked, and thus active_entities still coincides with the | ||
228 | * actual number of active entities. | ||
229 | */ | ||
216 | if (bfqg->active_entities == 1) | 230 | if (bfqg->active_entities == 1) |
217 | return true; | 231 | return true; |
218 | 232 | ||
@@ -954,7 +968,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, | |||
954 | * one of its children receives a new request. | 968 | * one of its children receives a new request. |
955 | * | 969 | * |
956 | * Basically, this function updates the timestamps of entity and | 970 | * Basically, this function updates the timestamps of entity and |
957 | * inserts entity into its active tree, after possible extracting it | 971 | * inserts entity into its active tree, after possibly extracting it |
958 | * from its idle tree. | 972 | * from its idle tree. |
959 | */ | 973 | */ |
960 | static void __bfq_activate_entity(struct bfq_entity *entity, | 974 | static void __bfq_activate_entity(struct bfq_entity *entity, |
@@ -1048,7 +1062,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity) | |||
1048 | entity->start = entity->finish; | 1062 | entity->start = entity->finish; |
1049 | /* | 1063 | /* |
1050 | * In addition, if the entity had more than one child | 1064 | * In addition, if the entity had more than one child |
1051 | * when set in service, then was not extracted from | 1065 | * when set in service, then it was not extracted from |
1052 | * the active tree. This implies that the position of | 1066 | * the active tree. This implies that the position of |
1053 | * the entity in the active tree may need to be | 1067 | * the entity in the active tree may need to be |
1054 | * changed now, because we have just updated the start | 1068 | * changed now, because we have just updated the start |
@@ -1056,9 +1070,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity) | |||
1056 | * time in a moment (the requeueing is then, more | 1070 | * time in a moment (the requeueing is then, more |
1057 | * precisely, a repositioning in this case). To | 1071 | * precisely, a repositioning in this case). To |
1058 | * implement this repositioning, we: 1) dequeue the | 1072 | * implement this repositioning, we: 1) dequeue the |
1059 | * entity here, 2) update the finish time and | 1073 | * entity here, 2) update the finish time and requeue |
1060 | * requeue the entity according to the new | 1074 | * the entity according to the new timestamps below. |
1061 | * timestamps below. | ||
1062 | */ | 1075 | */ |
1063 | if (entity->tree) | 1076 | if (entity->tree) |
1064 | bfq_active_extract(st, entity); | 1077 | bfq_active_extract(st, entity); |
@@ -1105,9 +1118,10 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity, | |||
1105 | 1118 | ||
1106 | 1119 | ||
1107 | /** | 1120 | /** |
1108 | * bfq_activate_entity - activate or requeue an entity representing a bfq_queue, | 1121 | * bfq_activate_requeue_entity - activate or requeue an entity representing a |
1109 | * and activate, requeue or reposition all ancestors | 1122 | * bfq_queue, and activate, requeue or reposition |
1110 | * for which such an update becomes necessary. | 1123 | * all ancestors for which such an update becomes |
1124 | * necessary. | ||
1111 | * @entity: the entity to activate. | 1125 | * @entity: the entity to activate. |
1112 | * @non_blocking_wait_rq: true if this entity was waiting for a request | 1126 | * @non_blocking_wait_rq: true if this entity was waiting for a request |
1113 | * @requeue: true if this is a requeue, which implies that bfqq is | 1127 | * @requeue: true if this is a requeue, which implies that bfqq is |
@@ -1135,9 +1149,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity, | |||
1135 | * @ins_into_idle_tree: if false, the entity will not be put into the | 1149 | * @ins_into_idle_tree: if false, the entity will not be put into the |
1136 | * idle tree. | 1150 | * idle tree. |
1137 | * | 1151 | * |
1138 | * Deactivates an entity, independently from its previous state. Must | 1152 | * Deactivates an entity, independently of its previous state. Must |
1139 | * be invoked only if entity is on a service tree. Extracts the entity | 1153 | * be invoked only if entity is on a service tree. Extracts the entity |
1140 | * from that tree, and if necessary and allowed, puts it on the idle | 1154 | * from that tree, and if necessary and allowed, puts it into the idle |
1141 | * tree. | 1155 | * tree. |
1142 | */ | 1156 | */ |
1143 | bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) | 1157 | bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) |
@@ -1158,8 +1172,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) | |||
1158 | st = bfq_entity_service_tree(entity); | 1172 | st = bfq_entity_service_tree(entity); |
1159 | is_in_service = entity == sd->in_service_entity; | 1173 | is_in_service = entity == sd->in_service_entity; |
1160 | 1174 | ||
1161 | if (is_in_service) | 1175 | if (is_in_service) { |
1162 | bfq_calc_finish(entity, entity->service); | 1176 | bfq_calc_finish(entity, entity->service); |
1177 | sd->in_service_entity = NULL; | ||
1178 | } | ||
1163 | 1179 | ||
1164 | if (entity->tree == &st->active) | 1180 | if (entity->tree == &st->active) |
1165 | bfq_active_extract(st, entity); | 1181 | bfq_active_extract(st, entity); |
@@ -1177,7 +1193,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) | |||
1177 | /** | 1193 | /** |
1178 | * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. | 1194 | * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. |
1179 | * @entity: the entity to deactivate. | 1195 | * @entity: the entity to deactivate. |
1180 | * @ins_into_idle_tree: true if the entity can be put on the idle tree | 1196 | * @ins_into_idle_tree: true if the entity can be put into the idle tree |
1181 | */ | 1197 | */ |
1182 | static void bfq_deactivate_entity(struct bfq_entity *entity, | 1198 | static void bfq_deactivate_entity(struct bfq_entity *entity, |
1183 | bool ins_into_idle_tree, | 1199 | bool ins_into_idle_tree, |
@@ -1208,16 +1224,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, | |||
1208 | */ | 1224 | */ |
1209 | bfq_update_next_in_service(sd, NULL); | 1225 | bfq_update_next_in_service(sd, NULL); |
1210 | 1226 | ||
1211 | if (sd->next_in_service) | 1227 | if (sd->next_in_service || sd->in_service_entity) { |
1212 | /* | 1228 | /* |
1213 | * The parent entity is still backlogged, | 1229 | * The parent entity is still active, because |
1214 | * because next_in_service is not NULL. So, no | 1230 | * either next_in_service or in_service_entity |
1215 | * further upwards deactivation must be | 1231 | * is not NULL. So, no further upwards |
1216 | * performed. Yet, next_in_service has | 1232 | * deactivation must be performed. Yet, |
1217 | * changed. Then the schedule does need to be | 1233 | * next_in_service has changed. Then the |
1218 | * updated upwards. | 1234 | * schedule does need to be updated upwards. |
1235 | * | ||
1236 | * NOTE If in_service_entity is not NULL, then | ||
1237 | * next_in_service may happen to be NULL, | ||
1238 | * although the parent entity is evidently | ||
1239 | * active. This happens if 1) the entity | ||
1240 | * pointed by in_service_entity is the only | ||
1241 | * active entity in the parent entity, and 2) | ||
1242 | * according to the definition of | ||
1243 | * next_in_service, the in_service_entity | ||
1244 | * cannot be considered as | ||
1245 | * next_in_service. See the comments on the | ||
1246 | * definition of next_in_service for details. | ||
1219 | */ | 1247 | */ |
1220 | break; | 1248 | break; |
1249 | } | ||
1221 | 1250 | ||
1222 | /* | 1251 | /* |
1223 | * If we get here, then the parent is no more | 1252 | * If we get here, then the parent is no more |
@@ -1494,47 +1523,34 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) | |||
1494 | 1523 | ||
1495 | /* | 1524 | /* |
1496 | * If entity is no longer a candidate for next | 1525 | * If entity is no longer a candidate for next |
1497 | * service, then we extract it from its active tree, | 1526 | * service, then it must be extracted from its active |
1498 | * for the following reason. To further boost the | 1527 | * tree, so as to make sure that it won't be |
1499 | * throughput in some special case, BFQ needs to know | 1528 | * considered when computing next_in_service. See the |
1500 | * which is the next candidate entity to serve, while | 1529 | * comments on the function |
1501 | * there is already an entity in service. In this | 1530 | * bfq_no_longer_next_in_service() for details. |
1502 | * respect, to make it easy to compute/update the next | ||
1503 | * candidate entity to serve after the current | ||
1504 | * candidate has been set in service, there is a case | ||
1505 | * where it is necessary to extract the current | ||
1506 | * candidate from its service tree. Such a case is | ||
1507 | * when the entity just set in service cannot be also | ||
1508 | * a candidate for next service. Details about when | ||
1509 | * this conditions holds are reported in the comments | ||
1510 | * on the function bfq_no_longer_next_in_service() | ||
1511 | * invoked below. | ||
1512 | */ | 1531 | */ |
1513 | if (bfq_no_longer_next_in_service(entity)) | 1532 | if (bfq_no_longer_next_in_service(entity)) |
1514 | bfq_active_extract(bfq_entity_service_tree(entity), | 1533 | bfq_active_extract(bfq_entity_service_tree(entity), |
1515 | entity); | 1534 | entity); |
1516 | 1535 | ||
1517 | /* | 1536 | /* |
1518 | * For the same reason why we may have just extracted | 1537 | * Even if entity is not to be extracted according to |
1519 | * entity from its active tree, we may need to update | 1538 | * the above check, a descendant entity may get |
1520 | * next_in_service for the sched_data of entity too, | 1539 | * extracted in one of the next iterations of this |
1521 | * regardless of whether entity has been extracted. | 1540 | * loop. Such an event could cause a change in |
1522 | * In fact, even if entity has not been extracted, a | 1541 | * next_in_service for the level of the descendant |
1523 | * descendant entity may get extracted. Such an event | 1542 | * entity, and thus possibly back to this level. |
1524 | * would cause a change in next_in_service for the | ||
1525 | * level of the descendant entity, and thus possibly | ||
1526 | * back to upper levels. | ||
1527 | * | 1543 | * |
1528 | * We cannot perform the resulting needed update | 1544 | * However, we cannot perform the resulting needed |
1529 | * before the end of this loop, because, to know which | 1545 | * update of next_in_service for this level before the |
1530 | * is the correct next-to-serve candidate entity for | 1546 | * end of the whole loop, because, to know which is |
1531 | * each level, we need first to find the leaf entity | 1547 | * the correct next-to-serve candidate entity for each |
1532 | * to set in service. In fact, only after we know | 1548 | * level, we need first to find the leaf entity to set |
1533 | * which is the next-to-serve leaf entity, we can | 1549 | * in service. In fact, only after we know which is |
1534 | * discover whether the parent entity of the leaf | 1550 | * the next-to-serve leaf entity, we can discover |
1535 | * entity becomes the next-to-serve, and so on. | 1551 | * whether the parent entity of the leaf entity |
1552 | * becomes the next-to-serve, and so on. | ||
1536 | */ | 1553 | */ |
1537 | |||
1538 | } | 1554 | } |
1539 | 1555 | ||
1540 | bfqq = bfq_entity_to_bfqq(entity); | 1556 | bfqq = bfq_entity_to_bfqq(entity); |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 041f7b7fa0d6..211ef367345f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q, | |||
301 | struct elevator_queue *e = q->elevator; | 301 | struct elevator_queue *e = q->elevator; |
302 | struct request *rq; | 302 | struct request *rq; |
303 | unsigned int tag; | 303 | unsigned int tag; |
304 | struct blk_mq_ctx *local_ctx = NULL; | ||
304 | 305 | ||
305 | blk_queue_enter_live(q); | 306 | blk_queue_enter_live(q); |
306 | data->q = q; | 307 | data->q = q; |
307 | if (likely(!data->ctx)) | 308 | if (likely(!data->ctx)) |
308 | data->ctx = blk_mq_get_ctx(q); | 309 | data->ctx = local_ctx = blk_mq_get_ctx(q); |
309 | if (likely(!data->hctx)) | 310 | if (likely(!data->hctx)) |
310 | data->hctx = blk_mq_map_queue(q, data->ctx->cpu); | 311 | data->hctx = blk_mq_map_queue(q, data->ctx->cpu); |
311 | if (op & REQ_NOWAIT) | 312 | if (op & REQ_NOWAIT) |
@@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q, | |||
324 | 325 | ||
325 | tag = blk_mq_get_tag(data); | 326 | tag = blk_mq_get_tag(data); |
326 | if (tag == BLK_MQ_TAG_FAIL) { | 327 | if (tag == BLK_MQ_TAG_FAIL) { |
328 | if (local_ctx) { | ||
329 | blk_mq_put_ctx(local_ctx); | ||
330 | data->ctx = NULL; | ||
331 | } | ||
327 | blk_queue_exit(q); | 332 | blk_queue_exit(q); |
328 | return NULL; | 333 | return NULL; |
329 | } | 334 | } |
@@ -356,12 +361,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, | |||
356 | 361 | ||
357 | rq = blk_mq_get_request(q, NULL, op, &alloc_data); | 362 | rq = blk_mq_get_request(q, NULL, op, &alloc_data); |
358 | 363 | ||
359 | blk_mq_put_ctx(alloc_data.ctx); | ||
360 | blk_queue_exit(q); | ||
361 | |||
362 | if (!rq) | 364 | if (!rq) |
363 | return ERR_PTR(-EWOULDBLOCK); | 365 | return ERR_PTR(-EWOULDBLOCK); |
364 | 366 | ||
367 | blk_mq_put_ctx(alloc_data.ctx); | ||
368 | blk_queue_exit(q); | ||
369 | |||
365 | rq->__data_len = 0; | 370 | rq->__data_len = 0; |
366 | rq->__sector = (sector_t) -1; | 371 | rq->__sector = (sector_t) -1; |
367 | rq->bio = rq->biotail = NULL; | 372 | rq->bio = rq->biotail = NULL; |
@@ -407,11 +412,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, | |||
407 | 412 | ||
408 | rq = blk_mq_get_request(q, NULL, op, &alloc_data); | 413 | rq = blk_mq_get_request(q, NULL, op, &alloc_data); |
409 | 414 | ||
410 | blk_queue_exit(q); | ||
411 | |||
412 | if (!rq) | 415 | if (!rq) |
413 | return ERR_PTR(-EWOULDBLOCK); | 416 | return ERR_PTR(-EWOULDBLOCK); |
414 | 417 | ||
418 | blk_queue_exit(q); | ||
419 | |||
415 | return rq; | 420 | return rq; |
416 | } | 421 | } |
417 | EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); | 422 | EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); |
diff --git a/drivers/char/random.c b/drivers/char/random.c index afa3ce7d3e72..8ad92707e45f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, | |||
1492 | #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM | 1492 | #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM |
1493 | print_once = true; | 1493 | print_once = true; |
1494 | #endif | 1494 | #endif |
1495 | pr_notice("random: %s called from %pF with crng_init=%d\n", | 1495 | pr_notice("random: %s called from %pS with crng_init=%d\n", |
1496 | func_name, caller, crng_init); | 1496 | func_name, caller, crng_init); |
1497 | } | 1497 | } |
1498 | 1498 | ||
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 8527a5899a2f..3f819399cd95 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c | |||
@@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
883 | if (ret) | 883 | if (ret) |
884 | return ret; | 884 | return ret; |
885 | 885 | ||
886 | memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); | 886 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { |
887 | memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); | ||
888 | |||
889 | for (i = 0; i < ARRAY_SIZE(istate.state); i++) { | ||
890 | if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || | 887 | if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || |
891 | ctx->opad[i] != le32_to_cpu(ostate.state[i])) { | 888 | ctx->opad[i] != le32_to_cpu(ostate.state[i])) { |
892 | ctx->base.needs_inv = true; | 889 | ctx->base.needs_inv = true; |
@@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
894 | } | 891 | } |
895 | } | 892 | } |
896 | 893 | ||
894 | memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); | ||
895 | memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); | ||
896 | |||
897 | return 0; | 897 | return 0; |
898 | } | 898 | } |
899 | 899 | ||
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 1006b230b236..65fa29591d21 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F | |||
983 | 983 | ||
984 | config I2C_VERSATILE | 984 | config I2C_VERSATILE |
985 | tristate "ARM Versatile/Realview I2C bus support" | 985 | tristate "ARM Versatile/Realview I2C bus support" |
986 | depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST | 986 | depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST |
987 | select I2C_ALGOBIT | 987 | select I2C_ALGOBIT |
988 | help | 988 | help |
989 | Say yes if you want to support the I2C serial bus on ARMs Versatile | 989 | Say yes if you want to support the I2C serial bus on ARMs Versatile |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 2ea6d0d25a01..143a8fd582b4 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -298,6 +298,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
298 | } | 298 | } |
299 | 299 | ||
300 | acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); | 300 | acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); |
301 | /* Some broken DSDTs use 1MiHz instead of 1MHz */ | ||
302 | if (acpi_speed == 1048576) | ||
303 | acpi_speed = 1000000; | ||
301 | /* | 304 | /* |
302 | * Find bus speed from the "clock-frequency" device property, ACPI | 305 | * Find bus speed from the "clock-frequency" device property, ACPI |
303 | * or by using fast mode if neither is set. | 306 | * or by using fast mode if neither is set. |
@@ -319,7 +322,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
319 | if (dev->clk_freq != 100000 && dev->clk_freq != 400000 | 322 | if (dev->clk_freq != 100000 && dev->clk_freq != 400000 |
320 | && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { | 323 | && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { |
321 | dev_err(&pdev->dev, | 324 | dev_err(&pdev->dev, |
322 | "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); | 325 | "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", |
326 | dev->clk_freq); | ||
323 | ret = -EINVAL; | 327 | ret = -EINVAL; |
324 | goto exit_reset; | 328 | goto exit_reset; |
325 | } | 329 | } |
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 4842ec3a5451..a9126b3cda61 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c | |||
@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap) | |||
230 | dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); | 230 | dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); |
231 | } | 231 | } |
232 | 232 | ||
233 | const struct acpi_device_id * | ||
234 | i2c_acpi_match_device(const struct acpi_device_id *matches, | ||
235 | struct i2c_client *client) | ||
236 | { | ||
237 | if (!(client && matches)) | ||
238 | return NULL; | ||
239 | |||
240 | return acpi_match_device(matches, &client->dev); | ||
241 | } | ||
242 | |||
233 | static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, | 243 | static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, |
234 | void *data, void **return_value) | 244 | void *data, void **return_value) |
235 | { | 245 | { |
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev) | |||
289 | } | 299 | } |
290 | EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); | 300 | EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); |
291 | 301 | ||
292 | static int i2c_acpi_match_adapter(struct device *dev, void *data) | 302 | static int i2c_acpi_find_match_adapter(struct device *dev, void *data) |
293 | { | 303 | { |
294 | struct i2c_adapter *adapter = i2c_verify_adapter(dev); | 304 | struct i2c_adapter *adapter = i2c_verify_adapter(dev); |
295 | 305 | ||
@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data) | |||
299 | return ACPI_HANDLE(dev) == (acpi_handle)data; | 309 | return ACPI_HANDLE(dev) == (acpi_handle)data; |
300 | } | 310 | } |
301 | 311 | ||
302 | static int i2c_acpi_match_device(struct device *dev, void *data) | 312 | static int i2c_acpi_find_match_device(struct device *dev, void *data) |
303 | { | 313 | { |
304 | return ACPI_COMPANION(dev) == data; | 314 | return ACPI_COMPANION(dev) == data; |
305 | } | 315 | } |
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) | |||
309 | struct device *dev; | 319 | struct device *dev; |
310 | 320 | ||
311 | dev = bus_find_device(&i2c_bus_type, NULL, handle, | 321 | dev = bus_find_device(&i2c_bus_type, NULL, handle, |
312 | i2c_acpi_match_adapter); | 322 | i2c_acpi_find_match_adapter); |
313 | return dev ? i2c_verify_adapter(dev) : NULL; | 323 | return dev ? i2c_verify_adapter(dev) : NULL; |
314 | } | 324 | } |
315 | 325 | ||
@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) | |||
317 | { | 327 | { |
318 | struct device *dev; | 328 | struct device *dev; |
319 | 329 | ||
320 | dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device); | 330 | dev = bus_find_device(&i2c_bus_type, NULL, adev, |
331 | i2c_acpi_find_match_device); | ||
321 | return dev ? i2c_verify_client(dev) : NULL; | 332 | return dev ? i2c_verify_client(dev) : NULL; |
322 | } | 333 | } |
323 | 334 | ||
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index c89dac7fd2e7..12822a4b8f8f 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c | |||
@@ -357,6 +357,7 @@ static int i2c_device_probe(struct device *dev) | |||
357 | * Tree match table entry is supplied for the probing device. | 357 | * Tree match table entry is supplied for the probing device. |
358 | */ | 358 | */ |
359 | if (!driver->id_table && | 359 | if (!driver->id_table && |
360 | !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && | ||
360 | !i2c_of_match_device(dev->driver->of_match_table, client)) | 361 | !i2c_of_match_device(dev->driver->of_match_table, client)) |
361 | return -ENODEV; | 362 | return -ENODEV; |
362 | 363 | ||
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 3b63f5e5b89c..3d3d9bf02101 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h | |||
@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags); | |||
31 | int i2c_check_7bit_addr_validity_strict(unsigned short addr); | 31 | int i2c_check_7bit_addr_validity_strict(unsigned short addr); |
32 | 32 | ||
33 | #ifdef CONFIG_ACPI | 33 | #ifdef CONFIG_ACPI |
34 | const struct acpi_device_id * | ||
35 | i2c_acpi_match_device(const struct acpi_device_id *matches, | ||
36 | struct i2c_client *client); | ||
34 | void i2c_acpi_register_devices(struct i2c_adapter *adap); | 37 | void i2c_acpi_register_devices(struct i2c_adapter *adap); |
35 | #else /* CONFIG_ACPI */ | 38 | #else /* CONFIG_ACPI */ |
36 | static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } | 39 | static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } |
40 | static inline const struct acpi_device_id * | ||
41 | i2c_acpi_match_device(const struct acpi_device_id *matches, | ||
42 | struct i2c_client *client) | ||
43 | { | ||
44 | return NULL; | ||
45 | } | ||
37 | #endif /* CONFIG_ACPI */ | 46 | #endif /* CONFIG_ACPI */ |
38 | extern struct notifier_block i2c_acpi_notifier; | 47 | extern struct notifier_block i2c_acpi_notifier; |
39 | 48 | ||
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index 2c64d0e0740f..17121329bb79 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig | |||
@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL | |||
83 | different sets of pins at run-time. | 83 | different sets of pins at run-time. |
84 | 84 | ||
85 | This driver can also be built as a module. If so, the module will be | 85 | This driver can also be built as a module. If so, the module will be |
86 | called pinctrl-i2cmux. | 86 | called i2c-mux-pinctrl. |
87 | 87 | ||
88 | config I2C_MUX_REG | 88 | config I2C_MUX_REG |
89 | tristate "Register-based I2C multiplexer" | 89 | tristate "Register-based I2C multiplexer" |
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 01236cef7bfb..437522ca97b4 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -61,6 +61,7 @@ struct addr_req { | |||
61 | void (*callback)(int status, struct sockaddr *src_addr, | 61 | void (*callback)(int status, struct sockaddr *src_addr, |
62 | struct rdma_dev_addr *addr, void *context); | 62 | struct rdma_dev_addr *addr, void *context); |
63 | unsigned long timeout; | 63 | unsigned long timeout; |
64 | struct delayed_work work; | ||
64 | int status; | 65 | int status; |
65 | u32 seq; | 66 | u32 seq; |
66 | }; | 67 | }; |
@@ -295,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr, | |||
295 | } | 296 | } |
296 | EXPORT_SYMBOL(rdma_translate_ip); | 297 | EXPORT_SYMBOL(rdma_translate_ip); |
297 | 298 | ||
298 | static void set_timeout(unsigned long time) | 299 | static void set_timeout(struct delayed_work *delayed_work, unsigned long time) |
299 | { | 300 | { |
300 | unsigned long delay; | 301 | unsigned long delay; |
301 | 302 | ||
@@ -303,7 +304,7 @@ static void set_timeout(unsigned long time) | |||
303 | if ((long)delay < 0) | 304 | if ((long)delay < 0) |
304 | delay = 0; | 305 | delay = 0; |
305 | 306 | ||
306 | mod_delayed_work(addr_wq, &work, delay); | 307 | mod_delayed_work(addr_wq, delayed_work, delay); |
307 | } | 308 | } |
308 | 309 | ||
309 | static void queue_req(struct addr_req *req) | 310 | static void queue_req(struct addr_req *req) |
@@ -318,8 +319,7 @@ static void queue_req(struct addr_req *req) | |||
318 | 319 | ||
319 | list_add(&req->list, &temp_req->list); | 320 | list_add(&req->list, &temp_req->list); |
320 | 321 | ||
321 | if (req_list.next == &req->list) | 322 | set_timeout(&req->work, req->timeout); |
322 | set_timeout(req->timeout); | ||
323 | mutex_unlock(&lock); | 323 | mutex_unlock(&lock); |
324 | } | 324 | } |
325 | 325 | ||
@@ -574,6 +574,37 @@ static int addr_resolve(struct sockaddr *src_in, | |||
574 | return ret; | 574 | return ret; |
575 | } | 575 | } |
576 | 576 | ||
577 | static void process_one_req(struct work_struct *_work) | ||
578 | { | ||
579 | struct addr_req *req; | ||
580 | struct sockaddr *src_in, *dst_in; | ||
581 | |||
582 | mutex_lock(&lock); | ||
583 | req = container_of(_work, struct addr_req, work.work); | ||
584 | |||
585 | if (req->status == -ENODATA) { | ||
586 | src_in = (struct sockaddr *)&req->src_addr; | ||
587 | dst_in = (struct sockaddr *)&req->dst_addr; | ||
588 | req->status = addr_resolve(src_in, dst_in, req->addr, | ||
589 | true, req->seq); | ||
590 | if (req->status && time_after_eq(jiffies, req->timeout)) { | ||
591 | req->status = -ETIMEDOUT; | ||
592 | } else if (req->status == -ENODATA) { | ||
593 | /* requeue the work to retry later */ | ||
594 | set_timeout(&req->work, req->timeout); | ||
595 | mutex_unlock(&lock); | ||
596 | return; | ||
597 | } | ||
598 | } | ||
599 | list_del(&req->list); | ||
600 | mutex_unlock(&lock); | ||
601 | |||
602 | req->callback(req->status, (struct sockaddr *)&req->src_addr, | ||
603 | req->addr, req->context); | ||
604 | put_client(req->client); | ||
605 | kfree(req); | ||
606 | } | ||
607 | |||
577 | static void process_req(struct work_struct *work) | 608 | static void process_req(struct work_struct *work) |
578 | { | 609 | { |
579 | struct addr_req *req, *temp_req; | 610 | struct addr_req *req, *temp_req; |
@@ -591,20 +622,23 @@ static void process_req(struct work_struct *work) | |||
591 | true, req->seq); | 622 | true, req->seq); |
592 | if (req->status && time_after_eq(jiffies, req->timeout)) | 623 | if (req->status && time_after_eq(jiffies, req->timeout)) |
593 | req->status = -ETIMEDOUT; | 624 | req->status = -ETIMEDOUT; |
594 | else if (req->status == -ENODATA) | 625 | else if (req->status == -ENODATA) { |
626 | set_timeout(&req->work, req->timeout); | ||
595 | continue; | 627 | continue; |
628 | } | ||
596 | } | 629 | } |
597 | list_move_tail(&req->list, &done_list); | 630 | list_move_tail(&req->list, &done_list); |
598 | } | 631 | } |
599 | 632 | ||
600 | if (!list_empty(&req_list)) { | ||
601 | req = list_entry(req_list.next, struct addr_req, list); | ||
602 | set_timeout(req->timeout); | ||
603 | } | ||
604 | mutex_unlock(&lock); | 633 | mutex_unlock(&lock); |
605 | 634 | ||
606 | list_for_each_entry_safe(req, temp_req, &done_list, list) { | 635 | list_for_each_entry_safe(req, temp_req, &done_list, list) { |
607 | list_del(&req->list); | 636 | list_del(&req->list); |
637 | /* It is safe to cancel other work items from this work item | ||
638 | * because at a time there can be only one work item running | ||
639 | * with this single threaded work queue. | ||
640 | */ | ||
641 | cancel_delayed_work(&req->work); | ||
608 | req->callback(req->status, (struct sockaddr *) &req->src_addr, | 642 | req->callback(req->status, (struct sockaddr *) &req->src_addr, |
609 | req->addr, req->context); | 643 | req->addr, req->context); |
610 | put_client(req->client); | 644 | put_client(req->client); |
@@ -647,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client, | |||
647 | req->context = context; | 681 | req->context = context; |
648 | req->client = client; | 682 | req->client = client; |
649 | atomic_inc(&client->refcount); | 683 | atomic_inc(&client->refcount); |
684 | INIT_DELAYED_WORK(&req->work, process_one_req); | ||
650 | req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); | 685 | req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); |
651 | 686 | ||
652 | req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); | 687 | req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); |
@@ -701,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr) | |||
701 | req->status = -ECANCELED; | 736 | req->status = -ECANCELED; |
702 | req->timeout = jiffies; | 737 | req->timeout = jiffies; |
703 | list_move(&req->list, &req_list); | 738 | list_move(&req->list, &req_list); |
704 | set_timeout(req->timeout); | 739 | set_timeout(&req->work, req->timeout); |
705 | break; | 740 | break; |
706 | } | 741 | } |
707 | } | 742 | } |
@@ -807,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event, | |||
807 | if (event == NETEVENT_NEIGH_UPDATE) { | 842 | if (event == NETEVENT_NEIGH_UPDATE) { |
808 | struct neighbour *neigh = ctx; | 843 | struct neighbour *neigh = ctx; |
809 | 844 | ||
810 | if (neigh->nud_state & NUD_VALID) { | 845 | if (neigh->nud_state & NUD_VALID) |
811 | set_timeout(jiffies); | 846 | set_timeout(&work, jiffies); |
812 | } | ||
813 | } | 847 | } |
814 | return 0; | 848 | return 0; |
815 | } | 849 | } |
@@ -820,7 +854,7 @@ static struct notifier_block nb = { | |||
820 | 854 | ||
821 | int addr_init(void) | 855 | int addr_init(void) |
822 | { | 856 | { |
823 | addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0); | 857 | addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM); |
824 | if (!addr_wq) | 858 | if (!addr_wq) |
825 | return -ENOMEM; | 859 | return -ENOMEM; |
826 | 860 | ||
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 2c98533a0203..c551d2b275fd 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, | |||
1153 | int out_len) | 1153 | int out_len) |
1154 | { | 1154 | { |
1155 | struct ib_uverbs_resize_cq cmd; | 1155 | struct ib_uverbs_resize_cq cmd; |
1156 | struct ib_uverbs_resize_cq_resp resp; | 1156 | struct ib_uverbs_resize_cq_resp resp = {}; |
1157 | struct ib_udata udata; | 1157 | struct ib_udata udata; |
1158 | struct ib_cq *cq; | 1158 | struct ib_cq *cq; |
1159 | int ret = -EINVAL; | 1159 | int ret = -EINVAL; |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 3d2609608f58..c023e2c81b8f 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref) | |||
250 | if (atomic_dec_and_test(&file->device->refcount)) | 250 | if (atomic_dec_and_test(&file->device->refcount)) |
251 | ib_uverbs_comp_dev(file->device); | 251 | ib_uverbs_comp_dev(file->device); |
252 | 252 | ||
253 | kobject_put(&file->device->kobj); | ||
253 | kfree(file); | 254 | kfree(file); |
254 | } | 255 | } |
255 | 256 | ||
@@ -917,7 +918,6 @@ err: | |||
917 | static int ib_uverbs_close(struct inode *inode, struct file *filp) | 918 | static int ib_uverbs_close(struct inode *inode, struct file *filp) |
918 | { | 919 | { |
919 | struct ib_uverbs_file *file = filp->private_data; | 920 | struct ib_uverbs_file *file = filp->private_data; |
920 | struct ib_uverbs_device *dev = file->device; | ||
921 | 921 | ||
922 | mutex_lock(&file->cleanup_mutex); | 922 | mutex_lock(&file->cleanup_mutex); |
923 | if (file->ucontext) { | 923 | if (file->ucontext) { |
@@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) | |||
939 | ib_uverbs_release_async_event_file); | 939 | ib_uverbs_release_async_event_file); |
940 | 940 | ||
941 | kref_put(&file->ref, ib_uverbs_release_file); | 941 | kref_put(&file->ref, ib_uverbs_release_file); |
942 | kobject_put(&dev->kobj); | ||
943 | 942 | ||
944 | return 0; | 943 | return 0; |
945 | } | 944 | } |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index fb98ed67d5bc..7f8fe443df46 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -895,7 +895,6 @@ static const struct { | |||
895 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { | 895 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { |
896 | [IB_QPS_RESET] = { | 896 | [IB_QPS_RESET] = { |
897 | [IB_QPS_RESET] = { .valid = 1 }, | 897 | [IB_QPS_RESET] = { .valid = 1 }, |
898 | [IB_QPS_ERR] = { .valid = 1 }, | ||
899 | [IB_QPS_INIT] = { | 898 | [IB_QPS_INIT] = { |
900 | .valid = 1, | 899 | .valid = 1, |
901 | .req_param = { | 900 | .req_param = { |
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 23fad6d96944..2540b65e242c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c | |||
@@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) | |||
733 | continue; | 733 | continue; |
734 | 734 | ||
735 | free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); | 735 | free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); |
736 | if (IS_ERR(free_mr->mr_free_qp[i])) { | 736 | if (!free_mr->mr_free_qp[i]) { |
737 | dev_err(dev, "Create loop qp failed!\n"); | 737 | dev_err(dev, "Create loop qp failed!\n"); |
738 | goto create_lp_qp_failed; | 738 | goto create_lp_qp_failed; |
739 | } | 739 | } |
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index ae0746754008..3d701c7a4c91 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c | |||
@@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler( | |||
939 | 939 | ||
940 | if (qp->ibqp.qp_type != IB_QPT_RC) { | 940 | if (qp->ibqp.qp_type != IB_QPT_RC) { |
941 | av = *wqe; | 941 | av = *wqe; |
942 | if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT)) | 942 | if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) |
943 | *wqe += sizeof(struct mlx5_av); | 943 | *wqe += sizeof(struct mlx5_av); |
944 | else | 944 | else |
945 | *wqe += sizeof(struct mlx5_base_av); | 945 | *wqe += sizeof(struct mlx5_base_av); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ff50a7bd66d8..7ac25059c40f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -336,6 +336,7 @@ struct ipoib_dev_priv { | |||
336 | unsigned long flags; | 336 | unsigned long flags; |
337 | 337 | ||
338 | struct rw_semaphore vlan_rwsem; | 338 | struct rw_semaphore vlan_rwsem; |
339 | struct mutex mcast_mutex; | ||
339 | 340 | ||
340 | struct rb_root path_tree; | 341 | struct rb_root path_tree; |
341 | struct list_head path_list; | 342 | struct list_head path_list; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index f87d104837dc..d69410c2ed97 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, | |||
511 | case IB_CM_REQ_RECEIVED: | 511 | case IB_CM_REQ_RECEIVED: |
512 | return ipoib_cm_req_handler(cm_id, event); | 512 | return ipoib_cm_req_handler(cm_id, event); |
513 | case IB_CM_DREQ_RECEIVED: | 513 | case IB_CM_DREQ_RECEIVED: |
514 | p = cm_id->context; | ||
515 | ib_send_cm_drep(cm_id, NULL, 0); | 514 | ib_send_cm_drep(cm_id, NULL, 0); |
516 | /* Fall through */ | 515 | /* Fall through */ |
517 | case IB_CM_REJ_RECEIVED: | 516 | case IB_CM_REJ_RECEIVED: |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 7871379342f4..184a22f48027 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | |||
@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = { | |||
52 | IPOIB_NETDEV_STAT(tx_bytes), | 52 | IPOIB_NETDEV_STAT(tx_bytes), |
53 | IPOIB_NETDEV_STAT(tx_errors), | 53 | IPOIB_NETDEV_STAT(tx_errors), |
54 | IPOIB_NETDEV_STAT(rx_dropped), | 54 | IPOIB_NETDEV_STAT(rx_dropped), |
55 | IPOIB_NETDEV_STAT(tx_dropped) | 55 | IPOIB_NETDEV_STAT(tx_dropped), |
56 | IPOIB_NETDEV_STAT(multicast), | ||
56 | }; | 57 | }; |
57 | 58 | ||
58 | #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats) | 59 | #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 57a9655e844d..2e075377242e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
256 | 256 | ||
257 | ++dev->stats.rx_packets; | 257 | ++dev->stats.rx_packets; |
258 | dev->stats.rx_bytes += skb->len; | 258 | dev->stats.rx_bytes += skb->len; |
259 | if (skb->pkt_type == PACKET_MULTICAST) | ||
260 | dev->stats.multicast++; | ||
259 | 261 | ||
260 | skb->dev = dev; | 262 | skb->dev = dev; |
261 | if ((dev->features & NETIF_F_RXCSUM) && | 263 | if ((dev->features & NETIF_F_RXCSUM) && |
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev) | |||
709 | return pending; | 711 | return pending; |
710 | } | 712 | } |
711 | 713 | ||
714 | static void check_qp_movement_and_print(struct ipoib_dev_priv *priv, | ||
715 | struct ib_qp *qp, | ||
716 | enum ib_qp_state new_state) | ||
717 | { | ||
718 | struct ib_qp_attr qp_attr; | ||
719 | struct ib_qp_init_attr query_init_attr; | ||
720 | int ret; | ||
721 | |||
722 | ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr); | ||
723 | if (ret) { | ||
724 | ipoib_warn(priv, "%s: Failed to query QP\n", __func__); | ||
725 | return; | ||
726 | } | ||
727 | /* print according to the new-state and the previous state.*/ | ||
728 | if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET) | ||
729 | ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n"); | ||
730 | else | ||
731 | ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n", | ||
732 | new_state, qp_attr.qp_state); | ||
733 | } | ||
734 | |||
712 | int ipoib_ib_dev_stop_default(struct net_device *dev) | 735 | int ipoib_ib_dev_stop_default(struct net_device *dev) |
713 | { | 736 | { |
714 | struct ipoib_dev_priv *priv = ipoib_priv(dev); | 737 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) | |||
728 | */ | 751 | */ |
729 | qp_attr.qp_state = IB_QPS_ERR; | 752 | qp_attr.qp_state = IB_QPS_ERR; |
730 | if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) | 753 | if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) |
731 | ipoib_warn(priv, "Failed to modify QP to ERROR state\n"); | 754 | check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR); |
732 | 755 | ||
733 | /* Wait for all sends and receives to complete */ | 756 | /* Wait for all sends and receives to complete */ |
734 | begin = jiffies; | 757 | begin = jiffies; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 4ce315c92b48..6c77df34869d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -1560,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) | |||
1560 | int i, wait_flushed = 0; | 1560 | int i, wait_flushed = 0; |
1561 | 1561 | ||
1562 | init_completion(&priv->ntbl.flushed); | 1562 | init_completion(&priv->ntbl.flushed); |
1563 | set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); | ||
1563 | 1564 | ||
1564 | spin_lock_irqsave(&priv->lock, flags); | 1565 | spin_lock_irqsave(&priv->lock, flags); |
1565 | 1566 | ||
@@ -1604,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev) | |||
1604 | 1605 | ||
1605 | ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); | 1606 | ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); |
1606 | init_completion(&priv->ntbl.deleted); | 1607 | init_completion(&priv->ntbl.deleted); |
1607 | set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); | ||
1608 | 1608 | ||
1609 | /* Stop GC if called at init fail need to cancel work */ | 1609 | /* Stop GC if called at init fail need to cancel work */ |
1610 | stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); | 1610 | stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); |
@@ -1847,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = { | |||
1847 | .ndo_tx_timeout = ipoib_timeout, | 1847 | .ndo_tx_timeout = ipoib_timeout, |
1848 | .ndo_set_rx_mode = ipoib_set_mcast_list, | 1848 | .ndo_set_rx_mode = ipoib_set_mcast_list, |
1849 | .ndo_get_iflink = ipoib_get_iflink, | 1849 | .ndo_get_iflink = ipoib_get_iflink, |
1850 | .ndo_get_stats64 = ipoib_get_stats, | ||
1850 | }; | 1851 | }; |
1851 | 1852 | ||
1852 | void ipoib_setup_common(struct net_device *dev) | 1853 | void ipoib_setup_common(struct net_device *dev) |
@@ -1877,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev) | |||
1877 | priv->dev = dev; | 1878 | priv->dev = dev; |
1878 | spin_lock_init(&priv->lock); | 1879 | spin_lock_init(&priv->lock); |
1879 | init_rwsem(&priv->vlan_rwsem); | 1880 | init_rwsem(&priv->vlan_rwsem); |
1881 | mutex_init(&priv->mcast_mutex); | ||
1880 | 1882 | ||
1881 | INIT_LIST_HEAD(&priv->path_list); | 1883 | INIT_LIST_HEAD(&priv->path_list); |
1882 | INIT_LIST_HEAD(&priv->child_intfs); | 1884 | INIT_LIST_HEAD(&priv->child_intfs); |
@@ -2173,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format, | |||
2173 | priv->dev->dev_id = port - 1; | 2175 | priv->dev->dev_id = port - 1; |
2174 | 2176 | ||
2175 | result = ib_query_port(hca, port, &attr); | 2177 | result = ib_query_port(hca, port, &attr); |
2176 | if (!result) | 2178 | if (result) { |
2177 | priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); | ||
2178 | else { | ||
2179 | printk(KERN_WARNING "%s: ib_query_port %d failed\n", | 2179 | printk(KERN_WARNING "%s: ib_query_port %d failed\n", |
2180 | hca->name, port); | 2180 | hca->name, port); |
2181 | goto device_init_failed; | 2181 | goto device_init_failed; |
2182 | } | 2182 | } |
2183 | 2183 | ||
2184 | priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); | ||
2185 | |||
2184 | /* MTU will be reset when mcast join happens */ | 2186 | /* MTU will be reset when mcast join happens */ |
2185 | priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); | 2187 | priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); |
2186 | priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; | 2188 | priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; |
@@ -2211,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format, | |||
2211 | printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", | 2213 | printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", |
2212 | hca->name, port, result); | 2214 | hca->name, port, result); |
2213 | goto device_init_failed; | 2215 | goto device_init_failed; |
2214 | } else | 2216 | } |
2215 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); | 2217 | |
2218 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, | ||
2219 | sizeof(union ib_gid)); | ||
2216 | set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); | 2220 | set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); |
2217 | 2221 | ||
2218 | result = ipoib_dev_init(priv->dev, hca, port); | 2222 | result = ipoib_dev_init(priv->dev, hca, port); |
2219 | if (result < 0) { | 2223 | if (result) { |
2220 | printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", | 2224 | printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", |
2221 | hca->name, port, result); | 2225 | hca->name, port, result); |
2222 | goto device_init_failed; | 2226 | goto device_init_failed; |
@@ -2365,6 +2369,7 @@ static int __init ipoib_init_module(void) | |||
2365 | ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); | 2369 | ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); |
2366 | #ifdef CONFIG_INFINIBAND_IPOIB_CM | 2370 | #ifdef CONFIG_INFINIBAND_IPOIB_CM |
2367 | ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); | 2371 | ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); |
2372 | ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0); | ||
2368 | #endif | 2373 | #endif |
2369 | 2374 | ||
2370 | /* | 2375 | /* |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 057f58e6afca..93e149efc1f5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev) | |||
684 | int ipoib_mcast_stop_thread(struct net_device *dev) | 684 | int ipoib_mcast_stop_thread(struct net_device *dev) |
685 | { | 685 | { |
686 | struct ipoib_dev_priv *priv = ipoib_priv(dev); | 686 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
687 | unsigned long flags; | ||
688 | 687 | ||
689 | ipoib_dbg_mcast(priv, "stopping multicast thread\n"); | 688 | ipoib_dbg_mcast(priv, "stopping multicast thread\n"); |
690 | 689 | ||
691 | spin_lock_irqsave(&priv->lock, flags); | 690 | cancel_delayed_work_sync(&priv->mcast_task); |
692 | cancel_delayed_work(&priv->mcast_task); | ||
693 | spin_unlock_irqrestore(&priv->lock, flags); | ||
694 | |||
695 | flush_workqueue(priv->wq); | ||
696 | 691 | ||
697 | return 0; | 692 | return 0; |
698 | } | 693 | } |
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list) | |||
748 | { | 743 | { |
749 | struct ipoib_mcast *mcast, *tmcast; | 744 | struct ipoib_mcast *mcast, *tmcast; |
750 | 745 | ||
746 | /* | ||
747 | * make sure the in-flight joins have finished before we attempt | ||
748 | * to leave | ||
749 | */ | ||
750 | list_for_each_entry_safe(mcast, tmcast, remove_list, list) | ||
751 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) | ||
752 | wait_for_completion(&mcast->done); | ||
753 | |||
751 | list_for_each_entry_safe(mcast, tmcast, remove_list, list) { | 754 | list_for_each_entry_safe(mcast, tmcast, remove_list, list) { |
752 | ipoib_mcast_leave(mcast->dev, mcast); | 755 | ipoib_mcast_leave(mcast->dev, mcast); |
753 | ipoib_mcast_free(mcast); | 756 | ipoib_mcast_free(mcast); |
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev) | |||
838 | struct ipoib_mcast *mcast, *tmcast; | 841 | struct ipoib_mcast *mcast, *tmcast; |
839 | unsigned long flags; | 842 | unsigned long flags; |
840 | 843 | ||
844 | mutex_lock(&priv->mcast_mutex); | ||
841 | ipoib_dbg_mcast(priv, "flushing multicast list\n"); | 845 | ipoib_dbg_mcast(priv, "flushing multicast list\n"); |
842 | 846 | ||
843 | spin_lock_irqsave(&priv->lock, flags); | 847 | spin_lock_irqsave(&priv->lock, flags); |
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev) | |||
856 | 860 | ||
857 | spin_unlock_irqrestore(&priv->lock, flags); | 861 | spin_unlock_irqrestore(&priv->lock, flags); |
858 | 862 | ||
859 | /* | ||
860 | * make sure the in-flight joins have finished before we attempt | ||
861 | * to leave | ||
862 | */ | ||
863 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) | ||
864 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) | ||
865 | wait_for_completion(&mcast->done); | ||
866 | |||
867 | ipoib_mcast_remove_list(&remove_list); | 863 | ipoib_mcast_remove_list(&remove_list); |
864 | mutex_unlock(&priv->mcast_mutex); | ||
868 | } | 865 | } |
869 | 866 | ||
870 | static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) | 867 | static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) |
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work) | |||
982 | netif_addr_unlock(dev); | 979 | netif_addr_unlock(dev); |
983 | local_irq_restore(flags); | 980 | local_irq_restore(flags); |
984 | 981 | ||
985 | /* | ||
986 | * make sure the in-flight joins have finished before we attempt | ||
987 | * to leave | ||
988 | */ | ||
989 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) | ||
990 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) | ||
991 | wait_for_completion(&mcast->done); | ||
992 | |||
993 | ipoib_mcast_remove_list(&remove_list); | 982 | ipoib_mcast_remove_list(&remove_list); |
994 | 983 | ||
995 | /* | 984 | /* |
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c index 7b5fd8fb1761..aaca0b3d662e 100644 --- a/drivers/isdn/hysdn/hysdn_proclog.c +++ b/drivers/isdn/hysdn/hysdn_proclog.c | |||
@@ -44,7 +44,6 @@ struct procdata { | |||
44 | char log_name[15]; /* log filename */ | 44 | char log_name[15]; /* log filename */ |
45 | struct log_data *log_head, *log_tail; /* head and tail for queue */ | 45 | struct log_data *log_head, *log_tail; /* head and tail for queue */ |
46 | int if_used; /* open count for interface */ | 46 | int if_used; /* open count for interface */ |
47 | int volatile del_lock; /* lock for delete operations */ | ||
48 | unsigned char logtmp[LOG_MAX_LINELEN]; | 47 | unsigned char logtmp[LOG_MAX_LINELEN]; |
49 | wait_queue_head_t rd_queue; | 48 | wait_queue_head_t rd_queue; |
50 | }; | 49 | }; |
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp) | |||
102 | { | 101 | { |
103 | struct log_data *ib; | 102 | struct log_data *ib; |
104 | struct procdata *pd = card->proclog; | 103 | struct procdata *pd = card->proclog; |
105 | int i; | ||
106 | unsigned long flags; | 104 | unsigned long flags; |
107 | 105 | ||
108 | if (!pd) | 106 | if (!pd) |
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp) | |||
126 | else | 124 | else |
127 | pd->log_tail->next = ib; /* follows existing messages */ | 125 | pd->log_tail->next = ib; /* follows existing messages */ |
128 | pd->log_tail = ib; /* new tail */ | 126 | pd->log_tail = ib; /* new tail */ |
129 | i = pd->del_lock++; /* get lock state */ | ||
130 | spin_unlock_irqrestore(&card->hysdn_lock, flags); | ||
131 | 127 | ||
132 | /* delete old entrys */ | 128 | /* delete old entrys */ |
133 | if (!i) | 129 | while (pd->log_head->next) { |
134 | while (pd->log_head->next) { | 130 | if ((pd->log_head->usage_cnt <= 0) && |
135 | if ((pd->log_head->usage_cnt <= 0) && | 131 | (pd->log_head->next->usage_cnt <= 0)) { |
136 | (pd->log_head->next->usage_cnt <= 0)) { | 132 | ib = pd->log_head; |
137 | ib = pd->log_head; | 133 | pd->log_head = pd->log_head->next; |
138 | pd->log_head = pd->log_head->next; | 134 | kfree(ib); |
139 | kfree(ib); | 135 | } else { |
140 | } else | 136 | break; |
141 | break; | 137 | } |
142 | } /* pd->log_head->next */ | 138 | } /* pd->log_head->next */ |
143 | pd->del_lock--; /* release lock level */ | 139 | |
140 | spin_unlock_irqrestore(&card->hysdn_lock, flags); | ||
141 | |||
144 | wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ | 142 | wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ |
145 | } /* put_log_buffer */ | 143 | } /* put_log_buffer */ |
146 | 144 | ||
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index d922a88e407f..2c8baa0c2c4e 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c | |||
@@ -1201,7 +1201,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, | |||
1201 | * tRC < 30ns implies EDO mode. This controller does not support this | 1201 | * tRC < 30ns implies EDO mode. This controller does not support this |
1202 | * mode. | 1202 | * mode. |
1203 | */ | 1203 | */ |
1204 | if (conf->timings.sdr.tRC_min < 30) | 1204 | if (conf->timings.sdr.tRC_min < 30000) |
1205 | return -ENOTSUPP; | 1205 | return -ENOTSUPP; |
1206 | 1206 | ||
1207 | atmel_smc_cs_conf_init(smcconf); | 1207 | atmel_smc_cs_conf_init(smcconf); |
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 55a8ee5306ea..8c210a5776bc 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c | |||
@@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev) | |||
945 | */ | 945 | */ |
946 | struct platform_device *pdev = to_platform_device(userdev); | 946 | struct platform_device *pdev = to_platform_device(userdev); |
947 | const struct atmel_pmecc_caps *caps; | 947 | const struct atmel_pmecc_caps *caps; |
948 | const struct of_device_id *match; | ||
948 | 949 | ||
949 | /* No PMECC engine available. */ | 950 | /* No PMECC engine available. */ |
950 | if (!of_property_read_bool(userdev->of_node, | 951 | if (!of_property_read_bool(userdev->of_node, |
@@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev) | |||
953 | 954 | ||
954 | caps = &at91sam9g45_caps; | 955 | caps = &at91sam9g45_caps; |
955 | 956 | ||
956 | /* | 957 | /* Find the caps associated to the NAND dev node. */ |
957 | * Try to find the NFC subnode and extract the associated caps | 958 | match = of_match_node(atmel_pmecc_legacy_match, |
958 | * from there. | 959 | userdev->of_node); |
959 | */ | 960 | if (match && match->data) |
960 | np = of_find_compatible_node(userdev->of_node, NULL, | 961 | caps = match->data; |
961 | "atmel,sama5d3-nfc"); | ||
962 | if (np) { | ||
963 | const struct of_device_id *match; | ||
964 | |||
965 | match = of_match_node(atmel_pmecc_legacy_match, np); | ||
966 | if (match && match->data) | ||
967 | caps = match->data; | ||
968 | |||
969 | of_node_put(np); | ||
970 | } | ||
971 | 962 | ||
972 | pmecc = atmel_pmecc_create(pdev, caps, 1, 2); | 963 | pmecc = atmel_pmecc_create(pdev, caps, 1, 2); |
973 | } | 964 | } |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 5fa5ddc94834..c6c18b82f8f4 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section, | |||
65 | 65 | ||
66 | if (!section) { | 66 | if (!section) { |
67 | oobregion->offset = 0; | 67 | oobregion->offset = 0; |
68 | oobregion->length = 4; | 68 | if (mtd->oobsize == 16) |
69 | oobregion->length = 4; | ||
70 | else | ||
71 | oobregion->length = 3; | ||
69 | } else { | 72 | } else { |
73 | if (mtd->oobsize == 8) | ||
74 | return -ERANGE; | ||
75 | |||
70 | oobregion->offset = 6; | 76 | oobregion->offset = 6; |
71 | oobregion->length = ecc->total - 4; | 77 | oobregion->length = ecc->total - 4; |
72 | } | 78 | } |
@@ -1125,7 +1131,9 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) | |||
1125 | * Ensure the timing mode has been changed on the chip side | 1131 | * Ensure the timing mode has been changed on the chip side |
1126 | * before changing timings on the controller side. | 1132 | * before changing timings on the controller side. |
1127 | */ | 1133 | */ |
1128 | if (chip->onfi_version) { | 1134 | if (chip->onfi_version && |
1135 | (le16_to_cpu(chip->onfi_params.opt_cmd) & | ||
1136 | ONFI_OPT_CMD_SET_GET_FEATURES)) { | ||
1129 | u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { | 1137 | u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { |
1130 | chip->onfi_timing_mode_default, | 1138 | chip->onfi_timing_mode_default, |
1131 | }; | 1139 | }; |
@@ -2741,7 +2749,6 @@ static int nand_write_page_syndrome(struct mtd_info *mtd, | |||
2741 | * @buf: the data to write | 2749 | * @buf: the data to write |
2742 | * @oob_required: must write chip->oob_poi to OOB | 2750 | * @oob_required: must write chip->oob_poi to OOB |
2743 | * @page: page number to write | 2751 | * @page: page number to write |
2744 | * @cached: cached programming | ||
2745 | * @raw: use _raw version of write_page | 2752 | * @raw: use _raw version of write_page |
2746 | */ | 2753 | */ |
2747 | static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | 2754 | static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c index f06312df3669..7e36d7d13c26 100644 --- a/drivers/mtd/nand/nand_timings.c +++ b/drivers/mtd/nand/nand_timings.c | |||
@@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip, | |||
311 | struct nand_sdr_timings *timings = &iface->timings.sdr; | 311 | struct nand_sdr_timings *timings = &iface->timings.sdr; |
312 | 312 | ||
313 | /* microseconds -> picoseconds */ | 313 | /* microseconds -> picoseconds */ |
314 | timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog); | 314 | timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog); |
315 | timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers); | 315 | timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers); |
316 | timings->tR_max = 1000000UL * le16_to_cpu(params->t_r); | 316 | timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r); |
317 | 317 | ||
318 | /* nanoseconds -> picoseconds */ | 318 | /* nanoseconds -> picoseconds */ |
319 | timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs); | 319 | timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs); |
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index d0b6f8f9f297..6abd142b1324 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c | |||
@@ -1728,6 +1728,10 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline, | |||
1728 | */ | 1728 | */ |
1729 | chip->clk_rate = NSEC_PER_SEC / min_clk_period; | 1729 | chip->clk_rate = NSEC_PER_SEC / min_clk_period; |
1730 | real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); | 1730 | real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); |
1731 | if (real_clk_rate <= 0) { | ||
1732 | dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate); | ||
1733 | return -EINVAL; | ||
1734 | } | ||
1731 | 1735 | ||
1732 | /* | 1736 | /* |
1733 | * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data | 1737 | * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data |
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 12700710f26d..8faa796a115f 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c | |||
@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port, | |||
625 | * all finished. | 625 | * all finished. |
626 | */ | 626 | */ |
627 | mt7623_pad_clk_setup(ds); | 627 | mt7623_pad_clk_setup(ds); |
628 | } else { | ||
629 | u16 lcl_adv = 0, rmt_adv = 0; | ||
630 | u8 flowctrl; | ||
631 | u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE; | ||
632 | |||
633 | switch (phydev->speed) { | ||
634 | case SPEED_1000: | ||
635 | mcr |= PMCR_FORCE_SPEED_1000; | ||
636 | break; | ||
637 | case SPEED_100: | ||
638 | mcr |= PMCR_FORCE_SPEED_100; | ||
639 | break; | ||
640 | }; | ||
641 | |||
642 | if (phydev->link) | ||
643 | mcr |= PMCR_FORCE_LNK; | ||
644 | |||
645 | if (phydev->duplex) { | ||
646 | mcr |= PMCR_FORCE_FDX; | ||
647 | |||
648 | if (phydev->pause) | ||
649 | rmt_adv = LPA_PAUSE_CAP; | ||
650 | if (phydev->asym_pause) | ||
651 | rmt_adv |= LPA_PAUSE_ASYM; | ||
652 | |||
653 | if (phydev->advertising & ADVERTISED_Pause) | ||
654 | lcl_adv |= ADVERTISE_PAUSE_CAP; | ||
655 | if (phydev->advertising & ADVERTISED_Asym_Pause) | ||
656 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | ||
657 | |||
658 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); | ||
659 | |||
660 | if (flowctrl & FLOW_CTRL_TX) | ||
661 | mcr |= PMCR_TX_FC_EN; | ||
662 | if (flowctrl & FLOW_CTRL_RX) | ||
663 | mcr |= PMCR_RX_FC_EN; | ||
664 | } | ||
665 | mt7530_write(priv, MT7530_PMCR_P(port), mcr); | ||
628 | } | 666 | } |
629 | } | 667 | } |
630 | 668 | ||
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index b83d76b99802..74db9822eb40 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h | |||
@@ -151,6 +151,7 @@ enum mt7530_stp_state { | |||
151 | #define PMCR_TX_FC_EN BIT(5) | 151 | #define PMCR_TX_FC_EN BIT(5) |
152 | #define PMCR_RX_FC_EN BIT(4) | 152 | #define PMCR_RX_FC_EN BIT(4) |
153 | #define PMCR_FORCE_SPEED_1000 BIT(3) | 153 | #define PMCR_FORCE_SPEED_1000 BIT(3) |
154 | #define PMCR_FORCE_SPEED_100 BIT(2) | ||
154 | #define PMCR_FORCE_FDX BIT(1) | 155 | #define PMCR_FORCE_FDX BIT(1) |
155 | #define PMCR_FORCE_LNK BIT(0) | 156 | #define PMCR_FORCE_LNK BIT(0) |
156 | #define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ | 157 | #define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 86058a9f3417..1d307f2def2d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) | |||
1785 | 1785 | ||
1786 | xgene_enet_gpiod_get(pdata); | 1786 | xgene_enet_gpiod_get(pdata); |
1787 | 1787 | ||
1788 | if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { | 1788 | pdata->clk = devm_clk_get(&pdev->dev, NULL); |
1789 | pdata->clk = devm_clk_get(&pdev->dev, NULL); | 1789 | if (IS_ERR(pdata->clk)) { |
1790 | if (IS_ERR(pdata->clk)) { | 1790 | if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { |
1791 | /* Abort if the clock is defined but couldn't be | 1791 | /* Abort if the clock is defined but couldn't be |
1792 | * retrived. Always abort if the clock is missing on | 1792 | * retrived. Always abort if the clock is missing on |
1793 | * DT system as the driver can't cope with this case. | 1793 | * DT system as the driver can't cope with this case. |
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index f411936b744c..a1125d10c825 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c | |||
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev, | |||
2368 | bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); | 2368 | bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); |
2369 | 2369 | ||
2370 | spin_lock_init(&bp->lock); | 2370 | spin_lock_init(&bp->lock); |
2371 | u64_stats_init(&bp->hw_stats.syncp); | ||
2371 | 2372 | ||
2372 | bp->rx_pending = B44_DEF_RX_RING_PENDING; | 2373 | bp->rx_pending = B44_DEF_RX_RING_PENDING; |
2373 | bp->tx_pending = B44_DEF_TX_RING_PENDING; | 2374 | bp->tx_pending = B44_DEF_TX_RING_PENDING; |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 99576ba4187f..32c116652755 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); | |||
111 | static void send_request_unmap(struct ibmvnic_adapter *, u8); | 111 | static void send_request_unmap(struct ibmvnic_adapter *, u8); |
112 | static void send_login(struct ibmvnic_adapter *adapter); | 112 | static void send_login(struct ibmvnic_adapter *adapter); |
113 | static void send_cap_queries(struct ibmvnic_adapter *adapter); | 113 | static void send_cap_queries(struct ibmvnic_adapter *adapter); |
114 | static int init_sub_crqs(struct ibmvnic_adapter *); | ||
114 | static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); | 115 | static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); |
115 | static int ibmvnic_init(struct ibmvnic_adapter *); | 116 | static int ibmvnic_init(struct ibmvnic_adapter *); |
116 | static void release_crq_queue(struct ibmvnic_adapter *); | 117 | static void release_crq_queue(struct ibmvnic_adapter *); |
@@ -676,6 +677,7 @@ static int ibmvnic_login(struct net_device *netdev) | |||
676 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 677 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
677 | unsigned long timeout = msecs_to_jiffies(30000); | 678 | unsigned long timeout = msecs_to_jiffies(30000); |
678 | struct device *dev = &adapter->vdev->dev; | 679 | struct device *dev = &adapter->vdev->dev; |
680 | int rc; | ||
679 | 681 | ||
680 | do { | 682 | do { |
681 | if (adapter->renegotiate) { | 683 | if (adapter->renegotiate) { |
@@ -689,6 +691,18 @@ static int ibmvnic_login(struct net_device *netdev) | |||
689 | dev_err(dev, "Capabilities query timeout\n"); | 691 | dev_err(dev, "Capabilities query timeout\n"); |
690 | return -1; | 692 | return -1; |
691 | } | 693 | } |
694 | rc = init_sub_crqs(adapter); | ||
695 | if (rc) { | ||
696 | dev_err(dev, | ||
697 | "Initialization of SCRQ's failed\n"); | ||
698 | return -1; | ||
699 | } | ||
700 | rc = init_sub_crq_irqs(adapter); | ||
701 | if (rc) { | ||
702 | dev_err(dev, | ||
703 | "Initialization of SCRQ's irqs failed\n"); | ||
704 | return -1; | ||
705 | } | ||
692 | } | 706 | } |
693 | 707 | ||
694 | reinit_completion(&adapter->init_done); | 708 | reinit_completion(&adapter->init_done); |
@@ -3106,7 +3120,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, | |||
3106 | *req_value, | 3120 | *req_value, |
3107 | (long int)be64_to_cpu(crq->request_capability_rsp. | 3121 | (long int)be64_to_cpu(crq->request_capability_rsp. |
3108 | number), name); | 3122 | number), name); |
3109 | release_sub_crqs(adapter); | ||
3110 | *req_value = be64_to_cpu(crq->request_capability_rsp.number); | 3123 | *req_value = be64_to_cpu(crq->request_capability_rsp.number); |
3111 | ibmvnic_send_req_caps(adapter, 1); | 3124 | ibmvnic_send_req_caps(adapter, 1); |
3112 | return; | 3125 | return; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index d464fceb300f..8a969d8f0790 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) | |||
1113 | if (!tx_ring->tx_bi) | 1113 | if (!tx_ring->tx_bi) |
1114 | goto err; | 1114 | goto err; |
1115 | 1115 | ||
1116 | u64_stats_init(&tx_ring->syncp); | ||
1117 | |||
1116 | /* round up to nearest 4K */ | 1118 | /* round up to nearest 4K */ |
1117 | tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); | 1119 | tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); |
1118 | /* add u32 for head writeback, align after this takes care of | 1120 | /* add u32 for head writeback, align after this takes care of |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 084c53582793..032f8ac06357 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) | |||
2988 | if (!tx_ring->tx_buffer_info) | 2988 | if (!tx_ring->tx_buffer_info) |
2989 | goto err; | 2989 | goto err; |
2990 | 2990 | ||
2991 | u64_stats_init(&tx_ring->syncp); | ||
2992 | |||
2991 | /* round up to nearest 4K */ | 2993 | /* round up to nearest 4K */ |
2992 | tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); | 2994 | tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); |
2993 | tx_ring->size = ALIGN(tx_ring->size, 4096); | 2995 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring) | |||
3046 | if (!rx_ring->rx_buffer_info) | 3048 | if (!rx_ring->rx_buffer_info) |
3047 | goto err; | 3049 | goto err; |
3048 | 3050 | ||
3051 | u64_stats_init(&rx_ring->syncp); | ||
3052 | |||
3049 | /* Round up to nearest 4K */ | 3053 | /* Round up to nearest 4K */ |
3050 | rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); | 3054 | rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); |
3051 | rx_ring->size = ALIGN(rx_ring->size, 4096); | 3055 | rx_ring->size = ALIGN(rx_ring->size, 4096); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c751a1d434ad..3d4e4a5d00d1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev, | |||
223 | struct ethtool_wolinfo *wol) | 223 | struct ethtool_wolinfo *wol) |
224 | { | 224 | { |
225 | struct mlx4_en_priv *priv = netdev_priv(netdev); | 225 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
226 | struct mlx4_caps *caps = &priv->mdev->dev->caps; | ||
226 | int err = 0; | 227 | int err = 0; |
227 | u64 config = 0; | 228 | u64 config = 0; |
228 | u64 mask; | 229 | u64 mask; |
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev, | |||
235 | mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : | 236 | mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : |
236 | MLX4_DEV_CAP_FLAG_WOL_PORT2; | 237 | MLX4_DEV_CAP_FLAG_WOL_PORT2; |
237 | 238 | ||
238 | if (!(priv->mdev->dev->caps.flags & mask)) { | 239 | if (!(caps->flags & mask)) { |
239 | wol->supported = 0; | 240 | wol->supported = 0; |
240 | wol->wolopts = 0; | 241 | wol->wolopts = 0; |
241 | return; | 242 | return; |
242 | } | 243 | } |
243 | 244 | ||
245 | if (caps->wol_port[priv->port]) | ||
246 | wol->supported = WAKE_MAGIC; | ||
247 | else | ||
248 | wol->supported = 0; | ||
249 | |||
244 | err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); | 250 | err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); |
245 | if (err) { | 251 | if (err) { |
246 | en_err(priv, "Failed to get WoL information\n"); | 252 | en_err(priv, "Failed to get WoL information\n"); |
247 | return; | 253 | return; |
248 | } | 254 | } |
249 | 255 | ||
250 | if (config & MLX4_EN_WOL_MAGIC) | 256 | if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC)) |
251 | wol->supported = WAKE_MAGIC; | ||
252 | else | ||
253 | wol->supported = 0; | ||
254 | |||
255 | if (config & MLX4_EN_WOL_ENABLED) | ||
256 | wol->wolopts = WAKE_MAGIC; | 257 | wol->wolopts = WAKE_MAGIC; |
257 | else | 258 | else |
258 | wol->wolopts = 0; | 259 | wol->wolopts = 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 436f7689a032..bf1638044a7a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum, | |||
574 | * header, the HW adds it. To address that, we are subtracting the pseudo | 574 | * header, the HW adds it. To address that, we are subtracting the pseudo |
575 | * header checksum from the checksum value provided by the HW. | 575 | * header checksum from the checksum value provided by the HW. |
576 | */ | 576 | */ |
577 | static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, | 577 | static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, |
578 | struct iphdr *iph) | 578 | struct iphdr *iph) |
579 | { | 579 | { |
580 | __u16 length_for_csum = 0; | 580 | __u16 length_for_csum = 0; |
581 | __wsum csum_pseudo_header = 0; | 581 | __wsum csum_pseudo_header = 0; |
582 | __u8 ipproto = iph->protocol; | ||
583 | |||
584 | if (unlikely(ipproto == IPPROTO_SCTP)) | ||
585 | return -1; | ||
582 | 586 | ||
583 | length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); | 587 | length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); |
584 | csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, | 588 | csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, |
585 | length_for_csum, iph->protocol, 0); | 589 | length_for_csum, ipproto, 0); |
586 | skb->csum = csum_sub(hw_checksum, csum_pseudo_header); | 590 | skb->csum = csum_sub(hw_checksum, csum_pseudo_header); |
591 | return 0; | ||
587 | } | 592 | } |
588 | 593 | ||
589 | #if IS_ENABLED(CONFIG_IPV6) | 594 | #if IS_ENABLED(CONFIG_IPV6) |
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, | |||
594 | static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, | 599 | static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, |
595 | struct ipv6hdr *ipv6h) | 600 | struct ipv6hdr *ipv6h) |
596 | { | 601 | { |
602 | __u8 nexthdr = ipv6h->nexthdr; | ||
597 | __wsum csum_pseudo_hdr = 0; | 603 | __wsum csum_pseudo_hdr = 0; |
598 | 604 | ||
599 | if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT || | 605 | if (unlikely(nexthdr == IPPROTO_FRAGMENT || |
600 | ipv6h->nexthdr == IPPROTO_HOPOPTS)) | 606 | nexthdr == IPPROTO_HOPOPTS || |
607 | nexthdr == IPPROTO_SCTP)) | ||
601 | return -1; | 608 | return -1; |
602 | hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); | 609 | hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr)); |
603 | 610 | ||
604 | csum_pseudo_hdr = csum_partial(&ipv6h->saddr, | 611 | csum_pseudo_hdr = csum_partial(&ipv6h->saddr, |
605 | sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); | 612 | sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); |
606 | csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); | 613 | csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); |
607 | csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); | 614 | csum_pseudo_hdr = csum_add(csum_pseudo_hdr, |
615 | (__force __wsum)htons(nexthdr)); | ||
608 | 616 | ||
609 | skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); | 617 | skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); |
610 | skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); | 618 | skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); |
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, | |||
627 | } | 635 | } |
628 | 636 | ||
629 | if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) | 637 | if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) |
630 | get_fixed_ipv4_csum(hw_checksum, skb, hdr); | 638 | return get_fixed_ipv4_csum(hw_checksum, skb, hdr); |
631 | #if IS_ENABLED(CONFIG_IPV6) | 639 | #if IS_ENABLED(CONFIG_IPV6) |
632 | else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) | 640 | if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) |
633 | if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr))) | 641 | return get_fixed_ipv6_csum(hw_checksum, skb, hdr); |
634 | return -1; | ||
635 | #endif | 642 | #endif |
636 | return 0; | 643 | return 0; |
637 | } | 644 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 37e84a59e751..041c0ed65929 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) | |||
159 | [32] = "Loopback source checks support", | 159 | [32] = "Loopback source checks support", |
160 | [33] = "RoCEv2 support", | 160 | [33] = "RoCEv2 support", |
161 | [34] = "DMFS Sniffer support (UC & MC)", | 161 | [34] = "DMFS Sniffer support (UC & MC)", |
162 | [35] = "QinQ VST mode support", | 162 | [35] = "Diag counters per port", |
163 | [36] = "sl to vl mapping table change event support" | 163 | [36] = "QinQ VST mode support", |
164 | [37] = "sl to vl mapping table change event support", | ||
164 | }; | 165 | }; |
165 | int i; | 166 | int i; |
166 | 167 | ||
@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
764 | #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e | 765 | #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e |
765 | #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f | 766 | #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f |
766 | #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 | 767 | #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 |
768 | #define QUERY_DEV_CAP_WOL_OFFSET 0x43 | ||
767 | #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 | 769 | #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 |
768 | #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 | 770 | #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 |
769 | #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 | 771 | #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 |
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
920 | MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); | 922 | MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); |
921 | MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); | 923 | MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); |
922 | dev_cap->flags = flags | (u64)ext_flags << 32; | 924 | dev_cap->flags = flags | (u64)ext_flags << 32; |
925 | MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET); | ||
926 | dev_cap->wol_port[1] = !!(field & 0x20); | ||
927 | dev_cap->wol_port[2] = !!(field & 0x40); | ||
923 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); | 928 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); |
924 | dev_cap->reserved_uars = field >> 4; | 929 | dev_cap->reserved_uars = field >> 4; |
925 | MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); | 930 | MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 5343a0599253..b52ba01aa486 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h | |||
@@ -129,6 +129,7 @@ struct mlx4_dev_cap { | |||
129 | u32 dmfs_high_rate_qpn_range; | 129 | u32 dmfs_high_rate_qpn_range; |
130 | struct mlx4_rate_limit_caps rl_caps; | 130 | struct mlx4_rate_limit_caps rl_caps; |
131 | struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; | 131 | struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; |
132 | bool wol_port[MLX4_MAX_PORTS + 1]; | ||
132 | }; | 133 | }; |
133 | 134 | ||
134 | struct mlx4_func_cap { | 135 | struct mlx4_func_cap { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a27c9c13a36e..09b9bc17bce9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
424 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; | 424 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; |
425 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; | 425 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; |
426 | dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; | 426 | dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; |
427 | dev->caps.wol_port[1] = dev_cap->wol_port[1]; | ||
428 | dev->caps.wol_port[2] = dev_cap->wol_port[2]; | ||
427 | 429 | ||
428 | /* Save uar page shift */ | 430 | /* Save uar page shift */ |
429 | if (!mlx4_is_slave(dev)) { | 431 | if (!mlx4_is_slave(dev)) { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 656b2d3f1bee..5eb1606765c5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
626 | 626 | ||
627 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, | 627 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, |
628 | orig_dev); | 628 | orig_dev); |
629 | if (WARN_ON(!bridge_port)) | 629 | if (!bridge_port) |
630 | return -EINVAL; | 630 | return 0; |
631 | 631 | ||
632 | err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, | 632 | err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, |
633 | MLXSW_SP_FLOOD_TYPE_UC, | 633 | MLXSW_SP_FLOOD_TYPE_UC, |
@@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
711 | 711 | ||
712 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, | 712 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, |
713 | orig_dev); | 713 | orig_dev); |
714 | if (WARN_ON(!bridge_port)) | 714 | if (!bridge_port) |
715 | return -EINVAL; | 715 | return 0; |
716 | 716 | ||
717 | if (!bridge_port->bridge_device->multicast_enabled) | 717 | if (!bridge_port->bridge_device->multicast_enabled) |
718 | return 0; | 718 | return 0; |
@@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1283 | return 0; | 1283 | return 0; |
1284 | 1284 | ||
1285 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); | 1285 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); |
1286 | if (WARN_ON(!bridge_port)) | 1286 | if (!bridge_port) |
1287 | return -EINVAL; | 1287 | return 0; |
1288 | 1288 | ||
1289 | bridge_device = bridge_port->bridge_device; | 1289 | bridge_device = bridge_port->bridge_device; |
1290 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, | 1290 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, |
1291 | bridge_device, | 1291 | bridge_device, |
1292 | mdb->vid); | 1292 | mdb->vid); |
1293 | if (WARN_ON(!mlxsw_sp_port_vlan)) | 1293 | if (!mlxsw_sp_port_vlan) |
1294 | return -EINVAL; | 1294 | return 0; |
1295 | 1295 | ||
1296 | fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); | 1296 | fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); |
1297 | 1297 | ||
@@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1407 | int err = 0; | 1407 | int err = 0; |
1408 | 1408 | ||
1409 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); | 1409 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); |
1410 | if (WARN_ON(!bridge_port)) | 1410 | if (!bridge_port) |
1411 | return -EINVAL; | 1411 | return 0; |
1412 | 1412 | ||
1413 | bridge_device = bridge_port->bridge_device; | 1413 | bridge_device = bridge_port->bridge_device; |
1414 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, | 1414 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, |
1415 | bridge_device, | 1415 | bridge_device, |
1416 | mdb->vid); | 1416 | mdb->vid); |
1417 | if (WARN_ON(!mlxsw_sp_port_vlan)) | 1417 | if (!mlxsw_sp_port_vlan) |
1418 | return -EINVAL; | 1418 | return 0; |
1419 | 1419 | ||
1420 | fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); | 1420 | fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); |
1421 | 1421 | ||
@@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) | |||
1974 | 1974 | ||
1975 | } | 1975 | } |
1976 | 1976 | ||
1977 | static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp) | ||
1978 | { | ||
1979 | struct mlxsw_sp_mid *mid, *tmp; | ||
1980 | |||
1981 | list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) { | ||
1982 | list_del(&mid->list); | ||
1983 | clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); | ||
1984 | kfree(mid); | ||
1985 | } | ||
1986 | } | ||
1987 | |||
1977 | int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) | 1988 | int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) |
1978 | { | 1989 | { |
1979 | struct mlxsw_sp_bridge *bridge; | 1990 | struct mlxsw_sp_bridge *bridge; |
@@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) | |||
1996 | void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) | 2007 | void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) |
1997 | { | 2008 | { |
1998 | mlxsw_sp_fdb_fini(mlxsw_sp); | 2009 | mlxsw_sp_fdb_fini(mlxsw_sp); |
1999 | WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list)); | 2010 | mlxsw_sp_mids_fini(mlxsw_sp); |
2000 | WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); | 2011 | WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); |
2001 | kfree(mlxsw_sp->bridge); | 2012 | kfree(mlxsw_sp->bridge); |
2002 | } | 2013 | } |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index ea471604450e..4a990033c4d5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, | |||
513 | tx_ring->idx = idx; | 513 | tx_ring->idx = idx; |
514 | tx_ring->r_vec = r_vec; | 514 | tx_ring->r_vec = r_vec; |
515 | tx_ring->is_xdp = is_xdp; | 515 | tx_ring->is_xdp = is_xdp; |
516 | u64_stats_init(&tx_ring->r_vec->tx_sync); | ||
516 | 517 | ||
517 | tx_ring->qcidx = tx_ring->idx * nn->stride_tx; | 518 | tx_ring->qcidx = tx_ring->idx * nn->stride_tx; |
518 | tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); | 519 | tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); |
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, | |||
532 | 533 | ||
533 | rx_ring->idx = idx; | 534 | rx_ring->idx = idx; |
534 | rx_ring->r_vec = r_vec; | 535 | rx_ring->r_vec = r_vec; |
536 | u64_stats_init(&rx_ring->r_vec->rx_sync); | ||
535 | 537 | ||
536 | rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; | 538 | rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; |
537 | rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); | 539 | rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index c1ecce6b9141..376485d99357 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
253 | size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); | 253 | size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); |
254 | p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); | 254 | p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); |
255 | p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); | 255 | p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); |
256 | if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) | 256 | if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow) |
257 | goto err; | 257 | goto err; |
258 | 258 | ||
259 | return 0; | 259 | return 0; |
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 32279d21c836..c2121d214f08 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c | |||
@@ -31,9 +31,18 @@ | |||
31 | 31 | ||
32 | #include "cpts.h" | 32 | #include "cpts.h" |
33 | 33 | ||
34 | #define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */ | ||
35 | |||
36 | struct cpts_skb_cb_data { | ||
37 | unsigned long tmo; | ||
38 | }; | ||
39 | |||
34 | #define cpts_read32(c, r) readl_relaxed(&c->reg->r) | 40 | #define cpts_read32(c, r) readl_relaxed(&c->reg->r) |
35 | #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) | 41 | #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) |
36 | 42 | ||
43 | static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, | ||
44 | u16 ts_seqid, u8 ts_msgtype); | ||
45 | |||
37 | static int event_expired(struct cpts_event *event) | 46 | static int event_expired(struct cpts_event *event) |
38 | { | 47 | { |
39 | return time_after(jiffies, event->tmo); | 48 | return time_after(jiffies, event->tmo); |
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts) | |||
77 | return removed ? 0 : -1; | 86 | return removed ? 0 : -1; |
78 | } | 87 | } |
79 | 88 | ||
89 | static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) | ||
90 | { | ||
91 | struct sk_buff *skb, *tmp; | ||
92 | u16 seqid; | ||
93 | u8 mtype; | ||
94 | bool found = false; | ||
95 | |||
96 | mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK; | ||
97 | seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK; | ||
98 | |||
99 | /* no need to grab txq.lock as access is always done under cpts->lock */ | ||
100 | skb_queue_walk_safe(&cpts->txq, skb, tmp) { | ||
101 | struct skb_shared_hwtstamps ssh; | ||
102 | unsigned int class = ptp_classify_raw(skb); | ||
103 | struct cpts_skb_cb_data *skb_cb = | ||
104 | (struct cpts_skb_cb_data *)skb->cb; | ||
105 | |||
106 | if (cpts_match(skb, class, seqid, mtype)) { | ||
107 | u64 ns = timecounter_cyc2time(&cpts->tc, event->low); | ||
108 | |||
109 | memset(&ssh, 0, sizeof(ssh)); | ||
110 | ssh.hwtstamp = ns_to_ktime(ns); | ||
111 | skb_tstamp_tx(skb, &ssh); | ||
112 | found = true; | ||
113 | __skb_unlink(skb, &cpts->txq); | ||
114 | dev_consume_skb_any(skb); | ||
115 | dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n", | ||
116 | mtype, seqid); | ||
117 | } else if (time_after(jiffies, skb_cb->tmo)) { | ||
118 | /* timeout any expired skbs over 1s */ | ||
119 | dev_dbg(cpts->dev, | ||
120 | "expiring tx timestamp mtype %u seqid %04x\n", | ||
121 | mtype, seqid); | ||
122 | __skb_unlink(skb, &cpts->txq); | ||
123 | dev_consume_skb_any(skb); | ||
124 | } | ||
125 | } | ||
126 | |||
127 | return found; | ||
128 | } | ||
129 | |||
80 | /* | 130 | /* |
81 | * Returns zero if matching event type was found. | 131 | * Returns zero if matching event type was found. |
82 | */ | 132 | */ |
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match) | |||
101 | event->low = lo; | 151 | event->low = lo; |
102 | type = event_type(event); | 152 | type = event_type(event); |
103 | switch (type) { | 153 | switch (type) { |
154 | case CPTS_EV_TX: | ||
155 | if (cpts_match_tx_ts(cpts, event)) { | ||
156 | /* if the new event matches an existing skb, | ||
157 | * then don't queue it | ||
158 | */ | ||
159 | break; | ||
160 | } | ||
104 | case CPTS_EV_PUSH: | 161 | case CPTS_EV_PUSH: |
105 | case CPTS_EV_RX: | 162 | case CPTS_EV_RX: |
106 | case CPTS_EV_TX: | ||
107 | list_del_init(&event->list); | 163 | list_del_init(&event->list); |
108 | list_add_tail(&event->list, &cpts->events); | 164 | list_add_tail(&event->list, &cpts->events); |
109 | break; | 165 | break; |
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp, | |||
224 | return -EOPNOTSUPP; | 280 | return -EOPNOTSUPP; |
225 | } | 281 | } |
226 | 282 | ||
283 | static long cpts_overflow_check(struct ptp_clock_info *ptp) | ||
284 | { | ||
285 | struct cpts *cpts = container_of(ptp, struct cpts, info); | ||
286 | unsigned long delay = cpts->ov_check_period; | ||
287 | struct timespec64 ts; | ||
288 | unsigned long flags; | ||
289 | |||
290 | spin_lock_irqsave(&cpts->lock, flags); | ||
291 | ts = ns_to_timespec64(timecounter_read(&cpts->tc)); | ||
292 | |||
293 | if (!skb_queue_empty(&cpts->txq)) | ||
294 | delay = CPTS_SKB_TX_WORK_TIMEOUT; | ||
295 | spin_unlock_irqrestore(&cpts->lock, flags); | ||
296 | |||
297 | pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); | ||
298 | return (long)delay; | ||
299 | } | ||
300 | |||
227 | static struct ptp_clock_info cpts_info = { | 301 | static struct ptp_clock_info cpts_info = { |
228 | .owner = THIS_MODULE, | 302 | .owner = THIS_MODULE, |
229 | .name = "CTPS timer", | 303 | .name = "CTPS timer", |
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = { | |||
236 | .gettime64 = cpts_ptp_gettime, | 310 | .gettime64 = cpts_ptp_gettime, |
237 | .settime64 = cpts_ptp_settime, | 311 | .settime64 = cpts_ptp_settime, |
238 | .enable = cpts_ptp_enable, | 312 | .enable = cpts_ptp_enable, |
313 | .do_aux_work = cpts_overflow_check, | ||
239 | }; | 314 | }; |
240 | 315 | ||
241 | static void cpts_overflow_check(struct work_struct *work) | ||
242 | { | ||
243 | struct timespec64 ts; | ||
244 | struct cpts *cpts = container_of(work, struct cpts, overflow_work.work); | ||
245 | |||
246 | cpts_ptp_gettime(&cpts->info, &ts); | ||
247 | pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); | ||
248 | schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); | ||
249 | } | ||
250 | |||
251 | static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, | 316 | static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, |
252 | u16 ts_seqid, u8 ts_msgtype) | 317 | u16 ts_seqid, u8 ts_msgtype) |
253 | { | 318 | { |
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type) | |||
299 | return 0; | 364 | return 0; |
300 | 365 | ||
301 | spin_lock_irqsave(&cpts->lock, flags); | 366 | spin_lock_irqsave(&cpts->lock, flags); |
302 | cpts_fifo_read(cpts, CPTS_EV_PUSH); | 367 | cpts_fifo_read(cpts, -1); |
303 | list_for_each_safe(this, next, &cpts->events) { | 368 | list_for_each_safe(this, next, &cpts->events) { |
304 | event = list_entry(this, struct cpts_event, list); | 369 | event = list_entry(this, struct cpts_event, list); |
305 | if (event_expired(event)) { | 370 | if (event_expired(event)) { |
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type) | |||
317 | break; | 382 | break; |
318 | } | 383 | } |
319 | } | 384 | } |
385 | |||
386 | if (ev_type == CPTS_EV_TX && !ns) { | ||
387 | struct cpts_skb_cb_data *skb_cb = | ||
388 | (struct cpts_skb_cb_data *)skb->cb; | ||
389 | /* Not found, add frame to queue for processing later. | ||
390 | * The periodic FIFO check will handle this. | ||
391 | */ | ||
392 | skb_get(skb); | ||
393 | /* get the timestamp for timeouts */ | ||
394 | skb_cb->tmo = jiffies + msecs_to_jiffies(100); | ||
395 | __skb_queue_tail(&cpts->txq, skb); | ||
396 | ptp_schedule_worker(cpts->clock, 0); | ||
397 | } | ||
320 | spin_unlock_irqrestore(&cpts->lock, flags); | 398 | spin_unlock_irqrestore(&cpts->lock, flags); |
321 | 399 | ||
322 | return ns; | 400 | return ns; |
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts) | |||
358 | { | 436 | { |
359 | int err, i; | 437 | int err, i; |
360 | 438 | ||
439 | skb_queue_head_init(&cpts->txq); | ||
361 | INIT_LIST_HEAD(&cpts->events); | 440 | INIT_LIST_HEAD(&cpts->events); |
362 | INIT_LIST_HEAD(&cpts->pool); | 441 | INIT_LIST_HEAD(&cpts->pool); |
363 | for (i = 0; i < CPTS_MAX_EVENTS; i++) | 442 | for (i = 0; i < CPTS_MAX_EVENTS; i++) |
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts) | |||
378 | } | 457 | } |
379 | cpts->phc_index = ptp_clock_index(cpts->clock); | 458 | cpts->phc_index = ptp_clock_index(cpts->clock); |
380 | 459 | ||
381 | schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); | 460 | ptp_schedule_worker(cpts->clock, cpts->ov_check_period); |
382 | return 0; | 461 | return 0; |
383 | 462 | ||
384 | err_ptp: | 463 | err_ptp: |
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts) | |||
392 | if (WARN_ON(!cpts->clock)) | 471 | if (WARN_ON(!cpts->clock)) |
393 | return; | 472 | return; |
394 | 473 | ||
395 | cancel_delayed_work_sync(&cpts->overflow_work); | ||
396 | |||
397 | ptp_clock_unregister(cpts->clock); | 474 | ptp_clock_unregister(cpts->clock); |
398 | cpts->clock = NULL; | 475 | cpts->clock = NULL; |
399 | 476 | ||
400 | cpts_write32(cpts, 0, int_enable); | 477 | cpts_write32(cpts, 0, int_enable); |
401 | cpts_write32(cpts, 0, control); | 478 | cpts_write32(cpts, 0, control); |
402 | 479 | ||
480 | /* Drop all packet */ | ||
481 | skb_queue_purge(&cpts->txq); | ||
482 | |||
403 | clk_disable(cpts->refclk); | 483 | clk_disable(cpts->refclk); |
404 | } | 484 | } |
405 | EXPORT_SYMBOL_GPL(cpts_unregister); | 485 | EXPORT_SYMBOL_GPL(cpts_unregister); |
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs, | |||
476 | cpts->dev = dev; | 556 | cpts->dev = dev; |
477 | cpts->reg = (struct cpsw_cpts __iomem *)regs; | 557 | cpts->reg = (struct cpsw_cpts __iomem *)regs; |
478 | spin_lock_init(&cpts->lock); | 558 | spin_lock_init(&cpts->lock); |
479 | INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check); | ||
480 | 559 | ||
481 | ret = cpts_of_parse(cpts, node); | 560 | ret = cpts_of_parse(cpts, node); |
482 | if (ret) | 561 | if (ret) |
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h index 01ea82ba9cdc..73d73faf0f38 100644 --- a/drivers/net/ethernet/ti/cpts.h +++ b/drivers/net/ethernet/ti/cpts.h | |||
@@ -119,13 +119,13 @@ struct cpts { | |||
119 | u32 cc_mult; /* for the nominal frequency */ | 119 | u32 cc_mult; /* for the nominal frequency */ |
120 | struct cyclecounter cc; | 120 | struct cyclecounter cc; |
121 | struct timecounter tc; | 121 | struct timecounter tc; |
122 | struct delayed_work overflow_work; | ||
123 | int phc_index; | 122 | int phc_index; |
124 | struct clk *refclk; | 123 | struct clk *refclk; |
125 | struct list_head events; | 124 | struct list_head events; |
126 | struct list_head pool; | 125 | struct list_head pool; |
127 | struct cpts_event pool_data[CPTS_MAX_EVENTS]; | 126 | struct cpts_event pool_data[CPTS_MAX_EVENTS]; |
128 | unsigned long ov_check_period; | 127 | unsigned long ov_check_period; |
128 | struct sk_buff_head txq; | ||
129 | }; | 129 | }; |
130 | 130 | ||
131 | void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); | 131 | void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); |
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 1542e837fdfa..f38e32a7ec9c 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c | |||
@@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev) | |||
364 | 364 | ||
365 | gtp->dev = dev; | 365 | gtp->dev = dev; |
366 | 366 | ||
367 | dev->tstats = alloc_percpu(struct pcpu_sw_netstats); | 367 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
368 | if (!dev->tstats) | 368 | if (!dev->tstats) |
369 | return -ENOMEM; | 369 | return -ENOMEM; |
370 | 370 | ||
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index d1ea99a12cf2..98b25f6900c8 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -776,7 +776,8 @@ struct netvsc_device { | |||
776 | u32 max_chn; | 776 | u32 max_chn; |
777 | u32 num_chn; | 777 | u32 num_chn; |
778 | 778 | ||
779 | refcount_t sc_offered; | 779 | atomic_t open_chn; |
780 | wait_queue_head_t subchan_open; | ||
780 | 781 | ||
781 | struct rndis_device *extension; | 782 | struct rndis_device *extension; |
782 | 783 | ||
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 208f03aa83de..bffaf93d3cb0 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -76,6 +76,7 @@ static struct netvsc_device *alloc_net_device(void) | |||
76 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; | 76 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
77 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | 77 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; |
78 | init_completion(&net_device->channel_init_wait); | 78 | init_completion(&net_device->channel_init_wait); |
79 | init_waitqueue_head(&net_device->subchan_open); | ||
79 | 80 | ||
80 | return net_device; | 81 | return net_device; |
81 | } | 82 | } |
@@ -1268,6 +1269,8 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, | |||
1268 | 1269 | ||
1269 | nvchan->channel = device->channel; | 1270 | nvchan->channel = device->channel; |
1270 | nvchan->net_device = net_device; | 1271 | nvchan->net_device = net_device; |
1272 | u64_stats_init(&nvchan->tx_stats.syncp); | ||
1273 | u64_stats_init(&nvchan->rx_stats.syncp); | ||
1271 | } | 1274 | } |
1272 | 1275 | ||
1273 | /* Enable NAPI handler before init callbacks */ | 1276 | /* Enable NAPI handler before init callbacks */ |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 44165fe328a4..36e9ee82ec6f 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -1050,8 +1050,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) | |||
1050 | else | 1050 | else |
1051 | netif_napi_del(&nvchan->napi); | 1051 | netif_napi_del(&nvchan->napi); |
1052 | 1052 | ||
1053 | if (refcount_dec_and_test(&nvscdev->sc_offered)) | 1053 | atomic_inc(&nvscdev->open_chn); |
1054 | complete(&nvscdev->channel_init_wait); | 1054 | wake_up(&nvscdev->subchan_open); |
1055 | } | 1055 | } |
1056 | 1056 | ||
1057 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | 1057 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, |
@@ -1091,8 +1091,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | |||
1091 | net_device->max_chn = 1; | 1091 | net_device->max_chn = 1; |
1092 | net_device->num_chn = 1; | 1092 | net_device->num_chn = 1; |
1093 | 1093 | ||
1094 | refcount_set(&net_device->sc_offered, 0); | ||
1095 | |||
1096 | net_device->extension = rndis_device; | 1094 | net_device->extension = rndis_device; |
1097 | rndis_device->ndev = net; | 1095 | rndis_device->ndev = net; |
1098 | 1096 | ||
@@ -1216,6 +1214,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | |||
1216 | rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, | 1214 | rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, |
1217 | net_device->num_chn); | 1215 | net_device->num_chn); |
1218 | 1216 | ||
1217 | atomic_set(&net_device->open_chn, 1); | ||
1219 | num_rss_qs = net_device->num_chn - 1; | 1218 | num_rss_qs = net_device->num_chn - 1; |
1220 | if (num_rss_qs == 0) | 1219 | if (num_rss_qs == 0) |
1221 | return net_device; | 1220 | return net_device; |
@@ -1229,7 +1228,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | |||
1229 | } | 1228 | } |
1230 | } | 1229 | } |
1231 | 1230 | ||
1232 | refcount_set(&net_device->sc_offered, num_rss_qs); | ||
1233 | vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); | 1231 | vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); |
1234 | 1232 | ||
1235 | init_packet = &net_device->channel_init_pkt; | 1233 | init_packet = &net_device->channel_init_pkt; |
@@ -1246,15 +1244,19 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | |||
1246 | if (ret) | 1244 | if (ret) |
1247 | goto out; | 1245 | goto out; |
1248 | 1246 | ||
1247 | wait_for_completion(&net_device->channel_init_wait); | ||
1249 | if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { | 1248 | if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { |
1250 | ret = -ENODEV; | 1249 | ret = -ENODEV; |
1251 | goto out; | 1250 | goto out; |
1252 | } | 1251 | } |
1253 | wait_for_completion(&net_device->channel_init_wait); | ||
1254 | 1252 | ||
1255 | net_device->num_chn = 1 + | 1253 | net_device->num_chn = 1 + |
1256 | init_packet->msg.v5_msg.subchn_comp.num_subchannels; | 1254 | init_packet->msg.v5_msg.subchn_comp.num_subchannels; |
1257 | 1255 | ||
1256 | /* wait for all sub channels to open */ | ||
1257 | wait_event(net_device->subchan_open, | ||
1258 | atomic_read(&net_device->open_chn) == net_device->num_chn); | ||
1259 | |||
1258 | /* ignore failues from setting rss parameters, still have channels */ | 1260 | /* ignore failues from setting rss parameters, still have channels */ |
1259 | rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, | 1261 | rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, |
1260 | net_device->num_chn); | 1262 | net_device->num_chn); |
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index fdde20735416..58a9f990b553 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev) | |||
192 | 192 | ||
193 | netdev_lockdep_set_classes(dev); | 193 | netdev_lockdep_set_classes(dev); |
194 | 194 | ||
195 | ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); | 195 | ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats); |
196 | if (!ipvlan->pcpu_stats) | 196 | if (!ipvlan->pcpu_stats) |
197 | return -ENOMEM; | 197 | return -ENOMEM; |
198 | 198 | ||
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index bd4303944e44..a404552555d4 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
@@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch) | |||
1915 | spin_unlock(&pch->downl); | 1915 | spin_unlock(&pch->downl); |
1916 | /* see if there is anything from the attached unit to be sent */ | 1916 | /* see if there is anything from the attached unit to be sent */ |
1917 | if (skb_queue_empty(&pch->file.xq)) { | 1917 | if (skb_queue_empty(&pch->file.xq)) { |
1918 | read_lock(&pch->upl); | ||
1919 | ppp = pch->ppp; | 1918 | ppp = pch->ppp; |
1920 | if (ppp) | 1919 | if (ppp) |
1921 | ppp_xmit_process(ppp); | 1920 | __ppp_xmit_process(ppp); |
1922 | read_unlock(&pch->upl); | ||
1923 | } | 1921 | } |
1924 | } | 1922 | } |
1925 | 1923 | ||
1926 | static void ppp_channel_push(struct channel *pch) | 1924 | static void ppp_channel_push(struct channel *pch) |
1927 | { | 1925 | { |
1928 | local_bh_disable(); | 1926 | read_lock_bh(&pch->upl); |
1929 | 1927 | if (pch->ppp) { | |
1930 | __ppp_channel_push(pch); | 1928 | (*this_cpu_ptr(pch->ppp->xmit_recursion))++; |
1931 | 1929 | __ppp_channel_push(pch); | |
1932 | local_bh_enable(); | 1930 | (*this_cpu_ptr(pch->ppp->xmit_recursion))--; |
1931 | } else { | ||
1932 | __ppp_channel_push(pch); | ||
1933 | } | ||
1934 | read_unlock_bh(&pch->upl); | ||
1933 | } | 1935 | } |
1934 | 1936 | ||
1935 | /* | 1937 | /* |
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h index d1092421aaa7..9a4171b90947 100644 --- a/drivers/net/usb/asix.h +++ b/drivers/net/usb/asix.h | |||
@@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, | |||
209 | int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | 209 | int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, |
210 | struct asix_rx_fixup_info *rx); | 210 | struct asix_rx_fixup_info *rx); |
211 | int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); | 211 | int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); |
212 | void asix_rx_fixup_common_free(struct asix_common_private *dp); | ||
212 | 213 | ||
213 | struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | 214 | struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, |
214 | gfp_t flags); | 215 | gfp_t flags); |
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 7847436c441e..522d2900cd1d 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c | |||
@@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, | |||
75 | value, index, data, size); | 75 | value, index, data, size); |
76 | } | 76 | } |
77 | 77 | ||
78 | static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx) | ||
79 | { | ||
80 | /* Reset the variables that have a lifetime outside of | ||
81 | * asix_rx_fixup_internal() so that future processing starts from a | ||
82 | * known set of initial conditions. | ||
83 | */ | ||
84 | |||
85 | if (rx->ax_skb) { | ||
86 | /* Discard any incomplete Ethernet frame in the netdev buffer */ | ||
87 | kfree_skb(rx->ax_skb); | ||
88 | rx->ax_skb = NULL; | ||
89 | } | ||
90 | |||
91 | /* Assume the Data header 32-bit word is at the start of the current | ||
92 | * or next URB socket buffer so reset all the state variables. | ||
93 | */ | ||
94 | rx->remaining = 0; | ||
95 | rx->split_head = false; | ||
96 | rx->header = 0; | ||
97 | } | ||
98 | |||
78 | int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | 99 | int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, |
79 | struct asix_rx_fixup_info *rx) | 100 | struct asix_rx_fixup_info *rx) |
80 | { | 101 | { |
@@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | |||
99 | if (size != ((~rx->header >> 16) & 0x7ff)) { | 120 | if (size != ((~rx->header >> 16) & 0x7ff)) { |
100 | netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", | 121 | netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", |
101 | rx->remaining); | 122 | rx->remaining); |
102 | if (rx->ax_skb) { | 123 | reset_asix_rx_fixup_info(rx); |
103 | kfree_skb(rx->ax_skb); | ||
104 | rx->ax_skb = NULL; | ||
105 | /* Discard the incomplete netdev Ethernet frame | ||
106 | * and assume the Data header is at the start of | ||
107 | * the current URB socket buffer. | ||
108 | */ | ||
109 | } | ||
110 | rx->remaining = 0; | ||
111 | } | 124 | } |
112 | } | 125 | } |
113 | 126 | ||
@@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | |||
139 | if (size != ((~rx->header >> 16) & 0x7ff)) { | 152 | if (size != ((~rx->header >> 16) & 0x7ff)) { |
140 | netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", | 153 | netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", |
141 | rx->header, offset); | 154 | rx->header, offset); |
155 | reset_asix_rx_fixup_info(rx); | ||
142 | return 0; | 156 | return 0; |
143 | } | 157 | } |
144 | if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { | 158 | if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { |
145 | netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", | 159 | netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", |
146 | size); | 160 | size); |
161 | reset_asix_rx_fixup_info(rx); | ||
147 | return 0; | 162 | return 0; |
148 | } | 163 | } |
149 | 164 | ||
@@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | |||
168 | if (rx->ax_skb) { | 183 | if (rx->ax_skb) { |
169 | skb_put_data(rx->ax_skb, skb->data + offset, | 184 | skb_put_data(rx->ax_skb, skb->data + offset, |
170 | copy_length); | 185 | copy_length); |
171 | if (!rx->remaining) | 186 | if (!rx->remaining) { |
172 | usbnet_skb_return(dev, rx->ax_skb); | 187 | usbnet_skb_return(dev, rx->ax_skb); |
188 | rx->ax_skb = NULL; | ||
189 | } | ||
173 | } | 190 | } |
174 | 191 | ||
175 | offset += (copy_length + 1) & 0xfffe; | 192 | offset += (copy_length + 1) & 0xfffe; |
@@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | |||
178 | if (skb->len != offset) { | 195 | if (skb->len != offset) { |
179 | netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", | 196 | netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", |
180 | skb->len, offset); | 197 | skb->len, offset); |
198 | reset_asix_rx_fixup_info(rx); | ||
181 | return 0; | 199 | return 0; |
182 | } | 200 | } |
183 | 201 | ||
@@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb) | |||
192 | return asix_rx_fixup_internal(dev, skb, rx); | 210 | return asix_rx_fixup_internal(dev, skb, rx); |
193 | } | 211 | } |
194 | 212 | ||
213 | void asix_rx_fixup_common_free(struct asix_common_private *dp) | ||
214 | { | ||
215 | struct asix_rx_fixup_info *rx; | ||
216 | |||
217 | if (!dp) | ||
218 | return; | ||
219 | |||
220 | rx = &dp->rx_fixup_info; | ||
221 | |||
222 | if (rx->ax_skb) { | ||
223 | kfree_skb(rx->ax_skb); | ||
224 | rx->ax_skb = NULL; | ||
225 | } | ||
226 | } | ||
227 | |||
195 | struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | 228 | struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, |
196 | gfp_t flags) | 229 | gfp_t flags) |
197 | { | 230 | { |
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index a3aa0a27dfe5..b2ff88e69a81 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c | |||
@@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) | |||
764 | 764 | ||
765 | static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) | 765 | static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) |
766 | { | 766 | { |
767 | asix_rx_fixup_common_free(dev->driver_priv); | ||
767 | kfree(dev->driver_priv); | 768 | kfree(dev->driver_priv); |
768 | } | 769 | } |
769 | 770 | ||
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 5833f7e2a127..b99a7fb09f8e 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c | |||
@@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev) | |||
2367 | /* Init LTM */ | 2367 | /* Init LTM */ |
2368 | lan78xx_init_ltm(dev); | 2368 | lan78xx_init_ltm(dev); |
2369 | 2369 | ||
2370 | dev->net->hard_header_len += TX_OVERHEAD; | ||
2371 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | ||
2372 | |||
2373 | if (dev->udev->speed == USB_SPEED_SUPER) { | 2370 | if (dev->udev->speed == USB_SPEED_SUPER) { |
2374 | buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; | 2371 | buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; |
2375 | dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; | 2372 | dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; |
@@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) | |||
2855 | return ret; | 2852 | return ret; |
2856 | } | 2853 | } |
2857 | 2854 | ||
2855 | dev->net->hard_header_len += TX_OVERHEAD; | ||
2856 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | ||
2857 | |||
2858 | /* Init all registers */ | 2858 | /* Init all registers */ |
2859 | ret = lan78xx_reset(dev); | 2859 | ret = lan78xx_reset(dev); |
2860 | 2860 | ||
2861 | lan78xx_mdio_init(dev); | 2861 | ret = lan78xx_mdio_init(dev); |
2862 | 2862 | ||
2863 | dev->net->flags |= IFF_MULTICAST; | 2863 | dev->net->flags |= IFF_MULTICAST; |
2864 | 2864 | ||
2865 | pdata->wol = WAKE_MAGIC; | 2865 | pdata->wol = WAKE_MAGIC; |
2866 | 2866 | ||
2867 | return 0; | 2867 | return ret; |
2868 | } | 2868 | } |
2869 | 2869 | ||
2870 | static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) | 2870 | static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) |
@@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf, | |||
3525 | udev = interface_to_usbdev(intf); | 3525 | udev = interface_to_usbdev(intf); |
3526 | udev = usb_get_dev(udev); | 3526 | udev = usb_get_dev(udev); |
3527 | 3527 | ||
3528 | ret = -ENOMEM; | ||
3529 | netdev = alloc_etherdev(sizeof(struct lan78xx_net)); | 3528 | netdev = alloc_etherdev(sizeof(struct lan78xx_net)); |
3530 | if (!netdev) { | 3529 | if (!netdev) { |
3531 | dev_err(&intf->dev, "Error: OOM\n"); | 3530 | dev_err(&intf->dev, "Error: OOM\n"); |
3532 | goto out1; | 3531 | ret = -ENOMEM; |
3532 | goto out1; | ||
3533 | } | 3533 | } |
3534 | 3534 | ||
3535 | /* netdev_printk() needs this */ | 3535 | /* netdev_printk() needs this */ |
@@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf, | |||
3610 | ret = register_netdev(netdev); | 3610 | ret = register_netdev(netdev); |
3611 | if (ret != 0) { | 3611 | if (ret != 0) { |
3612 | netif_err(dev, probe, netdev, "couldn't register the device\n"); | 3612 | netif_err(dev, probe, netdev, "couldn't register the device\n"); |
3613 | goto out2; | 3613 | goto out3; |
3614 | } | 3614 | } |
3615 | 3615 | ||
3616 | usb_set_intfdata(intf, dev); | 3616 | usb_set_intfdata(intf, dev); |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 68c23b0ee40d..8c3733608271 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = { | |||
1175 | {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ | 1175 | {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ |
1176 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ | 1176 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ |
1177 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ | 1177 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ |
1178 | {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ | ||
1178 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ | 1179 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ |
1179 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | 1180 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
1180 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ | 1181 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index dbca067540d0..35e84a9e1cfb 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, | |||
623 | 623 | ||
624 | out: | 624 | out: |
625 | skb_gro_remcsum_cleanup(skb, &grc); | 625 | skb_gro_remcsum_cleanup(skb, &grc); |
626 | skb->remcsum_offload = 0; | ||
626 | NAPI_GRO_CB(skb)->flush |= flush; | 627 | NAPI_GRO_CB(skb)->flush |= flush; |
627 | 628 | ||
628 | return pp; | 629 | return pp; |
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index b77435783ef3..7eacc1c4b3b1 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/syscalls.h> | 29 | #include <linux/syscalls.h> |
30 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
31 | #include <uapi/linux/sched/types.h> | ||
31 | 32 | ||
32 | #include "ptp_private.h" | 33 | #include "ptp_private.h" |
33 | 34 | ||
@@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc) | |||
184 | kfree(ptp); | 185 | kfree(ptp); |
185 | } | 186 | } |
186 | 187 | ||
188 | static void ptp_aux_kworker(struct kthread_work *work) | ||
189 | { | ||
190 | struct ptp_clock *ptp = container_of(work, struct ptp_clock, | ||
191 | aux_work.work); | ||
192 | struct ptp_clock_info *info = ptp->info; | ||
193 | long delay; | ||
194 | |||
195 | delay = info->do_aux_work(info); | ||
196 | |||
197 | if (delay >= 0) | ||
198 | kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay); | ||
199 | } | ||
200 | |||
187 | /* public interface */ | 201 | /* public interface */ |
188 | 202 | ||
189 | struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, | 203 | struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, |
@@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, | |||
217 | mutex_init(&ptp->pincfg_mux); | 231 | mutex_init(&ptp->pincfg_mux); |
218 | init_waitqueue_head(&ptp->tsev_wq); | 232 | init_waitqueue_head(&ptp->tsev_wq); |
219 | 233 | ||
234 | if (ptp->info->do_aux_work) { | ||
235 | char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index); | ||
236 | |||
237 | kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker); | ||
238 | ptp->kworker = kthread_create_worker(0, worker_name ? | ||
239 | worker_name : info->name); | ||
240 | kfree(worker_name); | ||
241 | if (IS_ERR(ptp->kworker)) { | ||
242 | err = PTR_ERR(ptp->kworker); | ||
243 | pr_err("failed to create ptp aux_worker %d\n", err); | ||
244 | goto kworker_err; | ||
245 | } | ||
246 | } | ||
247 | |||
220 | err = ptp_populate_pin_groups(ptp); | 248 | err = ptp_populate_pin_groups(ptp); |
221 | if (err) | 249 | if (err) |
222 | goto no_pin_groups; | 250 | goto no_pin_groups; |
@@ -259,6 +287,9 @@ no_pps: | |||
259 | no_device: | 287 | no_device: |
260 | ptp_cleanup_pin_groups(ptp); | 288 | ptp_cleanup_pin_groups(ptp); |
261 | no_pin_groups: | 289 | no_pin_groups: |
290 | if (ptp->kworker) | ||
291 | kthread_destroy_worker(ptp->kworker); | ||
292 | kworker_err: | ||
262 | mutex_destroy(&ptp->tsevq_mux); | 293 | mutex_destroy(&ptp->tsevq_mux); |
263 | mutex_destroy(&ptp->pincfg_mux); | 294 | mutex_destroy(&ptp->pincfg_mux); |
264 | ida_simple_remove(&ptp_clocks_map, index); | 295 | ida_simple_remove(&ptp_clocks_map, index); |
@@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp) | |||
274 | ptp->defunct = 1; | 305 | ptp->defunct = 1; |
275 | wake_up_interruptible(&ptp->tsev_wq); | 306 | wake_up_interruptible(&ptp->tsev_wq); |
276 | 307 | ||
308 | if (ptp->kworker) { | ||
309 | kthread_cancel_delayed_work_sync(&ptp->aux_work); | ||
310 | kthread_destroy_worker(ptp->kworker); | ||
311 | } | ||
312 | |||
277 | /* Release the clock's resources. */ | 313 | /* Release the clock's resources. */ |
278 | if (ptp->pps_source) | 314 | if (ptp->pps_source) |
279 | pps_unregister_source(ptp->pps_source); | 315 | pps_unregister_source(ptp->pps_source); |
@@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp, | |||
339 | } | 375 | } |
340 | EXPORT_SYMBOL(ptp_find_pin); | 376 | EXPORT_SYMBOL(ptp_find_pin); |
341 | 377 | ||
378 | int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) | ||
379 | { | ||
380 | return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay); | ||
381 | } | ||
382 | EXPORT_SYMBOL(ptp_schedule_worker); | ||
383 | |||
342 | /* module operations */ | 384 | /* module operations */ |
343 | 385 | ||
344 | static void __exit ptp_exit(void) | 386 | static void __exit ptp_exit(void) |
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index d95888974d0c..b86f1bfecd6f 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include <linux/cdev.h> | 23 | #include <linux/cdev.h> |
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/kthread.h> | ||
25 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
26 | #include <linux/posix-clock.h> | 27 | #include <linux/posix-clock.h> |
27 | #include <linux/ptp_clock.h> | 28 | #include <linux/ptp_clock.h> |
@@ -56,6 +57,8 @@ struct ptp_clock { | |||
56 | struct attribute_group pin_attr_group; | 57 | struct attribute_group pin_attr_group; |
57 | /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ | 58 | /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ |
58 | const struct attribute_group *pin_attr_groups[2]; | 59 | const struct attribute_group *pin_attr_groups[2]; |
60 | struct kthread_worker *kworker; | ||
61 | struct kthread_delayed_work aux_work; | ||
59 | }; | 62 | }; |
60 | 63 | ||
61 | /* | 64 | /* |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 8975cd321390..d42e758518ed 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
2512 | struct rtable *rt = (struct rtable *) dst; | 2512 | struct rtable *rt = (struct rtable *) dst; |
2513 | __be32 *pkey = &ip_hdr(skb)->daddr; | 2513 | __be32 *pkey = &ip_hdr(skb)->daddr; |
2514 | 2514 | ||
2515 | if (rt->rt_gateway) | 2515 | if (rt && rt->rt_gateway) |
2516 | pkey = &rt->rt_gateway; | 2516 | pkey = &rt->rt_gateway; |
2517 | 2517 | ||
2518 | /* IPv4 */ | 2518 | /* IPv4 */ |
@@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
2523 | struct rt6_info *rt = (struct rt6_info *) dst; | 2523 | struct rt6_info *rt = (struct rt6_info *) dst; |
2524 | struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; | 2524 | struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; |
2525 | 2525 | ||
2526 | if (!ipv6_addr_any(&rt->rt6i_gateway)) | 2526 | if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) |
2527 | pkey = &rt->rt6i_gateway; | 2527 | pkey = &rt->rt6i_gateway; |
2528 | 2528 | ||
2529 | /* IPv6 */ | 2529 | /* IPv6 */ |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 707ee2f5954d..4591113c49de 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
@@ -3198,10 +3198,11 @@ static int query_disk(struct aac_dev *dev, void __user *arg) | |||
3198 | return -EBUSY; | 3198 | return -EBUSY; |
3199 | if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) | 3199 | if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) |
3200 | return -EFAULT; | 3200 | return -EFAULT; |
3201 | if (qd.cnum == -1) | 3201 | if (qd.cnum == -1) { |
3202 | if (qd.id < 0 || qd.id >= dev->maximum_num_containers) | ||
3203 | return -EINVAL; | ||
3202 | qd.cnum = qd.id; | 3204 | qd.cnum = qd.id; |
3203 | else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) | 3205 | } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) { |
3204 | { | ||
3205 | if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) | 3206 | if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) |
3206 | return -EINVAL; | 3207 | return -EINVAL; |
3207 | qd.instance = dev->scsi_host_ptr->host_no; | 3208 | qd.instance = dev->scsi_host_ptr->host_no; |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 7dfe709a7138..6844ba361616 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -2624,12 +2624,11 @@ static struct fcoe_transport bnx2fc_transport = { | |||
2624 | }; | 2624 | }; |
2625 | 2625 | ||
2626 | /** | 2626 | /** |
2627 | * bnx2fc_percpu_thread_create - Create a receive thread for an | 2627 | * bnx2fc_cpu_online - Create a receive thread for an online CPU |
2628 | * online CPU | ||
2629 | * | 2628 | * |
2630 | * @cpu: cpu index for the online cpu | 2629 | * @cpu: cpu index for the online cpu |
2631 | */ | 2630 | */ |
2632 | static void bnx2fc_percpu_thread_create(unsigned int cpu) | 2631 | static int bnx2fc_cpu_online(unsigned int cpu) |
2633 | { | 2632 | { |
2634 | struct bnx2fc_percpu_s *p; | 2633 | struct bnx2fc_percpu_s *p; |
2635 | struct task_struct *thread; | 2634 | struct task_struct *thread; |
@@ -2639,15 +2638,17 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu) | |||
2639 | thread = kthread_create_on_node(bnx2fc_percpu_io_thread, | 2638 | thread = kthread_create_on_node(bnx2fc_percpu_io_thread, |
2640 | (void *)p, cpu_to_node(cpu), | 2639 | (void *)p, cpu_to_node(cpu), |
2641 | "bnx2fc_thread/%d", cpu); | 2640 | "bnx2fc_thread/%d", cpu); |
2641 | if (IS_ERR(thread)) | ||
2642 | return PTR_ERR(thread); | ||
2643 | |||
2642 | /* bind thread to the cpu */ | 2644 | /* bind thread to the cpu */ |
2643 | if (likely(!IS_ERR(thread))) { | 2645 | kthread_bind(thread, cpu); |
2644 | kthread_bind(thread, cpu); | 2646 | p->iothread = thread; |
2645 | p->iothread = thread; | 2647 | wake_up_process(thread); |
2646 | wake_up_process(thread); | 2648 | return 0; |
2647 | } | ||
2648 | } | 2649 | } |
2649 | 2650 | ||
2650 | static void bnx2fc_percpu_thread_destroy(unsigned int cpu) | 2651 | static int bnx2fc_cpu_offline(unsigned int cpu) |
2651 | { | 2652 | { |
2652 | struct bnx2fc_percpu_s *p; | 2653 | struct bnx2fc_percpu_s *p; |
2653 | struct task_struct *thread; | 2654 | struct task_struct *thread; |
@@ -2661,7 +2662,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu) | |||
2661 | thread = p->iothread; | 2662 | thread = p->iothread; |
2662 | p->iothread = NULL; | 2663 | p->iothread = NULL; |
2663 | 2664 | ||
2664 | |||
2665 | /* Free all work in the list */ | 2665 | /* Free all work in the list */ |
2666 | list_for_each_entry_safe(work, tmp, &p->work_list, list) { | 2666 | list_for_each_entry_safe(work, tmp, &p->work_list, list) { |
2667 | list_del_init(&work->list); | 2667 | list_del_init(&work->list); |
@@ -2673,20 +2673,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu) | |||
2673 | 2673 | ||
2674 | if (thread) | 2674 | if (thread) |
2675 | kthread_stop(thread); | 2675 | kthread_stop(thread); |
2676 | } | ||
2677 | |||
2678 | |||
2679 | static int bnx2fc_cpu_online(unsigned int cpu) | ||
2680 | { | ||
2681 | printk(PFX "CPU %x online: Create Rx thread\n", cpu); | ||
2682 | bnx2fc_percpu_thread_create(cpu); | ||
2683 | return 0; | ||
2684 | } | ||
2685 | |||
2686 | static int bnx2fc_cpu_dead(unsigned int cpu) | ||
2687 | { | ||
2688 | printk(PFX "CPU %x offline: Remove Rx thread\n", cpu); | ||
2689 | bnx2fc_percpu_thread_destroy(cpu); | ||
2690 | return 0; | 2676 | return 0; |
2691 | } | 2677 | } |
2692 | 2678 | ||
@@ -2761,30 +2747,16 @@ static int __init bnx2fc_mod_init(void) | |||
2761 | spin_lock_init(&p->fp_work_lock); | 2747 | spin_lock_init(&p->fp_work_lock); |
2762 | } | 2748 | } |
2763 | 2749 | ||
2764 | get_online_cpus(); | 2750 | rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online", |
2765 | 2751 | bnx2fc_cpu_online, bnx2fc_cpu_offline); | |
2766 | for_each_online_cpu(cpu) | ||
2767 | bnx2fc_percpu_thread_create(cpu); | ||
2768 | |||
2769 | rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, | ||
2770 | "scsi/bnx2fc:online", | ||
2771 | bnx2fc_cpu_online, NULL); | ||
2772 | if (rc < 0) | 2752 | if (rc < 0) |
2773 | goto stop_threads; | 2753 | goto stop_thread; |
2774 | bnx2fc_online_state = rc; | 2754 | bnx2fc_online_state = rc; |
2775 | 2755 | ||
2776 | cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead", | ||
2777 | NULL, bnx2fc_cpu_dead); | ||
2778 | put_online_cpus(); | ||
2779 | |||
2780 | cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); | 2756 | cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); |
2781 | |||
2782 | return 0; | 2757 | return 0; |
2783 | 2758 | ||
2784 | stop_threads: | 2759 | stop_thread: |
2785 | for_each_online_cpu(cpu) | ||
2786 | bnx2fc_percpu_thread_destroy(cpu); | ||
2787 | put_online_cpus(); | ||
2788 | kthread_stop(l2_thread); | 2760 | kthread_stop(l2_thread); |
2789 | free_wq: | 2761 | free_wq: |
2790 | destroy_workqueue(bnx2fc_wq); | 2762 | destroy_workqueue(bnx2fc_wq); |
@@ -2803,7 +2775,6 @@ static void __exit bnx2fc_mod_exit(void) | |||
2803 | struct fcoe_percpu_s *bg; | 2775 | struct fcoe_percpu_s *bg; |
2804 | struct task_struct *l2_thread; | 2776 | struct task_struct *l2_thread; |
2805 | struct sk_buff *skb; | 2777 | struct sk_buff *skb; |
2806 | unsigned int cpu = 0; | ||
2807 | 2778 | ||
2808 | /* | 2779 | /* |
2809 | * NOTE: Since cnic calls register_driver routine rtnl_lock, | 2780 | * NOTE: Since cnic calls register_driver routine rtnl_lock, |
@@ -2844,16 +2815,7 @@ static void __exit bnx2fc_mod_exit(void) | |||
2844 | if (l2_thread) | 2815 | if (l2_thread) |
2845 | kthread_stop(l2_thread); | 2816 | kthread_stop(l2_thread); |
2846 | 2817 | ||
2847 | get_online_cpus(); | 2818 | cpuhp_remove_state(bnx2fc_online_state); |
2848 | /* Destroy per cpu threads */ | ||
2849 | for_each_online_cpu(cpu) { | ||
2850 | bnx2fc_percpu_thread_destroy(cpu); | ||
2851 | } | ||
2852 | |||
2853 | cpuhp_remove_state_nocalls(bnx2fc_online_state); | ||
2854 | cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD); | ||
2855 | |||
2856 | put_online_cpus(); | ||
2857 | 2819 | ||
2858 | destroy_workqueue(bnx2fc_wq); | 2820 | destroy_workqueue(bnx2fc_wq); |
2859 | /* | 2821 | /* |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 913c750205ce..26de61d65a4d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c | |||
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) | |||
1008 | return work; | 1008 | return work; |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | /* Pending work request completion */ | ||
1012 | static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) | ||
1013 | { | ||
1014 | unsigned int cpu = wqe % num_possible_cpus(); | ||
1015 | struct bnx2fc_percpu_s *fps; | ||
1016 | struct bnx2fc_work *work; | ||
1017 | |||
1018 | fps = &per_cpu(bnx2fc_percpu, cpu); | ||
1019 | spin_lock_bh(&fps->fp_work_lock); | ||
1020 | if (fps->iothread) { | ||
1021 | work = bnx2fc_alloc_work(tgt, wqe); | ||
1022 | if (work) { | ||
1023 | list_add_tail(&work->list, &fps->work_list); | ||
1024 | wake_up_process(fps->iothread); | ||
1025 | spin_unlock_bh(&fps->fp_work_lock); | ||
1026 | return; | ||
1027 | } | ||
1028 | } | ||
1029 | spin_unlock_bh(&fps->fp_work_lock); | ||
1030 | bnx2fc_process_cq_compl(tgt, wqe); | ||
1031 | } | ||
1032 | |||
1011 | int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) | 1033 | int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) |
1012 | { | 1034 | { |
1013 | struct fcoe_cqe *cq; | 1035 | struct fcoe_cqe *cq; |
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) | |||
1042 | /* Unsolicited event notification */ | 1064 | /* Unsolicited event notification */ |
1043 | bnx2fc_process_unsol_compl(tgt, wqe); | 1065 | bnx2fc_process_unsol_compl(tgt, wqe); |
1044 | } else { | 1066 | } else { |
1045 | /* Pending work request completion */ | 1067 | bnx2fc_pending_work(tgt, wqe); |
1046 | struct bnx2fc_work *work = NULL; | ||
1047 | struct bnx2fc_percpu_s *fps = NULL; | ||
1048 | unsigned int cpu = wqe % num_possible_cpus(); | ||
1049 | |||
1050 | fps = &per_cpu(bnx2fc_percpu, cpu); | ||
1051 | spin_lock_bh(&fps->fp_work_lock); | ||
1052 | if (unlikely(!fps->iothread)) | ||
1053 | goto unlock; | ||
1054 | |||
1055 | work = bnx2fc_alloc_work(tgt, wqe); | ||
1056 | if (work) | ||
1057 | list_add_tail(&work->list, | ||
1058 | &fps->work_list); | ||
1059 | unlock: | ||
1060 | spin_unlock_bh(&fps->fp_work_lock); | ||
1061 | |||
1062 | /* Pending work request completion */ | ||
1063 | if (fps->iothread && work) | ||
1064 | wake_up_process(fps->iothread); | ||
1065 | else | ||
1066 | bnx2fc_process_cq_compl(tgt, wqe); | ||
1067 | num_free_sqes++; | 1068 | num_free_sqes++; |
1068 | } | 1069 | } |
1069 | cqe++; | 1070 | cqe++; |
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 86afc002814c..4ebcda8d9500 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c | |||
@@ -404,12 +404,11 @@ int bnx2i_get_stats(void *handle) | |||
404 | 404 | ||
405 | 405 | ||
406 | /** | 406 | /** |
407 | * bnx2i_percpu_thread_create - Create a receive thread for an | 407 | * bnx2i_cpu_online - Create a receive thread for an online CPU |
408 | * online CPU | ||
409 | * | 408 | * |
410 | * @cpu: cpu index for the online cpu | 409 | * @cpu: cpu index for the online cpu |
411 | */ | 410 | */ |
412 | static void bnx2i_percpu_thread_create(unsigned int cpu) | 411 | static int bnx2i_cpu_online(unsigned int cpu) |
413 | { | 412 | { |
414 | struct bnx2i_percpu_s *p; | 413 | struct bnx2i_percpu_s *p; |
415 | struct task_struct *thread; | 414 | struct task_struct *thread; |
@@ -419,16 +418,17 @@ static void bnx2i_percpu_thread_create(unsigned int cpu) | |||
419 | thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, | 418 | thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, |
420 | cpu_to_node(cpu), | 419 | cpu_to_node(cpu), |
421 | "bnx2i_thread/%d", cpu); | 420 | "bnx2i_thread/%d", cpu); |
421 | if (IS_ERR(thread)) | ||
422 | return PTR_ERR(thread); | ||
423 | |||
422 | /* bind thread to the cpu */ | 424 | /* bind thread to the cpu */ |
423 | if (likely(!IS_ERR(thread))) { | 425 | kthread_bind(thread, cpu); |
424 | kthread_bind(thread, cpu); | 426 | p->iothread = thread; |
425 | p->iothread = thread; | 427 | wake_up_process(thread); |
426 | wake_up_process(thread); | 428 | return 0; |
427 | } | ||
428 | } | 429 | } |
429 | 430 | ||
430 | 431 | static int bnx2i_cpu_offline(unsigned int cpu) | |
431 | static void bnx2i_percpu_thread_destroy(unsigned int cpu) | ||
432 | { | 432 | { |
433 | struct bnx2i_percpu_s *p; | 433 | struct bnx2i_percpu_s *p; |
434 | struct task_struct *thread; | 434 | struct task_struct *thread; |
@@ -451,19 +451,6 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu) | |||
451 | spin_unlock_bh(&p->p_work_lock); | 451 | spin_unlock_bh(&p->p_work_lock); |
452 | if (thread) | 452 | if (thread) |
453 | kthread_stop(thread); | 453 | kthread_stop(thread); |
454 | } | ||
455 | |||
456 | static int bnx2i_cpu_online(unsigned int cpu) | ||
457 | { | ||
458 | pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu); | ||
459 | bnx2i_percpu_thread_create(cpu); | ||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | static int bnx2i_cpu_dead(unsigned int cpu) | ||
464 | { | ||
465 | pr_info("CPU %x offline: Remove Rx thread\n", cpu); | ||
466 | bnx2i_percpu_thread_destroy(cpu); | ||
467 | return 0; | 454 | return 0; |
468 | } | 455 | } |
469 | 456 | ||
@@ -511,27 +498,14 @@ static int __init bnx2i_mod_init(void) | |||
511 | p->iothread = NULL; | 498 | p->iothread = NULL; |
512 | } | 499 | } |
513 | 500 | ||
514 | get_online_cpus(); | 501 | err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online", |
515 | 502 | bnx2i_cpu_online, bnx2i_cpu_offline); | |
516 | for_each_online_cpu(cpu) | ||
517 | bnx2i_percpu_thread_create(cpu); | ||
518 | |||
519 | err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, | ||
520 | "scsi/bnx2i:online", | ||
521 | bnx2i_cpu_online, NULL); | ||
522 | if (err < 0) | 503 | if (err < 0) |
523 | goto remove_threads; | 504 | goto unreg_driver; |
524 | bnx2i_online_state = err; | 505 | bnx2i_online_state = err; |
525 | |||
526 | cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead", | ||
527 | NULL, bnx2i_cpu_dead); | ||
528 | put_online_cpus(); | ||
529 | return 0; | 506 | return 0; |
530 | 507 | ||
531 | remove_threads: | 508 | unreg_driver: |
532 | for_each_online_cpu(cpu) | ||
533 | bnx2i_percpu_thread_destroy(cpu); | ||
534 | put_online_cpus(); | ||
535 | cnic_unregister_driver(CNIC_ULP_ISCSI); | 509 | cnic_unregister_driver(CNIC_ULP_ISCSI); |
536 | unreg_xport: | 510 | unreg_xport: |
537 | iscsi_unregister_transport(&bnx2i_iscsi_transport); | 511 | iscsi_unregister_transport(&bnx2i_iscsi_transport); |
@@ -551,7 +525,6 @@ out: | |||
551 | static void __exit bnx2i_mod_exit(void) | 525 | static void __exit bnx2i_mod_exit(void) |
552 | { | 526 | { |
553 | struct bnx2i_hba *hba; | 527 | struct bnx2i_hba *hba; |
554 | unsigned cpu = 0; | ||
555 | 528 | ||
556 | mutex_lock(&bnx2i_dev_lock); | 529 | mutex_lock(&bnx2i_dev_lock); |
557 | while (!list_empty(&adapter_list)) { | 530 | while (!list_empty(&adapter_list)) { |
@@ -569,14 +542,7 @@ static void __exit bnx2i_mod_exit(void) | |||
569 | } | 542 | } |
570 | mutex_unlock(&bnx2i_dev_lock); | 543 | mutex_unlock(&bnx2i_dev_lock); |
571 | 544 | ||
572 | get_online_cpus(); | 545 | cpuhp_remove_state(bnx2i_online_state); |
573 | |||
574 | for_each_online_cpu(cpu) | ||
575 | bnx2i_percpu_thread_destroy(cpu); | ||
576 | |||
577 | cpuhp_remove_state_nocalls(bnx2i_online_state); | ||
578 | cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD); | ||
579 | put_online_cpus(); | ||
580 | 546 | ||
581 | iscsi_unregister_transport(&bnx2i_iscsi_transport); | 547 | iscsi_unregister_transport(&bnx2i_iscsi_transport); |
582 | cnic_unregister_driver(CNIC_ULP_ISCSI); | 548 | cnic_unregister_driver(CNIC_ULP_ISCSI); |
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 4d038926a455..351f06dfc5a0 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h | |||
@@ -528,7 +528,8 @@ struct fip_vlan { | |||
528 | #define QEDF_WRITE (1 << 0) | 528 | #define QEDF_WRITE (1 << 0) |
529 | #define MAX_FIBRE_LUNS 0xffffffff | 529 | #define MAX_FIBRE_LUNS 0xffffffff |
530 | 530 | ||
531 | #define QEDF_MAX_NUM_CQS 8 | 531 | #define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \ |
532 | num_online_cpus()) | ||
532 | 533 | ||
533 | /* | 534 | /* |
534 | * PCI function probe defines | 535 | * PCI function probe defines |
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 7786c97e033f..1d13c9ca517d 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c | |||
@@ -2760,11 +2760,9 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf) | |||
2760 | * we allocation is the minimum off: | 2760 | * we allocation is the minimum off: |
2761 | * | 2761 | * |
2762 | * Number of CPUs | 2762 | * Number of CPUs |
2763 | * Number of MSI-X vectors | 2763 | * Number allocated by qed for our PCI function |
2764 | * Max number allocated in hardware (QEDF_MAX_NUM_CQS) | ||
2765 | */ | 2764 | */ |
2766 | qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS, | 2765 | qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf); |
2767 | num_online_cpus()); | ||
2768 | 2766 | ||
2769 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", | 2767 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", |
2770 | qedf->num_queues); | 2768 | qedf->num_queues); |
@@ -2962,6 +2960,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) | |||
2962 | goto err1; | 2960 | goto err1; |
2963 | } | 2961 | } |
2964 | 2962 | ||
2963 | /* Learn information crucial for qedf to progress */ | ||
2964 | rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); | ||
2965 | if (rc) { | ||
2966 | QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); | ||
2967 | goto err1; | ||
2968 | } | ||
2969 | |||
2965 | /* queue allocation code should come here | 2970 | /* queue allocation code should come here |
2966 | * order should be | 2971 | * order should be |
2967 | * slowpath_start | 2972 | * slowpath_start |
@@ -2977,13 +2982,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) | |||
2977 | } | 2982 | } |
2978 | qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); | 2983 | qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); |
2979 | 2984 | ||
2980 | /* Learn information crucial for qedf to progress */ | ||
2981 | rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); | ||
2982 | if (rc) { | ||
2983 | QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); | ||
2984 | goto err1; | ||
2985 | } | ||
2986 | |||
2987 | /* Record BDQ producer doorbell addresses */ | 2985 | /* Record BDQ producer doorbell addresses */ |
2988 | qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; | 2986 | qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; |
2989 | qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; | 2987 | qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 4fe606b000b4..d7ff71e0c85c 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -751,35 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, | |||
751 | return count; | 751 | return count; |
752 | } | 752 | } |
753 | 753 | ||
754 | static bool sg_is_valid_dxfer(sg_io_hdr_t *hp) | ||
755 | { | ||
756 | switch (hp->dxfer_direction) { | ||
757 | case SG_DXFER_NONE: | ||
758 | if (hp->dxferp || hp->dxfer_len > 0) | ||
759 | return false; | ||
760 | return true; | ||
761 | case SG_DXFER_FROM_DEV: | ||
762 | /* | ||
763 | * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp | ||
764 | * can either be NULL or != NULL so there's no point in checking | ||
765 | * it either. So just return true. | ||
766 | */ | ||
767 | return true; | ||
768 | case SG_DXFER_TO_DEV: | ||
769 | case SG_DXFER_TO_FROM_DEV: | ||
770 | if (!hp->dxferp || hp->dxfer_len == 0) | ||
771 | return false; | ||
772 | return true; | ||
773 | case SG_DXFER_UNKNOWN: | ||
774 | if ((!hp->dxferp && hp->dxfer_len) || | ||
775 | (hp->dxferp && hp->dxfer_len == 0)) | ||
776 | return false; | ||
777 | return true; | ||
778 | default: | ||
779 | return false; | ||
780 | } | ||
781 | } | ||
782 | |||
783 | static int | 754 | static int |
784 | sg_common_write(Sg_fd * sfp, Sg_request * srp, | 755 | sg_common_write(Sg_fd * sfp, Sg_request * srp, |
785 | unsigned char *cmnd, int timeout, int blocking) | 756 | unsigned char *cmnd, int timeout, int blocking) |
@@ -800,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
800 | "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", | 771 | "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", |
801 | (int) cmnd[0], (int) hp->cmd_len)); | 772 | (int) cmnd[0], (int) hp->cmd_len)); |
802 | 773 | ||
803 | if (!sg_is_valid_dxfer(hp)) | 774 | if (hp->dxfer_len >= SZ_256M) |
804 | return -EINVAL; | 775 | return -EINVAL; |
805 | 776 | ||
806 | k = sg_start_req(srp, cmnd); | 777 | k = sg_start_req(srp, cmnd); |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index ceef77c0416a..ff48f0096810 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -874,7 +874,6 @@ xfs_ialloc( | |||
874 | case S_IFREG: | 874 | case S_IFREG: |
875 | case S_IFDIR: | 875 | case S_IFDIR: |
876 | if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { | 876 | if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { |
877 | uint64_t di_flags2 = 0; | ||
878 | uint di_flags = 0; | 877 | uint di_flags = 0; |
879 | 878 | ||
880 | if (S_ISDIR(mode)) { | 879 | if (S_ISDIR(mode)) { |
@@ -911,20 +910,23 @@ xfs_ialloc( | |||
911 | di_flags |= XFS_DIFLAG_NODEFRAG; | 910 | di_flags |= XFS_DIFLAG_NODEFRAG; |
912 | if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM) | 911 | if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM) |
913 | di_flags |= XFS_DIFLAG_FILESTREAM; | 912 | di_flags |= XFS_DIFLAG_FILESTREAM; |
914 | if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX) | ||
915 | di_flags2 |= XFS_DIFLAG2_DAX; | ||
916 | 913 | ||
917 | ip->i_d.di_flags |= di_flags; | 914 | ip->i_d.di_flags |= di_flags; |
918 | ip->i_d.di_flags2 |= di_flags2; | ||
919 | } | 915 | } |
920 | if (pip && | 916 | if (pip && |
921 | (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) && | 917 | (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) && |
922 | pip->i_d.di_version == 3 && | 918 | pip->i_d.di_version == 3 && |
923 | ip->i_d.di_version == 3) { | 919 | ip->i_d.di_version == 3) { |
920 | uint64_t di_flags2 = 0; | ||
921 | |||
924 | if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) { | 922 | if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) { |
925 | ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE; | 923 | di_flags2 |= XFS_DIFLAG2_COWEXTSIZE; |
926 | ip->i_d.di_cowextsize = pip->i_d.di_cowextsize; | 924 | ip->i_d.di_cowextsize = pip->i_d.di_cowextsize; |
927 | } | 925 | } |
926 | if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX) | ||
927 | di_flags2 |= XFS_DIFLAG2_DAX; | ||
928 | |||
929 | ip->i_d.di_flags2 |= di_flags2; | ||
928 | } | 930 | } |
929 | /* FALLTHROUGH */ | 931 | /* FALLTHROUGH */ |
930 | case S_IFLNK: | 932 | case S_IFLNK: |
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index fbe72b134bef..43aa42a3a5d3 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
@@ -539,6 +539,7 @@ xlog_discard_endio( | |||
539 | 539 | ||
540 | INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work); | 540 | INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work); |
541 | queue_work(xfs_discard_wq, &ctx->discard_endio_work); | 541 | queue_work(xfs_discard_wq, &ctx->discard_endio_work); |
542 | bio_put(bio); | ||
542 | } | 543 | } |
543 | 544 | ||
544 | static void | 545 | static void |
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index b56573bf440d..82b30e638430 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h | |||
@@ -39,8 +39,6 @@ enum cpuhp_state { | |||
39 | CPUHP_PCI_XGENE_DEAD, | 39 | CPUHP_PCI_XGENE_DEAD, |
40 | CPUHP_IOMMU_INTEL_DEAD, | 40 | CPUHP_IOMMU_INTEL_DEAD, |
41 | CPUHP_LUSTRE_CFS_DEAD, | 41 | CPUHP_LUSTRE_CFS_DEAD, |
42 | CPUHP_SCSI_BNX2FC_DEAD, | ||
43 | CPUHP_SCSI_BNX2I_DEAD, | ||
44 | CPUHP_WORKQUEUE_PREP, | 42 | CPUHP_WORKQUEUE_PREP, |
45 | CPUHP_POWER_NUMA_PREPARE, | 43 | CPUHP_POWER_NUMA_PREPARE, |
46 | CPUHP_HRTIMERS_PREPARE, | 44 | CPUHP_HRTIMERS_PREPARE, |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 00ca5b86a753..d501d3956f13 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -689,7 +689,8 @@ i2c_unlock_adapter(struct i2c_adapter *adapter) | |||
689 | #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ | 689 | #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ |
690 | #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ | 690 | #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ |
691 | #define I2C_CLASS_SPD (1<<7) /* Memory modules */ | 691 | #define I2C_CLASS_SPD (1<<7) /* Memory modules */ |
692 | #define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */ | 692 | /* Warn users that the adapter doesn't support classes anymore */ |
693 | #define I2C_CLASS_DEPRECATED (1<<8) | ||
693 | 694 | ||
694 | /* Internal numbers to terminate lists */ | 695 | /* Internal numbers to terminate lists */ |
695 | #define I2C_CLIENT_END 0xfffeU | 696 | #define I2C_CLIENT_END 0xfffeU |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index aad5d81dfb44..b54517c05e9a 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -620,6 +620,7 @@ struct mlx4_caps { | |||
620 | u32 dmfs_high_rate_qpn_base; | 620 | u32 dmfs_high_rate_qpn_base; |
621 | u32 dmfs_high_rate_qpn_range; | 621 | u32 dmfs_high_rate_qpn_range; |
622 | u32 vf_caps; | 622 | u32 vf_caps; |
623 | bool wol_port[MLX4_MAX_PORTS + 1]; | ||
623 | struct mlx4_rate_limit_caps rl_caps; | 624 | struct mlx4_rate_limit_caps rl_caps; |
624 | }; | 625 | }; |
625 | 626 | ||
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 6f41270d80c0..f378dc0e7eaf 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h | |||
@@ -212,7 +212,6 @@ struct mlx5_wqe_ctrl_seg { | |||
212 | #define MLX5_WQE_CTRL_OPCODE_MASK 0xff | 212 | #define MLX5_WQE_CTRL_OPCODE_MASK 0xff |
213 | #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 | 213 | #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 |
214 | #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 | 214 | #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 |
215 | #define MLX5_WQE_AV_EXT 0x80000000 | ||
216 | 215 | ||
217 | enum { | 216 | enum { |
218 | MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, | 217 | MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, |
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 892148c448cc..5216d2eb2289 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -681,10 +681,10 @@ struct nand_buffers { | |||
681 | * @tWW_min: WP# transition to WE# low | 681 | * @tWW_min: WP# transition to WE# low |
682 | */ | 682 | */ |
683 | struct nand_sdr_timings { | 683 | struct nand_sdr_timings { |
684 | u32 tBERS_max; | 684 | u64 tBERS_max; |
685 | u32 tCCS_min; | 685 | u32 tCCS_min; |
686 | u32 tPROG_max; | 686 | u64 tPROG_max; |
687 | u32 tR_max; | 687 | u64 tR_max; |
688 | u32 tALH_min; | 688 | u32 tALH_min; |
689 | u32 tADL_min; | 689 | u32 tADL_min; |
690 | u32 tALS_min; | 690 | u32 tALS_min; |
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index a026bfd089db..51349d124ee5 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h | |||
@@ -99,6 +99,11 @@ struct system_device_crosststamp; | |||
99 | * parameter func: the desired function to use. | 99 | * parameter func: the desired function to use. |
100 | * parameter chan: the function channel index to use. | 100 | * parameter chan: the function channel index to use. |
101 | * | 101 | * |
102 | * @do_work: Request driver to perform auxiliary (periodic) operations | ||
103 | * Driver should return delay of the next auxiliary work scheduling | ||
104 | * time (>=0) or negative value in case further scheduling | ||
105 | * is not required. | ||
106 | * | ||
102 | * Drivers should embed their ptp_clock_info within a private | 107 | * Drivers should embed their ptp_clock_info within a private |
103 | * structure, obtaining a reference to it using container_of(). | 108 | * structure, obtaining a reference to it using container_of(). |
104 | * | 109 | * |
@@ -126,6 +131,7 @@ struct ptp_clock_info { | |||
126 | struct ptp_clock_request *request, int on); | 131 | struct ptp_clock_request *request, int on); |
127 | int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, | 132 | int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, |
128 | enum ptp_pin_function func, unsigned int chan); | 133 | enum ptp_pin_function func, unsigned int chan); |
134 | long (*do_aux_work)(struct ptp_clock_info *ptp); | ||
129 | }; | 135 | }; |
130 | 136 | ||
131 | struct ptp_clock; | 137 | struct ptp_clock; |
@@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp); | |||
211 | int ptp_find_pin(struct ptp_clock *ptp, | 217 | int ptp_find_pin(struct ptp_clock *ptp, |
212 | enum ptp_pin_function func, unsigned int chan); | 218 | enum ptp_pin_function func, unsigned int chan); |
213 | 219 | ||
220 | /** | ||
221 | * ptp_schedule_worker() - schedule ptp auxiliary work | ||
222 | * | ||
223 | * @ptp: The clock obtained from ptp_clock_register(). | ||
224 | * @delay: number of jiffies to wait before queuing | ||
225 | * See kthread_queue_delayed_work() for more info. | ||
226 | */ | ||
227 | |||
228 | int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay); | ||
229 | |||
214 | #else | 230 | #else |
215 | static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, | 231 | static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, |
216 | struct device *parent) | 232 | struct device *parent) |
@@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp) | |||
225 | static inline int ptp_find_pin(struct ptp_clock *ptp, | 241 | static inline int ptp_find_pin(struct ptp_clock *ptp, |
226 | enum ptp_pin_function func, unsigned int chan) | 242 | enum ptp_pin_function func, unsigned int chan) |
227 | { return -1; } | 243 | { return -1; } |
244 | static inline int ptp_schedule_worker(struct ptp_clock *ptp, | ||
245 | unsigned long delay) | ||
246 | { return -EOPNOTSUPP; } | ||
247 | |||
228 | #endif | 248 | #endif |
229 | 249 | ||
230 | #endif | 250 | #endif |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 999f3efe572b..afdab3781425 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -1902,6 +1902,16 @@ extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, | |||
1902 | u64 xmit_time); | 1902 | u64 xmit_time); |
1903 | extern void tcp_rack_reo_timeout(struct sock *sk); | 1903 | extern void tcp_rack_reo_timeout(struct sock *sk); |
1904 | 1904 | ||
1905 | /* At how many usecs into the future should the RTO fire? */ | ||
1906 | static inline s64 tcp_rto_delta_us(const struct sock *sk) | ||
1907 | { | ||
1908 | const struct sk_buff *skb = tcp_write_queue_head(sk); | ||
1909 | u32 rto = inet_csk(sk)->icsk_rto; | ||
1910 | u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto); | ||
1911 | |||
1912 | return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; | ||
1913 | } | ||
1914 | |||
1905 | /* | 1915 | /* |
1906 | * Save and compile IPv4 options, return a pointer to it | 1916 | * Save and compile IPv4 options, return a pointer to it |
1907 | */ | 1917 | */ |
diff --git a/kernel/futex.c b/kernel/futex.c index 16dbe4c93895..f50b434756c1 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -670,13 +670,14 @@ again: | |||
670 | * this reference was taken by ihold under the page lock | 670 | * this reference was taken by ihold under the page lock |
671 | * pinning the inode in place so i_lock was unnecessary. The | 671 | * pinning the inode in place so i_lock was unnecessary. The |
672 | * only way for this check to fail is if the inode was | 672 | * only way for this check to fail is if the inode was |
673 | * truncated in parallel so warn for now if this happens. | 673 | * truncated in parallel which is almost certainly an |
674 | * application bug. In such a case, just retry. | ||
674 | * | 675 | * |
675 | * We are not calling into get_futex_key_refs() in file-backed | 676 | * We are not calling into get_futex_key_refs() in file-backed |
676 | * cases, therefore a successful atomic_inc return below will | 677 | * cases, therefore a successful atomic_inc return below will |
677 | * guarantee that get_futex_key() will still imply smp_mb(); (B). | 678 | * guarantee that get_futex_key() will still imply smp_mb(); (B). |
678 | */ | 679 | */ |
679 | if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) { | 680 | if (!atomic_inc_not_zero(&inode->i_count)) { |
680 | rcu_read_unlock(); | 681 | rcu_read_unlock(); |
681 | put_page(page); | 682 | put_page(page); |
682 | 683 | ||
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index e1133bc634b5..8a3ce79b1307 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -1549,9 +1549,41 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, | |||
1549 | return found; | 1549 | return found; |
1550 | } | 1550 | } |
1551 | 1551 | ||
1552 | /** | ||
1553 | * batadv_tt_global_sync_flags - update TT sync flags | ||
1554 | * @tt_global: the TT global entry to update sync flags in | ||
1555 | * | ||
1556 | * Updates the sync flag bits in the tt_global flag attribute with a logical | ||
1557 | * OR of all sync flags from any of its TT orig entries. | ||
1558 | */ | ||
1559 | static void | ||
1560 | batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global) | ||
1561 | { | ||
1562 | struct batadv_tt_orig_list_entry *orig_entry; | ||
1563 | const struct hlist_head *head; | ||
1564 | u16 flags = BATADV_NO_FLAGS; | ||
1565 | |||
1566 | rcu_read_lock(); | ||
1567 | head = &tt_global->orig_list; | ||
1568 | hlist_for_each_entry_rcu(orig_entry, head, list) | ||
1569 | flags |= orig_entry->flags; | ||
1570 | rcu_read_unlock(); | ||
1571 | |||
1572 | flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK); | ||
1573 | tt_global->common.flags = flags; | ||
1574 | } | ||
1575 | |||
1576 | /** | ||
1577 | * batadv_tt_global_orig_entry_add - add or update a TT orig entry | ||
1578 | * @tt_global: the TT global entry to add an orig entry in | ||
1579 | * @orig_node: the originator to add an orig entry for | ||
1580 | * @ttvn: translation table version number of this changeset | ||
1581 | * @flags: TT sync flags | ||
1582 | */ | ||
1552 | static void | 1583 | static void |
1553 | batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | 1584 | batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, |
1554 | struct batadv_orig_node *orig_node, int ttvn) | 1585 | struct batadv_orig_node *orig_node, int ttvn, |
1586 | u8 flags) | ||
1555 | { | 1587 | { |
1556 | struct batadv_tt_orig_list_entry *orig_entry; | 1588 | struct batadv_tt_orig_list_entry *orig_entry; |
1557 | 1589 | ||
@@ -1561,7 +1593,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | |||
1561 | * was added during a "temporary client detection" | 1593 | * was added during a "temporary client detection" |
1562 | */ | 1594 | */ |
1563 | orig_entry->ttvn = ttvn; | 1595 | orig_entry->ttvn = ttvn; |
1564 | goto out; | 1596 | orig_entry->flags = flags; |
1597 | goto sync_flags; | ||
1565 | } | 1598 | } |
1566 | 1599 | ||
1567 | orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC); | 1600 | orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC); |
@@ -1573,6 +1606,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | |||
1573 | batadv_tt_global_size_inc(orig_node, tt_global->common.vid); | 1606 | batadv_tt_global_size_inc(orig_node, tt_global->common.vid); |
1574 | orig_entry->orig_node = orig_node; | 1607 | orig_entry->orig_node = orig_node; |
1575 | orig_entry->ttvn = ttvn; | 1608 | orig_entry->ttvn = ttvn; |
1609 | orig_entry->flags = flags; | ||
1576 | kref_init(&orig_entry->refcount); | 1610 | kref_init(&orig_entry->refcount); |
1577 | 1611 | ||
1578 | spin_lock_bh(&tt_global->list_lock); | 1612 | spin_lock_bh(&tt_global->list_lock); |
@@ -1582,6 +1616,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | |||
1582 | spin_unlock_bh(&tt_global->list_lock); | 1616 | spin_unlock_bh(&tt_global->list_lock); |
1583 | atomic_inc(&tt_global->orig_list_count); | 1617 | atomic_inc(&tt_global->orig_list_count); |
1584 | 1618 | ||
1619 | sync_flags: | ||
1620 | batadv_tt_global_sync_flags(tt_global); | ||
1585 | out: | 1621 | out: |
1586 | if (orig_entry) | 1622 | if (orig_entry) |
1587 | batadv_tt_orig_list_entry_put(orig_entry); | 1623 | batadv_tt_orig_list_entry_put(orig_entry); |
@@ -1703,10 +1739,10 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
1703 | } | 1739 | } |
1704 | 1740 | ||
1705 | /* the change can carry possible "attribute" flags like the | 1741 | /* the change can carry possible "attribute" flags like the |
1706 | * TT_CLIENT_WIFI, therefore they have to be copied in the | 1742 | * TT_CLIENT_TEMP, therefore they have to be copied in the |
1707 | * client entry | 1743 | * client entry |
1708 | */ | 1744 | */ |
1709 | common->flags |= flags; | 1745 | common->flags |= flags & (~BATADV_TT_SYNC_MASK); |
1710 | 1746 | ||
1711 | /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only | 1747 | /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only |
1712 | * one originator left in the list and we previously received a | 1748 | * one originator left in the list and we previously received a |
@@ -1723,7 +1759,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
1723 | } | 1759 | } |
1724 | add_orig_entry: | 1760 | add_orig_entry: |
1725 | /* add the new orig_entry (if needed) or update it */ | 1761 | /* add the new orig_entry (if needed) or update it */ |
1726 | batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn); | 1762 | batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn, |
1763 | flags & BATADV_TT_SYNC_MASK); | ||
1727 | 1764 | ||
1728 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 1765 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
1729 | "Creating new global tt entry: %pM (vid: %d, via %pM)\n", | 1766 | "Creating new global tt entry: %pM (vid: %d, via %pM)\n", |
@@ -1946,6 +1983,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, | |||
1946 | struct batadv_tt_orig_list_entry *orig, | 1983 | struct batadv_tt_orig_list_entry *orig, |
1947 | bool best) | 1984 | bool best) |
1948 | { | 1985 | { |
1986 | u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags; | ||
1949 | void *hdr; | 1987 | void *hdr; |
1950 | struct batadv_orig_node_vlan *vlan; | 1988 | struct batadv_orig_node_vlan *vlan; |
1951 | u8 last_ttvn; | 1989 | u8 last_ttvn; |
@@ -1975,7 +2013,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, | |||
1975 | nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) || | 2013 | nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) || |
1976 | nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || | 2014 | nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || |
1977 | nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || | 2015 | nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || |
1978 | nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags)) | 2016 | nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags)) |
1979 | goto nla_put_failure; | 2017 | goto nla_put_failure; |
1980 | 2018 | ||
1981 | if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) | 2019 | if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) |
@@ -2589,6 +2627,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
2589 | unsigned short vid) | 2627 | unsigned short vid) |
2590 | { | 2628 | { |
2591 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; | 2629 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
2630 | struct batadv_tt_orig_list_entry *tt_orig; | ||
2592 | struct batadv_tt_common_entry *tt_common; | 2631 | struct batadv_tt_common_entry *tt_common; |
2593 | struct batadv_tt_global_entry *tt_global; | 2632 | struct batadv_tt_global_entry *tt_global; |
2594 | struct hlist_head *head; | 2633 | struct hlist_head *head; |
@@ -2627,8 +2666,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
2627 | /* find out if this global entry is announced by this | 2666 | /* find out if this global entry is announced by this |
2628 | * originator | 2667 | * originator |
2629 | */ | 2668 | */ |
2630 | if (!batadv_tt_global_entry_has_orig(tt_global, | 2669 | tt_orig = batadv_tt_global_orig_entry_find(tt_global, |
2631 | orig_node)) | 2670 | orig_node); |
2671 | if (!tt_orig) | ||
2632 | continue; | 2672 | continue; |
2633 | 2673 | ||
2634 | /* use network order to read the VID: this ensures that | 2674 | /* use network order to read the VID: this ensures that |
@@ -2640,10 +2680,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
2640 | /* compute the CRC on flags that have to be kept in sync | 2680 | /* compute the CRC on flags that have to be kept in sync |
2641 | * among nodes | 2681 | * among nodes |
2642 | */ | 2682 | */ |
2643 | flags = tt_common->flags & BATADV_TT_SYNC_MASK; | 2683 | flags = tt_orig->flags; |
2644 | crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags)); | 2684 | crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags)); |
2645 | 2685 | ||
2646 | crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN); | 2686 | crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN); |
2687 | |||
2688 | batadv_tt_orig_list_entry_put(tt_orig); | ||
2647 | } | 2689 | } |
2648 | rcu_read_unlock(); | 2690 | rcu_read_unlock(); |
2649 | } | 2691 | } |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index ea43a6449247..a62795868794 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -1260,6 +1260,7 @@ struct batadv_tt_global_entry { | |||
1260 | * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client | 1260 | * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client |
1261 | * @orig_node: pointer to orig node announcing this non-mesh client | 1261 | * @orig_node: pointer to orig node announcing this non-mesh client |
1262 | * @ttvn: translation table version number which added the non-mesh client | 1262 | * @ttvn: translation table version number which added the non-mesh client |
1263 | * @flags: per orig entry TT sync flags | ||
1263 | * @list: list node for batadv_tt_global_entry::orig_list | 1264 | * @list: list node for batadv_tt_global_entry::orig_list |
1264 | * @refcount: number of contexts the object is used | 1265 | * @refcount: number of contexts the object is used |
1265 | * @rcu: struct used for freeing in an RCU-safe manner | 1266 | * @rcu: struct used for freeing in an RCU-safe manner |
@@ -1267,6 +1268,7 @@ struct batadv_tt_global_entry { | |||
1267 | struct batadv_tt_orig_list_entry { | 1268 | struct batadv_tt_orig_list_entry { |
1268 | struct batadv_orig_node *orig_node; | 1269 | struct batadv_orig_node *orig_node; |
1269 | u8 ttvn; | 1270 | u8 ttvn; |
1271 | u8 flags; | ||
1270 | struct hlist_node list; | 1272 | struct hlist_node list; |
1271 | struct kref refcount; | 1273 | struct kref refcount; |
1272 | struct rcu_head rcu; | 1274 | struct rcu_head rcu; |
diff --git a/net/core/dev.c b/net/core/dev.c index 1d75499add72..3f69f6e71824 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2732,7 +2732,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) | |||
2732 | { | 2732 | { |
2733 | if (tx_path) | 2733 | if (tx_path) |
2734 | return skb->ip_summed != CHECKSUM_PARTIAL && | 2734 | return skb->ip_summed != CHECKSUM_PARTIAL && |
2735 | skb->ip_summed != CHECKSUM_NONE; | 2735 | skb->ip_summed != CHECKSUM_UNNECESSARY; |
2736 | 2736 | ||
2737 | return skb->ip_summed == CHECKSUM_NONE; | 2737 | return skb->ip_summed == CHECKSUM_NONE; |
2738 | } | 2738 | } |
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index c4c6e1969ed0..2ae8f54cb321 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb) | |||
1523 | int taglen; | 1523 | int taglen; |
1524 | 1524 | ||
1525 | for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { | 1525 | for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { |
1526 | if (optptr[0] == IPOPT_CIPSO) | 1526 | switch (optptr[0]) { |
1527 | case IPOPT_CIPSO: | ||
1527 | return optptr; | 1528 | return optptr; |
1528 | taglen = optptr[1]; | 1529 | case IPOPT_END: |
1530 | return NULL; | ||
1531 | case IPOPT_NOOP: | ||
1532 | taglen = 1; | ||
1533 | break; | ||
1534 | default: | ||
1535 | taglen = optptr[1]; | ||
1536 | } | ||
1529 | optlen -= taglen; | 1537 | optlen -= taglen; |
1530 | optptr += taglen; | 1538 | optptr += taglen; |
1531 | } | 1539 | } |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 8e0257d01200..1540db65241a 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
@@ -450,6 +450,7 @@ out_unlock: | |||
450 | out: | 450 | out: |
451 | NAPI_GRO_CB(skb)->flush |= flush; | 451 | NAPI_GRO_CB(skb)->flush |= flush; |
452 | skb_gro_remcsum_cleanup(skb, &grc); | 452 | skb_gro_remcsum_cleanup(skb, &grc); |
453 | skb->remcsum_offload = 0; | ||
453 | 454 | ||
454 | return pp; | 455 | return pp; |
455 | } | 456 | } |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 842ed75ccb25..d73903fe8c83 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -106,6 +106,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; | |||
106 | #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ | 106 | #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ |
107 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ | 107 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ |
108 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ | 108 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ |
109 | #define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */ | ||
109 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ | 110 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ |
110 | #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ | 111 | #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ |
111 | #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ | 112 | #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ |
@@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk) | |||
2520 | return; | 2521 | return; |
2521 | 2522 | ||
2522 | /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ | 2523 | /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ |
2523 | if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || | 2524 | if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && |
2524 | (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { | 2525 | (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { |
2525 | tp->snd_cwnd = tp->snd_ssthresh; | 2526 | tp->snd_cwnd = tp->snd_ssthresh; |
2526 | tp->snd_cwnd_stamp = tcp_jiffies32; | 2527 | tp->snd_cwnd_stamp = tcp_jiffies32; |
2527 | } | 2528 | } |
@@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk) | |||
3004 | /* Offset the time elapsed after installing regular RTO */ | 3005 | /* Offset the time elapsed after installing regular RTO */ |
3005 | if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || | 3006 | if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || |
3006 | icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { | 3007 | icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { |
3007 | struct sk_buff *skb = tcp_write_queue_head(sk); | 3008 | s64 delta_us = tcp_rto_delta_us(sk); |
3008 | u64 rto_time_stamp = skb->skb_mstamp + | ||
3009 | jiffies_to_usecs(rto); | ||
3010 | s64 delta_us = rto_time_stamp - tp->tcp_mstamp; | ||
3011 | /* delta_us may not be positive if the socket is locked | 3009 | /* delta_us may not be positive if the socket is locked |
3012 | * when the retrans timer fires and is rescheduled. | 3010 | * when the retrans timer fires and is rescheduled. |
3013 | */ | 3011 | */ |
@@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk) | |||
3019 | } | 3017 | } |
3020 | } | 3018 | } |
3021 | 3019 | ||
3020 | /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */ | ||
3021 | static void tcp_set_xmit_timer(struct sock *sk) | ||
3022 | { | ||
3023 | if (!tcp_schedule_loss_probe(sk)) | ||
3024 | tcp_rearm_rto(sk); | ||
3025 | } | ||
3026 | |||
3022 | /* If we get here, the whole TSO packet has not been acked. */ | 3027 | /* If we get here, the whole TSO packet has not been acked. */ |
3023 | static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) | 3028 | static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) |
3024 | { | 3029 | { |
@@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
3180 | ca_rtt_us, sack->rate); | 3185 | ca_rtt_us, sack->rate); |
3181 | 3186 | ||
3182 | if (flag & FLAG_ACKED) { | 3187 | if (flag & FLAG_ACKED) { |
3183 | tcp_rearm_rto(sk); | 3188 | flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ |
3184 | if (unlikely(icsk->icsk_mtup.probe_size && | 3189 | if (unlikely(icsk->icsk_mtup.probe_size && |
3185 | !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { | 3190 | !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { |
3186 | tcp_mtup_probe_success(sk); | 3191 | tcp_mtup_probe_success(sk); |
@@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
3208 | * after when the head was last (re)transmitted. Otherwise the | 3213 | * after when the head was last (re)transmitted. Otherwise the |
3209 | * timeout may continue to extend in loss recovery. | 3214 | * timeout may continue to extend in loss recovery. |
3210 | */ | 3215 | */ |
3211 | tcp_rearm_rto(sk); | 3216 | flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ |
3212 | } | 3217 | } |
3213 | 3218 | ||
3214 | if (icsk->icsk_ca_ops->pkts_acked) { | 3219 | if (icsk->icsk_ca_ops->pkts_acked) { |
@@ -3575,9 +3580,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3575 | if (after(ack, tp->snd_nxt)) | 3580 | if (after(ack, tp->snd_nxt)) |
3576 | goto invalid_ack; | 3581 | goto invalid_ack; |
3577 | 3582 | ||
3578 | if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) | ||
3579 | tcp_rearm_rto(sk); | ||
3580 | |||
3581 | if (after(ack, prior_snd_una)) { | 3583 | if (after(ack, prior_snd_una)) { |
3582 | flag |= FLAG_SND_UNA_ADVANCED; | 3584 | flag |= FLAG_SND_UNA_ADVANCED; |
3583 | icsk->icsk_retransmits = 0; | 3585 | icsk->icsk_retransmits = 0; |
@@ -3626,18 +3628,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3626 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, | 3628 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, |
3627 | &sack_state); | 3629 | &sack_state); |
3628 | 3630 | ||
3631 | if (tp->tlp_high_seq) | ||
3632 | tcp_process_tlp_ack(sk, ack, flag); | ||
3633 | /* If needed, reset TLP/RTO timer; RACK may later override this. */ | ||
3634 | if (flag & FLAG_SET_XMIT_TIMER) | ||
3635 | tcp_set_xmit_timer(sk); | ||
3636 | |||
3629 | if (tcp_ack_is_dubious(sk, flag)) { | 3637 | if (tcp_ack_is_dubious(sk, flag)) { |
3630 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); | 3638 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); |
3631 | tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); | 3639 | tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); |
3632 | } | 3640 | } |
3633 | if (tp->tlp_high_seq) | ||
3634 | tcp_process_tlp_ack(sk, ack, flag); | ||
3635 | 3641 | ||
3636 | if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) | 3642 | if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) |
3637 | sk_dst_confirm(sk); | 3643 | sk_dst_confirm(sk); |
3638 | 3644 | ||
3639 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) | ||
3640 | tcp_schedule_loss_probe(sk); | ||
3641 | delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ | 3645 | delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ |
3642 | lost = tp->lost - lost; /* freshly marked lost */ | 3646 | lost = tp->lost - lost; /* freshly marked lost */ |
3643 | tcp_rate_gen(sk, delivered, lost, sack_state.rate); | 3647 | tcp_rate_gen(sk, delivered, lost, sack_state.rate); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d49bff51bdb7..3e0d19631534 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2375,23 +2375,14 @@ bool tcp_schedule_loss_probe(struct sock *sk) | |||
2375 | { | 2375 | { |
2376 | struct inet_connection_sock *icsk = inet_csk(sk); | 2376 | struct inet_connection_sock *icsk = inet_csk(sk); |
2377 | struct tcp_sock *tp = tcp_sk(sk); | 2377 | struct tcp_sock *tp = tcp_sk(sk); |
2378 | u32 timeout, tlp_time_stamp, rto_time_stamp; | 2378 | u32 timeout, rto_delta_us; |
2379 | 2379 | ||
2380 | /* No consecutive loss probes. */ | ||
2381 | if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { | ||
2382 | tcp_rearm_rto(sk); | ||
2383 | return false; | ||
2384 | } | ||
2385 | /* Don't do any loss probe on a Fast Open connection before 3WHS | 2380 | /* Don't do any loss probe on a Fast Open connection before 3WHS |
2386 | * finishes. | 2381 | * finishes. |
2387 | */ | 2382 | */ |
2388 | if (tp->fastopen_rsk) | 2383 | if (tp->fastopen_rsk) |
2389 | return false; | 2384 | return false; |
2390 | 2385 | ||
2391 | /* TLP is only scheduled when next timer event is RTO. */ | ||
2392 | if (icsk->icsk_pending != ICSK_TIME_RETRANS) | ||
2393 | return false; | ||
2394 | |||
2395 | /* Schedule a loss probe in 2*RTT for SACK capable connections | 2386 | /* Schedule a loss probe in 2*RTT for SACK capable connections |
2396 | * in Open state, that are either limited by cwnd or application. | 2387 | * in Open state, that are either limited by cwnd or application. |
2397 | */ | 2388 | */ |
@@ -2418,14 +2409,10 @@ bool tcp_schedule_loss_probe(struct sock *sk) | |||
2418 | timeout = TCP_TIMEOUT_INIT; | 2409 | timeout = TCP_TIMEOUT_INIT; |
2419 | } | 2410 | } |
2420 | 2411 | ||
2421 | /* If RTO is shorter, just schedule TLP in its place. */ | 2412 | /* If the RTO formula yields an earlier time, then use that time. */ |
2422 | tlp_time_stamp = tcp_jiffies32 + timeout; | 2413 | rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */ |
2423 | rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; | 2414 | if (rto_delta_us > 0) |
2424 | if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { | 2415 | timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); |
2425 | s32 delta = rto_time_stamp - tcp_jiffies32; | ||
2426 | if (delta > 0) | ||
2427 | timeout = delta; | ||
2428 | } | ||
2429 | 2416 | ||
2430 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, | 2417 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, |
2431 | TCP_RTO_MAX); | 2418 | TCP_RTO_MAX); |
@@ -3450,6 +3437,10 @@ int tcp_connect(struct sock *sk) | |||
3450 | int err; | 3437 | int err; |
3451 | 3438 | ||
3452 | tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB); | 3439 | tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB); |
3440 | |||
3441 | if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) | ||
3442 | return -EHOSTUNREACH; /* Routing failure or similar. */ | ||
3443 | |||
3453 | tcp_connect_init(sk); | 3444 | tcp_connect_init(sk); |
3454 | 3445 | ||
3455 | if (unlikely(tp->repair)) { | 3446 | if (unlikely(tp->repair)) { |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index f753f9d2fee3..655dd8d7f064 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -640,7 +640,8 @@ static void tcp_keepalive_timer (unsigned long data) | |||
640 | goto death; | 640 | goto death; |
641 | } | 641 | } |
642 | 642 | ||
643 | if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) | 643 | if (!sock_flag(sk, SOCK_KEEPOPEN) || |
644 | ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) | ||
644 | goto out; | 645 | goto out; |
645 | 646 | ||
646 | elapsed = keepalive_time_when(tp); | 647 | elapsed = keepalive_time_when(tp); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7ecbe5eb19f8..c73e61750642 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -2356,6 +2356,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu | |||
2356 | if (on_link) | 2356 | if (on_link) |
2357 | nrt->rt6i_flags &= ~RTF_GATEWAY; | 2357 | nrt->rt6i_flags &= ~RTF_GATEWAY; |
2358 | 2358 | ||
2359 | nrt->rt6i_protocol = RTPROT_REDIRECT; | ||
2359 | nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; | 2360 | nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; |
2360 | 2361 | ||
2361 | if (ip6_ins_rt(nrt)) | 2362 | if (ip6_ins_rt(nrt)) |
@@ -2466,6 +2467,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net, | |||
2466 | .fc_dst_len = prefixlen, | 2467 | .fc_dst_len = prefixlen, |
2467 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | | 2468 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | |
2468 | RTF_UP | RTF_PREF(pref), | 2469 | RTF_UP | RTF_PREF(pref), |
2470 | .fc_protocol = RTPROT_RA, | ||
2469 | .fc_nlinfo.portid = 0, | 2471 | .fc_nlinfo.portid = 0, |
2470 | .fc_nlinfo.nlh = NULL, | 2472 | .fc_nlinfo.nlh = NULL, |
2471 | .fc_nlinfo.nl_net = net, | 2473 | .fc_nlinfo.nl_net = net, |
@@ -2518,6 +2520,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr, | |||
2518 | .fc_ifindex = dev->ifindex, | 2520 | .fc_ifindex = dev->ifindex, |
2519 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | | 2521 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | |
2520 | RTF_UP | RTF_EXPIRES | RTF_PREF(pref), | 2522 | RTF_UP | RTF_EXPIRES | RTF_PREF(pref), |
2523 | .fc_protocol = RTPROT_RA, | ||
2521 | .fc_nlinfo.portid = 0, | 2524 | .fc_nlinfo.portid = 0, |
2522 | .fc_nlinfo.nlh = NULL, | 2525 | .fc_nlinfo.nlh = NULL, |
2523 | .fc_nlinfo.nl_net = dev_net(dev), | 2526 | .fc_nlinfo.nl_net = dev_net(dev), |
@@ -3432,14 +3435,6 @@ static int rt6_fill_node(struct net *net, | |||
3432 | rtm->rtm_flags = 0; | 3435 | rtm->rtm_flags = 0; |
3433 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; | 3436 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; |
3434 | rtm->rtm_protocol = rt->rt6i_protocol; | 3437 | rtm->rtm_protocol = rt->rt6i_protocol; |
3435 | if (rt->rt6i_flags & RTF_DYNAMIC) | ||
3436 | rtm->rtm_protocol = RTPROT_REDIRECT; | ||
3437 | else if (rt->rt6i_flags & RTF_ADDRCONF) { | ||
3438 | if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO)) | ||
3439 | rtm->rtm_protocol = RTPROT_RA; | ||
3440 | else | ||
3441 | rtm->rtm_protocol = RTPROT_KERNEL; | ||
3442 | } | ||
3443 | 3438 | ||
3444 | if (rt->rt6i_flags & RTF_CACHE) | 3439 | if (rt->rt6i_flags & RTF_CACHE) |
3445 | rtm->rtm_flags |= RTM_F_CLONED; | 3440 | rtm->rtm_flags |= RTM_F_CLONED; |
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index e10624aa6959..9722bf839d9d 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
@@ -1015,8 +1015,10 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, | |||
1015 | if (rds_ib_ring_empty(&ic->i_recv_ring)) | 1015 | if (rds_ib_ring_empty(&ic->i_recv_ring)) |
1016 | rds_ib_stats_inc(s_ib_rx_ring_empty); | 1016 | rds_ib_stats_inc(s_ib_rx_ring_empty); |
1017 | 1017 | ||
1018 | if (rds_ib_ring_low(&ic->i_recv_ring)) | 1018 | if (rds_ib_ring_low(&ic->i_recv_ring)) { |
1019 | rds_ib_recv_refill(conn, 0, GFP_NOWAIT); | 1019 | rds_ib_recv_refill(conn, 0, GFP_NOWAIT); |
1020 | rds_ib_stats_inc(s_ib_rx_refill_from_cq); | ||
1021 | } | ||
1020 | } | 1022 | } |
1021 | 1023 | ||
1022 | int rds_ib_recv_path(struct rds_conn_path *cp) | 1024 | int rds_ib_recv_path(struct rds_conn_path *cp) |
@@ -1029,6 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp) | |||
1029 | if (rds_conn_up(conn)) { | 1031 | if (rds_conn_up(conn)) { |
1030 | rds_ib_attempt_ack(ic); | 1032 | rds_ib_attempt_ack(ic); |
1031 | rds_ib_recv_refill(conn, 0, GFP_KERNEL); | 1033 | rds_ib_recv_refill(conn, 0, GFP_KERNEL); |
1034 | rds_ib_stats_inc(s_ib_rx_refill_from_thread); | ||
1032 | } | 1035 | } |
1033 | 1036 | ||
1034 | return ret; | 1037 | return ret; |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 36f0ced9e60c..94ba5cfab860 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -36,8 +36,8 @@ static struct tc_action_ops act_ipt_ops; | |||
36 | static unsigned int xt_net_id; | 36 | static unsigned int xt_net_id; |
37 | static struct tc_action_ops act_xt_ops; | 37 | static struct tc_action_ops act_xt_ops; |
38 | 38 | ||
39 | static int ipt_init_target(struct xt_entry_target *t, char *table, | 39 | static int ipt_init_target(struct net *net, struct xt_entry_target *t, |
40 | unsigned int hook) | 40 | char *table, unsigned int hook) |
41 | { | 41 | { |
42 | struct xt_tgchk_param par; | 42 | struct xt_tgchk_param par; |
43 | struct xt_target *target; | 43 | struct xt_target *target; |
@@ -49,6 +49,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, | |||
49 | return PTR_ERR(target); | 49 | return PTR_ERR(target); |
50 | 50 | ||
51 | t->u.kernel.target = target; | 51 | t->u.kernel.target = target; |
52 | par.net = net; | ||
52 | par.table = table; | 53 | par.table = table; |
53 | par.entryinfo = NULL; | 54 | par.entryinfo = NULL; |
54 | par.target = target; | 55 | par.target = target; |
@@ -91,10 +92,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { | |||
91 | [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) }, | 92 | [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) }, |
92 | }; | 93 | }; |
93 | 94 | ||
94 | static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla, | 95 | static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, |
95 | struct nlattr *est, struct tc_action **a, | 96 | struct nlattr *est, struct tc_action **a, |
96 | const struct tc_action_ops *ops, int ovr, int bind) | 97 | const struct tc_action_ops *ops, int ovr, int bind) |
97 | { | 98 | { |
99 | struct tc_action_net *tn = net_generic(net, id); | ||
98 | struct nlattr *tb[TCA_IPT_MAX + 1]; | 100 | struct nlattr *tb[TCA_IPT_MAX + 1]; |
99 | struct tcf_ipt *ipt; | 101 | struct tcf_ipt *ipt; |
100 | struct xt_entry_target *td, *t; | 102 | struct xt_entry_target *td, *t; |
@@ -159,7 +161,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla, | |||
159 | if (unlikely(!t)) | 161 | if (unlikely(!t)) |
160 | goto err2; | 162 | goto err2; |
161 | 163 | ||
162 | err = ipt_init_target(t, tname, hook); | 164 | err = ipt_init_target(net, t, tname, hook); |
163 | if (err < 0) | 165 | if (err < 0) |
164 | goto err3; | 166 | goto err3; |
165 | 167 | ||
@@ -193,18 +195,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, | |||
193 | struct nlattr *est, struct tc_action **a, int ovr, | 195 | struct nlattr *est, struct tc_action **a, int ovr, |
194 | int bind) | 196 | int bind) |
195 | { | 197 | { |
196 | struct tc_action_net *tn = net_generic(net, ipt_net_id); | 198 | return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr, |
197 | 199 | bind); | |
198 | return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind); | ||
199 | } | 200 | } |
200 | 201 | ||
201 | static int tcf_xt_init(struct net *net, struct nlattr *nla, | 202 | static int tcf_xt_init(struct net *net, struct nlattr *nla, |
202 | struct nlattr *est, struct tc_action **a, int ovr, | 203 | struct nlattr *est, struct tc_action **a, int ovr, |
203 | int bind) | 204 | int bind) |
204 | { | 205 | { |
205 | struct tc_action_net *tn = net_generic(net, xt_net_id); | 206 | return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr, |
206 | 207 | bind); | |
207 | return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind); | ||
208 | } | 208 | } |
209 | 209 | ||
210 | static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a, | 210 | static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a, |
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index 3bd5f4f30235..bc443201d3ef 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl | |||
@@ -18,6 +18,7 @@ my $V = '0.26'; | |||
18 | 18 | ||
19 | use Getopt::Long qw(:config no_auto_abbrev); | 19 | use Getopt::Long qw(:config no_auto_abbrev); |
20 | use Cwd; | 20 | use Cwd; |
21 | use File::Find; | ||
21 | 22 | ||
22 | my $cur_path = fastgetcwd() . '/'; | 23 | my $cur_path = fastgetcwd() . '/'; |
23 | my $lk_path = "./"; | 24 | my $lk_path = "./"; |
@@ -58,6 +59,7 @@ my $from_filename = 0; | |||
58 | my $pattern_depth = 0; | 59 | my $pattern_depth = 0; |
59 | my $version = 0; | 60 | my $version = 0; |
60 | my $help = 0; | 61 | my $help = 0; |
62 | my $find_maintainer_files = 0; | ||
61 | 63 | ||
62 | my $vcs_used = 0; | 64 | my $vcs_used = 0; |
63 | 65 | ||
@@ -249,6 +251,7 @@ if (!GetOptions( | |||
249 | 'sections!' => \$sections, | 251 | 'sections!' => \$sections, |
250 | 'fe|file-emails!' => \$file_emails, | 252 | 'fe|file-emails!' => \$file_emails, |
251 | 'f|file' => \$from_filename, | 253 | 'f|file' => \$from_filename, |
254 | 'find-maintainer-files' => \$find_maintainer_files, | ||
252 | 'v|version' => \$version, | 255 | 'v|version' => \$version, |
253 | 'h|help|usage' => \$help, | 256 | 'h|help|usage' => \$help, |
254 | )) { | 257 | )) { |
@@ -307,36 +310,74 @@ if (!top_of_kernel_tree($lk_path)) { | |||
307 | 310 | ||
308 | my @typevalue = (); | 311 | my @typevalue = (); |
309 | my %keyword_hash; | 312 | my %keyword_hash; |
313 | my @mfiles = (); | ||
310 | 314 | ||
311 | open (my $maint, '<', "${lk_path}MAINTAINERS") | 315 | sub read_maintainer_file { |
312 | or die "$P: Can't open MAINTAINERS: $!\n"; | 316 | my ($file) = @_; |
313 | while (<$maint>) { | 317 | |
314 | my $line = $_; | 318 | open (my $maint, '<', "$file") |
315 | 319 | or die "$P: Can't open MAINTAINERS file '$file': $!\n"; | |
316 | if ($line =~ m/^([A-Z]):\s*(.*)/) { | 320 | while (<$maint>) { |
317 | my $type = $1; | 321 | my $line = $_; |
318 | my $value = $2; | 322 | |
319 | 323 | if ($line =~ m/^([A-Z]):\s*(.*)/) { | |
320 | ##Filename pattern matching | 324 | my $type = $1; |
321 | if ($type eq "F" || $type eq "X") { | 325 | my $value = $2; |
322 | $value =~ s@\.@\\\.@g; ##Convert . to \. | 326 | |
323 | $value =~ s/\*/\.\*/g; ##Convert * to .* | 327 | ##Filename pattern matching |
324 | $value =~ s/\?/\./g; ##Convert ? to . | 328 | if ($type eq "F" || $type eq "X") { |
325 | ##if pattern is a directory and it lacks a trailing slash, add one | 329 | $value =~ s@\.@\\\.@g; ##Convert . to \. |
326 | if ((-d $value)) { | 330 | $value =~ s/\*/\.\*/g; ##Convert * to .* |
327 | $value =~ s@([^/])$@$1/@; | 331 | $value =~ s/\?/\./g; ##Convert ? to . |
332 | ##if pattern is a directory and it lacks a trailing slash, add one | ||
333 | if ((-d $value)) { | ||
334 | $value =~ s@([^/])$@$1/@; | ||
335 | } | ||
336 | } elsif ($type eq "K") { | ||
337 | $keyword_hash{@typevalue} = $value; | ||
328 | } | 338 | } |
329 | } elsif ($type eq "K") { | 339 | push(@typevalue, "$type:$value"); |
330 | $keyword_hash{@typevalue} = $value; | 340 | } elsif (!(/^\s*$/ || /^\s*\#/)) { |
341 | $line =~ s/\n$//g; | ||
342 | push(@typevalue, $line); | ||
331 | } | 343 | } |
332 | push(@typevalue, "$type:$value"); | ||
333 | } elsif (!/^(\s)*$/) { | ||
334 | $line =~ s/\n$//g; | ||
335 | push(@typevalue, $line); | ||
336 | } | 344 | } |
345 | close($maint); | ||
346 | } | ||
347 | |||
348 | sub find_is_maintainer_file { | ||
349 | my ($file) = $_; | ||
350 | return if ($file !~ m@/MAINTAINERS$@); | ||
351 | $file = $File::Find::name; | ||
352 | return if (! -f $file); | ||
353 | push(@mfiles, $file); | ||
337 | } | 354 | } |
338 | close($maint); | ||
339 | 355 | ||
356 | sub find_ignore_git { | ||
357 | return grep { $_ !~ /^\.git$/; } @_; | ||
358 | } | ||
359 | |||
360 | if (-d "${lk_path}MAINTAINERS") { | ||
361 | opendir(DIR, "${lk_path}MAINTAINERS") or die $!; | ||
362 | my @files = readdir(DIR); | ||
363 | closedir(DIR); | ||
364 | foreach my $file (@files) { | ||
365 | push(@mfiles, "${lk_path}MAINTAINERS/$file") if ($file !~ /^\./); | ||
366 | } | ||
367 | } | ||
368 | |||
369 | if ($find_maintainer_files) { | ||
370 | find( { wanted => \&find_is_maintainer_file, | ||
371 | preprocess => \&find_ignore_git, | ||
372 | no_chdir => 1, | ||
373 | }, "${lk_path}"); | ||
374 | } else { | ||
375 | push(@mfiles, "${lk_path}MAINTAINERS") if -f "${lk_path}MAINTAINERS"; | ||
376 | } | ||
377 | |||
378 | foreach my $file (@mfiles) { | ||
379 | read_maintainer_file("$file"); | ||
380 | } | ||
340 | 381 | ||
341 | # | 382 | # |
342 | # Read mail address map | 383 | # Read mail address map |
@@ -873,7 +914,7 @@ sub top_of_kernel_tree { | |||
873 | if ( (-f "${lk_path}COPYING") | 914 | if ( (-f "${lk_path}COPYING") |
874 | && (-f "${lk_path}CREDITS") | 915 | && (-f "${lk_path}CREDITS") |
875 | && (-f "${lk_path}Kbuild") | 916 | && (-f "${lk_path}Kbuild") |
876 | && (-f "${lk_path}MAINTAINERS") | 917 | && (-e "${lk_path}MAINTAINERS") |
877 | && (-f "${lk_path}Makefile") | 918 | && (-f "${lk_path}Makefile") |
878 | && (-f "${lk_path}README") | 919 | && (-f "${lk_path}README") |
879 | && (-d "${lk_path}Documentation") | 920 | && (-d "${lk_path}Documentation") |
diff --git a/scripts/parse-maintainers.pl b/scripts/parse-maintainers.pl index a0fe34349b24..e40b53db7f9f 100644 --- a/scripts/parse-maintainers.pl +++ b/scripts/parse-maintainers.pl | |||
@@ -2,9 +2,9 @@ | |||
2 | 2 | ||
3 | use strict; | 3 | use strict; |
4 | 4 | ||
5 | my %map; | 5 | my $P = $0; |
6 | 6 | ||
7 | # sort comparison function | 7 | # sort comparison functions |
8 | sub by_category($$) { | 8 | sub by_category($$) { |
9 | my ($a, $b) = @_; | 9 | my ($a, $b) = @_; |
10 | 10 | ||
@@ -15,20 +15,33 @@ sub by_category($$) { | |||
15 | $a =~ s/THE REST/ZZZZZZ/g; | 15 | $a =~ s/THE REST/ZZZZZZ/g; |
16 | $b =~ s/THE REST/ZZZZZZ/g; | 16 | $b =~ s/THE REST/ZZZZZZ/g; |
17 | 17 | ||
18 | $a cmp $b; | 18 | return $a cmp $b; |
19 | } | 19 | } |
20 | 20 | ||
21 | sub alpha_output { | 21 | sub by_pattern($$) { |
22 | my $key; | 22 | my ($a, $b) = @_; |
23 | my $sort_method = \&by_category; | 23 | my $preferred_order = 'MRPLSWTQBCFXNK'; |
24 | my $sep = ""; | 24 | |
25 | 25 | my $a1 = uc(substr($a, 0, 1)); | |
26 | foreach $key (sort $sort_method keys %map) { | 26 | my $b1 = uc(substr($b, 0, 1)); |
27 | if ($key ne " ") { | 27 | |
28 | print $sep . $key . "\n"; | 28 | my $a_index = index($preferred_order, $a1); |
29 | $sep = "\n"; | 29 | my $b_index = index($preferred_order, $b1); |
30 | } | 30 | |
31 | print $map{$key}; | 31 | $a_index = 1000 if ($a_index == -1); |
32 | $b_index = 1000 if ($b_index == -1); | ||
33 | |||
34 | if (($a1 =~ /^F$/ && $b1 =~ /^F$/) || | ||
35 | ($a1 =~ /^X$/ && $b1 =~ /^X$/)) { | ||
36 | return $a cmp $b; | ||
37 | } | ||
38 | |||
39 | if ($a_index < $b_index) { | ||
40 | return -1; | ||
41 | } elsif ($a_index == $b_index) { | ||
42 | return 0; | ||
43 | } else { | ||
44 | return 1; | ||
32 | } | 45 | } |
33 | } | 46 | } |
34 | 47 | ||
@@ -39,39 +52,77 @@ sub trim { | |||
39 | return $s; | 52 | return $s; |
40 | } | 53 | } |
41 | 54 | ||
55 | sub alpha_output { | ||
56 | my ($hashref, $filename) = (@_); | ||
57 | |||
58 | open(my $file, '>', "$filename") or die "$P: $filename: open failed - $!\n"; | ||
59 | foreach my $key (sort by_category keys %$hashref) { | ||
60 | if ($key eq " ") { | ||
61 | chomp $$hashref{$key}; | ||
62 | print $file $$hashref{$key}; | ||
63 | } else { | ||
64 | print $file "\n" . $key . "\n"; | ||
65 | foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) { | ||
66 | print $file ($pattern . "\n"); | ||
67 | } | ||
68 | } | ||
69 | } | ||
70 | close($file); | ||
71 | } | ||
72 | |||
42 | sub file_input { | 73 | sub file_input { |
74 | my ($hashref, $filename) = (@_); | ||
75 | |||
43 | my $lastline = ""; | 76 | my $lastline = ""; |
44 | my $case = " "; | 77 | my $case = " "; |
45 | $map{$case} = ""; | 78 | $$hashref{$case} = ""; |
79 | |||
80 | open(my $file, '<', "$filename") or die "$P: $filename: open failed - $!\n"; | ||
46 | 81 | ||
47 | while (<>) { | 82 | while (<$file>) { |
48 | my $line = $_; | 83 | my $line = $_; |
49 | 84 | ||
50 | # Pattern line? | 85 | # Pattern line? |
51 | if ($line =~ m/^([A-Z]):\s*(.*)/) { | 86 | if ($line =~ m/^([A-Z]):\s*(.*)/) { |
52 | $line = $1 . ":\t" . trim($2) . "\n"; | 87 | $line = $1 . ":\t" . trim($2) . "\n"; |
53 | if ($lastline eq "") { | 88 | if ($lastline eq "") { |
54 | $map{$case} = $map{$case} . $line; | 89 | $$hashref{$case} = $$hashref{$case} . $line; |
55 | next; | 90 | next; |
56 | } | 91 | } |
57 | $case = trim($lastline); | 92 | $case = trim($lastline); |
58 | exists $map{$case} and die "Header '$case' already exists"; | 93 | exists $$hashref{$case} and die "Header '$case' already exists"; |
59 | $map{$case} = $line; | 94 | $$hashref{$case} = $line; |
60 | $lastline = ""; | 95 | $lastline = ""; |
61 | next; | 96 | next; |
62 | } | 97 | } |
63 | 98 | ||
64 | if ($case eq " ") { | 99 | if ($case eq " ") { |
65 | $map{$case} = $map{$case} . $lastline; | 100 | $$hashref{$case} = $$hashref{$case} . $lastline; |
66 | $lastline = $line; | 101 | $lastline = $line; |
67 | next; | 102 | next; |
68 | } | 103 | } |
69 | trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'"); | 104 | trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'"); |
70 | $lastline = $line; | 105 | $lastline = $line; |
71 | } | 106 | } |
72 | $map{$case} = $map{$case} . $lastline; | 107 | $$hashref{$case} = $$hashref{$case} . $lastline; |
108 | close($file); | ||
73 | } | 109 | } |
74 | 110 | ||
75 | &file_input; | 111 | my %hash; |
76 | &alpha_output; | 112 | my %new_hash; |
113 | |||
114 | file_input(\%hash, "MAINTAINERS"); | ||
115 | |||
116 | foreach my $type (@ARGV) { | ||
117 | foreach my $key (keys %hash) { | ||
118 | if ($key =~ /$type/ || $hash{$key} =~ /$type/) { | ||
119 | $new_hash{$key} = $hash{$key}; | ||
120 | delete $hash{$key}; | ||
121 | } | ||
122 | } | ||
123 | } | ||
124 | |||
125 | alpha_output(\%hash, "MAINTAINERS.new"); | ||
126 | alpha_output(\%new_hash, "SECTION.new"); | ||
127 | |||
77 | exit(0); | 128 | exit(0); |
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c index 7598361ef1f1..da2172ff9662 100644 --- a/tools/build/feature/test-bpf.c +++ b/tools/build/feature/test-bpf.c | |||
@@ -11,6 +11,8 @@ | |||
11 | # define __NR_bpf 280 | 11 | # define __NR_bpf 280 |
12 | # elif defined(__sparc__) | 12 | # elif defined(__sparc__) |
13 | # define __NR_bpf 349 | 13 | # define __NR_bpf 349 |
14 | # elif defined(__s390__) | ||
15 | # define __NR_bpf 351 | ||
14 | # else | 16 | # else |
15 | # error __NR_bpf not defined. libbpf does not support your arch. | 17 | # error __NR_bpf not defined. libbpf does not support your arch. |
16 | # endif | 18 | # endif |
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 256f571f2ab5..e5bbb090bf88 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c | |||
@@ -39,6 +39,8 @@ | |||
39 | # define __NR_bpf 280 | 39 | # define __NR_bpf 280 |
40 | # elif defined(__sparc__) | 40 | # elif defined(__sparc__) |
41 | # define __NR_bpf 349 | 41 | # define __NR_bpf 349 |
42 | # elif defined(__s390__) | ||
43 | # define __NR_bpf 351 | ||
42 | # else | 44 | # else |
43 | # error __NR_bpf not defined. libbpf does not support your arch. | 45 | # error __NR_bpf not defined. libbpf does not support your arch. |
44 | # endif | 46 | # endif |
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/test_pkt_md_access.c index 71729d47eb85..7956302ecdf2 100644 --- a/tools/testing/selftests/bpf/test_pkt_md_access.c +++ b/tools/testing/selftests/bpf/test_pkt_md_access.c | |||
@@ -12,12 +12,23 @@ | |||
12 | 12 | ||
13 | int _version SEC("version") = 1; | 13 | int _version SEC("version") = 1; |
14 | 14 | ||
15 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ | ||
15 | #define TEST_FIELD(TYPE, FIELD, MASK) \ | 16 | #define TEST_FIELD(TYPE, FIELD, MASK) \ |
16 | { \ | 17 | { \ |
17 | TYPE tmp = *(volatile TYPE *)&skb->FIELD; \ | 18 | TYPE tmp = *(volatile TYPE *)&skb->FIELD; \ |
18 | if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \ | 19 | if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \ |
19 | return TC_ACT_SHOT; \ | 20 | return TC_ACT_SHOT; \ |
20 | } | 21 | } |
22 | #else | ||
23 | #define TEST_FIELD_OFFSET(a, b) ((sizeof(a) - sizeof(b)) / sizeof(b)) | ||
24 | #define TEST_FIELD(TYPE, FIELD, MASK) \ | ||
25 | { \ | ||
26 | TYPE tmp = *((volatile TYPE *)&skb->FIELD + \ | ||
27 | TEST_FIELD_OFFSET(skb->FIELD, TYPE)); \ | ||
28 | if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \ | ||
29 | return TC_ACT_SHOT; \ | ||
30 | } | ||
31 | #endif | ||
21 | 32 | ||
22 | SEC("test1") | 33 | SEC("test1") |
23 | int process(struct __sk_buff *skb) | 34 | int process(struct __sk_buff *skb) |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 65aa562cff87..ab0cd1198326 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * License as published by the Free Software Foundation. | 8 | * License as published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <endian.h> | ||
11 | #include <asm/types.h> | 12 | #include <asm/types.h> |
12 | #include <linux/types.h> | 13 | #include <linux/types.h> |
13 | #include <stdint.h> | 14 | #include <stdint.h> |
@@ -1101,7 +1102,7 @@ static struct bpf_test tests[] = { | |||
1101 | "check skb->hash byte load permitted", | 1102 | "check skb->hash byte load permitted", |
1102 | .insns = { | 1103 | .insns = { |
1103 | BPF_MOV64_IMM(BPF_REG_0, 0), | 1104 | BPF_MOV64_IMM(BPF_REG_0, 0), |
1104 | #ifdef __LITTLE_ENDIAN | 1105 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
1105 | BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, | 1106 | BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, |
1106 | offsetof(struct __sk_buff, hash)), | 1107 | offsetof(struct __sk_buff, hash)), |
1107 | #else | 1108 | #else |
@@ -1138,7 +1139,7 @@ static struct bpf_test tests[] = { | |||
1138 | "check skb->hash byte load not permitted 3", | 1139 | "check skb->hash byte load not permitted 3", |
1139 | .insns = { | 1140 | .insns = { |
1140 | BPF_MOV64_IMM(BPF_REG_0, 0), | 1141 | BPF_MOV64_IMM(BPF_REG_0, 0), |
1141 | #ifdef __LITTLE_ENDIAN | 1142 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
1142 | BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, | 1143 | BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, |
1143 | offsetof(struct __sk_buff, hash) + 3), | 1144 | offsetof(struct __sk_buff, hash) + 3), |
1144 | #else | 1145 | #else |
@@ -1248,7 +1249,7 @@ static struct bpf_test tests[] = { | |||
1248 | "check skb->hash half load permitted", | 1249 | "check skb->hash half load permitted", |
1249 | .insns = { | 1250 | .insns = { |
1250 | BPF_MOV64_IMM(BPF_REG_0, 0), | 1251 | BPF_MOV64_IMM(BPF_REG_0, 0), |
1251 | #ifdef __LITTLE_ENDIAN | 1252 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
1252 | BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, | 1253 | BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, |
1253 | offsetof(struct __sk_buff, hash)), | 1254 | offsetof(struct __sk_buff, hash)), |
1254 | #else | 1255 | #else |
@@ -1263,7 +1264,7 @@ static struct bpf_test tests[] = { | |||
1263 | "check skb->hash half load not permitted", | 1264 | "check skb->hash half load not permitted", |
1264 | .insns = { | 1265 | .insns = { |
1265 | BPF_MOV64_IMM(BPF_REG_0, 0), | 1266 | BPF_MOV64_IMM(BPF_REG_0, 0), |
1266 | #ifdef __LITTLE_ENDIAN | 1267 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
1267 | BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, | 1268 | BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, |
1268 | offsetof(struct __sk_buff, hash) + 2), | 1269 | offsetof(struct __sk_buff, hash) + 2), |
1269 | #else | 1270 | #else |
@@ -5430,7 +5431,7 @@ static struct bpf_test tests[] = { | |||
5430 | "check bpf_perf_event_data->sample_period byte load permitted", | 5431 | "check bpf_perf_event_data->sample_period byte load permitted", |
5431 | .insns = { | 5432 | .insns = { |
5432 | BPF_MOV64_IMM(BPF_REG_0, 0), | 5433 | BPF_MOV64_IMM(BPF_REG_0, 0), |
5433 | #ifdef __LITTLE_ENDIAN | 5434 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
5434 | BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, | 5435 | BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, |
5435 | offsetof(struct bpf_perf_event_data, sample_period)), | 5436 | offsetof(struct bpf_perf_event_data, sample_period)), |
5436 | #else | 5437 | #else |
@@ -5446,7 +5447,7 @@ static struct bpf_test tests[] = { | |||
5446 | "check bpf_perf_event_data->sample_period half load permitted", | 5447 | "check bpf_perf_event_data->sample_period half load permitted", |
5447 | .insns = { | 5448 | .insns = { |
5448 | BPF_MOV64_IMM(BPF_REG_0, 0), | 5449 | BPF_MOV64_IMM(BPF_REG_0, 0), |
5449 | #ifdef __LITTLE_ENDIAN | 5450 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
5450 | BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, | 5451 | BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, |
5451 | offsetof(struct bpf_perf_event_data, sample_period)), | 5452 | offsetof(struct bpf_perf_event_data, sample_period)), |
5452 | #else | 5453 | #else |
@@ -5462,7 +5463,7 @@ static struct bpf_test tests[] = { | |||
5462 | "check bpf_perf_event_data->sample_period word load permitted", | 5463 | "check bpf_perf_event_data->sample_period word load permitted", |
5463 | .insns = { | 5464 | .insns = { |
5464 | BPF_MOV64_IMM(BPF_REG_0, 0), | 5465 | BPF_MOV64_IMM(BPF_REG_0, 0), |
5465 | #ifdef __LITTLE_ENDIAN | 5466 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
5466 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | 5467 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, |
5467 | offsetof(struct bpf_perf_event_data, sample_period)), | 5468 | offsetof(struct bpf_perf_event_data, sample_period)), |
5468 | #else | 5469 | #else |
@@ -5489,7 +5490,7 @@ static struct bpf_test tests[] = { | |||
5489 | "check skb->data half load not permitted", | 5490 | "check skb->data half load not permitted", |
5490 | .insns = { | 5491 | .insns = { |
5491 | BPF_MOV64_IMM(BPF_REG_0, 0), | 5492 | BPF_MOV64_IMM(BPF_REG_0, 0), |
5492 | #ifdef __LITTLE_ENDIAN | 5493 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
5493 | BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, | 5494 | BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, |
5494 | offsetof(struct __sk_buff, data)), | 5495 | offsetof(struct __sk_buff, data)), |
5495 | #else | 5496 | #else |
@@ -5505,7 +5506,7 @@ static struct bpf_test tests[] = { | |||
5505 | "check skb->tc_classid half load not permitted for lwt prog", | 5506 | "check skb->tc_classid half load not permitted for lwt prog", |
5506 | .insns = { | 5507 | .insns = { |
5507 | BPF_MOV64_IMM(BPF_REG_0, 0), | 5508 | BPF_MOV64_IMM(BPF_REG_0, 0), |
5508 | #ifdef __LITTLE_ENDIAN | 5509 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
5509 | BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, | 5510 | BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, |
5510 | offsetof(struct __sk_buff, tc_classid)), | 5511 | offsetof(struct __sk_buff, tc_classid)), |
5511 | #else | 5512 | #else |