Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                        |   1
-rw-r--r--  arch/mips/include/asm/sigcontext.h       |   2
-rw-r--r--  arch/mips/include/asm/uasm.h             |   4
-rw-r--r--  arch/mips/include/uapi/asm/inst.h        |   1
-rw-r--r--  arch/mips/include/uapi/asm/sigcontext.h  |   8
-rw-r--r--  arch/mips/kernel/asm-offsets.c           |   3
-rw-r--r--  arch/mips/kernel/irq-msc01.c             |   2
-rw-r--r--  arch/mips/kernel/pm-cps.c                |   4
-rw-r--r--  arch/mips/kernel/r4k_fpu.S               | 213
-rw-r--r--  arch/mips/kernel/signal.c                |  79
-rw-r--r--  arch/mips/kernel/signal32.c              |  74
-rw-r--r--  arch/mips/kernel/smp-cps.c               |   2
-rw-r--r--  arch/mips/kvm/kvm_mips.c                 |   1
-rw-r--r--  arch/mips/math-emu/ieee754.c             |  23
-rw-r--r--  arch/mips/mm/uasm-micromips.c            |   1
-rw-r--r--  arch/mips/mm/uasm-mips.c                 |   3
-rw-r--r--  arch/mips/mm/uasm.c                      |  10
-rw-r--r--  arch/mips/net/bpf_jit.c                  | 266
18 files changed, 202 insertions(+), 495 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7a469acee33c..4e238e6e661c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -269,6 +269,7 @@ config LANTIQ
 config LASAT
 	bool "LASAT Networks platforms"
 	select CEVT_R4K
+	select CRC32
 	select CSRC_R4K
 	select DMA_NONCOHERENT
 	select SYS_HAS_EARLY_PRINTK
diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
index f54bdbe85c0d..eeeb0f48c767 100644
--- a/arch/mips/include/asm/sigcontext.h
+++ b/arch/mips/include/asm/sigcontext.h
@@ -32,8 +32,6 @@ struct sigcontext32 {
 	__u32	sc_lo2;
 	__u32	sc_hi3;
 	__u32	sc_lo3;
-	__u64	sc_msaregs[32];	/* Most significant 64 bits */
-	__u32	sc_msa_csr;
 };
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
 #endif /* _ASM_SIGCONTEXT_H */
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index f8d63b3b40b4..708c5d414905 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -67,6 +67,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 #define Ip_u2s3u1(op) \
 void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
 
+#define Ip_s3s1s2(op) \
+void ISAOPC(op)(u32 **buf, int a, int b, int c)
+
 #define Ip_u2u1s3(op) \
 void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
@@ -147,6 +150,7 @@ Ip_u2s3u1(_scd);
 Ip_u2s3u1(_sd);
 Ip_u2u1u3(_sll);
 Ip_u3u2u1(_sllv);
+Ip_s3s1s2(_slt);
 Ip_u2u1s3(_sltiu);
 Ip_u3u1u2(_sltu);
 Ip_u2u1u3(_sra);
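
The new Ip_s3s1s2() prototype takes plain ints because slt is the signed
MIPS compare; sltu, declared just below it, is the unsigned variant, and
the two disagree whenever the sign bit is set. A minimal host-side model
of that distinction (illustrative C, not kernel code):

#include <stdint.h>
#include <stdio.h>

static int slt(int32_t rs, int32_t rt)    { return rs < rt; }
static int sltu(uint32_t rs, uint32_t rt) { return rs < rt; }

int main(void)
{
	/* -1 is below 0 as a signed value, but is 0xffffffff unsigned */
	printf("slt: %d, sltu: %d\n",
	       slt(-1, 0), sltu((uint32_t)-1, 0));	/* prints 1, 0 */
	return 0;
}
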
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 4b7160259292..4bfdb9d4c186 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -273,6 +273,7 @@ enum mm_32a_minor_op {
 	mm_and_op = 0x250,
 	mm_or32_op = 0x290,
 	mm_xor32_op = 0x310,
+	mm_slt_op = 0x350,
 	mm_sltu_op = 0x390,
 };
 
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 681c17603a48..6c9906f59c6e 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -12,10 +12,6 @@
 #include <linux/types.h>
 #include <asm/sgidefs.h>
 
-/* Bits which may be set in sc_used_math */
-#define USEDMATH_FP	(1 << 0)
-#define USEDMATH_MSA	(1 << 1)
-
 #if _MIPS_SIM == _MIPS_SIM_ABI32
 
 /*
@@ -41,8 +37,6 @@ struct sigcontext {
 	unsigned long		sc_lo2;
 	unsigned long		sc_hi3;
 	unsigned long		sc_lo3;
-	unsigned long long	sc_msaregs[32];	/* Most significant 64 bits */
-	unsigned long		sc_msa_csr;
 };
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
@@ -76,8 +70,6 @@ struct sigcontext {
 	__u32	sc_used_math;
 	__u32	sc_dsp;
 	__u32	sc_reserved;
-	__u64	sc_msaregs[32];
-	__u32	sc_msa_csr;
 };
 
 
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 02f075df8f2e..4bb5107511e2 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -293,7 +293,6 @@ void output_sc_defines(void)
 	OFFSET(SC_LO2, sigcontext, sc_lo2);
 	OFFSET(SC_HI3, sigcontext, sc_hi3);
 	OFFSET(SC_LO3, sigcontext, sc_lo3);
-	OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
 	BLANK();
 }
 #endif
@@ -308,7 +307,6 @@ void output_sc_defines(void)
 	OFFSET(SC_MDLO, sigcontext, sc_mdlo);
 	OFFSET(SC_PC, sigcontext, sc_pc);
 	OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
-	OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
 	BLANK();
 }
 #endif
@@ -320,7 +318,6 @@ void output_sc32_defines(void)
 	OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
 	OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
 	OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
-	OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
 	BLANK();
 }
 #endif
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 4858642d543d..a734b2c2f9ea 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -126,7 +126,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
 
 	board_bind_eic_interrupt = &msc_bind_eic_interrupt;
 
-	for (; nirq >= 0; nirq--, imp++) {
+	for (; nirq > 0; nirq--, imp++) {
 		int n = imp->im_irq;
 
 		switch (imp->im_type) {
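
The loop-bound change above fixes an off-by-one: nirq is a count of
entries in the interrupt map, so counting down from nirq with a ">= 0"
test runs one extra iteration and reads imp past the end of the table.
A standalone sketch of the same pattern (three-entry table assumed):

#include <stdio.h>

int main(void)
{
	int im_irq[3] = { 10, 20, 30 };	/* stand-in for the irq map */
	const int *imp = im_irq;
	int nirq = 3;			/* number of valid entries */

	/* with "nirq >= 0" this would iterate four times and
	 * dereference one element past the end of the array */
	for (; nirq > 0; nirq--, imp++)
		printf("entry %d\n", *imp);
	return 0;
}
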
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5aa4c6f8cf83..c4c2069d3a20 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -101,7 +101,7 @@ static void coupled_barrier(atomic_t *a, unsigned online)
 	if (!coupled_coherence)
 		return;
 
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(a);
 
 	while (atomic_read(a) < online)
@@ -158,7 +158,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
 
 	/* Indicate that this CPU might not be coherent */
 	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	/* Create a non-coherent mapping of the core ready_count */
 	core_ready_count = per_cpu(ready_count, core);
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 71814272d148..8352523568e6 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,7 +13,6 @@
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
  */
 #include <asm/asm.h>
-#include <asm/asmmacro.h>
 #include <asm/errno.h>
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
@@ -246,218 +245,6 @@ LEAF(_restore_fp_context32)
 	END(_restore_fp_context32)
 #endif
 
-#ifdef CONFIG_CPU_HAS_MSA
-
-	.macro	save_sc_msareg	wr, off, sc, tmp
-#ifdef CONFIG_64BIT
-	copy_u_d \tmp, \wr, 1
-	EX sd \tmp, (\off+(\wr*8))(\sc)
-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-	copy_u_w \tmp, \wr, 2
-	EX sw \tmp, (\off+(\wr*8)+0)(\sc)
-	copy_u_w \tmp, \wr, 3
-	EX sw \tmp, (\off+(\wr*8)+4)(\sc)
-#else /* CONFIG_CPU_BIG_ENDIAN */
-	copy_u_w \tmp, \wr, 2
-	EX sw \tmp, (\off+(\wr*8)+4)(\sc)
-	copy_u_w \tmp, \wr, 3
-	EX sw \tmp, (\off+(\wr*8)+0)(\sc)
-#endif
-	.endm
-
-/*
- * int _save_msa_context(struct sigcontext *sc)
- *
- * Save the upper 64 bits of each vector register along with the MSA_CSR
- * register into sc. Returns zero on success, else non-zero.
- */
-LEAF(_save_msa_context)
-	save_sc_msareg	0, SC_MSAREGS, a0, t0
-	save_sc_msareg	1, SC_MSAREGS, a0, t0
-	save_sc_msareg	2, SC_MSAREGS, a0, t0
-	save_sc_msareg	3, SC_MSAREGS, a0, t0
-	save_sc_msareg	4, SC_MSAREGS, a0, t0
-	save_sc_msareg	5, SC_MSAREGS, a0, t0
-	save_sc_msareg	6, SC_MSAREGS, a0, t0
-	save_sc_msareg	7, SC_MSAREGS, a0, t0
-	save_sc_msareg	8, SC_MSAREGS, a0, t0
-	save_sc_msareg	9, SC_MSAREGS, a0, t0
-	save_sc_msareg	10, SC_MSAREGS, a0, t0
-	save_sc_msareg	11, SC_MSAREGS, a0, t0
-	save_sc_msareg	12, SC_MSAREGS, a0, t0
-	save_sc_msareg	13, SC_MSAREGS, a0, t0
-	save_sc_msareg	14, SC_MSAREGS, a0, t0
-	save_sc_msareg	15, SC_MSAREGS, a0, t0
-	save_sc_msareg	16, SC_MSAREGS, a0, t0
-	save_sc_msareg	17, SC_MSAREGS, a0, t0
-	save_sc_msareg	18, SC_MSAREGS, a0, t0
-	save_sc_msareg	19, SC_MSAREGS, a0, t0
-	save_sc_msareg	20, SC_MSAREGS, a0, t0
-	save_sc_msareg	21, SC_MSAREGS, a0, t0
-	save_sc_msareg	22, SC_MSAREGS, a0, t0
-	save_sc_msareg	23, SC_MSAREGS, a0, t0
-	save_sc_msareg	24, SC_MSAREGS, a0, t0
-	save_sc_msareg	25, SC_MSAREGS, a0, t0
-	save_sc_msareg	26, SC_MSAREGS, a0, t0
-	save_sc_msareg	27, SC_MSAREGS, a0, t0
-	save_sc_msareg	28, SC_MSAREGS, a0, t0
-	save_sc_msareg	29, SC_MSAREGS, a0, t0
-	save_sc_msareg	30, SC_MSAREGS, a0, t0
-	save_sc_msareg	31, SC_MSAREGS, a0, t0
-	jr	ra
-	li	v0, 0
-	END(_save_msa_context)
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * int _save_msa_context32(struct sigcontext32 *sc)
- *
- * Save the upper 64 bits of each vector register along with the MSA_CSR
- * register into sc. Returns zero on success, else non-zero.
- */
-LEAF(_save_msa_context32)
-	save_sc_msareg	0, SC32_MSAREGS, a0, t0
-	save_sc_msareg	1, SC32_MSAREGS, a0, t0
-	save_sc_msareg	2, SC32_MSAREGS, a0, t0
-	save_sc_msareg	3, SC32_MSAREGS, a0, t0
-	save_sc_msareg	4, SC32_MSAREGS, a0, t0
-	save_sc_msareg	5, SC32_MSAREGS, a0, t0
-	save_sc_msareg	6, SC32_MSAREGS, a0, t0
-	save_sc_msareg	7, SC32_MSAREGS, a0, t0
-	save_sc_msareg	8, SC32_MSAREGS, a0, t0
-	save_sc_msareg	9, SC32_MSAREGS, a0, t0
-	save_sc_msareg	10, SC32_MSAREGS, a0, t0
-	save_sc_msareg	11, SC32_MSAREGS, a0, t0
-	save_sc_msareg	12, SC32_MSAREGS, a0, t0
-	save_sc_msareg	13, SC32_MSAREGS, a0, t0
-	save_sc_msareg	14, SC32_MSAREGS, a0, t0
-	save_sc_msareg	15, SC32_MSAREGS, a0, t0
-	save_sc_msareg	16, SC32_MSAREGS, a0, t0
-	save_sc_msareg	17, SC32_MSAREGS, a0, t0
-	save_sc_msareg	18, SC32_MSAREGS, a0, t0
-	save_sc_msareg	19, SC32_MSAREGS, a0, t0
-	save_sc_msareg	20, SC32_MSAREGS, a0, t0
-	save_sc_msareg	21, SC32_MSAREGS, a0, t0
-	save_sc_msareg	22, SC32_MSAREGS, a0, t0
-	save_sc_msareg	23, SC32_MSAREGS, a0, t0
-	save_sc_msareg	24, SC32_MSAREGS, a0, t0
-	save_sc_msareg	25, SC32_MSAREGS, a0, t0
-	save_sc_msareg	26, SC32_MSAREGS, a0, t0
-	save_sc_msareg	27, SC32_MSAREGS, a0, t0
-	save_sc_msareg	28, SC32_MSAREGS, a0, t0
-	save_sc_msareg	29, SC32_MSAREGS, a0, t0
-	save_sc_msareg	30, SC32_MSAREGS, a0, t0
-	save_sc_msareg	31, SC32_MSAREGS, a0, t0
-	jr	ra
-	li	v0, 0
-	END(_save_msa_context32)
-
-#endif /* CONFIG_MIPS32_COMPAT */
-
-	.macro	restore_sc_msareg	wr, off, sc, tmp
-#ifdef CONFIG_64BIT
-	EX ld \tmp, (\off+(\wr*8))(\sc)
-	insert_d \wr, 1, \tmp
-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-	EX lw \tmp, (\off+(\wr*8)+0)(\sc)
-	insert_w \wr, 2, \tmp
-	EX lw \tmp, (\off+(\wr*8)+4)(\sc)
-	insert_w \wr, 3, \tmp
-#else /* CONFIG_CPU_BIG_ENDIAN */
-	EX lw \tmp, (\off+(\wr*8)+4)(\sc)
-	insert_w \wr, 2, \tmp
-	EX lw \tmp, (\off+(\wr*8)+0)(\sc)
-	insert_w \wr, 3, \tmp
-#endif
-	.endm
-
-/*
- * int _restore_msa_context(struct sigcontext *sc)
- */
-LEAF(_restore_msa_context)
-	restore_sc_msareg	0, SC_MSAREGS, a0, t0
-	restore_sc_msareg	1, SC_MSAREGS, a0, t0
-	restore_sc_msareg	2, SC_MSAREGS, a0, t0
-	restore_sc_msareg	3, SC_MSAREGS, a0, t0
-	restore_sc_msareg	4, SC_MSAREGS, a0, t0
-	restore_sc_msareg	5, SC_MSAREGS, a0, t0
-	restore_sc_msareg	6, SC_MSAREGS, a0, t0
-	restore_sc_msareg	7, SC_MSAREGS, a0, t0
-	restore_sc_msareg	8, SC_MSAREGS, a0, t0
-	restore_sc_msareg	9, SC_MSAREGS, a0, t0
-	restore_sc_msareg	10, SC_MSAREGS, a0, t0
-	restore_sc_msareg	11, SC_MSAREGS, a0, t0
-	restore_sc_msareg	12, SC_MSAREGS, a0, t0
-	restore_sc_msareg	13, SC_MSAREGS, a0, t0
-	restore_sc_msareg	14, SC_MSAREGS, a0, t0
-	restore_sc_msareg	15, SC_MSAREGS, a0, t0
-	restore_sc_msareg	16, SC_MSAREGS, a0, t0
-	restore_sc_msareg	17, SC_MSAREGS, a0, t0
-	restore_sc_msareg	18, SC_MSAREGS, a0, t0
-	restore_sc_msareg	19, SC_MSAREGS, a0, t0
-	restore_sc_msareg	20, SC_MSAREGS, a0, t0
-	restore_sc_msareg	21, SC_MSAREGS, a0, t0
-	restore_sc_msareg	22, SC_MSAREGS, a0, t0
-	restore_sc_msareg	23, SC_MSAREGS, a0, t0
-	restore_sc_msareg	24, SC_MSAREGS, a0, t0
-	restore_sc_msareg	25, SC_MSAREGS, a0, t0
-	restore_sc_msareg	26, SC_MSAREGS, a0, t0
-	restore_sc_msareg	27, SC_MSAREGS, a0, t0
-	restore_sc_msareg	28, SC_MSAREGS, a0, t0
-	restore_sc_msareg	29, SC_MSAREGS, a0, t0
-	restore_sc_msareg	30, SC_MSAREGS, a0, t0
-	restore_sc_msareg	31, SC_MSAREGS, a0, t0
-	jr	ra
-	li	v0, 0
-	END(_restore_msa_context)
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * int _restore_msa_context32(struct sigcontext32 *sc)
- */
-LEAF(_restore_msa_context32)
-	restore_sc_msareg	0, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	1, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	2, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	3, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	4, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	5, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	6, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	7, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	8, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	9, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	10, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	11, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	12, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	13, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	14, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	15, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	16, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	17, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	18, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	19, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	20, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	21, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	22, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	23, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	24, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	25, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	26, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	27, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	28, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	29, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	30, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	31, SC32_MSAREGS, a0, t0
-	jr	ra
-	li	v0, 0
-	END(_restore_msa_context32)
-
-#endif /* CONFIG_MIPS32_COMPAT */
-
-#endif /* CONFIG_CPU_HAS_MSA */
-
 	.set	reorder
 
 	.type	fault@function
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 33133d3df3e5..9e60d117e41e 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -31,7 +31,6 @@
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/fpu.h>
-#include <asm/msa.h>
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/cpu-features.h>
@@ -48,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
 extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
 extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
 
-extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
-
 struct sigframe {
 	u32 sf_ass[4];		/* argument save space for o32 */
 	u32 sf_pad[2];		/* Was: signal trampoline */
@@ -100,60 +96,20 @@ static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
 }
 
 /*
- * These functions will save only the upper 64 bits of the vector registers,
- * since the lower 64 bits have already been saved as the scalar FP context.
- */
-static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
-{
-	int i;
-	int err = 0;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |=
-		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
-			       &sc->sc_msaregs[i]);
-	}
-	err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
-{
-	int i;
-	int err = 0;
-	u64 val;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |= __get_user(val, &sc->sc_msaregs[i]);
-		set_fpr64(&current->thread.fpu.fpr[i], 1, val);
-	}
-	err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-/*
  * Helper routines
  */
-static int protected_save_fp_context(struct sigcontext __user *sc,
-				     unsigned used_math)
+static int protected_save_fp_context(struct sigcontext __user *sc)
 {
 	int err;
-	bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 #ifndef CONFIG_EVA
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = save_fp_context(sc);
-			if (save_msa && !err)
-				err = _save_msa_context(sc);
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_to_sigcontext(sc);
-			if (save_msa && !err)
-				err = copy_msa_to_sigcontext(sc);
 		}
 		if (likely(!err))
 			break;
@@ -169,38 +125,24 @@ static int protected_save_fp_context(struct sigcontext __user *sc,
 	 * EVA does not have FPU EVA instructions so saving fpu context directly
 	 * does not work.
 	 */
-	disable_msa();
 	lose_fpu(1);
 	err = save_fp_context(sc);	/* this might fail */
-	if (save_msa && !err)
-		err = copy_msa_to_sigcontext(sc);
 #endif
 	return err;
 }
 
-static int protected_restore_fp_context(struct sigcontext __user *sc,
-					unsigned used_math)
+static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
 	int err, tmp __maybe_unused;
-	bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 #ifndef CONFIG_EVA
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = restore_fp_context(sc);
-			if (restore_msa && !err) {
-				enable_msa();
-				err = _restore_msa_context(sc);
-			} else {
-				/* signal handler may have used MSA */
-				disable_msa();
-			}
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_from_sigcontext(sc);
-			if (!err && (used_math & USEDMATH_MSA))
-				err = copy_msa_from_sigcontext(sc);
 		}
 		if (likely(!err))
 			break;
@@ -216,11 +158,8 @@ static int protected_restore_fp_context(struct sigcontext __user *sc,
 	 * EVA does not have FPU EVA instructions so restoring fpu context
 	 * directly does not work.
 	 */
-	enable_msa();
 	lose_fpu(0);
 	err = restore_fp_context(sc);	/* this might fail */
-	if (restore_msa && !err)
-		err = copy_msa_from_sigcontext(sc);
 #endif
 	return err;
 }
@@ -252,8 +191,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
 	}
 
-	used_math = used_math() ? USEDMATH_FP : 0;
-	used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
+	used_math = !!used_math();
 	err |= __put_user(used_math, &sc->sc_used_math);
 
 	if (used_math) {
@@ -261,7 +199,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		err |= protected_save_fp_context(sc, used_math);
+		err |= protected_save_fp_context(sc);
 	}
 	return err;
 }
@@ -286,14 +224,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
 }
 
 static int
-check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
+check_and_restore_fp_context(struct sigcontext __user *sc)
 {
 	int err, sig;
 
 	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
 	if (err > 0)
 		err = 0;
-	err |= protected_restore_fp_context(sc, used_math);
+	err |= protected_restore_fp_context(sc);
 	return err ?: sig;
 }
 
@@ -333,10 +271,9 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 	if (used_math) {
 		/* restore fpu context if we have used it before */
 		if (!err)
-			err = check_and_restore_fp_context(sc, used_math);
+			err = check_and_restore_fp_context(sc);
 	} else {
-		/* signal handler may have used FPU or MSA. Disable them. */
-		disable_msa();
+		/* signal handler may have used FPU. Give it up. */
 		lose_fpu(0);
 	}
 
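
With the USEDMATH_FP/USEDMATH_MSA bits gone, sc_used_math goes back to
being a plain boolean, and the "!!" in the new setup_sigcontext()
collapses whatever non-zero value used_math() returns into exactly 1.
A trivial standalone check of that idiom:

#include <assert.h>

int main(void)
{
	unsigned flags = 0x8;		/* some non-zero flag word */
	unsigned used_math = !!flags;	/* normalize to 0 or 1 */

	assert(used_math == 1);
	assert(!!0 == 0);
	return 0;
}
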
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 299f956e4db3..bae2e6ee2109 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -30,7 +30,6 @@
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/fpu.h>
-#include <asm/msa.h>
 #include <asm/war.h>
 #include <asm/vdso.h>
 #include <asm/dsp.h>
@@ -43,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
 extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
 extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
 
-extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
-
 /*
  * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
  */
@@ -115,59 +111,19 @@ static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
 }
 
 /*
- * These functions will save only the upper 64 bits of the vector registers,
- * since the lower 64 bits have already been saved as the scalar FP context.
- */
-static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |=
-		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
-			       &sc->sc_msaregs[i]);
-	}
-	err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-	u64 val;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |= __get_user(val, &sc->sc_msaregs[i]);
-		set_fpr64(&current->thread.fpu.fpr[i], 1, val);
-	}
-	err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-/*
  * sigcontext handlers
  */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc,
-				       unsigned used_math)
+static int protected_save_fp_context32(struct sigcontext32 __user *sc)
 {
 	int err;
-	bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = save_fp_context32(sc);
-			if (save_msa && !err)
-				err = _save_msa_context32(sc);
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_to_sigcontext32(sc);
-			if (save_msa && !err)
-				err = copy_msa_to_sigcontext32(sc);
 		}
 		if (likely(!err))
 			break;
@@ -181,28 +137,17 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc,
 	return err;
 }
 
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
-					  unsigned used_math)
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
 	int err, tmp __maybe_unused;
-	bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = restore_fp_context32(sc);
-			if (restore_msa && !err) {
-				enable_msa();
-				err = _restore_msa_context32(sc);
-			} else {
-				/* signal handler may have used MSA */
-				disable_msa();
-			}
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_from_sigcontext32(sc);
-			if (restore_msa && !err)
-				err = copy_msa_from_sigcontext32(sc);
 		}
 		if (likely(!err))
 			break;
@@ -241,8 +186,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
 		err |= __put_user(mflo3(), &sc->sc_lo3);
 	}
 
-	used_math = used_math() ? USEDMATH_FP : 0;
-	used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
+	used_math = !!used_math();
 	err |= __put_user(used_math, &sc->sc_used_math);
 
 	if (used_math) {
@@ -250,21 +194,20 @@ static int setup_sigcontext32(struct pt_regs *regs,
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		err |= protected_save_fp_context32(sc, used_math);
+		err |= protected_save_fp_context32(sc);
 	}
 	return err;
 }
 
 static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc,
-			       unsigned used_math)
+check_and_restore_fp_context32(struct sigcontext32 __user *sc)
 {
 	int err, sig;
 
 	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
 	if (err > 0)
 		err = 0;
-	err |= protected_restore_fp_context32(sc, used_math);
+	err |= protected_restore_fp_context32(sc);
 	return err ?: sig;
 }
 
@@ -301,10 +244,9 @@ static int restore_sigcontext32(struct pt_regs *regs,
 	if (used_math) {
 		/* restore fpu context if we have used it before */
 		if (!err)
-			err = check_and_restore_fp_context32(sc, used_math);
+			err = check_and_restore_fp_context32(sc);
 	} else {
-		/* signal handler may have used FPU or MSA. Disable them. */
-		disable_msa();
+		/* signal handler may have used FPU. Give it up. */
 		lose_fpu(0);
 	}
 
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index df0598d9bfdd..949f2c6827a0 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -301,7 +301,7 @@ static int cps_cpu_disable(void)
 
 	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
 	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index cd5e4f568439..f3c56a182fd8 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -384,6 +384,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 
 	kfree(vcpu->arch.guest_ebase);
 	kfree(vcpu->arch.kseg0_commpage);
+	kfree(vcpu);
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
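
The added kfree(vcpu) plugs a leak: the teardown path freed the vcpu's
sub-allocations but never the vcpu object itself. A user-space sketch of
the same alloc/free pairing (names are illustrative, not the KVM API):

#include <stdlib.h>

struct vcpu {
	void *guest_ebase;
	void *kseg0_commpage;
};

static void vcpu_free(struct vcpu *v)
{
	free(v->guest_ebase);
	free(v->kseg0_commpage);
	free(v);	/* the step the patch adds */
}

int main(void)
{
	struct vcpu *v = calloc(1, sizeof(*v));

	if (v)
		vcpu_free(v);
	return 0;
}
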
diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c
index 53f1d2287084..8e97acbbe22c 100644
--- a/arch/mips/math-emu/ieee754.c
+++ b/arch/mips/math-emu/ieee754.c
@@ -34,13 +34,22 @@
  * Special constants
  */
 
-#define DPCNST(s, b, m) \
+/*
+ * Older GCC requires the inner braces for initialization of union ieee754dp's
+ * anonymous struct member. Without an error will result.
+ */
+#define xPCNST(s, b, m, ebias) \
 { \
-	.sign = (s), \
-	.bexp = (b) + DP_EBIAS, \
-	.mant = (m) \
+	{ \
+		.sign = (s), \
+		.bexp = (b) + ebias, \
+		.mant = (m) \
+	} \
 }
 
+#define DPCNST(s, b, m) \
+	xPCNST(s, b, m, DP_EBIAS)
+
 const union ieee754dp __ieee754dp_spcvals[] = {
 	DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL),	/* + zero */
 	DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL),	/* - zero */
@@ -62,11 +71,7 @@ const union ieee754dp __ieee754dp_spcvals[] = {
 };
 
 #define SPCNST(s, b, m) \
-{ \
-	.sign = (s), \
-	.bexp = (b) + SP_EBIAS, \
-	.mant = (m) \
-}
+	xPCNST(s, b, m, SP_EBIAS)
 
 const union ieee754sp __ieee754sp_spcvals[] = {
 	SPCNST(0, SP_EMIN - 1, 0x000000),	/* + zero */
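
The inner braces that the new comment describes explicitly select the
union's anonymous struct member; some older GCC releases (assumption:
roughly pre-4.6) reject designated initializers that reach into an
anonymous member without them. A standalone illustration using a
simplified stand-in for union ieee754dp:

union dp {
	struct {			/* anonymous struct, first member */
		unsigned long long mant : 52;
		unsigned bexp : 11;
		unsigned sign : 1;
	};
	unsigned long long bits;
};

/* the inner brace pair initializes the anonymous struct member */
static const union dp dp_one = {
	{ .mant = 0, .bexp = 1023, .sign = 0 }
};

int main(void)
{
	return dp_one.sign;
}
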
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 775c2800cba2..8399ddf03a02 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -102,6 +102,7 @@ static struct insn insn_table_MM[] = {
 	{ insn_sd, 0, 0 },
 	{ insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
 	{ insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD },
+	{ insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD },
 	{ insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
 	{ insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },
 	{ insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 38792c2364f5..6708a2dbf934 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -89,7 +89,7 @@ static struct insn insn_table[] = {
 	{ insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
-	{ insn_lh, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+	{ insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
@@ -110,6 +110,7 @@ static struct insn insn_table[] = {
 	{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
 	{ insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD },
+	{ insn_slt, M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD },
 	{ insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD },
 	{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 00515805fe41..a01b0d6cedd2 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -53,7 +53,7 @@ enum opcode {
 	insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
 	insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul,
 	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
-	insn_sd, insn_sll, insn_sllv, insn_sltiu, insn_sltu, insn_sra,
+	insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra,
 	insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
 	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
 	insn_xor, insn_xori, insn_yield,
@@ -139,6 +139,13 @@ Ip_u1u2u3(op) \
 } \
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_s3s1s2(op) \
+Ip_s3s1s2(op) \
+{ \
+	build_insn(buf, insn##op, b, c, a); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u2u1u3(op) \
 Ip_u2u1u3(op) \
 { \
@@ -289,6 +296,7 @@ I_u2s3u1(_scd)
 I_u2s3u1(_sd)
 I_u2u1u3(_sll)
 I_u3u2u1(_sllv)
+I_s3s1s2(_slt)
 I_u2u1s3(_sltiu)
 I_u3u1u2(_sltu)
 I_u2u1u3(_sra)
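
The operand shuffle inside I_s3s1s2 -- build_insn(buf, insn##op, b, c, a)
-- exists because uasm_i_slt(buf, rd, rs, rt) is destination-first while
the R-type encoding orders its fields rs, rt, rd (the RS | RT | RD entry
in the tables above). A standalone encoder for the same instruction,
using the MIPS R-type layout with funct 0x2a for slt:

#include <stdint.h>
#include <stdio.h>

/* R-type: opcode(6) rs(5) rt(5) rd(5) shamt(5) funct(6) */
static uint32_t encode_slt(unsigned rd, unsigned rs, unsigned rt)
{
	return (0u << 26) | (rs << 21) | (rt << 16) | (rd << 11) | 0x2a;
}

int main(void)
{
	/* slt $v0, $a0, $a1 ($2, $4, $5) encodes as 0x0085102a */
	printf("0x%08x\n", encode_slt(2, 4, 5));
	return 0;
}
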
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index a67b9753330b..b87390a56a2f 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -119,8 +119,6 @@
 /* Arguments used by JIT */
 #define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */
 
-#define FLAG_NEED_X_RESET	(1 << 0)
-
 #define SBIT(x)		(1 << (x))	/* Signed version of BIT() */
 
 /**
@@ -153,6 +151,8 @@ static inline int optimize_div(u32 *k)
 	return 0;
 }
 
+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
+
 /* Simply emit the instruction if the JIT memory space has been allocated */
 #define emit_instr(ctx, func, ...) \
 do { \
@@ -166,9 +166,7 @@ do { \
 /* Determine if immediate is within the 16-bit signed range */
 static inline bool is_range16(s32 imm)
 {
-	if (imm >= SBIT(15) || imm < -SBIT(15))
-		return true;
-	return false;
+	return !(imm >= SBIT(15) || imm < -SBIT(15));
 }
 
 static inline void emit_addu(unsigned int dst, unsigned int src1,
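
The rewrite above also corrects the predicate's sense: the old body
returned true exactly when imm did NOT fit in 16 signed bits, which is
why every caller below flips its test from is_range16() to
!is_range16(). A standalone check of the fixed helper:

#include <stdbool.h>
#include <stdio.h>

#define SBIT(x) (1 << (x))	/* signed version of BIT() */

static bool is_range16(int imm)
{
	return !(imm >= SBIT(15) || imm < -SBIT(15));
}

int main(void)
{
	printf("%d %d %d\n", is_range16(32767), is_range16(-32768),
	       is_range16(70000));	/* prints 1 1 0 */
	return 0;
}
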
@@ -187,7 +185,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
 {
 	if (ctx->target != NULL) {
 		/* addiu can only handle s16 */
-		if (is_range16(imm)) {
+		if (!is_range16(imm)) {
 			u32 *p = &ctx->target[ctx->idx];
 			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
 			p = &ctx->target[ctx->idx + 1];
@@ -199,7 +197,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
 	}
 	ctx->idx++;
 
-	if (is_range16(imm))
+	if (!is_range16(imm))
 		ctx->idx++;
 }
 
@@ -240,7 +238,7 @@ static inline void emit_daddiu(unsigned int dst, unsigned int src,
 static inline void emit_addiu(unsigned int dst, unsigned int src,
 			      u32 imm, struct jit_ctx *ctx)
 {
-	if (is_range16(imm)) {
+	if (!is_range16(imm)) {
 		emit_load_imm(r_tmp, imm, ctx);
 		emit_addu(dst, r_tmp, src, ctx);
 	} else {
@@ -313,8 +311,11 @@ static inline void emit_sll(unsigned int dst, unsigned int src,
 			    unsigned int sa, struct jit_ctx *ctx)
 {
 	/* sa is 5-bits long */
-	BUG_ON(sa >= BIT(5));
-	emit_instr(ctx, sll, dst, src, sa);
+	if (sa >= BIT(5))
+		/* Shifting >= 32 results in zero */
+		emit_jit_reg_move(dst, r_zero, ctx);
+	else
+		emit_instr(ctx, sll, dst, src, sa);
 }
 
 static inline void emit_srlv(unsigned int dst, unsigned int src,
@@ -327,8 +328,17 @@ static inline void emit_srl(unsigned int dst, unsigned int src,
 			    unsigned int sa, struct jit_ctx *ctx)
 {
 	/* sa is 5-bits long */
-	BUG_ON(sa >= BIT(5));
-	emit_instr(ctx, srl, dst, src, sa);
+	if (sa >= BIT(5))
+		/* Shifting >= 32 results in zero */
+		emit_jit_reg_move(dst, r_zero, ctx);
+	else
+		emit_instr(ctx, srl, dst, src, sa);
+}
+
+static inline void emit_slt(unsigned int dst, unsigned int src1,
+			    unsigned int src2, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, slt, dst, src1, src2);
 }
 
 static inline void emit_sltu(unsigned int dst, unsigned int src1,
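
The shift helpers special-case sa >= 32 because the MIPS sll/srl
encodings only have a 5-bit shift-amount field, while classic BPF
defines shifts of 32 or more as producing 0; emitting a move from $zero
matches that semantic. The same rule in plain C (sketch):

#include <stdint.h>
#include <stdio.h>

static uint32_t bpf_lsh(uint32_t a, unsigned k)
{
	/* mirrors the emit_jit_reg_move(dst, r_zero, ctx) path */
	return k >= 32 ? 0 : a << k;
}

int main(void)
{
	printf("0x%x 0x%x\n", bpf_lsh(1, 4), bpf_lsh(1, 33)); /* 0x10 0x0 */
	return 0;
}
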
@@ -341,7 +351,7 @@ static inline void emit_sltiu(unsigned dst, unsigned int src,
 			      unsigned int imm, struct jit_ctx *ctx)
 {
 	/* 16 bit immediate */
-	if (is_range16((s32)imm)) {
+	if (!is_range16((s32)imm)) {
 		emit_load_imm(r_tmp, imm, ctx);
 		emit_sltu(dst, src, r_tmp, ctx);
 	} else {
@@ -408,7 +418,7 @@ static inline void emit_div(unsigned int dst, unsigned int src,
 		u32 *p = &ctx->target[ctx->idx];
 		uasm_i_divu(&p, dst, src);
 		p = &ctx->target[ctx->idx + 1];
-		uasm_i_mfhi(&p, dst);
+		uasm_i_mflo(&p, dst);
 	}
 	ctx->idx += 2;	/* 2 insts */
 }
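
The mfhi-to-mflo change is a correctness fix: MIPS divu leaves the
quotient in LO and the remainder in HI, so a division helper must read
LO; the old code handed back the remainder. The same split expressed
in C:

#include <stdio.h>

int main(void)
{
	unsigned a = 17, b = 5;
	unsigned lo = a / b;	/* what mflo would fetch: 3 */
	unsigned hi = a % b;	/* what mfhi would fetch: 2 */

	printf("quotient=%u remainder=%u\n", lo, hi);
	return 0;
}
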
@@ -443,6 +453,17 @@ static inline void emit_wsbh(unsigned int dst, unsigned int src,
 	emit_instr(ctx, wsbh, dst, src);
 }
 
+/* load pointer to register */
+static inline void emit_load_ptr(unsigned int dst, unsigned int src,
+				 int imm, struct jit_ctx *ctx)
+{
+	/* src contains the base addr of the 32/64-pointer */
+	if (config_enabled(CONFIG_64BIT))
+		emit_instr(ctx, ld, dst, imm, src);
+	else
+		emit_instr(ctx, lw, dst, imm, src);
+}
+
 /* load a function pointer to register */
 static inline void emit_load_func(unsigned int reg, ptr imm,
 				  struct jit_ctx *ctx)
@@ -545,29 +566,13 @@ static inline u16 align_sp(unsigned int num)
 	return num;
 }
 
-static inline void update_on_xread(struct jit_ctx *ctx)
-{
-	if (!(ctx->flags & SEEN_X))
-		ctx->flags |= FLAG_NEED_X_RESET;
-
-	ctx->flags |= SEEN_X;
-}
-
 static bool is_load_to_a(u16 inst)
 {
 	switch (inst) {
-	case BPF_S_LD_W_LEN:
-	case BPF_S_LD_W_ABS:
-	case BPF_S_LD_H_ABS:
-	case BPF_S_LD_B_ABS:
-	case BPF_S_ANC_CPU:
-	case BPF_S_ANC_IFINDEX:
-	case BPF_S_ANC_MARK:
-	case BPF_S_ANC_PROTOCOL:
-	case BPF_S_ANC_RXHASH:
-	case BPF_S_ANC_VLAN_TAG:
-	case BPF_S_ANC_VLAN_TAG_PRESENT:
-	case BPF_S_ANC_QUEUE:
+	case BPF_LD | BPF_W | BPF_LEN:
+	case BPF_LD | BPF_W | BPF_ABS:
+	case BPF_LD | BPF_H | BPF_ABS:
+	case BPF_LD | BPF_B | BPF_ABS:
 		return true;
 	default:
 		return false;
@@ -618,7 +623,10 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
 	if (ctx->flags & SEEN_MEM) {
 		if (real_off % (RSIZE * 2))
 			real_off += RSIZE;
-		emit_addiu(r_M, r_sp, real_off, ctx);
+		if (config_enabled(CONFIG_64BIT))
+			emit_daddiu(r_M, r_sp, real_off, ctx);
+		else
+			emit_addiu(r_M, r_sp, real_off, ctx);
 	}
 }
 
@@ -705,11 +713,11 @@ static void build_prologue(struct jit_ctx *ctx)
 	if (ctx->flags & SEEN_SKB)
 		emit_reg_move(r_skb, MIPS_R_A0, ctx);
 
-	if (ctx->flags & FLAG_NEED_X_RESET)
+	if (ctx->flags & SEEN_X)
 		emit_jit_reg_move(r_X, r_zero, ctx);
 
 	/* Do not leak kernel data to userspace */
-	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
+	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
 		emit_jit_reg_move(r_A, r_zero, ctx);
 }
 
@@ -757,13 +765,17 @@ static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
 	return (u64)err << 32 | ntohl(ret);
 }
 
-#define PKT_TYPE_MAX 7
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX	(7 << 5)
+#else
+#define PKT_TYPE_MAX	7
+#endif
 static int pkt_type_offset(void)
 {
 	struct sk_buff skb_probe = {
 		.pkt_type = ~0,
 	};
-	char *ct = (char *)&skb_probe;
+	u8 *ct = (u8 *)&skb_probe;
 	unsigned int off;
 
 	for (off = 0; off < sizeof(struct sk_buff); off++) {
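
pkt_type_offset() locates the bitfield by setting it to all ones inside
an otherwise zeroed struct and scanning for the first non-zero byte; on
big-endian targets the 3-bit field occupies the high bits of its byte,
hence the shifted PKT_TYPE_MAX. A standalone version of the probe with
a stand-in struct (the layout is illustrative, not sk_buff's):

#include <stdio.h>
#include <string.h>

struct probe {
	long pad0;
	unsigned char pkt_type : 3;	/* stand-in for sk_buff's field */
	unsigned char pad1 : 5;
};

int main(void)
{
	struct probe p;
	unsigned char *ct = (unsigned char *)&p;
	size_t off;

	memset(&p, 0, sizeof(p));
	p.pkt_type = ~0;
	for (off = 0; off < sizeof(p); off++)
		if (ct[off])
			printf("pkt_type at byte %zu, mask 0x%x\n",
			       off, ct[off]);
	return 0;
}
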
@@ -783,46 +795,62 @@ static int build_body(struct jit_ctx *ctx)
 	u32 k, b_off __maybe_unused;
 
 	for (i = 0; i < prog->len; i++) {
+		u16 code;
+
 		inst = &(prog->insns[i]);
 		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
 			 __func__, inst->code, inst->jt, inst->jf, inst->k);
 		k = inst->k;
+		code = bpf_anc_helper(inst);
 
 		if (ctx->target == NULL)
 			ctx->offsets[i] = ctx->idx * 4;
 
-		switch (inst->code) {
-		case BPF_S_LD_IMM:
+		switch (code) {
+		case BPF_LD | BPF_IMM:
 			/* A <- k ==> li r_A, k */
 			ctx->flags |= SEEN_A;
 			emit_load_imm(r_A, k, ctx);
 			break;
-		case BPF_S_LD_W_LEN:
+		case BPF_LD | BPF_W | BPF_LEN:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 			/* A <- len ==> lw r_A, offset(skb) */
 			ctx->flags |= SEEN_SKB | SEEN_A;
 			off = offsetof(struct sk_buff, len);
 			emit_load(r_A, r_skb, off, ctx);
 			break;
-		case BPF_S_LD_MEM:
+		case BPF_LD | BPF_MEM:
 			/* A <- M[k] ==> lw r_A, offset(M) */
 			ctx->flags |= SEEN_MEM | SEEN_A;
 			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
 			break;
-		case BPF_S_LD_W_ABS:
+		case BPF_LD | BPF_W | BPF_ABS:
 			/* A <- P[k:4] */
 			load_order = 2;
 			goto load;
-		case BPF_S_LD_H_ABS:
+		case BPF_LD | BPF_H | BPF_ABS:
 			/* A <- P[k:2] */
 			load_order = 1;
 			goto load;
-		case BPF_S_LD_B_ABS:
+		case BPF_LD | BPF_B | BPF_ABS:
 			/* A <- P[k:1] */
 			load_order = 0;
 load:
+			/* the interpreter will deal with the negative K */
+			if ((int)k < 0)
+				return -ENOTSUPP;
+
 			emit_load_imm(r_off, k, ctx);
 load_common:
+			/*
+			 * We may got here from the indirect loads so
+			 * return if offset is negative.
+			 */
+			emit_slt(r_s0, r_off, r_zero, ctx);
+			emit_bcond(MIPS_COND_NE, r_s0, r_zero,
+				   b_imm(prog->len, ctx), ctx);
+			emit_reg_move(r_ret, r_zero, ctx);
+
 			ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 |
 				SEEN_SKB | SEEN_A;
 
@@ -852,39 +880,42 @@ load_common:
 			emit_b(b_imm(prog->len, ctx), ctx);
 			emit_reg_move(r_ret, r_zero, ctx);
 			break;
-		case BPF_S_LD_W_IND:
+		case BPF_LD | BPF_W | BPF_IND:
 			/* A <- P[X + k:4] */
 			load_order = 2;
 			goto load_ind;
-		case BPF_S_LD_H_IND:
+		case BPF_LD | BPF_H | BPF_IND:
 			/* A <- P[X + k:2] */
 			load_order = 1;
 			goto load_ind;
-		case BPF_S_LD_B_IND:
+		case BPF_LD | BPF_B | BPF_IND:
 			/* A <- P[X + k:1] */
 			load_order = 0;
 load_ind:
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_OFF | SEEN_X;
 			emit_addiu(r_off, r_X, k, ctx);
 			goto load_common;
-		case BPF_S_LDX_IMM:
+		case BPF_LDX | BPF_IMM:
 			/* X <- k */
 			ctx->flags |= SEEN_X;
 			emit_load_imm(r_X, k, ctx);
 			break;
-		case BPF_S_LDX_MEM:
+		case BPF_LDX | BPF_MEM:
 			/* X <- M[k] */
 			ctx->flags |= SEEN_X | SEEN_MEM;
 			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
 			break;
-		case BPF_S_LDX_W_LEN:
+		case BPF_LDX | BPF_W | BPF_LEN:
 			/* X <- len */
 			ctx->flags |= SEEN_X | SEEN_SKB;
 			off = offsetof(struct sk_buff, len);
 			emit_load(r_X, r_skb, off, ctx);
 			break;
-		case BPF_S_LDX_B_MSH:
+		case BPF_LDX | BPF_B | BPF_MSH:
+			/* the interpreter will deal with the negative K */
+			if ((int)k < 0)
+				return -ENOTSUPP;
+
 			/* X <- 4 * (P[k:1] & 0xf) */
 			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB;
 			/* Load offset to a1 */
@@ -917,50 +948,49 @@ load_ind:
 			emit_b(b_imm(prog->len, ctx), ctx);
 			emit_load_imm(r_ret, 0, ctx); /* delay slot */
 			break;
-		case BPF_S_ST:
+		case BPF_ST:
 			/* M[k] <- A */
 			ctx->flags |= SEEN_MEM | SEEN_A;
 			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
 			break;
-		case BPF_S_STX:
+		case BPF_STX:
 			/* M[k] <- X */
 			ctx->flags |= SEEN_MEM | SEEN_X;
 			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
 			break;
-		case BPF_S_ALU_ADD_K:
+		case BPF_ALU | BPF_ADD | BPF_K:
 			/* A += K */
 			ctx->flags |= SEEN_A;
 			emit_addiu(r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ALU_ADD_X:
+		case BPF_ALU | BPF_ADD | BPF_X:
 			/* A += X */
 			ctx->flags |= SEEN_A | SEEN_X;
 			emit_addu(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_SUB_K:
+		case BPF_ALU | BPF_SUB | BPF_K:
 			/* A -= K */
 			ctx->flags |= SEEN_A;
 			emit_addiu(r_A, r_A, -k, ctx);
 			break;
-		case BPF_S_ALU_SUB_X:
+		case BPF_ALU | BPF_SUB | BPF_X:
 			/* A -= X */
 			ctx->flags |= SEEN_A | SEEN_X;
 			emit_subu(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_MUL_K:
+		case BPF_ALU | BPF_MUL | BPF_K:
 			/* A *= K */
 			/* Load K to scratch register before MUL */
 			ctx->flags |= SEEN_A | SEEN_S0;
 			emit_load_imm(r_s0, k, ctx);
 			emit_mul(r_A, r_A, r_s0, ctx);
 			break;
-		case BPF_S_ALU_MUL_X:
+		case BPF_ALU | BPF_MUL | BPF_X:
 			/* A *= X */
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_A | SEEN_X;
 			emit_mul(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_DIV_K:
+		case BPF_ALU | BPF_DIV | BPF_K:
 			/* A /= k */
 			if (k == 1)
 				break;
@@ -973,7 +1003,7 @@ load_ind:
 			emit_load_imm(r_s0, k, ctx);
 			emit_div(r_A, r_s0, ctx);
 			break;
-		case BPF_S_ALU_MOD_K:
+		case BPF_ALU | BPF_MOD | BPF_K:
 			/* A %= k */
 			if (k == 1 || optimize_div(&k)) {
 				ctx->flags |= SEEN_A;
@@ -984,9 +1014,8 @@ load_ind:
 				emit_mod(r_A, r_s0, ctx);
 			}
 			break;
-		case BPF_S_ALU_DIV_X:
+		case BPF_ALU | BPF_DIV | BPF_X:
 			/* A /= X */
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_X | SEEN_A;
 			/* Check if r_X is zero */
 			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -994,9 +1023,8 @@ load_ind:
 			emit_load_imm(r_val, 0, ctx); /* delay slot */
 			emit_div(r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_MOD_X:
+		case BPF_ALU | BPF_MOD | BPF_X:
 			/* A %= X */
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_X | SEEN_A;
 			/* Check if r_X is zero */
 			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -1004,94 +1032,89 @@ load_ind:
1004 emit_load_imm(r_val, 0, ctx); /* delay slot */ 1032 emit_load_imm(r_val, 0, ctx); /* delay slot */
1005 emit_mod(r_A, r_X, ctx); 1033 emit_mod(r_A, r_X, ctx);
1006 break; 1034 break;
1007 case BPF_S_ALU_OR_K: 1035 case BPF_ALU | BPF_OR | BPF_K:
1008 /* A |= K */ 1036 /* A |= K */
1009 ctx->flags |= SEEN_A; 1037 ctx->flags |= SEEN_A;
1010 emit_ori(r_A, r_A, k, ctx); 1038 emit_ori(r_A, r_A, k, ctx);
1011 break; 1039 break;
1012 case BPF_S_ALU_OR_X: 1040 case BPF_ALU | BPF_OR | BPF_X:
1013 /* A |= X */ 1041 /* A |= X */
1014 update_on_xread(ctx);
1015 ctx->flags |= SEEN_A; 1042 ctx->flags |= SEEN_A;
1016 emit_ori(r_A, r_A, r_X, ctx); 1043 emit_ori(r_A, r_A, r_X, ctx);
1017 break; 1044 break;
1018 case BPF_S_ALU_XOR_K: 1045 case BPF_ALU | BPF_XOR | BPF_K:
1019 /* A ^= k */ 1046 /* A ^= k */
1020 ctx->flags |= SEEN_A; 1047 ctx->flags |= SEEN_A;
1021 emit_xori(r_A, r_A, k, ctx); 1048 emit_xori(r_A, r_A, k, ctx);
1022 break; 1049 break;
1023 case BPF_S_ANC_ALU_XOR_X: 1050 case BPF_ANC | SKF_AD_ALU_XOR_X:
1024 case BPF_S_ALU_XOR_X: 1051 case BPF_ALU | BPF_XOR | BPF_X:
1025 /* A ^= X */ 1052 /* A ^= X */
1026 update_on_xread(ctx);
1027 ctx->flags |= SEEN_A; 1053 ctx->flags |= SEEN_A;
1028 emit_xor(r_A, r_A, r_X, ctx); 1054 emit_xor(r_A, r_A, r_X, ctx);
1029 break; 1055 break;
1030 case BPF_S_ALU_AND_K: 1056 case BPF_ALU | BPF_AND | BPF_K:
1031 /* A &= K */ 1057 /* A &= K */
1032 ctx->flags |= SEEN_A; 1058 ctx->flags |= SEEN_A;
1033 emit_andi(r_A, r_A, k, ctx); 1059 emit_andi(r_A, r_A, k, ctx);
1034 break; 1060 break;
1035 case BPF_S_ALU_AND_X: 1061 case BPF_ALU | BPF_AND | BPF_X:
1036 /* A &= X */ 1062 /* A &= X */
1037 update_on_xread(ctx);
1038 ctx->flags |= SEEN_A | SEEN_X; 1063 ctx->flags |= SEEN_A | SEEN_X;
1039 emit_and(r_A, r_A, r_X, ctx); 1064 emit_and(r_A, r_A, r_X, ctx);
1040 break; 1065 break;
1041 case BPF_S_ALU_LSH_K: 1066 case BPF_ALU | BPF_LSH | BPF_K:
1042 /* A <<= K */ 1067 /* A <<= K */
1043 ctx->flags |= SEEN_A; 1068 ctx->flags |= SEEN_A;
1044 emit_sll(r_A, r_A, k, ctx); 1069 emit_sll(r_A, r_A, k, ctx);
1045 break; 1070 break;
1046 case BPF_S_ALU_LSH_X: 1071 case BPF_ALU | BPF_LSH | BPF_X:
1047 /* A <<= X */ 1072 /* A <<= X */
1048 ctx->flags |= SEEN_A | SEEN_X; 1073 ctx->flags |= SEEN_A | SEEN_X;
1049 update_on_xread(ctx);
1050 emit_sllv(r_A, r_A, r_X, ctx); 1074 emit_sllv(r_A, r_A, r_X, ctx);
1051 break; 1075 break;
1052 case BPF_S_ALU_RSH_K: 1076 case BPF_ALU | BPF_RSH | BPF_K:
1053 /* A >>= K */ 1077 /* A >>= K */
1054 ctx->flags |= SEEN_A; 1078 ctx->flags |= SEEN_A;
1055 emit_srl(r_A, r_A, k, ctx); 1079 emit_srl(r_A, r_A, k, ctx);
1056 break; 1080 break;
1057 case BPF_S_ALU_RSH_X: 1081 case BPF_ALU | BPF_RSH | BPF_X:
1058 ctx->flags |= SEEN_A | SEEN_X; 1082 ctx->flags |= SEEN_A | SEEN_X;
1059 update_on_xread(ctx);
1060 emit_srlv(r_A, r_A, r_X, ctx); 1083 emit_srlv(r_A, r_A, r_X, ctx);
1061 break; 1084 break;
1062 case BPF_S_ALU_NEG: 1085 case BPF_ALU | BPF_NEG:
1063 /* A = -A */ 1086 /* A = -A */
1064 ctx->flags |= SEEN_A; 1087 ctx->flags |= SEEN_A;
1065 emit_neg(r_A, ctx); 1088 emit_neg(r_A, ctx);
1066 break; 1089 break;
1067 case BPF_S_JMP_JA: 1090 case BPF_JMP | BPF_JA:
1068 /* pc += K */ 1091 /* pc += K */
1069 emit_b(b_imm(i + k + 1, ctx), ctx); 1092 emit_b(b_imm(i + k + 1, ctx), ctx);
1070 emit_nop(ctx); 1093 emit_nop(ctx);
1071 break; 1094 break;
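BPF_JA shows the branch-offset convention used by every jump case below: classic BPF offsets count instructions relative to the next one, so the JIT converts them to absolute indices before calling b_imm(). A small illustrative sketch (bpf_target is not a real helper):

    /* Matches b_imm(i + k + 1, ctx) above and the
     * b_imm(i + inst->jt/jf + 1, ctx) calls in the jump cases. */
    static unsigned int bpf_target(unsigned int i, unsigned int off)
    {
        return i + off + 1;   /* +1: offsets are next-instruction relative */
    }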
1072 case BPF_S_JMP_JEQ_K: 1095 case BPF_JMP | BPF_JEQ | BPF_K:
1073 /* pc += ( A == K ) ? pc->jt : pc->jf */ 1096 /* pc += ( A == K ) ? pc->jt : pc->jf */
1074 condt = MIPS_COND_EQ | MIPS_COND_K; 1097 condt = MIPS_COND_EQ | MIPS_COND_K;
1075 goto jmp_cmp; 1098 goto jmp_cmp;
1076 case BPF_S_JMP_JEQ_X: 1099 case BPF_JMP | BPF_JEQ | BPF_X:
1077 ctx->flags |= SEEN_X; 1100 ctx->flags |= SEEN_X;
1078 /* pc += ( A == X ) ? pc->jt : pc->jf */ 1101 /* pc += ( A == X ) ? pc->jt : pc->jf */
1079 condt = MIPS_COND_EQ | MIPS_COND_X; 1102 condt = MIPS_COND_EQ | MIPS_COND_X;
1080 goto jmp_cmp; 1103 goto jmp_cmp;
1081 case BPF_S_JMP_JGE_K: 1104 case BPF_JMP | BPF_JGE | BPF_K:
1082 /* pc += ( A >= K ) ? pc->jt : pc->jf */ 1105 /* pc += ( A >= K ) ? pc->jt : pc->jf */
1083 condt = MIPS_COND_GE | MIPS_COND_K; 1106 condt = MIPS_COND_GE | MIPS_COND_K;
1084 goto jmp_cmp; 1107 goto jmp_cmp;
1085 case BPF_S_JMP_JGE_X: 1108 case BPF_JMP | BPF_JGE | BPF_X:
1086 ctx->flags |= SEEN_X; 1109 ctx->flags |= SEEN_X;
1087 /* pc += ( A >= X ) ? pc->jt : pc->jf */ 1110 /* pc += ( A >= X ) ? pc->jt : pc->jf */
1088 condt = MIPS_COND_GE | MIPS_COND_X; 1111 condt = MIPS_COND_GE | MIPS_COND_X;
1089 goto jmp_cmp; 1112 goto jmp_cmp;
1090 case BPF_S_JMP_JGT_K: 1113 case BPF_JMP | BPF_JGT | BPF_K:
1091 /* pc += ( A > K ) ? pc->jt : pc->jf */ 1114 /* pc += ( A > K ) ? pc->jt : pc->jf */
1092 condt = MIPS_COND_GT | MIPS_COND_K; 1115 condt = MIPS_COND_GT | MIPS_COND_K;
1093 goto jmp_cmp; 1116 goto jmp_cmp;
1094 case BPF_S_JMP_JGT_X: 1117 case BPF_JMP | BPF_JGT | BPF_X:
1095 ctx->flags |= SEEN_X; 1118 ctx->flags |= SEEN_X;
1096 /* pc += ( A > X ) ? pc->jt : pc->jf */ 1119 /* pc += ( A > X ) ? pc->jt : pc->jf */
1097 condt = MIPS_COND_GT | MIPS_COND_X; 1120 condt = MIPS_COND_GT | MIPS_COND_X;
@@ -1109,7 +1132,7 @@ jmp_cmp:
1109 } 1132 }
1110 /* A < (K|X) ? r_scratch = 1 */ 1133 /* A < (K|X) ? r_scratch = 1 */
1111 b_off = b_imm(i + inst->jf + 1, ctx); 1134 b_off = b_imm(i + inst->jf + 1, ctx);
1112 emit_bcond(MIPS_COND_GT, r_s0, r_zero, b_off, 1135 emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
1113 ctx); 1136 ctx);
1114 emit_nop(ctx); 1137 emit_nop(ctx);
1115 /* A > (K|X) ? scratch = 0 */ 1138 /* A > (K|X) ? scratch = 0 */
@@ -1167,7 +1190,7 @@ jmp_cmp:
1167 } 1190 }
1168 } 1191 }
1169 break; 1192 break;
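The MIPS_COND_GT to MIPS_COND_NE change above reflects how the comparison is materialized: slt/sltu leaves a plain 0-or-1 flag in r_s0, so the fallthrough to pc->jf is simply a not-equal-to-zero branch. A hedged C model (take_jf_branch is an illustrative name):

    #include <stdint.h>

    /* After emit_slt(r_s0, r_A, r_s1), r_s0 is 1 iff A < val; the
     * bne r_s0, $zero then takes the jf path for a failed JGE/JGT. */
    static int take_jf_branch(uint32_t A, uint32_t val)
    {
        uint32_t s0 = (A < val) ? 1 : 0;   /* slt/sltu result */
        return s0 != 0;                    /* MIPS_COND_NE vs. r_zero */
    }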
1170 case BPF_S_JMP_JSET_K: 1193 case BPF_JMP | BPF_JSET | BPF_K:
1171 ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A; 1194 ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A;
1172 /* pc += (A & K) ? pc->jt : pc->jf */ 1195 /* pc += (A & K) ? pc->jt : pc->jf */
1173 emit_load_imm(r_s1, k, ctx); 1196 emit_load_imm(r_s1, k, ctx);
@@ -1181,7 +1204,7 @@ jmp_cmp:
1181 emit_b(b_off, ctx); 1204 emit_b(b_off, ctx);
1182 emit_nop(ctx); 1205 emit_nop(ctx);
1183 break; 1206 break;
1184 case BPF_S_JMP_JSET_X: 1207 case BPF_JMP | BPF_JSET | BPF_X:
1185 ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A; 1208 ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A;
1186 /* pc += (A & X) ? pc->jt : pc->jf */ 1209 /* pc += (A & X) ? pc->jt : pc->jf */
1187 emit_and(r_s0, r_A, r_X, ctx); 1210 emit_and(r_s0, r_A, r_X, ctx);
@@ -1194,7 +1217,7 @@ jmp_cmp:
1194 emit_b(b_off, ctx); 1217 emit_b(b_off, ctx);
1195 emit_nop(ctx); 1218 emit_nop(ctx);
1196 break; 1219 break;
1197 case BPF_S_RET_A: 1220 case BPF_RET | BPF_A:
1198 ctx->flags |= SEEN_A; 1221 ctx->flags |= SEEN_A;
1199 if (i != prog->len - 1) 1222 if (i != prog->len - 1)
1200 /* 1223 /*
@@ -1204,7 +1227,7 @@ jmp_cmp:
1204 emit_b(b_imm(prog->len, ctx), ctx); 1227 emit_b(b_imm(prog->len, ctx), ctx);
1205 emit_reg_move(r_ret, r_A, ctx); /* delay slot */ 1228 emit_reg_move(r_ret, r_A, ctx); /* delay slot */
1206 break; 1229 break;
1207 case BPF_S_RET_K: 1230 case BPF_RET | BPF_K:
1208 /* 1231 /*
1209 * It can emit two instructions so it does not fit in 1232 * It can emit two instructions so it does not fit in
1210 * the delay slot. 1233 * the delay slot.
@@ -1219,19 +1242,18 @@ jmp_cmp:
1219 emit_nop(ctx); 1242 emit_nop(ctx);
1220 } 1243 }
1221 break; 1244 break;
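The comment above also explains the structure of the BPF_RET | BPF_K case: loading an arbitrary 32-bit immediate may take two MIPS instructions, and a branch delay slot holds exactly one. A rough sketch of that length rule (load_imm_len is illustrative):

    #include <stdint.h>

    /* Roughly: constants with any upper-half bits set need lui+ori
     * (two instructions); small ones fit a single li/ori. */
    static int load_imm_len(uint32_t k)
    {
        return (k >> 16) ? 2 : 1;
    }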
1222 case BPF_S_MISC_TAX: 1245 case BPF_MISC | BPF_TAX:
1223 /* X = A */ 1246 /* X = A */
1224 ctx->flags |= SEEN_X | SEEN_A; 1247 ctx->flags |= SEEN_X | SEEN_A;
1225 emit_jit_reg_move(r_X, r_A, ctx); 1248 emit_jit_reg_move(r_X, r_A, ctx);
1226 break; 1249 break;
1227 case BPF_S_MISC_TXA: 1250 case BPF_MISC | BPF_TXA:
1228 /* A = X */ 1251 /* A = X */
1229 ctx->flags |= SEEN_A | SEEN_X; 1252 ctx->flags |= SEEN_A | SEEN_X;
1230 update_on_xread(ctx);
1231 emit_jit_reg_move(r_A, r_X, ctx); 1253 emit_jit_reg_move(r_A, r_X, ctx);
1232 break; 1254 break;
1233 /* AUX */ 1255 /* AUX */
1234 case BPF_S_ANC_PROTOCOL: 1256 case BPF_ANC | SKF_AD_PROTOCOL:
1235 /* A = ntohs(skb->protocol) */ 1257 /* A = ntohs(skb->protocol) */
1236 ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A; 1258 ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
1237 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1259 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -1256,7 +1278,7 @@ jmp_cmp:
1256 } 1278 }
1257#endif 1279#endif
1258 break; 1280 break;
1259 case BPF_S_ANC_CPU: 1281 case BPF_ANC | SKF_AD_CPU:
1260 ctx->flags |= SEEN_A | SEEN_OFF; 1282 ctx->flags |= SEEN_A | SEEN_OFF;
1261 /* A = current_thread_info()->cpu */ 1283 /* A = current_thread_info()->cpu */
1262 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, 1284 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
@@ -1265,11 +1287,12 @@ jmp_cmp:
1265 /* $28/gp points to the thread_info struct */ 1287 /* $28/gp points to the thread_info struct */
1266 emit_load(r_A, 28, off, ctx); 1288 emit_load(r_A, 28, off, ctx);
1267 break; 1289 break;
1268 case BPF_S_ANC_IFINDEX: 1290 case BPF_ANC | SKF_AD_IFINDEX:
1269 /* A = skb->dev->ifindex */ 1291 /* A = skb->dev->ifindex */
1270 ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0; 1292 ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0;
1271 off = offsetof(struct sk_buff, dev); 1293 off = offsetof(struct sk_buff, dev);
1272 emit_load(r_s0, r_skb, off, ctx); 1294 /* Load *dev pointer */
1295 emit_load_ptr(r_s0, r_skb, off, ctx);
1273 /* error (0) in the delay slot */ 1296 /* error (0) in the delay slot */
1274 emit_bcond(MIPS_COND_EQ, r_s0, r_zero, 1297 emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
1275 b_imm(prog->len, ctx), ctx); 1298 b_imm(prog->len, ctx), ctx);
@@ -1279,31 +1302,36 @@ jmp_cmp:
1279 off = offsetof(struct net_device, ifindex); 1302 off = offsetof(struct net_device, ifindex);
1280 emit_load(r_A, r_s0, off, ctx); 1303 emit_load(r_A, r_s0, off, ctx);
1281 break; 1304 break;
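The emit_load() to emit_load_ptr() switch in the ifindex case is a word-size fix: skb->dev is a pointer, and on a 64-bit kernel a 4-byte load would truncate it before the NULL check and the ifindex dereference. A hedged user-space model of the distinction (load_ptr is illustrative; the real helper picks lw vs. ld):

    #include <stddef.h>
    #include <string.h>

    /* Pointer-sized load: 8 bytes on MIPS64, 4 on MIPS32. */
    static void *load_ptr(const void *base, size_t off)
    {
        void *p;
        memcpy(&p, (const char *)base + off, sizeof(p));
        return p;
    }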
1282 case BPF_S_ANC_MARK: 1305 case BPF_ANC | SKF_AD_MARK:
1283 ctx->flags |= SEEN_SKB | SEEN_A; 1306 ctx->flags |= SEEN_SKB | SEEN_A;
1284 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 1307 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
1285 off = offsetof(struct sk_buff, mark); 1308 off = offsetof(struct sk_buff, mark);
1286 emit_load(r_A, r_skb, off, ctx); 1309 emit_load(r_A, r_skb, off, ctx);
1287 break; 1310 break;
1288 case BPF_S_ANC_RXHASH: 1311 case BPF_ANC | SKF_AD_RXHASH:
1289 ctx->flags |= SEEN_SKB | SEEN_A; 1312 ctx->flags |= SEEN_SKB | SEEN_A;
1290 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 1313 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
1291 off = offsetof(struct sk_buff, hash); 1314 off = offsetof(struct sk_buff, hash);
1292 emit_load(r_A, r_skb, off, ctx); 1315 emit_load(r_A, r_skb, off, ctx);
1293 break; 1316 break;
1294 case BPF_S_ANC_VLAN_TAG: 1317 case BPF_ANC | SKF_AD_VLAN_TAG:
1295 case BPF_S_ANC_VLAN_TAG_PRESENT: 1318 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
1296 ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A; 1319 ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A;
1297 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1320 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1298 vlan_tci) != 2); 1321 vlan_tci) != 2);
1299 off = offsetof(struct sk_buff, vlan_tci); 1322 off = offsetof(struct sk_buff, vlan_tci);
1300 emit_half_load(r_s0, r_skb, off, ctx); 1323 emit_half_load(r_s0, r_skb, off, ctx);
1301 if (inst->code == BPF_S_ANC_VLAN_TAG) 1324 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
1302 emit_and(r_A, r_s0, VLAN_VID_MASK, ctx); 1325 emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
1303 else 1326 } else {
1304 emit_and(r_A, r_s0, VLAN_TAG_PRESENT, ctx); 1327 emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
1328 /* return 1 if present */
1329 emit_sltu(r_A, r_zero, r_A, ctx);
1330 }
1305 break; 1331 break;
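The VLAN rewrite above does two jobs: SKF_AD_VLAN_TAG masks the present bit out of vlan_tci, and SKF_AD_VLAN_TAG_PRESENT must yield exactly 0 or 1, which sltu against $zero provides. A hedged C model (function names are illustrative; VLAN_TAG_PRESENT is assumed to be the 0x1000 CFI bit of this era's if_vlan.h):

    #include <stdint.h>

    #define VLAN_TAG_PRESENT 0x1000   /* assumption, see lead-in */

    static uint32_t vlan_tag(uint16_t vlan_tci)
    {
        /* emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx) */
        return vlan_tci & (uint16_t)~VLAN_TAG_PRESENT;
    }

    static uint32_t vlan_tag_present(uint16_t vlan_tci)
    {
        uint32_t a = vlan_tci & VLAN_TAG_PRESENT;  /* emit_andi */
        return 0 < a;   /* emit_sltu(r_A, r_zero, r_A): A = (0 < A) */
    }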
1306 case BPF_S_ANC_PKTTYPE: 1332 case BPF_ANC | SKF_AD_PKTTYPE:
1333 ctx->flags |= SEEN_SKB;
1334
1307 off = pkt_type_offset(); 1335 off = pkt_type_offset();
1308 1336
1309 if (off < 0) 1337 if (off < 0)
@@ -1311,8 +1339,12 @@ jmp_cmp:
1311 emit_load_byte(r_tmp, r_skb, off, ctx); 1339 emit_load_byte(r_tmp, r_skb, off, ctx);
1312 /* Keep only the last 3 bits */ 1340 /* Keep only the last 3 bits */
1313 emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); 1341 emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
1342#ifdef __BIG_ENDIAN_BITFIELD
1343 /* Get the actual packet type to the lower 3 bits */
1344 emit_srl(r_A, r_A, 5, ctx);
1345#endif
1314 break; 1346 break;
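The new __BIG_ENDIAN_BITFIELD branch exists because pkt_type is a 3-bit bitfield: on big-endian layouts it sits in the upper bits of its byte, so the mask keeps those bits and the srl by 5 normalizes the value. A hedged sketch (the per-endian PKT_TYPE_MAX values are assumed to mirror bpf_jit.c):

    #include <stdint.h>

    #ifdef __BIG_ENDIAN_BITFIELD
    # define PKT_TYPE_MAX (7 << 5)   /* assumption: mirrors the JIT */
    #else
    # define PKT_TYPE_MAX 7
    #endif

    static uint8_t pkt_type_bits(uint8_t raw)
    {
        uint8_t t = raw & PKT_TYPE_MAX; /* emit_andi(r_A, r_tmp, ...) */
    #ifdef __BIG_ENDIAN_BITFIELD
        t >>= 5;                        /* emit_srl(r_A, r_A, 5) */
    #endif
        return t;
    }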
1315 case BPF_S_ANC_QUEUE: 1347 case BPF_ANC | SKF_AD_QUEUE:
1316 ctx->flags |= SEEN_SKB | SEEN_A; 1348 ctx->flags |= SEEN_SKB | SEEN_A;
1317 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1349 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1318 queue_mapping) != 2); 1350 queue_mapping) != 2);
@@ -1322,8 +1354,8 @@ jmp_cmp:
1322 emit_half_load(r_A, r_skb, off, ctx); 1354 emit_half_load(r_A, r_skb, off, ctx);
1323 break; 1355 break;
1324 default: 1356 default:
1325 pr_warn("%s: Unhandled opcode: 0x%02x\n", __FILE__, 1357 pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
1326 inst->code); 1358 inst->code);
1327 return -1; 1359 return -1;
1328 } 1360 }
1329 } 1361 }