author	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-08 19:41:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-08 19:41:47 -0400
commit	92fab77b6b309dc219b02da4a69ad5dc76f7ec74 (patch)
tree	2fa9ae2278b871398aa576977c75fef3992ee358
parent	ef75bd71c5d31dc17ae41ff8bec92630a3037d69 (diff)
parent	3751cbda8f223549d7ea28803cbec8ac87e43ed2 (diff)
Merge tag 'mips_5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
Pull MIPS updates from Paul Burton:

 - A set of memblock initialization improvements thanks to Serge Semin,
   tidying up after our conversion from bootmem to memblock back in
   v4.20.

 - Our eBPF JIT, which previously supported only MIPS64r2 through
   MIPS64r5, is improved to also support MIPS64r6. Support for MIPS32
   systems is introduced, with the caveat that it only works for
   programs that don't use 64 bit registers or operations - those will
   bail out & need to be interpreted.

 - Improvements to the allocation & configuration of our exception
   vector that should fix issues seen on some platforms using recent
   versions of U-Boot.

 - Some minor improvements to code generated for jump labels, along
   with enabling them by default for generic kernels.

* tag 'mips_5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (27 commits)
  mips: Manually call fdt_init_reserved_mem() method
  mips: Make sure dt memory regions are valid
  mips: Perform early low memory test
  mips: Dump memblock regions for debugging
  mips: Add reserve-nomap memory type support
  mips: Use memblock to reserve the __nosave memory range
  mips: Discard post-CMA-init foreach loop
  mips: Reserve memory for the kernel image resources
  MIPS: Remove duplicate EBase configuration
  MIPS: Sync icache for whole exception vector
  MIPS: Always allocate exception vector for MIPSr2+
  MIPS: Use memblock_phys_alloc() for exception vector
  mips: Combine memblock init and memory reservation loops
  mips: Discard rudiments from bootmem_init
  mips: Make sure kernel .bss exists in boot mem pool
  mips: vdso: drop unnecessary cc-ldoption
  Revert "MIPS: ralink: fix cpu clock of mt7621 and add dt clk devices"
  MIPS: generic: Enable CONFIG_JUMP_LABEL
  MIPS: jump_label: Use compact branches for >= r6
  MIPS: jump_label: Remove redundant nops
  ...
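The jump label item above is easiest to see from the consumer side: with CONFIG_JUMP_LABEL=y, a static key compiles each test site down to a single runtime-patched nop or branch instead of a load-and-compare. A minimal kernel-code sketch (the key and functions here are made up for illustration; only DEFINE_STATIC_KEY_FALSE, static_branch_unlikely and static_branch_enable are the real API):

	#include <linux/jump_label.h>
	#include <linux/printk.h>

	/* Hypothetical feature flag, false by default: the fast path runs
	 * through a single nop until the key is switched on. */
	static DEFINE_STATIC_KEY_FALSE(mips_demo_key);

	void demo_hot_path(void)
	{
		if (static_branch_unlikely(&mips_demo_key))
			pr_info("slow path taken\n");	/* placed out of line */
	}

	void demo_toggle(void)
	{
		static_branch_enable(&mips_demo_key);	/* patches every site */
	}

On MIPSr6 kernels the patched instruction is now a compact branch (bc), as the jump_label changes below show.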
-rw-r--r--  arch/mips/Kconfig                   |   64
-rw-r--r--  arch/mips/bcm47xx/Kconfig           |    8
-rw-r--r--  arch/mips/bcm63xx/boards/Kconfig    |    2
-rw-r--r--  arch/mips/configs/generic_defconfig |    1
-rw-r--r--  arch/mips/include/asm/bootinfo.h    |    1
-rw-r--r--  arch/mips/include/asm/jump_label.h  |   15
-rw-r--r--  arch/mips/include/asm/uasm.h        |    8
-rw-r--r--  arch/mips/include/uapi/asm/inst.h   |    6
-rw-r--r--  arch/mips/kernel/entry.S            |    5
-rw-r--r--  arch/mips/kernel/jump_label.c       |   30
-rw-r--r--  arch/mips/kernel/prom.c             |   18
-rw-r--r--  arch/mips/kernel/setup.c            |  129
-rw-r--r--  arch/mips/kernel/traps.c            |   63
-rw-r--r--  arch/mips/kvm/emulate.c             |    4
-rw-r--r--  arch/mips/mm/uasm-mips.c            |   14
-rw-r--r--  arch/mips/mm/uasm.c                 |   39
-rw-r--r--  arch/mips/net/Makefile              |    1
-rw-r--r--  arch/mips/net/bpf_jit.c             | 1270
-rw-r--r--  arch/mips/net/bpf_jit_asm.S         |  285
-rw-r--r--  arch/mips/net/ebpf_jit.c            |  191
-rw-r--r--  arch/mips/pic32/Kconfig             |    8
-rw-r--r--  arch/mips/vdso/Makefile             |    4
22 files changed, 341 insertions(+), 1825 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b9c48b27162d..ff8cff9fcf54 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -44,8 +44,7 @@ config MIPS
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
-	select HAVE_CBPF_JIT if (!64BIT && !CPU_MICROMIPS)
-	select HAVE_EBPF_JIT if (64BIT && !CPU_MICROMIPS)
+	select HAVE_EBPF_JIT if (!CPU_MICROMIPS)
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_COPY_THREAD_TLS
 	select HAVE_C_RECORDMCOUNT
@@ -276,7 +275,7 @@ config BCM47XX
 	select BCM47XX_SPROM
 	select BCM47XX_SSB if !BCM47XX_BCMA
 	help
-	 Support for BCM47XX based boards
+	  Support for BCM47XX based boards
 
 config BCM63XX
 	bool "Broadcom BCM63XX based boards"
@@ -295,7 +294,7 @@ config BCM63XX
 	select MIPS_L1_CACHE_SHIFT_4
 	select CLKDEV_LOOKUP
 	help
-	 Support for BCM63XX based boards
+	  Support for BCM63XX based boards
 
 config MIPS_COBALT
 	bool "Cobalt Server"
@@ -374,10 +373,10 @@ config MACH_JAZZ
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_100HZ
 	help
-	 This a family of machines based on the MIPS R4030 chipset which was
-	 used by several vendors to build RISC/os and Windows NT workstations.
-	 Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and
-	 Olivetti M700-10 workstations.
+	  This a family of machines based on the MIPS R4030 chipset which was
+	  used by several vendors to build RISC/os and Windows NT workstations.
+	  Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and
+	  Olivetti M700-10 workstations.
 
 config MACH_INGENIC
 	bool "Ingenic SoC based machines"
@@ -573,14 +572,14 @@ config NXP_STB220
 	bool "NXP STB220 board"
 	select SOC_PNX833X
 	help
-	 Support for NXP Semiconductors STB220 Development Board.
+	  Support for NXP Semiconductors STB220 Development Board.
 
 config NXP_STB225
 	bool "NXP 225 board"
 	select SOC_PNX833X
 	select SOC_PNX8335
 	help
-	 Support for NXP Semiconductors STB225 Development Board.
+	  Support for NXP Semiconductors STB225 Development Board.
 
 config PMC_MSP
 	bool "PMC-Sierra MSP chipsets"
@@ -722,9 +721,9 @@ config SGI_IP28
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select MIPS_L1_CACHE_SHIFT_7
 	help
-	 This is the SGI Indigo2 with R10000 processor. To compile a Linux
-	 kernel that runs on these, say Y here.
+	  This is the SGI Indigo2 with R10000 processor. To compile a Linux
+	  kernel that runs on these, say Y here.
 
 config SGI_IP32
 	bool "SGI IP32 (O2)"
@@ -1168,9 +1167,9 @@ config HOLES_IN_ZONE
 config SYS_SUPPORTS_RELOCATABLE
 	bool
 	help
-	 Selected if the platform supports relocating the kernel.
-	 The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
-	 to allow access to command line and entropy sources.
+	  Selected if the platform supports relocating the kernel.
+	  The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
+	  to allow access to command line and entropy sources.
 
 config MIPS_CBPF_JIT
 	def_bool y
@@ -2113,8 +2112,8 @@ config MIPS_PGD_C0_CONTEXT
 # Set to y for ptrace access to watch registers.
 #
 config HARDWARE_WATCHPOINTS
-       bool
-       default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
+	bool
+	default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
 
 menu "Kernel type"
 
@@ -2178,10 +2177,10 @@ config PAGE_SIZE_4KB
 	bool "4kB"
 	depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
 	help
-	 This option select the standard 4kB Linux page size. On some
-	 R3000-family processors this is the only available page size. Using
-	 4kB page size will minimize memory consumption and is therefore
-	 recommended for low memory systems.
+	  This option select the standard 4kB Linux page size. On some
+	  R3000-family processors this is the only available page size. Using
+	  4kB page size will minimize memory consumption and is therefore
+	  recommended for low memory systems.
 
 config PAGE_SIZE_8KB
 	bool "8kB"
@@ -2474,7 +2473,6 @@ config SB1_PASS_2_1_WORKAROUNDS
 	depends on CPU_SB1 && CPU_SB1_PASS_2
 	default y
 
-
 choice
 	prompt "SmartMIPS or microMIPS ASE support"
 
@@ -2682,16 +2680,16 @@ config RANDOMIZE_BASE
 	bool "Randomize the address of the kernel image"
 	depends on RELOCATABLE
 	---help---
-	 Randomizes the physical and virtual address at which the
-	 kernel image is loaded, as a security feature that
-	 deters exploit attempts relying on knowledge of the location
-	 of kernel internals.
+	  Randomizes the physical and virtual address at which the
+	  kernel image is loaded, as a security feature that
+	  deters exploit attempts relying on knowledge of the location
+	  of kernel internals.
 
-	 Entropy is generated using any coprocessor 0 registers available.
+	  Entropy is generated using any coprocessor 0 registers available.
 
-	 The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
+	  The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
 
-	 If unsure, say N.
+	  If unsure, say N.
 
 config RANDOMIZE_BASE_MAX_OFFSET
 	hex "Maximum kASLR offset" if EXPERT
@@ -2821,7 +2819,7 @@ choice
 	prompt "Timer frequency"
 	default HZ_250
 	help
-	 Allows the configuration of the timer frequency.
+	  Allows the configuration of the timer frequency.
 
 config HZ_24
 	bool "24 HZ" if SYS_SUPPORTS_24HZ || SYS_SUPPORTS_ARBIT_HZ
@@ -3121,10 +3119,10 @@ config ARCH_MMAP_RND_BITS_MAX
 	default 15
 
 config ARCH_MMAP_RND_COMPAT_BITS_MIN
-       default 8
+	default 8
 
 config ARCH_MMAP_RND_COMPAT_BITS_MAX
-       default 15
+	default 15
 
 config I8253
 	bool
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index 29471038d817..6889f74e06f5 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -15,9 +15,9 @@ config BCM47XX_SSB
 	select SSB_DRIVER_GPIO
 	default y
 	help
-	 Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
+	  Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
 
-	 This will generate an image with support for SSB and MIPS32 R1 instruction set.
+	  This will generate an image with support for SSB and MIPS32 R1 instruction set.
 
 config BCM47XX_BCMA
 	bool "BCMA Support for Broadcom BCM47XX"
@@ -31,8 +31,8 @@ config BCM47XX_BCMA
 	select BCMA_DRIVER_GPIO
 	default y
 	help
-	 Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
+	  Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
 
-	 This will generate an image with support for BCMA and MIPS32 R2 instruction set.
+	  This will generate an image with support for BCMA and MIPS32 R2 instruction set.
 
 endif
diff --git a/arch/mips/bcm63xx/boards/Kconfig b/arch/mips/bcm63xx/boards/Kconfig
index f60d96610ace..492c3bd005d5 100644
--- a/arch/mips/bcm63xx/boards/Kconfig
+++ b/arch/mips/bcm63xx/boards/Kconfig
@@ -5,7 +5,7 @@ choice
 	default BOARD_BCM963XX
 
 config BOARD_BCM963XX
-       bool "Generic Broadcom 963xx boards"
+	bool "Generic Broadcom 963xx boards"
 	select SSB
 
 endchoice
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index 5d80521e5d5a..714169e411cf 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -26,6 +26,7 @@ CONFIG_MIPS_CPS=y
 CONFIG_HIGHMEM=y
 CONFIG_NR_CPUS=16
 CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_TRIM_UNUSED_KSYMS=y
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index a301a8f4bc66..235bc2f52113 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -92,6 +92,7 @@ extern unsigned long mips_machtype;
 #define BOOT_MEM_ROM_DATA	2
 #define BOOT_MEM_RESERVED	3
 #define BOOT_MEM_INIT_RAM	4
+#define BOOT_MEM_NOMAP		5
 
 /*
  * A memory map that's built upon what was determined
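A usage sketch for the new type (hypothetical platform code; the real consumer added in this series is the device-tree path in prom.c below). A region registered as BOOT_MEM_NOMAP is handed to memblock_remove() during bootmem_init(), so the kernel neither maps nor allocates from it:

	#include <linux/sizes.h>

	/* Sketch: firmware owns this range; keep it out of the linear map.
	 * Base and size are made-up values for illustration. */
	add_memory_region(0x1f000000, SZ_1M, BOOT_MEM_NOMAP);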
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index e4456e450f94..3185fd3220ec 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -11,6 +11,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
+#include <asm/isa-rev.h>
 
 #define JUMP_LABEL_NOP_SIZE 4
 
@@ -21,15 +22,20 @@
 #endif
 
 #ifdef CONFIG_CPU_MICROMIPS
-#define B_INSN "b32"
+# define B_INSN "b32"
+# define J_INSN "j32"
+#elif MIPS_ISA_REV >= 6
+# define B_INSN "bc"
+# define J_INSN "bc"
 #else
-#define B_INSN "b"
+# define B_INSN "b"
+# define J_INSN "j"
 #endif
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
-		"2:\tnop\n\t"
+		"2:\t.insn\n\t"
 		".pushsection __jump_table, \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
 		".popsection\n\t"
@@ -42,8 +48,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("1:\tj %l[l_yes]\n\t"
-		"nop\n\t"
+	asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t"
 		".pushsection __jump_table, \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
 		".popsection\n\t"
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index b1990dd75f27..f7effca791a5 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -86,14 +86,18 @@ Ip_u2u1(_ctcmsa);
 Ip_u2u1s3(_daddiu);
 Ip_u3u1u2(_daddu);
 Ip_u1u2(_ddivu);
+Ip_u3u1u2(_ddivu_r6);
 Ip_u1(_di);
 Ip_u2u1msbu3(_dins);
 Ip_u2u1msbu3(_dinsm);
 Ip_u2u1msbu3(_dinsu);
 Ip_u1u2(_divu);
+Ip_u3u1u2(_divu_r6);
 Ip_u1u2u3(_dmfc0);
+Ip_u3u1u2(_dmodu);
 Ip_u1u2u3(_dmtc0);
 Ip_u1u2(_dmultu);
+Ip_u3u1u2(_dmulu);
 Ip_u2u1u3(_drotr);
 Ip_u2u1u3(_drotr32);
 Ip_u2u1(_dsbh);
@@ -131,6 +135,7 @@ Ip_u1u2u3(_mfc0);
 Ip_u1u2u3(_mfhc0);
 Ip_u1(_mfhi);
 Ip_u1(_mflo);
+Ip_u3u1u2(_modu);
 Ip_u3u1u2(_movn);
 Ip_u3u1u2(_movz);
 Ip_u1u2u3(_mtc0);
@@ -139,6 +144,7 @@ Ip_u1(_mthi);
 Ip_u1(_mtlo);
 Ip_u3u1u2(_mul);
 Ip_u1u2(_multu);
+Ip_u3u1u2(_mulu);
 Ip_u3u1u2(_nor);
 Ip_u3u1u2(_or);
 Ip_u2u1u3(_ori);
@@ -149,6 +155,8 @@ Ip_u2s3u1(_sb);
 Ip_u2s3u1(_sc);
 Ip_u2s3u1(_scd);
 Ip_u2s3u1(_sd);
+Ip_u3u1u2(_seleqz);
+Ip_u3u1u2(_selnez);
 Ip_u2s3u1(_sh);
 Ip_u2u1u3(_sll);
 Ip_u3u2u1(_sllv);
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 40fbb5dd66df..eaa3a80affdf 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -55,9 +55,9 @@ enum spec_op {
 	spec3_unused_op, spec4_unused_op, slt_op, sltu_op,
 	dadd_op, daddu_op, dsub_op, dsubu_op,
 	tge_op, tgeu_op, tlt_op, tltu_op,
-	teq_op, spec5_unused_op, tne_op, spec6_unused_op,
-	dsll_op, spec7_unused_op, dsrl_op, dsra_op,
-	dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op
+	teq_op, seleqz_op, tne_op, selnez_op,
+	dsll_op, spec5_unused_op, dsrl_op, dsra_op,
+	dsll32_op, spec6_unused_op, dsrl32_op, dsra32_op
 };
 
 /*
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index d7de8adcfcc8..5469d43b6966 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -58,15 +58,14 @@ resume_kernel:
 	local_irq_disable
 	lw	t0, TI_PRE_COUNT($28)
 	bnez	t0, restore_all
-need_resched:
 	LONG_L	t0, TI_FLAGS($28)
 	andi	t1, t0, _TIF_NEED_RESCHED
 	beqz	t1, restore_all
 	LONG_L	t0, PT_STATUS(sp)	# Interrupts off?
 	andi	t0, 1
 	beqz	t0, restore_all
-	jal	preempt_schedule_irq
-	b	need_resched
+	PTR_LA	ra, restore_all
+	j	preempt_schedule_irq
 #endif
 
 FEXPORT(ret_from_kernel_thread)
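The deleted need_resched loop is safe to drop because preempt_schedule_irq() itself loops until TIF_NEED_RESCHED is clear before returning. A rough C analogy of the control-flow change (illustrative only; the names stand in for the assembly labels):

	/* Before: the assembly looped around the call itself. */
	while (need_resched())
		preempt_schedule_irq();
	goto restore_all;

	/* After: point ra at restore_all and tail-jump. When
	 * preempt_schedule_irq() returns, no reschedule is pending and
	 * execution lands directly on restore_all. */
	preempt_schedule_irq();
	goto restore_all;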
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index ab943927f97a..662c8db9f45b 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -40,18 +40,38 @@ void arch_jump_label_transform(struct jump_entry *e,
 {
 	union mips_instruction *insn_p;
 	union mips_instruction insn;
+	long offset;
 
 	insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
 
-	/* Jump only works within an aligned region its delay slot is in. */
-	BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK));
-
 	/* Target must have the right alignment and ISA must be preserved. */
 	BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
 
 	if (type == JUMP_LABEL_JMP) {
-		insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
-		insn.j_format.target = e->target >> J_RANGE_SHIFT;
+		if (!IS_ENABLED(CONFIG_CPU_MICROMIPS) && MIPS_ISA_REV >= 6) {
+			offset = e->target - ((unsigned long)insn_p + 4);
+			offset >>= 2;
+
+			/*
+			 * The branch offset must fit in the instruction's 26
+			 * bit field.
+			 */
+			WARN_ON((offset >= BIT(25)) ||
+				(offset < -(long)BIT(25)));
+
+			insn.j_format.opcode = bc6_op;
+			insn.j_format.target = offset;
+		} else {
+			/*
+			 * Jump only works within an aligned region its delay
+			 * slot is in.
+			 */
+			WARN_ON((e->target & ~J_RANGE_MASK) !=
+				((e->code + 4) & ~J_RANGE_MASK));
+
+			insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
+			insn.j_format.target = e->target >> J_RANGE_SHIFT;
+		}
 	} else {
 		insn.word = 0; /* nop */
 	}
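Unlike the pseudo-absolute j instruction, the MIPSr6 bc instruction is PC-relative with a signed 26-bit word offset, which is why the old 256 MiB-segment check becomes a range check. A standalone sketch of that check (the helper name is mine, not from the patch):

	#include <linux/bits.h>

	/* 'bc' encodes (target - end_of_insn) >> 2 in a signed 26-bit field. */
	static bool bc6_in_range(unsigned long insn, unsigned long target)
	{
		long offset = ((long)target - (long)(insn + 4)) >> 2;

		return offset >= -(long)BIT(25) && offset < (long)BIT(25);
	}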
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 93b8e0b4332f..28bf01961bb2 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -41,13 +41,27 @@ char *mips_get_machine_name(void)
 #ifdef CONFIG_USE_OF
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-	return add_memory_region(base, size, BOOT_MEM_RAM);
+	if (base >= PHYS_ADDR_MAX) {
+		pr_warn("Trying to add an invalid memory region, skipped\n");
+		return;
+	}
+
+	/* Truncate the passed memory region instead of type casting */
+	if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) {
+		pr_warn("Truncate memory region %llx @ %llx to size %llx\n",
+			size, base, PHYS_ADDR_MAX - base);
+		size = PHYS_ADDR_MAX - base;
+	}
+
+	add_memory_region(base, size, BOOT_MEM_RAM);
 }
 
 int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
 					    phys_addr_t size, bool nomap)
 {
-	add_memory_region(base, size, BOOT_MEM_RESERVED);
+	add_memory_region(base, size,
+			  nomap ? BOOT_MEM_NOMAP : BOOT_MEM_RESERVED);
+
 	return 0;
 }
 
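The truncation guards against two distinct failure modes: a region starting beyond what phys_addr_t can address, and a region whose end overflows the 64-bit sum. A standalone userspace model of the same arithmetic (the constant stands in for the kernel's PHYS_ADDR_MAX):

	#include <stdint.h>

	#define MODEL_PHYS_MAX UINT32_MAX	/* e.g. a 32-bit phys_addr_t */

	/* Returns 0 and clamps *size, or -1 if the region is unreachable. */
	static int clamp_region(uint64_t base, uint64_t *size)
	{
		if (base >= MODEL_PHYS_MAX)
			return -1;
		if (base + *size - 1 >= MODEL_PHYS_MAX || base + *size < base)
			*size = MODEL_PHYS_MAX - base;	/* truncate, don't wrap */
		return 0;
	}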
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8d1dc6c71173..ab349d2381c3 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -27,6 +27,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/decompress/generic.h>
 #include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
 
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
@@ -178,6 +179,7 @@ static bool __init __maybe_unused memory_region_available(phys_addr_t start,
 		in_ram = true;
 		break;
 	case BOOT_MEM_RESERVED:
+	case BOOT_MEM_NOMAP:
 		if ((start >= start_ && start < end_) ||
 		    (start < start_ && start + size >= start_))
 			free = false;
@@ -213,6 +215,9 @@ static void __init print_memory_map(void)
 		case BOOT_MEM_RESERVED:
 			printk(KERN_CONT "(reserved)\n");
 			break;
+		case BOOT_MEM_NOMAP:
+			printk(KERN_CONT "(nomap)\n");
+			break;
 		default:
 			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
 			break;
@@ -371,7 +376,6 @@ static void __init bootmem_init(void)
 
 static void __init bootmem_init(void)
 {
-	unsigned long reserved_end;
 	phys_addr_t ramstart = PHYS_ADDR_MAX;
 	int i;
 
@@ -382,10 +386,10 @@ static void __init bootmem_init(void)
 	 * will reserve the area used for the initrd.
 	 */
 	init_initrd();
-	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
 
-	memblock_reserve(PHYS_OFFSET,
-			 (reserved_end << PAGE_SHIFT) - PHYS_OFFSET);
+	/* Reserve memory occupied by kernel. */
+	memblock_reserve(__pa_symbol(&_text),
+			 __pa_symbol(&_end) - __pa_symbol(&_text));
 
 	/*
 	 * max_low_pfn is not a number of pages. The number of pages
@@ -394,10 +398,7 @@ static void __init bootmem_init(void)
 	min_low_pfn = ~0UL;
 	max_low_pfn = 0;
 
-	/*
-	 * Find the highest page frame number we have available
-	 * and the lowest used RAM address
-	 */
+	/* Find the highest and lowest page frame numbers we have available. */
 	for (i = 0; i < boot_mem_map.nr_map; i++) {
 		unsigned long start, end;
 
@@ -427,13 +428,6 @@ static void __init bootmem_init(void)
 			max_low_pfn = end;
 		if (start < min_low_pfn)
 			min_low_pfn = start;
-		if (end <= reserved_end)
-			continue;
-#ifdef CONFIG_BLK_DEV_INITRD
-		/* Skip zones before initrd and initrd itself */
-		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
-			continue;
-#endif
 	}
 
 	if (min_low_pfn >= max_low_pfn)
@@ -474,6 +468,7 @@ static void __init bootmem_init(void)
 		max_low_pfn = PFN_DOWN(HIGHMEM_START);
 	}
 
+	/* Install all valid RAM ranges to the memblock memory region */
 	for (i = 0; i < boot_mem_map.nr_map; i++) {
 		unsigned long start, end;
 
@@ -481,98 +476,38 @@ static void __init bootmem_init(void)
 		end = PFN_DOWN(boot_mem_map.map[i].addr
 			       + boot_mem_map.map[i].size);
 
-		if (start <= min_low_pfn)
+		if (start < min_low_pfn)
 			start = min_low_pfn;
-		if (start >= end)
-			continue;
-
 #ifndef CONFIG_HIGHMEM
+		/* Ignore highmem regions if highmem is unsupported */
 		if (end > max_low_pfn)
 			end = max_low_pfn;
-
-		/*
-		 * ... finally, is the area going away?
-		 */
+#endif
 		if (end <= start)
 			continue;
-#endif
 
 		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
-	}
-
-	/*
-	 * Register fully available low RAM pages with the bootmem allocator.
-	 */
-	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		unsigned long start, end, size;
 
-		start = PFN_UP(boot_mem_map.map[i].addr);
-		end = PFN_DOWN(boot_mem_map.map[i].addr
-			       + boot_mem_map.map[i].size);
-
-		/*
-		 * Reserve usable memory.
-		 */
+		/* Reserve any memory except the ordinary RAM ranges. */
 		switch (boot_mem_map.map[i].type) {
 		case BOOT_MEM_RAM:
 			break;
-		case BOOT_MEM_INIT_RAM:
-			memory_present(0, start, end);
-			continue;
-		default:
-			/* Not usable memory */
-			if (start > min_low_pfn && end < max_low_pfn)
-				memblock_reserve(boot_mem_map.map[i].addr,
-						 boot_mem_map.map[i].size);
-
+		case BOOT_MEM_NOMAP: /* Discard the range from the system. */
+			memblock_remove(PFN_PHYS(start), PFN_PHYS(end - start));
 			continue;
+		default: /* Reserve the rest of the memory types at boot time */
+			memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start));
+			break;
 		}
 
 		/*
-		 * We are rounding up the start address of usable memory
-		 * and at the end of the usable range downwards.
+		 * In any case the added to the memblock memory regions
+		 * (highmem/lowmem, available/reserved, etc) are considered
+		 * as present, so inform sparsemem about them.
 		 */
-		if (start >= max_low_pfn)
-			continue;
-		if (start < reserved_end)
-			start = reserved_end;
-		if (end > max_low_pfn)
-			end = max_low_pfn;
-
-		/*
-		 * ... finally, is the area going away?
-		 */
-		if (end <= start)
-			continue;
-		size = end - start;
-
-		/* Register lowmem ranges */
 		memory_present(0, start, end);
 	}
 
-#ifdef CONFIG_RELOCATABLE
-	/*
-	 * The kernel reserves all memory below its _end symbol as bootmem,
-	 * but the kernel may now be at a much higher address. The memory
-	 * between the original and new locations may be returned to the system.
-	 */
-	if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
-		unsigned long offset;
-		extern void show_kernel_relocation(const char *level);
-
-		offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
-		memblock_free(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
-
-#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
-		/*
-		 * This information is necessary when debugging the kernel
-		 * But is a security vulnerability otherwise!
-		 */
-		show_kernel_relocation(KERN_INFO);
-#endif
-	}
-#endif
-
 	/*
 	 * Reserve initrd memory if needed.
 	 */
@@ -781,7 +716,6 @@ static void __init request_crashkernel(struct resource *res)
  */
 static void __init arch_mem_init(char **cmdline_p)
 {
-	struct memblock_region *reg;
 	extern void plat_mem_setup(void);
 
 	/*
@@ -809,6 +743,9 @@ static void __init arch_mem_init(char **cmdline_p)
 	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
 			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
 			 BOOT_MEM_INIT_RAM);
+	arch_mem_addpart(PFN_DOWN(__pa_symbol(&__bss_start)) << PAGE_SHIFT,
+			 PFN_UP(__pa_symbol(&__bss_stop)) << PAGE_SHIFT,
+			 BOOT_MEM_RAM);
 
 	pr_info("Determined physical RAM map:\n");
 	print_memory_map();
@@ -884,13 +821,16 @@ static void __init arch_mem_init(char **cmdline_p)
 	plat_swiotlb_setup();
 
 	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
-	/* Tell bootmem about cma reserved memblock section */
-	for_each_memblock(reserved, reg)
-		if (reg->size != 0)
-			memblock_reserve(reg->base, reg->size);
 
-	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
-			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
+	/* Reserve for hibernation. */
+	memblock_reserve(__pa_symbol(&__nosave_begin),
+			 __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
+
+	fdt_init_reserved_mem();
+
+	memblock_dump_all();
+
+	early_memtest(PFN_PHYS(min_low_pfn), PFN_PHYS(max_low_pfn));
 }
 
 static void __init resource_init(void)
@@ -935,6 +875,7 @@ static void __init resource_init(void)
 			res->flags |= IORESOURCE_SYSRAM;
 			break;
 		case BOOT_MEM_RESERVED:
+		case BOOT_MEM_NOMAP:
 		default:
 			res->name = "reserved";
 		}
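Taken together, the setup.c changes reduce bootmem_init() to a single pass with a per-type memblock policy. A condensed sketch of the resulting flow (the loop header is pseudo-code; the real code iterates boot_mem_map.map[i] as above):

	/* Kernel image is reserved by symbol range, not by PFN rounding. */
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&_end) - __pa_symbol(&_text));

	for_each_boot_mem_region(start, end, type) {	/* pseudo-iterator */
		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
		if (type == BOOT_MEM_NOMAP) {		/* pull it back out */
			memblock_remove(PFN_PHYS(start), PFN_PHYS(end - start));
			continue;
		}
		if (type != BOOT_MEM_RAM)		/* keep, but never allocate */
			memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start));
		memory_present(0, start, end);		/* inform sparsemem */
	}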
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 98ca55d62201..c52766a5b85f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2151,7 +2151,7 @@ static void configure_hwrena(void)
 
 static void configure_exception_vector(void)
 {
-	if (cpu_has_veic || cpu_has_vint) {
+	if (cpu_has_mips_r2_r6) {
 		unsigned long sr = set_c0_status(ST0_BEV);
 		/* If available, use WG to set top bits of EBASE */
 		if (cpu_has_ebase_wg) {
@@ -2163,6 +2163,8 @@ static void configure_exception_vector(void)
 		}
 		write_c0_ebase(ebase);
 		write_c0_status(sr);
+	}
+	if (cpu_has_veic || cpu_has_vint) {
 		/* Setting vector spacing enables EI/VI mode */
 		change_c0_intctl(0x3e0, VECTORSPACING);
 	}
@@ -2193,22 +2195,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
 	 * o read IntCtl.IPFDC to determine the fast debug channel interrupt
 	 */
 	if (cpu_has_mips_r2_r6) {
-		/*
-		 * We shouldn't trust a secondary core has a sane EBASE register
-		 * so use the one calculated by the boot CPU.
-		 */
-		if (!is_boot_cpu) {
-			/* If available, use WG to set top bits of EBASE */
-			if (cpu_has_ebase_wg) {
-#ifdef CONFIG_64BIT
-				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
-#else
-				write_c0_ebase(ebase | MIPS_EBASE_WG);
-#endif
-			}
-			write_c0_ebase(ebase);
-		}
-
 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2284,19 +2270,27 @@ void __init trap_init(void)
 	extern char except_vec3_generic;
 	extern char except_vec4;
 	extern char except_vec3_r4000;
-	unsigned long i;
+	unsigned long i, vec_size;
+	phys_addr_t ebase_pa;
 
 	check_wait();
 
-	if (cpu_has_veic || cpu_has_vint) {
-		unsigned long size = 0x200 + VECTORSPACING*64;
-		phys_addr_t ebase_pa;
+	if (!cpu_has_mips_r2_r6) {
+		ebase = CAC_BASE;
+		ebase_pa = virt_to_phys((void *)ebase);
+		vec_size = 0x400;
 
-		ebase = (unsigned long)
-			memblock_alloc(size, 1 << fls(size));
-		if (!ebase)
+		memblock_reserve(ebase_pa, vec_size);
+	} else {
+		if (cpu_has_veic || cpu_has_vint)
+			vec_size = 0x200 + VECTORSPACING*64;
+		else
+			vec_size = PAGE_SIZE;
+
+		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
+		if (!ebase_pa)
 			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
-			      __func__, size, 1 << fls(size));
+			      __func__, vec_size, 1 << fls(vec_size));
 
 		/*
 		 * Try to ensure ebase resides in KSeg0 if possible.
@@ -2309,23 +2303,10 @@ void __init trap_init(void)
 		 * EVA is special though as it allows segments to be rearranged
 		 * and to become uncached during cache error handling.
 		 */
-		ebase_pa = __pa(ebase);
 		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
 			ebase = CKSEG0ADDR(ebase_pa);
-	} else {
-		ebase = CAC_BASE;
-
-		if (cpu_has_mips_r2_r6) {
-			if (cpu_has_ebase_wg) {
-#ifdef CONFIG_64BIT
-				ebase = (read_c0_ebase_64() & ~0xfff);
-#else
-				ebase = (read_c0_ebase() & ~0xfff);
-#endif
-			} else {
-				ebase += (read_c0_ebase() & 0x3ffff000);
-			}
-		}
+		else
+			ebase = (unsigned long)phys_to_virt(ebase_pa);
 	}
 
 	if (cpu_has_mmips) {
@@ -2459,7 +2440,7 @@ void __init trap_init(void)
 	else
 		set_handler(0x080, &except_vec3_generic, 0x80);
 
-	local_flush_icache_range(ebase, ebase + 0x400);
+	local_flush_icache_range(ebase, ebase + vec_size);
 
 	sort_extable(__start___dbe_table, __stop___dbe_table);
 
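Two details of the new allocation are easy to miss: the vector now gets a dedicated allocation on every MIPSr2+ CPU rather than only on VEIC/VInt configurations, and the icache flush covers the real vector size instead of a fixed 0x400 bytes. The alignment argument is 1 << fls(vec_size), a power of two at least as large as the size, which keeps the base legal for EBASE. A sketch of the sizing policy, assuming the same cpu_has_* macros as the surrounding code:

	/* Sketch of the sizing policy (mirrors the hunk above). */
	unsigned long vec_size = (cpu_has_veic || cpu_has_vint)
				 ? 0x200 + VECTORSPACING * 64	/* EI/VI tables */
				 : PAGE_SIZE;			/* plain r2+ vector */

	/* fls(0x200) == 10, so the alignment here would be 0x400. */
	phys_addr_t pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));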
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 0074427b04fb..e5de6bac8197 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1141,9 +1141,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 	unsigned long pc = vcpu->arch.pc;
 	int index;
 
-	get_random_bytes(&index, sizeof(index));
-	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
-
+	index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE);
 	tlb = &vcpu->arch.guest_tlb[index];
 
 	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
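prandom_u32_max() replaces the open-coded mask; for the power-of-two KVM_MIPS_GUEST_TLB_SIZE the result is exactly equivalent. The underlying idea, as a sketch of how the helper bounds a value without a modulo:

	/* Map a uniform 32-bit value into [0, n) by multiply-and-shift. */
	static inline u32 bounded_rand(u32 n)
	{
		return (u32)(((u64)prandom_u32() * n) >> 32);
	}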
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 6abe40fc413d..7154a1d99aad 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -76,14 +76,22 @@ static const struct insn insn_table[insn_invalid] = {
 	[insn_daddiu]	= {M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
 	[insn_daddu]	= {M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD},
 	[insn_ddivu]	= {M(spec_op, 0, 0, 0, 0, ddivu_op), RS | RT},
+	[insn_ddivu_r6]	= {M(spec_op, 0, 0, 0, ddivu_ddivu6_op, ddivu_op),
+			   RS | RT | RD},
 	[insn_di]	= {M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT},
 	[insn_dins]	= {M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE},
 	[insn_dinsm]	= {M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE},
 	[insn_dinsu]	= {M(spec3_op, 0, 0, 0, 0, dinsu_op), RS | RT | RD | RE},
 	[insn_divu]	= {M(spec_op, 0, 0, 0, 0, divu_op), RS | RT},
+	[insn_divu_r6]	= {M(spec_op, 0, 0, 0, divu_divu6_op, divu_op),
+			   RS | RT | RD},
 	[insn_dmfc0]	= {M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+	[insn_dmodu]	= {M(spec_op, 0, 0, 0, ddivu_dmodu_op, ddivu_op),
+			   RS | RT | RD},
 	[insn_dmtc0]	= {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
 	[insn_dmultu]	= {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT},
+	[insn_dmulu]	= {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op),
+			   RS | RT | RD},
 	[insn_drotr]	= {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE},
 	[insn_drotr32]	= {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE},
 	[insn_dsbh]	= {M(spec3_op, 0, 0, 0, dsbh_op, dbshfl_op), RT | RD},
@@ -132,12 +140,16 @@ static const struct insn insn_table[insn_invalid] = {
 	[insn_mfhc0]	= {M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET},
 	[insn_mfhi]	= {M(spec_op, 0, 0, 0, 0, mfhi_op), RD},
 	[insn_mflo]	= {M(spec_op, 0, 0, 0, 0, mflo_op), RD},
+	[insn_modu]	= {M(spec_op, 0, 0, 0, divu_modu_op, divu_op),
+			   RS | RT | RD},
 	[insn_movn]	= {M(spec_op, 0, 0, 0, 0, movn_op), RS | RT | RD},
 	[insn_movz]	= {M(spec_op, 0, 0, 0, 0, movz_op), RS | RT | RD},
 	[insn_mtc0]	= {M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
 	[insn_mthc0]	= {M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
 	[insn_mthi]	= {M(spec_op, 0, 0, 0, 0, mthi_op), RS},
 	[insn_mtlo]	= {M(spec_op, 0, 0, 0, 0, mtlo_op), RS},
+	[insn_mulu]	= {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op),
+			   RS | RT | RD},
 #ifndef CONFIG_CPU_MIPSR6
 	[insn_mul]	= {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
 #else
@@ -163,6 +175,8 @@ static const struct insn insn_table[insn_invalid] = {
 	[insn_scd]	= {M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9},
 #endif
 	[insn_sd]	= {M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+	[insn_seleqz]	= {M(spec_op, 0, 0, 0, 0, seleqz_op), RS | RT | RD},
+	[insn_selnez]	= {M(spec_op, 0, 0, 0, 0, selnez_op), RS | RT | RD},
 	[insn_sh]	= {M(sh_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
 	[insn_sll]	= {M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE},
 	[insn_sllv]	= {M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD},
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 45b6264ff308..c56f129c9a4b 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -50,21 +50,22 @@ enum opcode {
 	insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bgtz, insn_blez,
 	insn_bltz, insn_bltzl, insn_bne, insn_break, insn_cache, insn_cfc1,
 	insn_cfcmsa, insn_ctc1, insn_ctcmsa, insn_daddiu, insn_daddu, insn_ddivu,
-	insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu, insn_dmfc0,
-	insn_dmtc0, insn_dmultu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd,
-	insn_dsll, insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav,
-	insn_dsrl, insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext,
-	insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu,
-	insn_ld, insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu,
-	insn_ll, insn_lld, insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0,
-	insn_mfhc0, insn_mfhi, insn_mflo, insn_movn, insn_movz, insn_mtc0,
-	insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_nor,
-	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb,
-	insn_sc, insn_scd, insn_sd, insn_sh, insn_sll, insn_sllv,
-	insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srav,
-	insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
-	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
-	insn_xor, insn_xori, insn_yield,
+	insn_ddivu_r6, insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu,
+	insn_divu_r6, insn_dmfc0, insn_dmodu, insn_dmtc0, insn_dmultu,
+	insn_dmulu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd, insn_dsll,
+	insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav, insn_dsrl,
+	insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext, insn_ins,
+	insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu, insn_ld,
+	insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld,
+	insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi,
+	insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0,
+	insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor,
+	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc,
+	insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll,
+	insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra,
+	insn_srav, insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync,
+	insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait,
+	insn_wsbh, insn_xor, insn_xori, insn_yield,
 	insn_invalid /* insn_invalid must be last */
 };
 
@@ -287,13 +288,17 @@ I_u2u1(_cfcmsa)
 I_u1u2(_ctc1)
 I_u2u1(_ctcmsa)
 I_u1u2(_ddivu)
+I_u3u1u2(_ddivu_r6)
 I_u1u2u3(_dmfc0)
+I_u3u1u2(_dmodu)
 I_u1u2u3(_dmtc0)
 I_u1u2(_dmultu)
+I_u3u1u2(_dmulu)
 I_u2u1s3(_daddiu)
 I_u3u1u2(_daddu)
 I_u1(_di);
 I_u1u2(_divu)
+I_u3u1u2(_divu_r6)
 I_u2u1(_dsbh);
 I_u2u1(_dshd);
 I_u2u1u3(_dsll)
@@ -327,6 +332,7 @@ I_u2s3u1(_lw)
 I_u2s3u1(_lwu)
 I_u1u2u3(_mfc0)
 I_u1u2u3(_mfhc0)
+I_u3u1u2(_modu)
 I_u3u1u2(_movn)
 I_u3u1u2(_movz)
 I_u1(_mfhi)
@@ -337,6 +343,7 @@ I_u1(_mthi)
 I_u1(_mtlo)
 I_u3u1u2(_mul)
 I_u1u2(_multu)
+I_u3u1u2(_mulu)
 I_u3u1u2(_nor)
 I_u3u1u2(_or)
 I_u2u1u3(_ori)
@@ -345,6 +352,8 @@ I_u2s3u1(_sb)
 I_u2s3u1(_sc)
 I_u2s3u1(_scd)
 I_u2s3u1(_sd)
+I_u3u1u2(_seleqz)
+I_u3u1u2(_selnez)
 I_u2s3u1(_sh)
 I_u2u1u3(_sll)
 I_u3u2u1(_sllv)
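Each Ip_*/I_* pair above expands into an emitter with the uasm_i_ prefix, so JIT code can generate the new R6 instructions like any other uasm opcode. A sketch (register numbers are raw MIPS register indices, and the rd, rs, rt argument order is assumed from the existing three-operand helpers such as uasm_i_addu):

	#include <asm/uasm.h>

	static void emit_modu_example(u32 **p)
	{
		/* modu $v0, $a0, $a1 -> $2 = $4 mod $5 (unsigned) */
		uasm_i_modu(p, 2, 4, 5);
	}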
diff --git a/arch/mips/net/Makefile b/arch/mips/net/Makefile
index 47d678416715..72a78462f872 100644
--- a/arch/mips/net/Makefile
+++ b/arch/mips/net/Makefile
@@ -1,4 +1,3 @@
 # MIPS networking code
 
-obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
 obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
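With the classic JIT gone, cBPF filters reach this JIT through the kernel's internal cBPF-to-eBPF translation, and the usual sysctl still decides whether programs are JITed at all. A small userspace sketch for toggling it (the proc path is the standard knob; error handling trimmed to the essentials):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/core/bpf_jit_enable", "w");

		if (!f)
			return 1;
		fputs("1\n", f);	/* 0 = interpreter, 1 = JIT on */
		return fclose(f) != 0;
	}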
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
deleted file mode 100644
index 3a0e34f4e615..000000000000
--- a/arch/mips/net/bpf_jit.c
+++ /dev/null
@@ -1,1270 +0,0 @@
1/*
2 * Just-In-Time compiler for BPF filters on MIPS
3 *
4 * Copyright (c) 2014 Imagination Technologies Ltd.
5 * Author: Markos Chandras <markos.chandras@imgtec.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; version 2 of the License.
10 */
11
12#include <linux/bitops.h>
13#include <linux/compiler.h>
14#include <linux/errno.h>
15#include <linux/filter.h>
16#include <linux/if_vlan.h>
17#include <linux/moduleloader.h>
18#include <linux/netdevice.h>
19#include <linux/string.h>
20#include <linux/slab.h>
21#include <linux/types.h>
22#include <asm/asm.h>
23#include <asm/bitops.h>
24#include <asm/cacheflush.h>
25#include <asm/cpu-features.h>
26#include <asm/uasm.h>
27
28#include "bpf_jit.h"
29
30/* ABI
31 * r_skb_hl SKB header length
32 * r_data SKB data pointer
33 * r_off Offset
34 * r_A BPF register A
35 * r_X BPF register X
36 * r_skb *skb
37 * r_M *scratch memory
38 * r_skb_len SKB length
39 *
40 * On entry (*bpf_func)(*skb, *filter)
41 * a0 = MIPS_R_A0 = skb;
42 * a1 = MIPS_R_A1 = filter;
43 *
44 * Stack
45 * ...
46 * M[15]
47 * M[14]
48 * M[13]
49 * ...
50 * M[0] <-- r_M
51 * saved reg k-1
52 * saved reg k-2
53 * ...
54 * saved reg 0 <-- r_sp
55 * <no argument area>
56 *
57 * Packet layout
58 *
59 * <--------------------- len ------------------------>
60 * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------>
61 * ----------------------------------------------------
62 * | skb->data |
63 * ----------------------------------------------------
64 */
65
66#define ptr typeof(unsigned long)
67
68#define SCRATCH_OFF(k) (4 * (k))
69
70/* JIT flags */
71#define SEEN_CALL (1 << BPF_MEMWORDS)
72#define SEEN_SREG_SFT (BPF_MEMWORDS + 1)
73#define SEEN_SREG_BASE (1 << SEEN_SREG_SFT)
74#define SEEN_SREG(x) (SEEN_SREG_BASE << (x))
75#define SEEN_OFF SEEN_SREG(2)
76#define SEEN_A SEEN_SREG(3)
77#define SEEN_X SEEN_SREG(4)
78#define SEEN_SKB SEEN_SREG(5)
79#define SEEN_MEM SEEN_SREG(6)
80/* SEEN_SK_DATA also implies skb_hl an skb_len */
81#define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))
82
83/* Arguments used by JIT */
84#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */
85
86#define SBIT(x) (1 << (x)) /* Signed version of BIT() */
87
88/**
89 * struct jit_ctx - JIT context
90 * @skf: The sk_filter
91 * @prologue_bytes: Number of bytes for prologue
92 * @idx: Instruction index
93 * @flags: JIT flags
94 * @offsets: Instruction offsets
95 * @target: Memory location for the compiled filter
96 */
97struct jit_ctx {
98 const struct bpf_prog *skf;
99 unsigned int prologue_bytes;
100 u32 idx;
101 u32 flags;
102 u32 *offsets;
103 u32 *target;
104};
105
106
107static inline int optimize_div(u32 *k)
108{
109 /* power of 2 divides can be implemented with right shift */
110 if (!(*k & (*k-1))) {
111 *k = ilog2(*k);
112 return 1;
113 }
114
115 return 0;
116}
117
118static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
119
120/* Simply emit the instruction if the JIT memory space has been allocated */
121#define emit_instr(ctx, func, ...) \
122do { \
123 if ((ctx)->target != NULL) { \
124 u32 *p = &(ctx)->target[ctx->idx]; \
125 uasm_i_##func(&p, ##__VA_ARGS__); \
126 } \
127 (ctx)->idx++; \
128} while (0)
129
130/*
131 * Similar to emit_instr but it must be used when we need to emit
132 * 32-bit or 64-bit instructions
133 */
134#define emit_long_instr(ctx, func, ...) \
135do { \
136 if ((ctx)->target != NULL) { \
137 u32 *p = &(ctx)->target[ctx->idx]; \
138 UASM_i_##func(&p, ##__VA_ARGS__); \
139 } \
140 (ctx)->idx++; \
141} while (0)
142
143/* Determine if immediate is within the 16-bit signed range */
144static inline bool is_range16(s32 imm)
145{
146 return !(imm >= SBIT(15) || imm < -SBIT(15));
147}
148
149static inline void emit_addu(unsigned int dst, unsigned int src1,
150 unsigned int src2, struct jit_ctx *ctx)
151{
152 emit_instr(ctx, addu, dst, src1, src2);
153}
154
155static inline void emit_nop(struct jit_ctx *ctx)
156{
157 emit_instr(ctx, nop);
158}
159
160/* Load a u32 immediate to a register */
161static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
162{
163 if (ctx->target != NULL) {
164 /* addiu can only handle s16 */
165 if (!is_range16(imm)) {
166 u32 *p = &ctx->target[ctx->idx];
167 uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
168 p = &ctx->target[ctx->idx + 1];
169 uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
170 } else {
171 u32 *p = &ctx->target[ctx->idx];
172 uasm_i_addiu(&p, dst, r_zero, imm);
173 }
174 }
175 ctx->idx++;
176
177 if (!is_range16(imm))
178 ctx->idx++;
179}
180
181static inline void emit_or(unsigned int dst, unsigned int src1,
182 unsigned int src2, struct jit_ctx *ctx)
183{
184 emit_instr(ctx, or, dst, src1, src2);
185}
186
187static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
188 struct jit_ctx *ctx)
189{
190 if (imm >= BIT(16)) {
191 emit_load_imm(r_tmp, imm, ctx);
192 emit_or(dst, src, r_tmp, ctx);
193 } else {
194 emit_instr(ctx, ori, dst, src, imm);
195 }
196}
197
198static inline void emit_daddiu(unsigned int dst, unsigned int src,
199 int imm, struct jit_ctx *ctx)
200{
201 /*
202 * Only used for stack, so the imm is relatively small
203 * and it fits in 15-bits
204 */
205 emit_instr(ctx, daddiu, dst, src, imm);
206}
207
208static inline void emit_addiu(unsigned int dst, unsigned int src,
209 u32 imm, struct jit_ctx *ctx)
210{
211 if (!is_range16(imm)) {
212 emit_load_imm(r_tmp, imm, ctx);
213 emit_addu(dst, r_tmp, src, ctx);
214 } else {
215 emit_instr(ctx, addiu, dst, src, imm);
216 }
217}
218
219static inline void emit_and(unsigned int dst, unsigned int src1,
220 unsigned int src2, struct jit_ctx *ctx)
221{
222 emit_instr(ctx, and, dst, src1, src2);
223}
224
225static inline void emit_andi(unsigned int dst, unsigned int src,
226 u32 imm, struct jit_ctx *ctx)
227{
228 /* If imm does not fit in u16 then load it to register */
229 if (imm >= BIT(16)) {
230 emit_load_imm(r_tmp, imm, ctx);
231 emit_and(dst, src, r_tmp, ctx);
232 } else {
233 emit_instr(ctx, andi, dst, src, imm);
234 }
235}
236
237static inline void emit_xor(unsigned int dst, unsigned int src1,
238 unsigned int src2, struct jit_ctx *ctx)
239{
240 emit_instr(ctx, xor, dst, src1, src2);
241}
242
243static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
244{
245 /* If imm does not fit in u16 then load it to register */
246 if (imm >= BIT(16)) {
247 emit_load_imm(r_tmp, imm, ctx);
248 emit_xor(dst, src, r_tmp, ctx);
249 } else {
250 emit_instr(ctx, xori, dst, src, imm);
251 }
252}
253
254static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
255{
256 emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
257}
258
259static inline void emit_subu(unsigned int dst, unsigned int src1,
260 unsigned int src2, struct jit_ctx *ctx)
261{
262 emit_instr(ctx, subu, dst, src1, src2);
263}
264
265static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
266{
267 emit_subu(reg, r_zero, reg, ctx);
268}
269
270static inline void emit_sllv(unsigned int dst, unsigned int src,
271 unsigned int sa, struct jit_ctx *ctx)
272{
273 emit_instr(ctx, sllv, dst, src, sa);
274}
275
276static inline void emit_sll(unsigned int dst, unsigned int src,
277 unsigned int sa, struct jit_ctx *ctx)
278{
279 /* sa is 5-bits long */
280 if (sa >= BIT(5))
281 /* Shifting >= 32 results in zero */
282 emit_jit_reg_move(dst, r_zero, ctx);
283 else
284 emit_instr(ctx, sll, dst, src, sa);
285}
286
287static inline void emit_srlv(unsigned int dst, unsigned int src,
288 unsigned int sa, struct jit_ctx *ctx)
289{
290 emit_instr(ctx, srlv, dst, src, sa);
291}
292
293static inline void emit_srl(unsigned int dst, unsigned int src,
294 unsigned int sa, struct jit_ctx *ctx)
295{
296 /* sa is 5-bits long */
297 if (sa >= BIT(5))
298 /* Shifting >= 32 results in zero */
299 emit_jit_reg_move(dst, r_zero, ctx);
300 else
301 emit_instr(ctx, srl, dst, src, sa);
302}
303
304static inline void emit_slt(unsigned int dst, unsigned int src1,
305 unsigned int src2, struct jit_ctx *ctx)
306{
307 emit_instr(ctx, slt, dst, src1, src2);
308}
309
310static inline void emit_sltu(unsigned int dst, unsigned int src1,
311 unsigned int src2, struct jit_ctx *ctx)
312{
313 emit_instr(ctx, sltu, dst, src1, src2);
314}
315
316static inline void emit_sltiu(unsigned dst, unsigned int src,
317 unsigned int imm, struct jit_ctx *ctx)
318{
319 /* 16 bit immediate */
320 if (!is_range16((s32)imm)) {
321 emit_load_imm(r_tmp, imm, ctx);
322 emit_sltu(dst, src, r_tmp, ctx);
323 } else {
324 emit_instr(ctx, sltiu, dst, src, imm);
325 }
326
327}
328
329/* Store register on the stack */
330static inline void emit_store_stack_reg(ptr reg, ptr base,
331 unsigned int offset,
332 struct jit_ctx *ctx)
333{
334 emit_long_instr(ctx, SW, reg, offset, base);
335}
336
337static inline void emit_store(ptr reg, ptr base, unsigned int offset,
338 struct jit_ctx *ctx)
339{
340 emit_instr(ctx, sw, reg, offset, base);
341}
342
343static inline void emit_load_stack_reg(ptr reg, ptr base,
344 unsigned int offset,
345 struct jit_ctx *ctx)
346{
347 emit_long_instr(ctx, LW, reg, offset, base);
348}
349
350static inline void emit_load(unsigned int reg, unsigned int base,
351 unsigned int offset, struct jit_ctx *ctx)
352{
353 emit_instr(ctx, lw, reg, offset, base);
354}
355
356static inline void emit_load_byte(unsigned int reg, unsigned int base,
357 unsigned int offset, struct jit_ctx *ctx)
358{
359 emit_instr(ctx, lb, reg, offset, base);
360}
361
362static inline void emit_half_load(unsigned int reg, unsigned int base,
363 unsigned int offset, struct jit_ctx *ctx)
364{
365 emit_instr(ctx, lh, reg, offset, base);
366}
367
368static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
369 unsigned int offset, struct jit_ctx *ctx)
370{
371 emit_instr(ctx, lhu, reg, offset, base);
372}
373
374static inline void emit_mul(unsigned int dst, unsigned int src1,
375 unsigned int src2, struct jit_ctx *ctx)
376{
377 emit_instr(ctx, mul, dst, src1, src2);
378}
379
380static inline void emit_div(unsigned int dst, unsigned int src,
381 struct jit_ctx *ctx)
382{
383 if (ctx->target != NULL) {
384 u32 *p = &ctx->target[ctx->idx];
385 uasm_i_divu(&p, dst, src);
386 p = &ctx->target[ctx->idx + 1];
387 uasm_i_mflo(&p, dst);
388 }
389 ctx->idx += 2; /* 2 insts */
390}
391
392static inline void emit_mod(unsigned int dst, unsigned int src,
393 struct jit_ctx *ctx)
394{
395 if (ctx->target != NULL) {
396 u32 *p = &ctx->target[ctx->idx];
397 uasm_i_divu(&p, dst, src);
398 p = &ctx->target[ctx->idx + 1];
399 uasm_i_mfhi(&p, dst);
400 }
401 ctx->idx += 2; /* 2 insts */
402}
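Pre-R6 divides target the HI/LO accumulator pair: divu leaves the quotient in LO and the remainder in HI, so emit_div and emit_mod emit the same divu and differ only in whether mflo or mfhi copies the result back. For an A %= 7 (with 7 already loaded into r_s0) the generated pair is, in effect:

    /* divu  $r_A, $r_s0     LO = A / 7, HI = A % 7 */
    /* mfhi  $r_A            A = remainder          */

Note both helpers bump ctx->idx by two even on the sizing pass (ctx->target == NULL), keeping the offset bookkeeping consistent across passes.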
403
404static inline void emit_dsll(unsigned int dst, unsigned int src,
405 unsigned int sa, struct jit_ctx *ctx)
406{
407 emit_instr(ctx, dsll, dst, src, sa);
408}
409
410static inline void emit_dsrl32(unsigned int dst, unsigned int src,
411 unsigned int sa, struct jit_ctx *ctx)
412{
413 emit_instr(ctx, dsrl32, dst, src, sa);
414}
415
416static inline void emit_wsbh(unsigned int dst, unsigned int src,
417 struct jit_ctx *ctx)
418{
419 emit_instr(ctx, wsbh, dst, src);
420}
421
422/* load pointer to register */
423static inline void emit_load_ptr(unsigned int dst, unsigned int src,
424 int imm, struct jit_ctx *ctx)
425{
 426 /* src holds the base address of the 32/64-bit pointer */
427 emit_long_instr(ctx, LW, dst, imm, src);
428}
429
430/* load a function pointer to register */
431static inline void emit_load_func(unsigned int reg, ptr imm,
432 struct jit_ctx *ctx)
433{
434 if (IS_ENABLED(CONFIG_64BIT)) {
435 /* At this point imm is always 64-bit */
436 emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
437 emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
438 emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
439 emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
440 emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
441 } else {
442 emit_load_imm(reg, imm, ctx);
443 }
444}
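On 64-bit kernels the helper builds the full address in 16-bit chunks on top of the upper word: emit_load_imm materializes bits 63..32, then each dsll-by-16/ori pair appends the next 16 bits. A worked expansion for an assumed example address 0x0000005512345678:

    /* li   r_tmp, 0x55                 bits 63..32 via emit_load_imm */
    /* dsll r_tmp_imm, r_tmp, 16                                      */
    /* ori  r_tmp, r_tmp_imm, 0x1234    bits 31..16                   */
    /* dsll r_tmp_imm, r_tmp, 16                                      */
    /* ori  reg, r_tmp_imm, 0x5678      bits 15..0                    */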
445
446/* Move to real MIPS register */
447static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
448{
449 emit_long_instr(ctx, ADDU, dst, src, r_zero);
450}
451
452/* Move to JIT (32-bit) register */
453static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
454{
455 emit_addu(dst, src, r_zero, ctx);
456}
457
458/* Compute the immediate value for PC-relative branches. */
459static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
460{
461 if (ctx->target == NULL)
462 return 0;
463
464 /*
465 * We want a pc-relative branch. We only do forward branches
466 * so tgt is always after pc. tgt is the instruction offset
467 * we want to jump to.
 468 *
469 * Branch on MIPS:
470 * I: target_offset <- sign_extend(offset)
471 * I+1: PC += target_offset (delay slot)
472 *
473 * ctx->idx currently points to the branch instruction
474 * but the offset is added to the delay slot so we need
475 * to subtract 4.
476 */
477 return ctx->offsets[tgt] -
478 (ctx->idx * 4 - ctx->prologue_bytes) - 4;
479}
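The ctx->offsets[] values are recorded body-relative on the first pass (before the prologue size is known), so subtracting ctx->prologue_bytes puts ctx->idx into the same coordinate system. A worked example with assumed values, a branch sitting at body byte 40 whose target was recorded at byte 56:

    offset = ctx->offsets[tgt] - (ctx->idx * 4 - ctx->prologue_bytes) - 4
           = 56 - 40 - 4
           = 12    /* three instructions beyond the delay slot */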
480
481static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
482 unsigned int imm, struct jit_ctx *ctx)
483{
484 if (ctx->target != NULL) {
485 u32 *p = &ctx->target[ctx->idx];
486
487 switch (cond) {
488 case MIPS_COND_EQ:
489 uasm_i_beq(&p, reg1, reg2, imm);
490 break;
491 case MIPS_COND_NE:
492 uasm_i_bne(&p, reg1, reg2, imm);
493 break;
494 case MIPS_COND_ALL:
495 uasm_i_b(&p, imm);
496 break;
497 default:
498 pr_warn("%s: Unhandled branch conditional: %d\n",
499 __func__, cond);
500 }
501 }
502 ctx->idx++;
503}
504
505static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
506{
507 emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
508}
509
510static inline void emit_jalr(unsigned int link, unsigned int reg,
511 struct jit_ctx *ctx)
512{
513 emit_instr(ctx, jalr, link, reg);
514}
515
516static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
517{
518 emit_instr(ctx, jr, reg);
519}
520
521static inline u16 align_sp(unsigned int num)
522{
523 /* Double word alignment for 32-bit, quadword for 64-bit */
524 unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
525 num = (num + (align - 1)) & -align;
526 return num;
527}
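Since align is a power of two, the (num + (align - 1)) & -align idiom rounds up: -align in two's complement equals ~(align - 1). Worked values:

    align_sp(20) on 32-bit: (20 + 7)  & ~7  = 24
    align_sp(20) on 64-bit: (20 + 15) & ~15 = 32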
528
529static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
530{
531 int i = 0, real_off = 0;
532 u32 sflags, tmp_flags;
533
534 /* Adjust the stack pointer */
535 if (offset)
536 emit_stack_offset(-align_sp(offset), ctx);
537
538 tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
539 /* sflags is essentially a bitmap */
540 while (tmp_flags) {
541 if ((sflags >> i) & 0x1) {
542 emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
543 ctx);
544 real_off += SZREG;
545 }
546 i++;
547 tmp_flags >>= 1;
548 }
549
550 /* save return address */
551 if (ctx->flags & SEEN_CALL) {
552 emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
553 real_off += SZREG;
554 }
555
556 /* Setup r_M leaving the alignment gap if necessary */
557 if (ctx->flags & SEEN_MEM) {
558 if (real_off % (SZREG * 2))
559 real_off += SZREG;
560 emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
561 }
562}
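sflags is the SEEN_SREG bitmap shifted down to bit 0, so bit i set means $s_i was used by the filter, and the saved registers are packed with no holes. A trace for an assumed sflags of 0b101 (restore_bpf_jit_regs() below walks the identical bitmap, so every slot is read back from where it was written):

    /* i == 0: bit set   -> store $s0 at real_off 0     */
    /* i == 1: bit clear -> skip, no slot consumed      */
    /* i == 2: bit set   -> store $s2 at real_off SZREG */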
563
564static void restore_bpf_jit_regs(struct jit_ctx *ctx,
565 unsigned int offset)
566{
567 int i, real_off = 0;
568 u32 sflags, tmp_flags;
569
570 tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
571 /* sflags is a bitmap */
572 i = 0;
573 while (tmp_flags) {
574 if ((sflags >> i) & 0x1) {
575 emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
576 ctx);
577 real_off += SZREG;
578 }
579 i++;
580 tmp_flags >>= 1;
581 }
582
583 /* restore return address */
584 if (ctx->flags & SEEN_CALL)
585 emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
586
 587 /* Restore the sp and discard the scratch memory */

588 if (offset)
589 emit_stack_offset(align_sp(offset), ctx);
590}
591
592static unsigned int get_stack_depth(struct jit_ctx *ctx)
593{
594 int sp_off = 0;
595
596
 597 /* How many s* regs do we need to preserve? */
598 sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;
599
600 if (ctx->flags & SEEN_MEM)
601 sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */
602
603 if (ctx->flags & SEEN_CALL)
604 sp_off += SZREG; /* Space for our ra register */
605
606 return sp_off;
607}
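A worked sizing, assuming a 64-bit kernel (SZREG == 8, BPF_MEMWORDS == 16) and a filter that touches $s0 and $s1, uses scratch memory, and makes a helper call:

    sp_off = 2 * SZREG           /* saved $s0 and $s1 = 16 */
           + 4 * BPF_MEMWORDS    /* 16 scratch words  = 64 */
           + SZREG               /* saved $ra         =  8 */
           = 88;                 /* align_sp(88) -> 96     */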
608
609static void build_prologue(struct jit_ctx *ctx)
610{
611 int sp_off;
612
613 /* Calculate the total offset for the stack pointer */
614 sp_off = get_stack_depth(ctx);
615 save_bpf_jit_regs(ctx, sp_off);
616
617 if (ctx->flags & SEEN_SKB)
618 emit_reg_move(r_skb, MIPS_R_A0, ctx);
619
620 if (ctx->flags & SEEN_SKB_DATA) {
621 /* Load packet length */
622 emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
623 ctx);
624 emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
625 ctx);
626 /* Load the data pointer */
627 emit_load_ptr(r_skb_data, r_skb,
628 offsetof(struct sk_buff, data), ctx);
629 /* Load the header length */
630 emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
631 }
632
633 if (ctx->flags & SEEN_X)
634 emit_jit_reg_move(r_X, r_zero, ctx);
635
636 /*
637 * Do not leak kernel data to userspace, we only need to clear
638 * r_A if it is ever used. In fact if it is never used, we
639 * will not save/restore it, so clearing it in this case would
640 * corrupt the state of the caller.
641 */
642 if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
643 (ctx->flags & SEEN_A))
644 emit_jit_reg_move(r_A, r_zero, ctx);
645}
646
647static void build_epilogue(struct jit_ctx *ctx)
648{
649 unsigned int sp_off;
650
651 /* Calculate the total offset for the stack pointer */
652
653 sp_off = get_stack_depth(ctx);
654 restore_bpf_jit_regs(ctx, sp_off);
655
656 /* Return */
657 emit_jr(r_ra, ctx);
658 emit_nop(ctx);
659}
660
661#define CHOOSE_LOAD_FUNC(K, func) \
662 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
663 func##_positive)
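The macro is a three-way dispatch on the sign and magnitude of the constant offset: non-negative offsets take the fast _positive entry, negative offsets that may still reach the link-layer area (at or above SKF_LL_OFF, i.e. -0x200000) take the _negative entry, and anything more negative goes through the plain entry, whose run-time checks reject it. Illustrative picks for sk_load_word:

    /* K = 14        -> sk_load_word_positive (header fast path)   */
    /* K = -0x1000   -> sk_load_word_negative (K >= SKF_LL_OFF)    */
    /* K = -0x300000 -> sk_load_word          (re-checked, faults) */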
664
665static int build_body(struct jit_ctx *ctx)
666{
667 const struct bpf_prog *prog = ctx->skf;
668 const struct sock_filter *inst;
669 unsigned int i, off, condt;
670 u32 k, b_off __maybe_unused;
671 u8 (*sk_load_func)(unsigned long *skb, int offset);
672
673 for (i = 0; i < prog->len; i++) {
674 u16 code;
675
676 inst = &(prog->insns[i]);
677 pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
678 __func__, inst->code, inst->jt, inst->jf, inst->k);
679 k = inst->k;
680 code = bpf_anc_helper(inst);
681
682 if (ctx->target == NULL)
683 ctx->offsets[i] = ctx->idx * 4;
684
685 switch (code) {
686 case BPF_LD | BPF_IMM:
687 /* A <- k ==> li r_A, k */
688 ctx->flags |= SEEN_A;
689 emit_load_imm(r_A, k, ctx);
690 break;
691 case BPF_LD | BPF_W | BPF_LEN:
692 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
693 /* A <- len ==> lw r_A, offset(skb) */
694 ctx->flags |= SEEN_SKB | SEEN_A;
695 off = offsetof(struct sk_buff, len);
696 emit_load(r_A, r_skb, off, ctx);
697 break;
698 case BPF_LD | BPF_MEM:
699 /* A <- M[k] ==> lw r_A, offset(M) */
700 ctx->flags |= SEEN_MEM | SEEN_A;
701 emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
702 break;
703 case BPF_LD | BPF_W | BPF_ABS:
704 /* A <- P[k:4] */
705 sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
706 goto load;
707 case BPF_LD | BPF_H | BPF_ABS:
708 /* A <- P[k:2] */
709 sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
710 goto load;
711 case BPF_LD | BPF_B | BPF_ABS:
712 /* A <- P[k:1] */
713 sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
714load:
715 emit_load_imm(r_off, k, ctx);
716load_common:
717 ctx->flags |= SEEN_CALL | SEEN_OFF |
718 SEEN_SKB | SEEN_A | SEEN_SKB_DATA;
719
720 emit_load_func(r_s0, (ptr)sk_load_func, ctx);
721 emit_reg_move(MIPS_R_A0, r_skb, ctx);
722 emit_jalr(MIPS_R_RA, r_s0, ctx);
723 /* Load second argument to delay slot */
724 emit_reg_move(MIPS_R_A1, r_off, ctx);
725 /* Check the error value */
726 emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
727 ctx);
728 /* Load return register on DS for failures */
729 emit_reg_move(r_ret, r_zero, ctx);
730 /* Return with error */
731 emit_b(b_imm(prog->len, ctx), ctx);
732 emit_nop(ctx);
733 break;
734 case BPF_LD | BPF_W | BPF_IND:
735 /* A <- P[X + k:4] */
736 sk_load_func = sk_load_word;
737 goto load_ind;
738 case BPF_LD | BPF_H | BPF_IND:
739 /* A <- P[X + k:2] */
740 sk_load_func = sk_load_half;
741 goto load_ind;
742 case BPF_LD | BPF_B | BPF_IND:
743 /* A <- P[X + k:1] */
744 sk_load_func = sk_load_byte;
745load_ind:
746 ctx->flags |= SEEN_OFF | SEEN_X;
747 emit_addiu(r_off, r_X, k, ctx);
748 goto load_common;
749 case BPF_LDX | BPF_IMM:
750 /* X <- k */
751 ctx->flags |= SEEN_X;
752 emit_load_imm(r_X, k, ctx);
753 break;
754 case BPF_LDX | BPF_MEM:
755 /* X <- M[k] */
756 ctx->flags |= SEEN_X | SEEN_MEM;
757 emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
758 break;
759 case BPF_LDX | BPF_W | BPF_LEN:
760 /* X <- len */
761 ctx->flags |= SEEN_X | SEEN_SKB;
762 off = offsetof(struct sk_buff, len);
763 emit_load(r_X, r_skb, off, ctx);
764 break;
765 case BPF_LDX | BPF_B | BPF_MSH:
766 /* X <- 4 * (P[k:1] & 0xf) */
767 ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
768 /* Load offset to a1 */
769 emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
770 /*
771 * This may emit two instructions so it may not fit
772 * in the delay slot. So use a0 in the delay slot.
773 */
774 emit_load_imm(MIPS_R_A1, k, ctx);
775 emit_jalr(MIPS_R_RA, r_s0, ctx);
776 emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
777 /* Check the error value */
778 emit_bcond(MIPS_COND_NE, r_ret, 0,
779 b_imm(prog->len, ctx), ctx);
780 emit_reg_move(r_ret, r_zero, ctx);
781 /* We are good */
 782 /* X <- P[k:1] & 0xf */
783 emit_andi(r_X, r_A, 0xf, ctx);
784 /* X << 2 */
785 emit_b(b_imm(i + 1, ctx), ctx);
786 emit_sll(r_X, r_X, 2, ctx); /* delay slot */
787 break;
788 case BPF_ST:
789 /* M[k] <- A */
790 ctx->flags |= SEEN_MEM | SEEN_A;
791 emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
792 break;
793 case BPF_STX:
794 /* M[k] <- X */
795 ctx->flags |= SEEN_MEM | SEEN_X;
796 emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
797 break;
798 case BPF_ALU | BPF_ADD | BPF_K:
799 /* A += K */
800 ctx->flags |= SEEN_A;
801 emit_addiu(r_A, r_A, k, ctx);
802 break;
803 case BPF_ALU | BPF_ADD | BPF_X:
804 /* A += X */
805 ctx->flags |= SEEN_A | SEEN_X;
806 emit_addu(r_A, r_A, r_X, ctx);
807 break;
808 case BPF_ALU | BPF_SUB | BPF_K:
809 /* A -= K */
810 ctx->flags |= SEEN_A;
811 emit_addiu(r_A, r_A, -k, ctx);
812 break;
813 case BPF_ALU | BPF_SUB | BPF_X:
814 /* A -= X */
815 ctx->flags |= SEEN_A | SEEN_X;
816 emit_subu(r_A, r_A, r_X, ctx);
817 break;
818 case BPF_ALU | BPF_MUL | BPF_K:
819 /* A *= K */
820 /* Load K to scratch register before MUL */
821 ctx->flags |= SEEN_A;
822 emit_load_imm(r_s0, k, ctx);
823 emit_mul(r_A, r_A, r_s0, ctx);
824 break;
825 case BPF_ALU | BPF_MUL | BPF_X:
826 /* A *= X */
827 ctx->flags |= SEEN_A | SEEN_X;
828 emit_mul(r_A, r_A, r_X, ctx);
829 break;
830 case BPF_ALU | BPF_DIV | BPF_K:
831 /* A /= k */
832 if (k == 1)
833 break;
834 if (optimize_div(&k)) {
835 ctx->flags |= SEEN_A;
836 emit_srl(r_A, r_A, k, ctx);
837 break;
838 }
839 ctx->flags |= SEEN_A;
840 emit_load_imm(r_s0, k, ctx);
841 emit_div(r_A, r_s0, ctx);
842 break;
843 case BPF_ALU | BPF_MOD | BPF_K:
844 /* A %= k */
845 if (k == 1) {
846 ctx->flags |= SEEN_A;
847 emit_jit_reg_move(r_A, r_zero, ctx);
848 } else {
849 ctx->flags |= SEEN_A;
850 emit_load_imm(r_s0, k, ctx);
851 emit_mod(r_A, r_s0, ctx);
852 }
853 break;
854 case BPF_ALU | BPF_DIV | BPF_X:
855 /* A /= X */
856 ctx->flags |= SEEN_X | SEEN_A;
857 /* Check if r_X is zero */
858 emit_bcond(MIPS_COND_EQ, r_X, r_zero,
859 b_imm(prog->len, ctx), ctx);
860 emit_load_imm(r_ret, 0, ctx); /* delay slot */
861 emit_div(r_A, r_X, ctx);
862 break;
863 case BPF_ALU | BPF_MOD | BPF_X:
864 /* A %= X */
865 ctx->flags |= SEEN_X | SEEN_A;
866 /* Check if r_X is zero */
867 emit_bcond(MIPS_COND_EQ, r_X, r_zero,
868 b_imm(prog->len, ctx), ctx);
869 emit_load_imm(r_ret, 0, ctx); /* delay slot */
870 emit_mod(r_A, r_X, ctx);
871 break;
872 case BPF_ALU | BPF_OR | BPF_K:
873 /* A |= K */
874 ctx->flags |= SEEN_A;
875 emit_ori(r_A, r_A, k, ctx);
876 break;
877 case BPF_ALU | BPF_OR | BPF_X:
878 /* A |= X */
 879 ctx->flags |= SEEN_A | SEEN_X;
 880 emit_or(r_A, r_A, r_X, ctx);
881 break;
882 case BPF_ALU | BPF_XOR | BPF_K:
883 /* A ^= k */
884 ctx->flags |= SEEN_A;
885 emit_xori(r_A, r_A, k, ctx);
886 break;
887 case BPF_ANC | SKF_AD_ALU_XOR_X:
888 case BPF_ALU | BPF_XOR | BPF_X:
889 /* A ^= X */
890 ctx->flags |= SEEN_A;
891 emit_xor(r_A, r_A, r_X, ctx);
892 break;
893 case BPF_ALU | BPF_AND | BPF_K:
894 /* A &= K */
895 ctx->flags |= SEEN_A;
896 emit_andi(r_A, r_A, k, ctx);
897 break;
898 case BPF_ALU | BPF_AND | BPF_X:
899 /* A &= X */
900 ctx->flags |= SEEN_A | SEEN_X;
901 emit_and(r_A, r_A, r_X, ctx);
902 break;
903 case BPF_ALU | BPF_LSH | BPF_K:
904 /* A <<= K */
905 ctx->flags |= SEEN_A;
906 emit_sll(r_A, r_A, k, ctx);
907 break;
908 case BPF_ALU | BPF_LSH | BPF_X:
909 /* A <<= X */
910 ctx->flags |= SEEN_A | SEEN_X;
911 emit_sllv(r_A, r_A, r_X, ctx);
912 break;
913 case BPF_ALU | BPF_RSH | BPF_K:
914 /* A >>= K */
915 ctx->flags |= SEEN_A;
916 emit_srl(r_A, r_A, k, ctx);
917 break;
918 case BPF_ALU | BPF_RSH | BPF_X:
919 ctx->flags |= SEEN_A | SEEN_X;
920 emit_srlv(r_A, r_A, r_X, ctx);
921 break;
922 case BPF_ALU | BPF_NEG:
923 /* A = -A */
924 ctx->flags |= SEEN_A;
925 emit_neg(r_A, ctx);
926 break;
927 case BPF_JMP | BPF_JA:
928 /* pc += K */
929 emit_b(b_imm(i + k + 1, ctx), ctx);
930 emit_nop(ctx);
931 break;
932 case BPF_JMP | BPF_JEQ | BPF_K:
933 /* pc += ( A == K ) ? pc->jt : pc->jf */
934 condt = MIPS_COND_EQ | MIPS_COND_K;
935 goto jmp_cmp;
936 case BPF_JMP | BPF_JEQ | BPF_X:
937 ctx->flags |= SEEN_X;
938 /* pc += ( A == X ) ? pc->jt : pc->jf */
939 condt = MIPS_COND_EQ | MIPS_COND_X;
940 goto jmp_cmp;
941 case BPF_JMP | BPF_JGE | BPF_K:
942 /* pc += ( A >= K ) ? pc->jt : pc->jf */
943 condt = MIPS_COND_GE | MIPS_COND_K;
944 goto jmp_cmp;
945 case BPF_JMP | BPF_JGE | BPF_X:
946 ctx->flags |= SEEN_X;
947 /* pc += ( A >= X ) ? pc->jt : pc->jf */
948 condt = MIPS_COND_GE | MIPS_COND_X;
949 goto jmp_cmp;
950 case BPF_JMP | BPF_JGT | BPF_K:
951 /* pc += ( A > K ) ? pc->jt : pc->jf */
952 condt = MIPS_COND_GT | MIPS_COND_K;
953 goto jmp_cmp;
954 case BPF_JMP | BPF_JGT | BPF_X:
955 ctx->flags |= SEEN_X;
956 /* pc += ( A > X ) ? pc->jt : pc->jf */
957 condt = MIPS_COND_GT | MIPS_COND_X;
958jmp_cmp:
959 /* Greater or Equal */
960 if ((condt & MIPS_COND_GE) ||
961 (condt & MIPS_COND_GT)) {
962 if (condt & MIPS_COND_K) { /* K */
963 ctx->flags |= SEEN_A;
964 emit_sltiu(r_s0, r_A, k, ctx);
965 } else { /* X */
966 ctx->flags |= SEEN_A |
967 SEEN_X;
968 emit_sltu(r_s0, r_A, r_X, ctx);
969 }
 970 /* A < (K|X) ? r_scratch = 1 */
971 b_off = b_imm(i + inst->jf + 1, ctx);
972 emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
973 ctx);
974 emit_nop(ctx);
975 /* A > (K|X) ? scratch = 0 */
976 if (condt & MIPS_COND_GT) {
977 /* Checking for equality */
978 ctx->flags |= SEEN_A | SEEN_X;
979 if (condt & MIPS_COND_K)
980 emit_load_imm(r_s0, k, ctx);
981 else
982 emit_jit_reg_move(r_s0, r_X,
983 ctx);
984 b_off = b_imm(i + inst->jf + 1, ctx);
985 emit_bcond(MIPS_COND_EQ, r_A, r_s0,
986 b_off, ctx);
987 emit_nop(ctx);
988 /* Finally, A > K|X */
989 b_off = b_imm(i + inst->jt + 1, ctx);
990 emit_b(b_off, ctx);
991 emit_nop(ctx);
992 } else {
993 /* A >= (K|X) so jump */
994 b_off = b_imm(i + inst->jt + 1, ctx);
995 emit_b(b_off, ctx);
996 emit_nop(ctx);
997 }
998 } else {
999 /* A == K|X */
1000 if (condt & MIPS_COND_K) { /* K */
1001 ctx->flags |= SEEN_A;
1002 emit_load_imm(r_s0, k, ctx);
1003 /* jump true */
1004 b_off = b_imm(i + inst->jt + 1, ctx);
1005 emit_bcond(MIPS_COND_EQ, r_A, r_s0,
1006 b_off, ctx);
1007 emit_nop(ctx);
1008 /* jump false */
1009 b_off = b_imm(i + inst->jf + 1,
1010 ctx);
1011 emit_bcond(MIPS_COND_NE, r_A, r_s0,
1012 b_off, ctx);
1013 emit_nop(ctx);
1014 } else { /* X */
1015 /* jump true */
1016 ctx->flags |= SEEN_A | SEEN_X;
1017 b_off = b_imm(i + inst->jt + 1,
1018 ctx);
1019 emit_bcond(MIPS_COND_EQ, r_A, r_X,
1020 b_off, ctx);
1021 emit_nop(ctx);
1022 /* jump false */
1023 b_off = b_imm(i + inst->jf + 1, ctx);
1024 emit_bcond(MIPS_COND_NE, r_A, r_X,
1025 b_off, ctx);
1026 emit_nop(ctx);
1027 }
1028 }
1029 break;
1030 case BPF_JMP | BPF_JSET | BPF_K:
1031 ctx->flags |= SEEN_A;
1032 /* pc += (A & K) ? pc -> jt : pc -> jf */
1033 emit_load_imm(r_s1, k, ctx);
1034 emit_and(r_s0, r_A, r_s1, ctx);
1035 /* jump true */
1036 b_off = b_imm(i + inst->jt + 1, ctx);
1037 emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
1038 emit_nop(ctx);
1039 /* jump false */
1040 b_off = b_imm(i + inst->jf + 1, ctx);
1041 emit_b(b_off, ctx);
1042 emit_nop(ctx);
1043 break;
1044 case BPF_JMP | BPF_JSET | BPF_X:
1045 ctx->flags |= SEEN_X | SEEN_A;
1046 /* pc += (A & X) ? pc -> jt : pc -> jf */
1047 emit_and(r_s0, r_A, r_X, ctx);
1048 /* jump true */
1049 b_off = b_imm(i + inst->jt + 1, ctx);
1050 emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
1051 emit_nop(ctx);
1052 /* jump false */
1053 b_off = b_imm(i + inst->jf + 1, ctx);
1054 emit_b(b_off, ctx);
1055 emit_nop(ctx);
1056 break;
1057 case BPF_RET | BPF_A:
1058 ctx->flags |= SEEN_A;
1059 if (i != prog->len - 1)
1060 /*
1061 * If this is not the last instruction
1062 * then jump to the epilogue
1063 */
1064 emit_b(b_imm(prog->len, ctx), ctx);
1065 emit_reg_move(r_ret, r_A, ctx); /* delay slot */
1066 break;
1067 case BPF_RET | BPF_K:
1068 /*
1069 * It can emit two instructions, so it does not fit in
1070 * the delay slot.
1071 */
1072 emit_load_imm(r_ret, k, ctx);
1073 if (i != prog->len - 1) {
1074 /*
1075 * If this is not the last instruction
1076 * then jump to the epilogue
1077 */
1078 emit_b(b_imm(prog->len, ctx), ctx);
1079 emit_nop(ctx);
1080 }
1081 break;
1082 case BPF_MISC | BPF_TAX:
1083 /* X = A */
1084 ctx->flags |= SEEN_X | SEEN_A;
1085 emit_jit_reg_move(r_X, r_A, ctx);
1086 break;
1087 case BPF_MISC | BPF_TXA:
1088 /* A = X */
1089 ctx->flags |= SEEN_A | SEEN_X;
1090 emit_jit_reg_move(r_A, r_X, ctx);
1091 break;
1092 /* AUX */
1093 case BPF_ANC | SKF_AD_PROTOCOL:
1094 /* A = ntohs(skb->protocol) */
1095 ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
1096 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1097 protocol) != 2);
1098 off = offsetof(struct sk_buff, protocol);
1099 emit_half_load(r_A, r_skb, off, ctx);
1100#ifdef CONFIG_CPU_LITTLE_ENDIAN
1101 /* This needs little endian fixup */
1102 if (cpu_has_wsbh) {
1103 /* R2 and later have the wsbh instruction */
1104 emit_wsbh(r_A, r_A, ctx);
1105 } else {
1106 /* Get first byte */
1107 emit_andi(r_tmp_imm, r_A, 0xff, ctx);
1108 /* Shift it */
1109 emit_sll(r_tmp, r_tmp_imm, 8, ctx);
1110 /* Get second byte */
1111 emit_srl(r_tmp_imm, r_A, 8, ctx);
1112 emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
1113 /* Put everything together in r_A */
1114 emit_or(r_A, r_tmp, r_tmp_imm, ctx);
1115 }
1116#endif
1117 break;
1118 case BPF_ANC | SKF_AD_CPU:
1119 ctx->flags |= SEEN_A | SEEN_OFF;
1120 /* A = current_thread_info()->cpu */
1121 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
1122 cpu) != 4);
1123 off = offsetof(struct thread_info, cpu);
1124 /* $28/gp points to the thread_info struct */
1125 emit_load(r_A, 28, off, ctx);
1126 break;
1127 case BPF_ANC | SKF_AD_IFINDEX:
1128 /* A = skb->dev->ifindex */
1129 case BPF_ANC | SKF_AD_HATYPE:
1130 /* A = skb->dev->type */
1131 ctx->flags |= SEEN_SKB | SEEN_A;
1132 off = offsetof(struct sk_buff, dev);
1133 /* Load *dev pointer */
1134 emit_load_ptr(r_s0, r_skb, off, ctx);
1135 /* error (0) in the delay slot */
1136 emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
1137 b_imm(prog->len, ctx), ctx);
1138 emit_reg_move(r_ret, r_zero, ctx);
1139 if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
1140 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
1141 off = offsetof(struct net_device, ifindex);
1142 emit_load(r_A, r_s0, off, ctx);
1143 } else { /* code == (BPF_ANC | SKF_AD_HATYPE) */
1144 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
1145 off = offsetof(struct net_device, type);
1146 emit_half_load_unsigned(r_A, r_s0, off, ctx);
1147 }
1148 break;
1149 case BPF_ANC | SKF_AD_MARK:
1150 ctx->flags |= SEEN_SKB | SEEN_A;
1151 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
1152 off = offsetof(struct sk_buff, mark);
1153 emit_load(r_A, r_skb, off, ctx);
1154 break;
1155 case BPF_ANC | SKF_AD_RXHASH:
1156 ctx->flags |= SEEN_SKB | SEEN_A;
1157 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
1158 off = offsetof(struct sk_buff, hash);
1159 emit_load(r_A, r_skb, off, ctx);
1160 break;
1161 case BPF_ANC | SKF_AD_VLAN_TAG:
1162 ctx->flags |= SEEN_SKB | SEEN_A;
1163 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1164 vlan_tci) != 2);
1165 off = offsetof(struct sk_buff, vlan_tci);
1166 emit_half_load_unsigned(r_A, r_skb, off, ctx);
1167 break;
1168 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
1169 ctx->flags |= SEEN_SKB | SEEN_A;
1170 emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx);
1171 if (PKT_VLAN_PRESENT_BIT)
1172 emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx);
1173 if (PKT_VLAN_PRESENT_BIT < 7)
1174 emit_andi(r_A, r_A, 1, ctx);
1175 break;
1176 case BPF_ANC | SKF_AD_PKTTYPE:
1177 ctx->flags |= SEEN_SKB;
1178
1179 emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
1180 /* Keep only the last 3 bits */
1181 emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
1182#ifdef __BIG_ENDIAN_BITFIELD
1183 /* Get the actual packet type to the lower 3 bits */
1184 emit_srl(r_A, r_A, 5, ctx);
1185#endif
1186 break;
1187 case BPF_ANC | SKF_AD_QUEUE:
1188 ctx->flags |= SEEN_SKB | SEEN_A;
1189 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1190 queue_mapping) != 2);
1191 BUILD_BUG_ON(offsetof(struct sk_buff,
1192 queue_mapping) > 0xff);
1193 off = offsetof(struct sk_buff, queue_mapping);
1194 emit_half_load_unsigned(r_A, r_skb, off, ctx);
1195 break;
1196 default:
1197 pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
1198 inst->code);
1199 return -1;
1200 }
1201 }
1202
1203 /* compute offsets only during the first pass */
1204 if (ctx->target == NULL)
1205 ctx->offsets[i] = ctx->idx * 4;
1206
1207 return 0;
1208}
1209
1210void bpf_jit_compile(struct bpf_prog *fp)
1211{
1212 struct jit_ctx ctx;
1213 unsigned int alloc_size, tmp_idx;
1214
1215 if (!bpf_jit_enable)
1216 return;
1217
1218 memset(&ctx, 0, sizeof(ctx));
1219
1220 ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
1221 if (ctx.offsets == NULL)
1222 return;
1223
1224 ctx.skf = fp;
1225
1226 if (build_body(&ctx))
1227 goto out;
1228
1229 tmp_idx = ctx.idx;
1230 build_prologue(&ctx);
1231 ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
1232 /* just to complete the ctx.idx count */
1233 build_epilogue(&ctx);
1234
1235 alloc_size = 4 * ctx.idx;
1236 ctx.target = module_alloc(alloc_size);
1237 if (ctx.target == NULL)
1238 goto out;
1239
1240 /* Clean it */
1241 memset(ctx.target, 0, alloc_size);
1242
1243 ctx.idx = 0;
1244
1245 /* Generate the actual JIT code */
1246 build_prologue(&ctx);
1247 build_body(&ctx);
1248 build_epilogue(&ctx);
1249
1250 /* Update the icache */
1251 flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));
1252
1253 if (bpf_jit_enable > 1)
1254 /* Dump JIT code */
1255 bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
1256
1257 fp->bpf_func = (void *)ctx.target;
1258 fp->jited = 1;
1259
1260out:
1261 kfree(ctx.offsets);
1262}
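The emit helpers only write when ctx.target is non-NULL, so the same build_* routines double as a sizing pass. A condensed sketch of the flow above:

    build_body(&ctx);        /* pass 1: count insns, fill ctx.offsets */
    build_prologue(&ctx);    /* measure ctx.prologue_bytes            */
    build_epilogue(&ctx);    /* finish the instruction count          */
    ctx.target = module_alloc(4 * ctx.idx);
    ctx.idx = 0;             /* rewind, then emit for real            */
    build_prologue(&ctx);
    build_body(&ctx);
    build_epilogue(&ctx);
    flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));

build_body() deliberately runs before build_prologue() on the first pass, since the prologue layout depends on the SEEN_* flags that only a body scan can populate.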
1263
1264void bpf_jit_free(struct bpf_prog *fp)
1265{
1266 if (fp->jited)
1267 module_memfree(fp->bpf_func);
1268
1269 bpf_prog_unlock_free(fp);
1270}
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
deleted file mode 100644
index 57154c5883b6..000000000000
--- a/arch/mips/net/bpf_jit_asm.S
+++ /dev/null
@@ -1,285 +0,0 @@
1/*
 2 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
3 * compiler.
4 *
5 * Copyright (C) 2015 Imagination Technologies Ltd.
6 * Author: Markos Chandras <markos.chandras@imgtec.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; version 2 of the License.
11 */
12
13#include <asm/asm.h>
14#include <asm/isa-rev.h>
15#include <asm/regdef.h>
16#include "bpf_jit.h"
17
18/* ABI
19 *
20 * r_skb_hl skb header length
21 * r_skb_data skb data
22 * r_off(a1) offset register
23 * r_A BPF register A
 24 * r_X BPF register X
25 * r_skb(a0) *skb
26 * r_M *scratch memory
 27 * r_skb_len skb length
28 * r_s0 Scratch register 0
29 * r_s1 Scratch register 1
30 *
31 * On entry:
32 * a0: *skb
33 * a1: offset (imm or imm + X)
34 *
35 * All non-BPF-ABI registers are free for use. On return, we only
36 * care about r_ret. The BPF-ABI registers are assumed to remain
37 * unmodified during the entire filter operation.
38 */
39
40#define skb a0
41#define offset a1
42#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */
43
44 /* We know better :) so prevent assembler reordering etc */
45 .set noreorder
46
47#define is_offset_negative(TYPE) \
48 /* If offset is negative we have more work to do */ \
49 slti t0, offset, 0; \
50 bgtz t0, bpf_slow_path_##TYPE##_neg; \
51 /* Be careful what follows in DS. */
52
53#define is_offset_in_header(SIZE, TYPE) \
54 /* Reading from header? */ \
55 addiu $r_s0, $r_skb_hl, -SIZE; \
56 slt t0, $r_s0, offset; \
57 bgtz t0, bpf_slow_path_##TYPE; \
58
59LEAF(sk_load_word)
60 is_offset_negative(word)
61FEXPORT(sk_load_word_positive)
62 is_offset_in_header(4, word)
63 /* Offset within header boundaries */
64 PTR_ADDU t1, $r_skb_data, offset
65 .set reorder
66 lw $r_A, 0(t1)
67 .set noreorder
68#ifdef CONFIG_CPU_LITTLE_ENDIAN
69# if MIPS_ISA_REV >= 2
70 wsbh t0, $r_A
71 rotr $r_A, t0, 16
72# else
73 sll t0, $r_A, 24
74 srl t1, $r_A, 24
75 srl t2, $r_A, 8
76 or t0, t0, t1
77 andi t2, t2, 0xff00
78 andi t1, $r_A, 0xff00
79 or t0, t0, t2
80 sll t1, t1, 8
81 or $r_A, t0, t1
82# endif
83#endif
84 jr $r_ra
85 move $r_ret, zero
86 END(sk_load_word)
87
88LEAF(sk_load_half)
89 is_offset_negative(half)
90FEXPORT(sk_load_half_positive)
91 is_offset_in_header(2, half)
92 /* Offset within header boundaries */
93 PTR_ADDU t1, $r_skb_data, offset
94 lhu $r_A, 0(t1)
95#ifdef CONFIG_CPU_LITTLE_ENDIAN
96# if MIPS_ISA_REV >= 2
97 wsbh $r_A, $r_A
98# else
99 sll t0, $r_A, 8
100 srl t1, $r_A, 8
101 andi t0, t0, 0xff00
102 or $r_A, t0, t1
103# endif
104#endif
105 jr $r_ra
106 move $r_ret, zero
107 END(sk_load_half)
108
109LEAF(sk_load_byte)
110 is_offset_negative(byte)
111FEXPORT(sk_load_byte_positive)
112 is_offset_in_header(1, byte)
113 /* Offset within header boundaries */
114 PTR_ADDU t1, $r_skb_data, offset
115 lbu $r_A, 0(t1)
116 jr $r_ra
117 move $r_ret, zero
118 END(sk_load_byte)
119
120/*
121 * call skb_copy_bits:
122 * (prototype in linux/skbuff.h)
123 *
124 * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
125 *
126 * o32 mandates we leave 4 spaces for argument registers in case
127 * the callee needs to use them. Even though we don't care about
128 * the argument registers ourselves, we need to allocate that space
129 * to remain ABI compliant since the callee may want to use that space.
130 * We also allocate 2 more spaces for $r_ra and our return register (*to).
131 *
132 * n64 is a bit different. The *caller* will allocate the space to preserve
133 * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
134 * good reason but it does not matter that much really.
135 *
136 * (void *to) is returned in r_s0
137 *
138 */
139#ifdef CONFIG_CPU_LITTLE_ENDIAN
140#define DS_OFFSET(SIZE) (4 * SZREG)
141#else
142#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
143#endif
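skb_copy_bits() fills SIZE bytes at the pointer passed in a2, while the later INT_L reloads a full 32-bit word from the pre-zeroed slot at 4*SZREG; the (4 - SIZE) bias places sub-word copies at the end of that word on big-endian so the reload zero-extends cleanly in either byte order (little-endian builds then byte-swap the loaded value, as in the fast path). For an assumed o32 build (SZREG == 4) and SIZE == 2:

    /* LE: a2 = sp + 16 -> bytes land at 16..17, the word's low half */
    /* BE: a2 = sp + 18 -> bytes land at 18..19, the word's low half */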
144#define bpf_slow_path_common(SIZE) \
145 /* Quick check. Are we within reasonable boundaries? */ \
146 LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
147 sltu $r_s0, offset, $r_s1; \
148 beqz $r_s0, fault; \
149 /* Load 4th argument in DS */ \
150 LONG_ADDIU a3, zero, SIZE; \
151 PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
152 PTR_LA t0, skb_copy_bits; \
153 PTR_S $r_ra, (5 * SZREG)($r_sp); \
154 /* Assign low slot to a2 */ \
155 PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
156 jalr t0; \
157 /* Reset our destination slot (DS but it's ok) */ \
158 INT_S zero, (4 * SZREG)($r_sp); \
159 /* \
160 * skb_copy_bits returns 0 on success and -EFAULT \
161 * on error. Our data live in a2. Do not bother with \
162 * our data if an error has been returned. \
163 */ \
164 /* Restore our frame */ \
165 PTR_L $r_ra, (5 * SZREG)($r_sp); \
166 INT_L $r_s0, (4 * SZREG)($r_sp); \
167 bltz v0, fault; \
168 PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
169 move $r_ret, zero; \
170
171NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
172 bpf_slow_path_common(4)
173#ifdef CONFIG_CPU_LITTLE_ENDIAN
174# if MIPS_ISA_REV >= 2
175 wsbh t0, $r_s0
176 jr $r_ra
177 rotr $r_A, t0, 16
178# else
179 sll t0, $r_s0, 24
180 srl t1, $r_s0, 24
181 srl t2, $r_s0, 8
182 or t0, t0, t1
183 andi t2, t2, 0xff00
184 andi t1, $r_s0, 0xff00
185 or t0, t0, t2
186 sll t1, t1, 8
187 jr $r_ra
188 or $r_A, t0, t1
189# endif
190#else
191 jr $r_ra
192 move $r_A, $r_s0
193#endif
194
195 END(bpf_slow_path_word)
196
197NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
198 bpf_slow_path_common(2)
199#ifdef CONFIG_CPU_LITTLE_ENDIAN
200# if MIPS_ISA_REV >= 2
201 jr $r_ra
202 wsbh $r_A, $r_s0
203# else
204 sll t0, $r_s0, 8
205 andi t1, $r_s0, 0xff00
206 andi t0, t0, 0xff00
207 srl t1, t1, 8
208 jr $r_ra
209 or $r_A, t0, t1
210# endif
211#else
212 jr $r_ra
213 move $r_A, $r_s0
214#endif
215
216 END(bpf_slow_path_half)
217
218NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
219 bpf_slow_path_common(1)
220 jr $r_ra
221 move $r_A, $r_s0
222
223 END(bpf_slow_path_byte)
224
225/*
226 * Negative entry points
227 */
228 .macro bpf_is_end_of_data
229 li t0, SKF_LL_OFF
230 /* Reading link layer data? */
231 slt t1, offset, t0
232 bgtz t1, fault
233 /* Be careful what follows in DS. */
234 .endm
235/*
236 * call bpf_internal_load_pointer_neg_helper:
237 * (prototype in linux/filter.h)
238 *
239 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
240 * int k, unsigned int size)
241 *
242 * see above (bpf_slow_path_common) for ABI restrictions
243 */
244#define bpf_negative_common(SIZE) \
245 PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
246 PTR_LA t0, bpf_internal_load_pointer_neg_helper; \
247 PTR_S $r_ra, (5 * SZREG)($r_sp); \
248 jalr t0; \
249 li a2, SIZE; \
250 PTR_L $r_ra, (5 * SZREG)($r_sp); \
251 /* Check return pointer */ \
252 beqz v0, fault; \
253 PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
254 /* Preserve our pointer */ \
255 move $r_s0, v0; \
256 /* Set return value */ \
257 move $r_ret, zero; \
258
259bpf_slow_path_word_neg:
260 bpf_is_end_of_data
261NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
262 bpf_negative_common(4)
263 jr $r_ra
264 lw $r_A, 0($r_s0)
265 END(sk_load_word_negative)
266
267bpf_slow_path_half_neg:
268 bpf_is_end_of_data
269NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
270 bpf_negative_common(2)
271 jr $r_ra
272 lhu $r_A, 0($r_s0)
273 END(sk_load_half_negative)
274
275bpf_slow_path_byte_neg:
276 bpf_is_end_of_data
277NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
278 bpf_negative_common(1)
279 jr $r_ra
280 lbu $r_A, 0($r_s0)
281 END(sk_load_byte_negative)
282
283fault:
284 jr $r_ra
285 addiu $r_ret, zero, 1
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 98bf0c222b5f..dfd5a4b1b779 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -22,6 +22,7 @@
22#include <asm/byteorder.h> 22#include <asm/byteorder.h>
23#include <asm/cacheflush.h> 23#include <asm/cacheflush.h>
24#include <asm/cpu-features.h> 24#include <asm/cpu-features.h>
25#include <asm/isa-rev.h>
25#include <asm/uasm.h> 26#include <asm/uasm.h>
26 27
27/* Registers used by JIT */ 28/* Registers used by JIT */
@@ -125,15 +126,21 @@ static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
125} 126}
126 127
127/* Simply emit the instruction if the JIT memory space has been allocated */ 128/* Simply emit the instruction if the JIT memory space has been allocated */
128#define emit_instr(ctx, func, ...) \ 129#define emit_instr_long(ctx, func64, func32, ...) \
129do { \ 130do { \
130 if ((ctx)->target != NULL) { \ 131 if ((ctx)->target != NULL) { \
131 u32 *p = &(ctx)->target[ctx->idx]; \ 132 u32 *p = &(ctx)->target[ctx->idx]; \
132 uasm_i_##func(&p, ##__VA_ARGS__); \ 133 if (IS_ENABLED(CONFIG_64BIT)) \
133 } \ 134 uasm_i_##func64(&p, ##__VA_ARGS__); \
134 (ctx)->idx++; \ 135 else \
136 uasm_i_##func32(&p, ##__VA_ARGS__); \
137 } \
138 (ctx)->idx++; \
135} while (0) 139} while (0)
136 140
141#define emit_instr(ctx, func, ...) \
142 emit_instr_long(ctx, func, func, ##__VA_ARGS__)
143
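emit_instr_long() lets one call site name both the 64-bit and the 32-bit opcode, and because IS_ENABLED(CONFIG_64BIT) folds to a compile-time constant, the untaken arm is discarded entirely. One of the call sites added below shows the pattern:

    emit_instr_long(ctx, daddiu, addiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
    /* 64-bit kernel: daddiu sp, sp, -N;  32-bit kernel: addiu sp, sp, -N */

Plain emit_instr(ctx, func, ...) is now a wrapper that passes the same opcode for both arms.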
137static unsigned int j_target(struct jit_ctx *ctx, int target_idx) 144static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
138{ 145{
139 unsigned long target_va, base_va; 146 unsigned long target_va, base_va;
@@ -274,17 +281,17 @@ static int gen_int_prologue(struct jit_ctx *ctx)
274 * If RA we are doing a function call and may need 281 * If RA we are doing a function call and may need
275 * extra 8-byte tmp area. 282 * extra 8-byte tmp area.
276 */ 283 */
277 stack_adjust += 16; 284 stack_adjust += 2 * sizeof(long);
278 if (ctx->flags & EBPF_SAVE_S0) 285 if (ctx->flags & EBPF_SAVE_S0)
279 stack_adjust += 8; 286 stack_adjust += sizeof(long);
280 if (ctx->flags & EBPF_SAVE_S1) 287 if (ctx->flags & EBPF_SAVE_S1)
281 stack_adjust += 8; 288 stack_adjust += sizeof(long);
282 if (ctx->flags & EBPF_SAVE_S2) 289 if (ctx->flags & EBPF_SAVE_S2)
283 stack_adjust += 8; 290 stack_adjust += sizeof(long);
284 if (ctx->flags & EBPF_SAVE_S3) 291 if (ctx->flags & EBPF_SAVE_S3)
285 stack_adjust += 8; 292 stack_adjust += sizeof(long);
286 if (ctx->flags & EBPF_SAVE_S4) 293 if (ctx->flags & EBPF_SAVE_S4)
287 stack_adjust += 8; 294 stack_adjust += sizeof(long);
288 295
289 BUILD_BUG_ON(MAX_BPF_STACK & 7); 296 BUILD_BUG_ON(MAX_BPF_STACK & 7);
290 locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0; 297 locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
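Replacing the hard-coded 8s with sizeof(long) is what makes the save-area layout correct for the new 32-bit support: each slot holds one native register, so it is 8 bytes on a 64-bit kernel but 4 on a 32-bit one. For example:

    stack_adjust += 2 * sizeof(long);   /* 16 on 64-bit, 8 on 32-bit */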
@@ -298,41 +305,49 @@ static int gen_int_prologue(struct jit_ctx *ctx)
298 * On tail call we skip this instruction, and the TCC is 305 * On tail call we skip this instruction, and the TCC is
299 * passed in $v1 from the caller. 306 * passed in $v1 from the caller.
300 */ 307 */
301 emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT); 308 emit_instr(ctx, addiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
302 if (stack_adjust) 309 if (stack_adjust)
303 emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust); 310 emit_instr_long(ctx, daddiu, addiu,
311 MIPS_R_SP, MIPS_R_SP, -stack_adjust);
304 else 312 else
305 return 0; 313 return 0;
306 314
307 store_offset = stack_adjust - 8; 315 store_offset = stack_adjust - sizeof(long);
308 316
309 if (ctx->flags & EBPF_SAVE_RA) { 317 if (ctx->flags & EBPF_SAVE_RA) {
310 emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP); 318 emit_instr_long(ctx, sd, sw,
311 store_offset -= 8; 319 MIPS_R_RA, store_offset, MIPS_R_SP);
320 store_offset -= sizeof(long);
312 } 321 }
313 if (ctx->flags & EBPF_SAVE_S0) { 322 if (ctx->flags & EBPF_SAVE_S0) {
314 emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP); 323 emit_instr_long(ctx, sd, sw,
315 store_offset -= 8; 324 MIPS_R_S0, store_offset, MIPS_R_SP);
325 store_offset -= sizeof(long);
316 } 326 }
317 if (ctx->flags & EBPF_SAVE_S1) { 327 if (ctx->flags & EBPF_SAVE_S1) {
318 emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP); 328 emit_instr_long(ctx, sd, sw,
319 store_offset -= 8; 329 MIPS_R_S1, store_offset, MIPS_R_SP);
330 store_offset -= sizeof(long);
320 } 331 }
321 if (ctx->flags & EBPF_SAVE_S2) { 332 if (ctx->flags & EBPF_SAVE_S2) {
322 emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP); 333 emit_instr_long(ctx, sd, sw,
323 store_offset -= 8; 334 MIPS_R_S2, store_offset, MIPS_R_SP);
335 store_offset -= sizeof(long);
324 } 336 }
325 if (ctx->flags & EBPF_SAVE_S3) { 337 if (ctx->flags & EBPF_SAVE_S3) {
326 emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP); 338 emit_instr_long(ctx, sd, sw,
327 store_offset -= 8; 339 MIPS_R_S3, store_offset, MIPS_R_SP);
340 store_offset -= sizeof(long);
328 } 341 }
329 if (ctx->flags & EBPF_SAVE_S4) { 342 if (ctx->flags & EBPF_SAVE_S4) {
330 emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP); 343 emit_instr_long(ctx, sd, sw,
331 store_offset -= 8; 344 MIPS_R_S4, store_offset, MIPS_R_SP);
345 store_offset -= sizeof(long);
332 } 346 }
333 347
334 if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1)) 348 if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
335 emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO); 349 emit_instr_long(ctx, daddu, addu,
350 MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
336 351
337 return 0; 352 return 0;
338} 353}
@@ -341,7 +356,7 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
341{ 356{
342 const struct bpf_prog *prog = ctx->skf; 357 const struct bpf_prog *prog = ctx->skf;
343 int stack_adjust = ctx->stack_size; 358 int stack_adjust = ctx->stack_size;
344 int store_offset = stack_adjust - 8; 359 int store_offset = stack_adjust - sizeof(long);
345 enum reg_val_type td; 360 enum reg_val_type td;
346 int r0 = MIPS_R_V0; 361 int r0 = MIPS_R_V0;
347 362
@@ -353,33 +368,40 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
353 } 368 }
354 369
355 if (ctx->flags & EBPF_SAVE_RA) { 370 if (ctx->flags & EBPF_SAVE_RA) {
356 emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); 371 emit_instr_long(ctx, ld, lw,
357 store_offset -= 8; 372 MIPS_R_RA, store_offset, MIPS_R_SP);
373 store_offset -= sizeof(long);
358 } 374 }
359 if (ctx->flags & EBPF_SAVE_S0) { 375 if (ctx->flags & EBPF_SAVE_S0) {
360 emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP); 376 emit_instr_long(ctx, ld, lw,
361 store_offset -= 8; 377 MIPS_R_S0, store_offset, MIPS_R_SP);
378 store_offset -= sizeof(long);
362 } 379 }
363 if (ctx->flags & EBPF_SAVE_S1) { 380 if (ctx->flags & EBPF_SAVE_S1) {
364 emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP); 381 emit_instr_long(ctx, ld, lw,
365 store_offset -= 8; 382 MIPS_R_S1, store_offset, MIPS_R_SP);
383 store_offset -= sizeof(long);
366 } 384 }
367 if (ctx->flags & EBPF_SAVE_S2) { 385 if (ctx->flags & EBPF_SAVE_S2) {
368 emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP); 386 emit_instr_long(ctx, ld, lw,
369 store_offset -= 8; 387 MIPS_R_S2, store_offset, MIPS_R_SP);
388 store_offset -= sizeof(long);
370 } 389 }
371 if (ctx->flags & EBPF_SAVE_S3) { 390 if (ctx->flags & EBPF_SAVE_S3) {
372 emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP); 391 emit_instr_long(ctx, ld, lw,
373 store_offset -= 8; 392 MIPS_R_S3, store_offset, MIPS_R_SP);
393 store_offset -= sizeof(long);
374 } 394 }
375 if (ctx->flags & EBPF_SAVE_S4) { 395 if (ctx->flags & EBPF_SAVE_S4) {
376 emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP); 396 emit_instr_long(ctx, ld, lw,
377 store_offset -= 8; 397 MIPS_R_S4, store_offset, MIPS_R_SP);
398 store_offset -= sizeof(long);
378 } 399 }
379 emit_instr(ctx, jr, dest_reg); 400 emit_instr(ctx, jr, dest_reg);
380 401
381 if (stack_adjust) 402 if (stack_adjust)
382 emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust); 403 emit_instr_long(ctx, daddiu, addiu,
404 MIPS_R_SP, MIPS_R_SP, stack_adjust);
383 else 405 else
384 emit_instr(ctx, nop); 406 emit_instr(ctx, nop);
385 407
@@ -646,6 +668,10 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
646 s64 t64s; 668 s64 t64s;
647 int bpf_op = BPF_OP(insn->code); 669 int bpf_op = BPF_OP(insn->code);
648 670
671 if (IS_ENABLED(CONFIG_32BIT) && ((BPF_CLASS(insn->code) == BPF_ALU64)
672 || (bpf_op == BPF_DW)))
673 return -EINVAL;
674
649 switch (insn->code) { 675 switch (insn->code) {
650 case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ 676 case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
651 case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */ 677 case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
@@ -678,8 +704,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
678 if (insn->imm == 1) /* Mult by 1 is a nop */ 704 if (insn->imm == 1) /* Mult by 1 is a nop */
679 break; 705 break;
680 gen_imm_to_reg(insn, MIPS_R_AT, ctx); 706 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
681 emit_instr(ctx, dmultu, MIPS_R_AT, dst); 707 if (MIPS_ISA_REV >= 6) {
682 emit_instr(ctx, mflo, dst); 708 emit_instr(ctx, dmulu, dst, dst, MIPS_R_AT);
709 } else {
710 emit_instr(ctx, dmultu, MIPS_R_AT, dst);
711 emit_instr(ctx, mflo, dst);
712 }
683 break; 713 break;
684 case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */ 714 case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
685 dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 715 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
@@ -701,8 +731,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
701 if (insn->imm == 1) /* Mult by 1 is a nop */ 731 if (insn->imm == 1) /* Mult by 1 is a nop */
702 break; 732 break;
703 gen_imm_to_reg(insn, MIPS_R_AT, ctx); 733 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
704 emit_instr(ctx, multu, dst, MIPS_R_AT); 734 if (MIPS_ISA_REV >= 6) {
705 emit_instr(ctx, mflo, dst); 735 emit_instr(ctx, mulu, dst, dst, MIPS_R_AT);
736 } else {
737 emit_instr(ctx, multu, dst, MIPS_R_AT);
738 emit_instr(ctx, mflo, dst);
739 }
706 break; 740 break;
707 case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */ 741 case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
708 dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 742 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
@@ -733,6 +767,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
733 break; 767 break;
734 } 768 }
735 gen_imm_to_reg(insn, MIPS_R_AT, ctx); 769 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
770 if (MIPS_ISA_REV >= 6) {
771 if (bpf_op == BPF_DIV)
772 emit_instr(ctx, divu_r6, dst, dst, MIPS_R_AT);
773 else
774 emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
775 break;
776 }
736 emit_instr(ctx, divu, dst, MIPS_R_AT); 777 emit_instr(ctx, divu, dst, MIPS_R_AT);
737 if (bpf_op == BPF_DIV) 778 if (bpf_op == BPF_DIV)
738 emit_instr(ctx, mflo, dst); 779 emit_instr(ctx, mflo, dst);
@@ -755,6 +796,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
755 break; 796 break;
756 } 797 }
757 gen_imm_to_reg(insn, MIPS_R_AT, ctx); 798 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
799 if (MIPS_ISA_REV >= 6) {
800 if (bpf_op == BPF_DIV)
801 emit_instr(ctx, ddivu_r6, dst, dst, MIPS_R_AT);
802 else
803 emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
804 break;
805 }
758 emit_instr(ctx, ddivu, dst, MIPS_R_AT); 806 emit_instr(ctx, ddivu, dst, MIPS_R_AT);
759 if (bpf_op == BPF_DIV) 807 if (bpf_op == BPF_DIV)
760 emit_instr(ctx, mflo, dst); 808 emit_instr(ctx, mflo, dst);
@@ -820,11 +868,23 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
820 emit_instr(ctx, and, dst, dst, src); 868 emit_instr(ctx, and, dst, dst, src);
821 break; 869 break;
822 case BPF_MUL: 870 case BPF_MUL:
823 emit_instr(ctx, dmultu, dst, src); 871 if (MIPS_ISA_REV >= 6) {
824 emit_instr(ctx, mflo, dst); 872 emit_instr(ctx, dmulu, dst, dst, src);
873 } else {
874 emit_instr(ctx, dmultu, dst, src);
875 emit_instr(ctx, mflo, dst);
876 }
825 break; 877 break;
826 case BPF_DIV: 878 case BPF_DIV:
827 case BPF_MOD: 879 case BPF_MOD:
880 if (MIPS_ISA_REV >= 6) {
881 if (bpf_op == BPF_DIV)
882 emit_instr(ctx, ddivu_r6,
883 dst, dst, src);
884 else
885 emit_instr(ctx, modu, dst, dst, src);
886 break;
887 }
828 emit_instr(ctx, ddivu, dst, src); 888 emit_instr(ctx, ddivu, dst, src);
829 if (bpf_op == BPF_DIV) 889 if (bpf_op == BPF_DIV)
830 emit_instr(ctx, mflo, dst); 890 emit_instr(ctx, mflo, dst);
@@ -904,6 +964,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
904 break; 964 break;
905 case BPF_DIV: 965 case BPF_DIV:
906 case BPF_MOD: 966 case BPF_MOD:
967 if (MIPS_ISA_REV >= 6) {
968 if (bpf_op == BPF_DIV)
969 emit_instr(ctx, divu_r6, dst, dst, src);
970 else
971 emit_instr(ctx, modu, dst, dst, src);
972 break;
973 }
907 emit_instr(ctx, divu, dst, src); 974 emit_instr(ctx, divu, dst, src);
908 if (bpf_op == BPF_DIV) 975 if (bpf_op == BPF_DIV)
909 emit_instr(ctx, mflo, dst); 976 emit_instr(ctx, mflo, dst);
@@ -1007,8 +1074,15 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
1007 emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); 1074 emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
1008 emit_instr(ctx, sltu, MIPS_R_AT, dst, src); 1075 emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
1009 /* SP known to be non-zero, movz becomes boolean not */ 1076 /* SP known to be non-zero, movz becomes boolean not */
1010 emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8); 1077 if (MIPS_ISA_REV >= 6) {
1011 emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8); 1078 emit_instr(ctx, seleqz, MIPS_R_T9,
1079 MIPS_R_SP, MIPS_R_T8);
1080 } else {
1081 emit_instr(ctx, movz, MIPS_R_T9,
1082 MIPS_R_SP, MIPS_R_T8);
1083 emit_instr(ctx, movn, MIPS_R_T9,
1084 MIPS_R_ZERO, MIPS_R_T8);
1085 }
1012 emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); 1086 emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
1013 cmp_eq = bpf_op == BPF_JGT; 1087 cmp_eq = bpf_op == BPF_JGT;
1014 dst = MIPS_R_AT; 1088 dst = MIPS_R_AT;
@@ -1235,7 +1309,7 @@ jeq_common:
1235 1309
1236 case BPF_JMP | BPF_CALL: 1310 case BPF_JMP | BPF_CALL:
1237 ctx->flags |= EBPF_SAVE_RA; 1311 ctx->flags |= EBPF_SAVE_RA;
1238 t64s = (s64)insn->imm + (s64)__bpf_call_base; 1312 t64s = (s64)insn->imm + (long)__bpf_call_base;
1239 emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s); 1313 emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
1240 emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); 1314 emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
1241 /* delay slot */ 1315 /* delay slot */
@@ -1367,6 +1441,17 @@ jeq_common:
1367 if (src < 0) 1441 if (src < 0)
1368 return src; 1442 return src;
1369 if (BPF_MODE(insn->code) == BPF_XADD) { 1443 if (BPF_MODE(insn->code) == BPF_XADD) {
1444 /*
1445 * If mem_off does not fit within the 9 bit ll/sc
1446 * instruction immediate field, use a temp reg.
1447 */
1448 if (MIPS_ISA_REV >= 6 &&
1449 (mem_off >= BIT(8) || mem_off < -BIT(8))) {
1450 emit_instr(ctx, daddiu, MIPS_R_T6,
1451 dst, mem_off);
1452 mem_off = 0;
1453 dst = MIPS_R_T6;
1454 }
1370 switch (BPF_SIZE(insn->code)) { 1455 switch (BPF_SIZE(insn->code)) {
1371 case BPF_W: 1456 case BPF_W:
1372 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { 1457 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
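R6 also shrank the ll/sc offset field from 16 signed bits to 9, so an XADD whose computed offset falls outside that window must first fold base + offset into a scratch register, which is what the daddiu into MIPS_R_T6 does before zeroing mem_off:

    /* representable R6 ll/sc offsets: -BIT(8) <= mem_off < BIT(8), i.e. -256..255 */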
@@ -1721,7 +1806,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1721 unsigned int image_size; 1806 unsigned int image_size;
1722 u8 *image_ptr; 1807 u8 *image_ptr;
1723 1808
1724 if (!prog->jit_requested || !cpu_has_mips64r2) 1809 if (!prog->jit_requested || MIPS_ISA_REV < 2)
1725 return prog; 1810 return prog;
1726 1811
1727 tmp = bpf_jit_blind_constants(prog); 1812 tmp = bpf_jit_blind_constants(prog);
diff --git a/arch/mips/pic32/Kconfig b/arch/mips/pic32/Kconfig
index e284e89183cc..7acbb50c1dcd 100644
--- a/arch/mips/pic32/Kconfig
+++ b/arch/mips/pic32/Kconfig
@@ -39,12 +39,12 @@ choice
39 Select the devicetree. 39 Select the devicetree.
40 40
41config DTB_PIC32_NONE 41config DTB_PIC32_NONE
42 bool "None" 42 bool "None"
43 43
44config DTB_PIC32_MZDA_SK 44config DTB_PIC32_MZDA_SK
45 bool "PIC32MZDA Starter Kit" 45 bool "PIC32MZDA Starter Kit"
46 depends on PIC32MZDA 46 depends on PIC32MZDA
47 select BUILTIN_DTB 47 select BUILTIN_DTB
48 48
49endchoice 49endchoice
50 50
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 0ede4deb8181..7221df24cb23 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -46,9 +46,7 @@ endif
46VDSO_LDFLAGS := \ 46VDSO_LDFLAGS := \
47 -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \ 47 -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
48 $(addprefix -Wl$(comma),$(filter -E%,$(KBUILD_CFLAGS))) \ 48 $(addprefix -Wl$(comma),$(filter -E%,$(KBUILD_CFLAGS))) \
49 -nostdlib -shared \ 49 -nostdlib -shared -Wl,--hash-style=sysv -Wl,--build-id
50 $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
51 $(call cc-ldoption, -Wl$(comma)--build-id)
52 50
53GCOV_PROFILE := n 51GCOV_PROFILE := n
54UBSAN_SANITIZE := n 52UBSAN_SANITIZE := n