Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/hash.h  |  7
-rw-r--r--  arch/x86/lib/Makefile        |  2
-rw-r--r--  arch/x86/lib/hash.c          | 92
-rw-r--r--  arch/x86/net/bpf_jit_comp.c  | 34
4 files changed, 19 insertions, 116 deletions
diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h
deleted file mode 100644
index e8c58f88b1d4..000000000000
--- a/arch/x86/include/asm/hash.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_X86_HASH_H
-#define _ASM_X86_HASH_H
-
-struct fast_hash_ops;
-extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
-
-#endif /* _ASM_X86_HASH_H */
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index db92793b7e23..1530afb07c85 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -23,7 +23,7 @@ lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
 
-obj-y += msr.o msr-reg.o msr-reg-export.o hash.o
+obj-y += msr.o msr-reg.o msr-reg-export.o
 
 ifeq ($(CONFIG_X86_32),y)
         obj-y += atomic64_32.o
diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
deleted file mode 100644
index ff4fa51a5b1f..000000000000
--- a/arch/x86/lib/hash.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Some portions derived from code covered by the following notice:
- *
- * Copyright (c) 2010-2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/hash.h>
-#include <linux/init.h>
-
-#include <asm/processor.h>
-#include <asm/cpufeature.h>
-#include <asm/hash.h>
-
-static inline u32 crc32_u32(u32 crc, u32 val)
-{
-#ifdef CONFIG_AS_CRC32
-	asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
-#else
-	asm (".byte 0xf2, 0x0f, 0x38, 0xf1, 0xc1" : "+a" (crc) : "c" (val));
-#endif
-	return crc;
-}
-
-static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
-{
-	const u32 *p32 = (const u32 *) data;
-	u32 i, tmp = 0;
-
-	for (i = 0; i < len / 4; i++)
-		seed = crc32_u32(seed, *p32++);
-
-	switch (len & 3) {
-	case 3:
-		tmp |= *((const u8 *) p32 + 2) << 16;
-		/* fallthrough */
-	case 2:
-		tmp |= *((const u8 *) p32 + 1) << 8;
-		/* fallthrough */
-	case 1:
-		tmp |= *((const u8 *) p32);
-		seed = crc32_u32(seed, tmp);
-		break;
-	}
-
-	return seed;
-}
-
-static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
-{
-	const u32 *p32 = (const u32 *) data;
-	u32 i;
-
-	for (i = 0; i < len; i++)
-		seed = crc32_u32(seed, *p32++);
-
-	return seed;
-}
-
-void __init setup_arch_fast_hash(struct fast_hash_ops *ops)
-{
-	if (cpu_has_xmm4_2) {
-		ops->hash = intel_crc4_2_hash;
-		ops->hash2 = intel_crc4_2_hash2;
-	}
-}
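
For context on what the deleted helper computed: intel_crc4_2_hash() folds the buffer into a CRC32C value, one crc32l step per 32-bit word plus one final step for any 1-3 trailing bytes. Below is a minimal userspace sketch of the same scheme using the SSE4.2 intrinsic _mm_crc32_u32() in place of the kernel's inline assembly; the file name, build line, and function name are illustrative only, not part of this patch.

/* crc32c_hash_sketch.c -- illustrative only; mirrors the logic of the
 * removed intel_crc4_2_hash() with the SSE4.2 intrinsic.
 * Assumed build line: gcc -O2 -msse4.2 crc32c_hash_sketch.c
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <nmmintrin.h>		/* _mm_crc32_u32(), SSE4.2 */

static uint32_t crc32c_hash(const void *data, uint32_t len, uint32_t seed)
{
	const uint32_t *p32 = data;
	uint32_t i, tmp = 0;

	/* one crc32l step per full 32-bit word */
	for (i = 0; i < len / 4; i++)
		seed = _mm_crc32_u32(seed, *p32++);

	/* fold the 1-3 trailing bytes into one final step */
	switch (len & 3) {
	case 3:
		tmp |= *((const uint8_t *)p32 + 2) << 16;
		/* fallthrough */
	case 2:
		tmp |= *((const uint8_t *)p32 + 1) << 8;
		/* fallthrough */
	case 1:
		tmp |= *((const uint8_t *)p32);
		seed = _mm_crc32_u32(seed, tmp);
		break;
	}

	return seed;
}

int main(void)
{
	const char buf[] = "arch_fast_hash";

	printf("hash = 0x%08x\n",
	       (unsigned int)crc32c_hash(buf, (uint32_t)strlen(buf), 0));
	return 0;
}

The ".byte 0xf2, 0x0f, 0x38, 0xf1, 0xc1" fallback in the deleted file encodes the same crc32l instruction by hand (crc32 %ecx,%eax), for assemblers too old for CONFIG_AS_CRC32.
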
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 3f627345d51c..987514396c1e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -24,7 +24,7 @@ extern u8 sk_load_byte_positive_offset[];
 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
 extern u8 sk_load_byte_negative_offset[];
 
-static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
+static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
 	if (len == 1)
 		*ptr = bytes;
@@ -52,12 +52,12 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 #define EMIT4_off32(b1, b2, b3, b4, off) \
 	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
 
-static inline bool is_imm8(int value)
+static bool is_imm8(int value)
 {
 	return value <= 127 && value >= -128;
 }
 
-static inline bool is_simm32(s64 value)
+static bool is_simm32(s64 value)
 {
 	return value == (s64) (s32) value;
 }
@@ -94,7 +94,7 @@ static int bpf_size_to_x86_bytes(int bpf_size)
 #define X86_JGE 0x7D
 #define X86_JG  0x7F
 
-static inline void bpf_flush_icache(void *start, void *end)
+static void bpf_flush_icache(void *start, void *end)
 {
 	mm_segment_t old_fs = get_fs();
 
@@ -133,24 +133,24 @@ static const int reg2hex[] = {
  * which need extra byte of encoding.
  * rax,rcx,...,rbp have simpler encoding
  */
-static inline bool is_ereg(u32 reg)
+static bool is_ereg(u32 reg)
 {
-	if (reg == BPF_REG_5 || reg == AUX_REG ||
-	    (reg >= BPF_REG_7 && reg <= BPF_REG_9))
-		return true;
-	else
-		return false;
+	return (1 << reg) & (BIT(BPF_REG_5) |
+			     BIT(AUX_REG) |
+			     BIT(BPF_REG_7) |
+			     BIT(BPF_REG_8) |
+			     BIT(BPF_REG_9));
 }
 
 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
-static inline u8 add_1mod(u8 byte, u32 reg)
+static u8 add_1mod(u8 byte, u32 reg)
 {
 	if (is_ereg(reg))
 		byte |= 1;
 	return byte;
 }
 
-static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
+static u8 add_2mod(u8 byte, u32 r1, u32 r2)
 {
 	if (is_ereg(r1))
 		byte |= 1;
@@ -160,13 +160,13 @@ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
 }
 
 /* encode 'dst_reg' register into x64 opcode 'byte' */
-static inline u8 add_1reg(u8 byte, u32 dst_reg)
+static u8 add_1reg(u8 byte, u32 dst_reg)
 {
 	return byte + reg2hex[dst_reg];
 }
 
 /* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
-static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
+static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
 {
 	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
 }
@@ -178,7 +178,7 @@ static void jit_fill_hole(void *area, unsigned int size)
 }
 
 struct jit_context {
-	unsigned int cleanup_addr; /* epilogue code offset */
+	int cleanup_addr; /* epilogue code offset */
 	bool seen_ld_abs;
 };
 
@@ -192,6 +192,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	struct bpf_insn *insn = bpf_prog->insnsi;
 	int insn_cnt = bpf_prog->len;
 	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+	bool seen_exit = false;
 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
 	int i;
 	int proglen = 0;
@@ -854,10 +855,11 @@ common_load:
 		goto common_load;
 
 	case BPF_JMP | BPF_EXIT:
-		if (i != insn_cnt - 1) {
+		if (seen_exit) {
 			jmp_offset = ctx->cleanup_addr - addrs[i];
 			goto emit_jmp;
 		}
+		seen_exit = true;
 		/* update cleanup_addr */
 		ctx->cleanup_addr = proglen;
 		/* mov rbx, qword ptr [rbp-X] */
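
The is_ereg() change above swaps a chain of comparisons for a single bit-set membership test: a constant mask holds one bit per register number that needs a REX extension, and the test is one shift and one AND. A standalone sketch of that pattern follows; the register numbers and names used here are illustrative placeholders, not the kernel's actual BPF_REG_* or AUX_REG values.

/* bit-set membership test in the style of the new is_ereg();
 * register numbers below are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))	/* same idea as the kernel's BIT() macro */

/* hypothetical register numbers for the sketch */
enum { REG_5 = 5, REG_7 = 7, REG_8 = 8, REG_9 = 9, REG_AUX = 12 };

static bool needs_rex_ext(uint32_t reg)
{
	/* bit 'reg' set in the mask <=> reg is in the extended set */
	return (1U << reg) & (BIT(REG_5) | BIT(REG_AUX) |
			      BIT(REG_7) | BIT(REG_8) | BIT(REG_9));
}

int main(void)
{
	uint32_t r;

	for (r = 0; r < 16; r++)
		printf("reg %2u -> %s\n", (unsigned int)r,
		       needs_rex_ext(r) ? "extended" : "plain");
	return 0;
}

Since the mask is built entirely from compile-time constants, the compiler folds it into one immediate, so the cost stays a shift and an AND no matter how many registers are in the set.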