Diffstat (limited to 'kernel/bpf/core.c')
 kernel/bpf/core.c | 108 ++++------------------------------------------------
 1 file changed, 14 insertions(+), 94 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ba03ec39efb3..d0d7d9462368 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -31,6 +31,7 @@
 #include <linux/rbtree_latch.h>
 #include <linux/kallsyms.h>
 #include <linux/rcupdate.h>
+#include <linux/perf_event.h>
 
 #include <asm/unaligned.h>
 
@@ -633,23 +634,6 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
 		break;
 
-	case BPF_LD | BPF_ABS | BPF_W:
-	case BPF_LD | BPF_ABS | BPF_H:
-	case BPF_LD | BPF_ABS | BPF_B:
-		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
-		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
-		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
-		break;
-
-	case BPF_LD | BPF_IND | BPF_W:
-	case BPF_LD | BPF_IND | BPF_H:
-	case BPF_LD | BPF_IND | BPF_B:
-		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
-		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
-		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
-		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
-		break;
-
 	case BPF_LD | BPF_IMM | BPF_DW:
 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
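Note: the two deleted cases above were the constant-blinding rules for the cBPF packet-load instructions, unnecessary once those opcodes no longer reach the JIT. The blinding itself is the usual two-step identity: emit imm ^ imm_rnd as the literal, then XOR with imm_rnd at run time, so the original immediate is reconstructed in BPF_REG_AX without the attacker-chosen constant ever appearing verbatim in the JITed image. A minimal user-space sketch of that identity (names illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* The blinding identity: (imm ^ rnd) is what lands in the emitted
 * image; XORing with rnd again at run time recovers imm in a register
 * without the raw constant ever being embedded in executable memory.
 */
int main(void)
{
	uint32_t imm = 0xdeadbeef;           /* attacker-chosen constant */
	uint32_t rnd = (uint32_t)random();   /* per-program blinding key */

	uint32_t blinded = imm ^ rnd;        /* emitted: BPF_MOV AX, blinded */
	uint32_t runtime = blinded ^ rnd;    /* emitted: BPF_XOR AX, rnd     */

	printf("recovered 0x%08x, original 0x%08x\n",
	       (unsigned)runtime, (unsigned)imm);
	return runtime == imm ? 0 : 1;
}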
@@ -890,14 +874,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
 	INSN_3(LDX, MEM, W),			\
 	INSN_3(LDX, MEM, DW),			\
 	/*   Immediate based. */		\
-	INSN_3(LD, IMM, DW),			\
-	/*   Misc (old cBPF carry-over). */	\
-	INSN_3(LD, ABS, B),			\
-	INSN_3(LD, ABS, H),			\
-	INSN_3(LD, ABS, W),			\
-	INSN_3(LD, IND, B),			\
-	INSN_3(LD, IND, H),			\
-	INSN_3(LD, IND, W)
+	INSN_3(LD, IMM, DW)
 
 bool bpf_opcode_in_insntable(u8 code)
 {
@@ -907,6 +884,13 @@ bool bpf_opcode_in_insntable(u8 code)
 		[0 ... 255] = false,
 		/* Now overwrite non-defaults ... */
 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
+		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
+		[BPF_LD | BPF_ABS | BPF_B] = true,
+		[BPF_LD | BPF_ABS | BPF_H] = true,
+		[BPF_LD | BPF_ABS | BPF_W] = true,
+		[BPF_LD | BPF_IND | BPF_B] = true,
+		[BPF_LD | BPF_IND | BPF_H] = true,
+		[BPF_LD | BPF_IND | BPF_W] = true,
 	};
 #undef BPF_INSN_3_TBL
 #undef BPF_INSN_2_TBL
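Note: bpf_opcode_in_insntable() builds its 256-entry table entirely at compile time: a GNU range designator defaults every slot to false, the BPF_INSN_MAP X-macro flips on each opcode the interpreter actually implements, and the six entries added here keep the cBPF carry-over opcodes reported as valid UAPI even though the verifier now rewrites them before they can reach the interpreter. A condensed sketch of that table-building pattern, with two real opcodes standing in for the kernel's full list:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* X-macro: every opcode the toy interpreter implements. */
#define INSN_MAP(FN)                               \
	FN(0x07) /* BPF_ALU64 | BPF_ADD | BPF_K */ \
	FN(0xbf) /* BPF_ALU64 | BPF_MOV | BPF_X */

static bool opcode_in_insntable(uint8_t code)
{
#define INSN_TBL(opcode) [opcode] = true,
	/* GNU extensions: [a ... b] range designators plus designated
	 * initializers; later entries override earlier ones.
	 */
	static const bool public_insntable[256] = {
		[0 ... 255] = false,	/* default: invalid opcode */
		INSN_MAP(INSN_TBL)	/* implemented opcodes     */
		/* valid UAPI, but rewritten before execution: */
		[0x20] = true,		/* BPF_LD | BPF_ABS | BPF_W */
	};
#undef INSN_TBL
	return public_insntable[code];
}

int main(void)
{
	printf("%d %d %d\n", opcode_in_insntable(0x07),
	       opcode_in_insntable(0x20), opcode_in_insntable(0xff));
	return 0;
}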
@@ -937,8 +921,6 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 #undef BPF_INSN_3_LBL
 #undef BPF_INSN_2_LBL
 	u32 tail_call_cnt = 0;
-	void *ptr;
-	int off;
 
 #define CONT	 ({ insn++; goto select_insn; })
 #define CONT_JMP ({ insn++; goto select_insn; })
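Note: the two locals go away because only the LD_ABS/LD_IND handlers (deleted below) used them. The surrounding context is worth a word: ___bpf_prog_run() is a threaded interpreter, i.e. a table of label addresses plus computed goto rather than a switch, which is why handlers end in CONT instead of break. A toy two-opcode version of that dispatch technique (GCC/Clang label-address extension; names illustrative):

#include <stdint.h>
#include <stdio.h>

/* Toy threaded interpreter: dispatch via a table of label addresses
 * and computed goto, the same shape as ___bpf_prog_run()'s jumptable
 * plus CONT macro.
 */
enum { OP_INC, OP_HALT };

static int64_t run(const uint8_t *insn)
{
	static const void *jumptable[] = {
		[OP_INC]  = &&do_inc,
		[OP_HALT] = &&do_halt,
	};
	int64_t acc = 0;

#define CONT ({ insn++; goto select_insn; })
select_insn:
	goto *jumptable[*insn];
do_inc:		/* acc++, then jump straight to the next dispatch */
	acc++;
	CONT;
do_halt:
	return acc;
#undef CONT
}

int main(void)
{
	const uint8_t prog[] = { OP_INC, OP_INC, OP_HALT };
	printf("%lld\n", (long long)run(prog));	/* prints 2 */
	return 0;
}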
@@ -1265,67 +1247,6 @@ out:
 		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
 			     (DST + insn->off));
 		CONT;
-	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
-		off = IMM;
-load_word:
-		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
-		 * appearing in the programs where ctx == skb
-		 * (see may_access_skb() in the verifier). All programs
-		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
-		 * bpf_convert_filter() saves it in BPF_R6, internal BPF
-		 * verifier will check that BPF_R6 == ctx.
-		 *
-		 * BPF_ABS and BPF_IND are wrappers of function calls,
-		 * so they scratch BPF_R1-BPF_R5 registers, preserve
-		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
-		 *
-		 * Implicit input:
-		 *   ctx == skb == BPF_R6 == CTX
-		 *
-		 * Explicit input:
-		 *   SRC == any register
-		 *   IMM == 32-bit immediate
-		 *
-		 * Output:
-		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
-		 */
-
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
-		if (likely(ptr != NULL)) {
-			BPF_R0 = get_unaligned_be32(ptr);
-			CONT;
-		}
-
-		return 0;
-	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
-		off = IMM;
-load_half:
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
-		if (likely(ptr != NULL)) {
-			BPF_R0 = get_unaligned_be16(ptr);
-			CONT;
-		}
-
-		return 0;
-	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
-		off = IMM;
-load_byte:
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
-		if (likely(ptr != NULL)) {
-			BPF_R0 = *(u8 *)ptr;
-			CONT;
-		}
-
-		return 0;
-	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
-		off = IMM + SRC;
-		goto load_word;
-	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
-		off = IMM + SRC;
-		goto load_half;
-	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
-		off = IMM + SRC;
-		goto load_byte;
-
 	default_label:
 		/* If we ever reach this, we have a bug somewhere. Die hard here
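Note: everything deleted above is the interpreter's cBPF packet-load path; with this series the verifier rewrites BPF_LD | BPF_ABS/BPF_IND into native BPF before execution (hence the "rewritten opcodes" entries added to the insntable earlier), so these handlers are dead code. For reference, the semantics the LD_ABS_W label implemented, per its own comment, reduce to a bounds-checked big-endian load. A plain-buffer sketch, with struct sk_buff and the kernel helpers replaced by illustrative stand-ins:

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Semantics of the deleted LD_ABS_W handler on a plain buffer:
 * R0 = ntohl(*(u32 *)(data + off)), and the program terminates
 * (the handler's "return 0") if the load would run past the end.
 * ld_abs_w() and its arguments are illustrative, not kernel APIs.
 */
static int ld_abs_w(const uint8_t *data, size_t len,
		    int32_t off, uint32_t *r0)
{
	uint32_t raw;

	if (off < 0 || (size_t)off + sizeof(raw) > len)
		return -1;                      /* out of bounds: abort  */
	memcpy(&raw, data + off, sizeof(raw));  /* unaligned-safe load   */
	*r0 = ntohl(raw);                       /* network -> host order */
	return 0;
}

int main(void)
{
	const uint8_t pkt[] = { 0x12, 0x34, 0x56, 0x78, 0x9a };
	uint32_t r0;

	if (ld_abs_w(pkt, sizeof(pkt), 1, &r0) == 0)
		printf("R0 = 0x%08x\n", r0);	/* 0x3456789a */
	return 0;
}

The LD_IND variants differ only in computing the offset as IMM + SRC before the same load, and the H/B variants only in load width and byte swap.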
@@ -1722,6 +1643,10 @@ static void bpf_prog_free_deferred(struct work_struct *work)
 	aux = container_of(work, struct bpf_prog_aux, work);
 	if (bpf_prog_is_dev_bound(aux))
 		bpf_prog_offload_destroy(aux->prog);
+#ifdef CONFIG_PERF_EVENTS
+	if (aux->prog->has_callchain_buf)
+		put_callchain_buffers();
+#endif
 	for (i = 0; i < aux->func_cnt; i++)
 		bpf_jit_free(aux->func[i]);
 	if (aux->func_cnt) {
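Note: the new block pairs with an acquisition elsewhere on the load path: a program whose has_callchain_buf flag is set presumably took a reference on the per-CPU perf callchain buffers (this is also why <linux/perf_event.h> is now included at the top), and deferred free is where exactly one reference gets dropped. A toy sketch of that acquire-once/release-once pattern, with get_buffers()/put_buffers() standing in for get_callchain_buffers()/put_callchain_buffers():

#include <stdbool.h>
#include <stdio.h>

/* A global refcounted resource is taken once per program that needs
 * it, a flag records that fact, and teardown drops exactly one ref.
 * All names here are illustrative.
 */
static int refcount;

static int get_buffers(void)  { refcount++; return 0; } /* real one can fail */
static void put_buffers(void) { refcount--; }

struct prog { bool has_callchain_buf; };

static int prog_load(struct prog *p, bool needs_stacks)
{
	if (needs_stacks) {
		if (get_buffers())
			return -1;
		p->has_callchain_buf = true;	/* remember for teardown */
	}
	return 0;
}

static void prog_free(struct prog *p)
{
	if (p->has_callchain_buf)	/* mirror of the #ifdef block above */
		put_buffers();
}

int main(void)
{
	struct prog p = { false };

	prog_load(&p, true);
	prog_free(&p);
	printf("refcount back to %d\n", refcount);	/* 0 */
	return refcount;
}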
@@ -1794,6 +1719,7 @@ bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 {
 	return -ENOTSUPP;
 }
+EXPORT_SYMBOL_GPL(bpf_event_output);
 
 /* Always built-in helper functions. */
 const struct bpf_func_proto bpf_tail_call_proto = {
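Note: bpf_event_output() here is the __weak fallback returning -ENOTSUPP; when the real implementation is built in, its strong definition wins at link time, and the new EXPORT_SYMBOL_GPL makes whichever definition is present callable from modules. A standalone sketch of the weak-default pattern, with a simplified, illustrative signature:

#include <stdint.h>

#define ENOTSUPP 524	/* kernel-internal "operation not supported" */

/* Weak default: if another object file provides a non-weak definition
 * of the same symbol, the linker picks that one and this stub
 * disappears. bpf_event_output_demo() and its signature are
 * illustrative, not the kernel's exact prototype.
 */
uint64_t __attribute__((weak))
bpf_event_output_demo(void *map, uint64_t flags, void *meta,
		      uint64_t meta_size, void *ctx, uint64_t ctx_size)
{
	(void)map; (void)flags; (void)meta;
	(void)meta_size; (void)ctx; (void)ctx_size;
	return (uint64_t)-ENOTSUPP;	/* negative errno carried in a u64 */
}

int main(void)
{
	/* With no strong override linked in, the weak stub runs. */
	return bpf_event_output_demo(0, 0, 0, 0, 0, 0) == (uint64_t)-ENOTSUPP
		? 0 : 1;
}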
@@ -1840,9 +1766,3 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
 #include <linux/bpf_trace.h>
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
-
-/* These are only used within the BPF_SYSCALL code */
-#ifdef CONFIG_BPF_SYSCALL
-EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
-#endif