aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-04-15 12:00:47 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-04-15 12:00:47 -0400
commit6c373ca89399c5a3f7ef210ad8f63dc3437da345 (patch)
tree74d1ec65087df1da1021b43ac51acc1ee8601809 /kernel
parentbb0fd7ab0986105765d11baa82e619c618a235aa (diff)
parent9f9151412dd7aae0e3f51a89ae4a1f8755fdb4d0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: 1) Add BQL support to via-rhine, from Tino Reichardt. 2) Integrate SWITCHDEV layer support into the DSA layer, so DSA drivers can support hw switch offloading. From Florian Fainelli. 3) Allow 'ip address' commands to initiate multicast group join/leave, from Madhu Challa. 4) Many ipv4 FIB lookup optimizations from Alexander Duyck. 5) Support EBPF in cls_bpf classifier and act_bpf action, from Daniel Borkmann. 6) Remove the ugly compat support in ARP for ugly layers like ax25, rose, etc. And use this to clean up the neigh layer, then use it to implement MPLS support. All from Eric Biederman. 7) Support L3 forwarding offloading in switches, from Scott Feldman. 8) Collapse the LOCAL and MAIN ipv4 FIB tables when possible, to speed up route lookups even further. From Alexander Duyck. 9) Many improvements and bug fixes to the rhashtable implementation, from Herbert Xu and Thomas Graf. In particular, in the case where an rhashtable user bulk adds a large number of items into an empty table, we expand the table much more sanely. 10) Don't make the tcp_metrics hash table per-namespace, from Eric Biederman. 11) Extend EBPF to access SKB fields, from Alexei Starovoitov. 12) Split out new connection request sockets so that they can be established in the main hash table. Much less false sharing since hash lookups go direct to the request sockets instead of having to go first to the listener then to the request socks hashed underneath. From Eric Dumazet. 13) Add async I/O support for crypto AF_ALG sockets, from Tadeusz Struk. 14) Support stable privacy address generation for RFC7217 in IPV6. From Hannes Frederic Sowa. 15) Hash network namespace into IP frag IDs, also from Hannes Frederic Sowa. 16) Convert PTP get/set methods to use 64-bit time, from Richard Cochran. 
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1816 commits) fm10k: Bump driver version to 0.15.2 fm10k: corrected VF multicast update fm10k: mbx_update_max_size does not drop all oversized messages fm10k: reset head instead of calling update_max_size fm10k: renamed mbx_tx_dropped to mbx_tx_oversized fm10k: update xcast mode before synchronizing multicast addresses fm10k: start service timer on probe fm10k: fix function header comment fm10k: comment next_vf_mbx flow fm10k: don't handle mailbox events in iov_event path and always process mailbox fm10k: use separate workqueue for fm10k driver fm10k: Set PF queues to unlimited bandwidth during virtualization fm10k: expose tx_timeout_count as an ethtool stat fm10k: only increment tx_timeout_count in Tx hang path fm10k: remove extraneous "Reset interface" message fm10k: separate PF only stats so that VF does not display them fm10k: use hw->mac.max_queues for stats fm10k: only show actual queues, not the maximum in hardware fm10k: allow creation of VLAN on default vid fm10k: fix unused warnings ...
Diffstat (limited to 'kernel')
-rw-r--r--kernel/bpf/Makefile3
-rw-r--r--kernel/bpf/arraymap.c6
-rw-r--r--kernel/bpf/core.c8
-rw-r--r--kernel/bpf/hashtab.c6
-rw-r--r--kernel/bpf/helpers.c30
-rw-r--r--kernel/bpf/syscall.c11
-rw-r--r--kernel/bpf/test_stub.c78
-rw-r--r--kernel/bpf/verifier.c177
-rw-r--r--kernel/events/core.c2
9 files changed, 205 insertions, 116 deletions
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index a5ae60f0b0a2..e6983be12bd3 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,5 +1,2 @@
1obj-y := core.o 1obj-y := core.o
2obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o hashtab.o arraymap.o helpers.o 2obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o hashtab.o arraymap.o helpers.o
3ifdef CONFIG_TEST_BPF
4obj-$(CONFIG_BPF_SYSCALL) += test_stub.o
5endif
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 9eb4d8a7cd87..8a6616583f38 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -134,7 +134,7 @@ static void array_map_free(struct bpf_map *map)
134 kvfree(array); 134 kvfree(array);
135} 135}
136 136
137static struct bpf_map_ops array_ops = { 137static const struct bpf_map_ops array_ops = {
138 .map_alloc = array_map_alloc, 138 .map_alloc = array_map_alloc,
139 .map_free = array_map_free, 139 .map_free = array_map_free,
140 .map_get_next_key = array_map_get_next_key, 140 .map_get_next_key = array_map_get_next_key,
@@ -143,14 +143,14 @@ static struct bpf_map_ops array_ops = {
143 .map_delete_elem = array_map_delete_elem, 143 .map_delete_elem = array_map_delete_elem,
144}; 144};
145 145
146static struct bpf_map_type_list tl = { 146static struct bpf_map_type_list array_type __read_mostly = {
147 .ops = &array_ops, 147 .ops = &array_ops,
148 .type = BPF_MAP_TYPE_ARRAY, 148 .type = BPF_MAP_TYPE_ARRAY,
149}; 149};
150 150
151static int __init register_array_map(void) 151static int __init register_array_map(void)
152{ 152{
153 bpf_register_map_type(&tl); 153 bpf_register_map_type(&array_type);
154 return 0; 154 return 0;
155} 155}
156late_initcall(register_array_map); 156late_initcall(register_array_map);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index a64e7a207d2b..4139a0f8b558 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -656,6 +656,14 @@ void bpf_prog_free(struct bpf_prog *fp)
656} 656}
657EXPORT_SYMBOL_GPL(bpf_prog_free); 657EXPORT_SYMBOL_GPL(bpf_prog_free);
658 658
659/* Weak definitions of helper functions in case we don't have bpf syscall. */
660const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
661const struct bpf_func_proto bpf_map_update_elem_proto __weak;
662const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
663
664const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
665const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
666
659/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call 667/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
660 * skb_copy_bits(), so provide a weak definition of it for NET-less config. 668 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
661 */ 669 */
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index b3ba43674310..83c209d9b17a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -345,7 +345,7 @@ static void htab_map_free(struct bpf_map *map)
345 kfree(htab); 345 kfree(htab);
346} 346}
347 347
348static struct bpf_map_ops htab_ops = { 348static const struct bpf_map_ops htab_ops = {
349 .map_alloc = htab_map_alloc, 349 .map_alloc = htab_map_alloc,
350 .map_free = htab_map_free, 350 .map_free = htab_map_free,
351 .map_get_next_key = htab_map_get_next_key, 351 .map_get_next_key = htab_map_get_next_key,
@@ -354,14 +354,14 @@ static struct bpf_map_ops htab_ops = {
354 .map_delete_elem = htab_map_delete_elem, 354 .map_delete_elem = htab_map_delete_elem,
355}; 355};
356 356
357static struct bpf_map_type_list tl = { 357static struct bpf_map_type_list htab_type __read_mostly = {
358 .ops = &htab_ops, 358 .ops = &htab_ops,
359 .type = BPF_MAP_TYPE_HASH, 359 .type = BPF_MAP_TYPE_HASH,
360}; 360};
361 361
362static int __init register_htab_map(void) 362static int __init register_htab_map(void)
363{ 363{
364 bpf_register_map_type(&tl); 364 bpf_register_map_type(&htab_type);
365 return 0; 365 return 0;
366} 366}
367late_initcall(register_htab_map); 367late_initcall(register_htab_map);
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 9e3414d85459..bd7f5988ed9c 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -11,6 +11,8 @@
11 */ 11 */
12#include <linux/bpf.h> 12#include <linux/bpf.h>
13#include <linux/rcupdate.h> 13#include <linux/rcupdate.h>
14#include <linux/random.h>
15#include <linux/smp.h>
14 16
15/* If kernel subsystem is allowing eBPF programs to call this function, 17/* If kernel subsystem is allowing eBPF programs to call this function,
16 * inside its own verifier_ops->get_func_proto() callback it should return 18 * inside its own verifier_ops->get_func_proto() callback it should return
@@ -41,7 +43,7 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
41 return (unsigned long) value; 43 return (unsigned long) value;
42} 44}
43 45
44struct bpf_func_proto bpf_map_lookup_elem_proto = { 46const struct bpf_func_proto bpf_map_lookup_elem_proto = {
45 .func = bpf_map_lookup_elem, 47 .func = bpf_map_lookup_elem,
46 .gpl_only = false, 48 .gpl_only = false,
47 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 49 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
@@ -60,7 +62,7 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
60 return map->ops->map_update_elem(map, key, value, r4); 62 return map->ops->map_update_elem(map, key, value, r4);
61} 63}
62 64
63struct bpf_func_proto bpf_map_update_elem_proto = { 65const struct bpf_func_proto bpf_map_update_elem_proto = {
64 .func = bpf_map_update_elem, 66 .func = bpf_map_update_elem,
65 .gpl_only = false, 67 .gpl_only = false,
66 .ret_type = RET_INTEGER, 68 .ret_type = RET_INTEGER,
@@ -80,10 +82,32 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
80 return map->ops->map_delete_elem(map, key); 82 return map->ops->map_delete_elem(map, key);
81} 83}
82 84
83struct bpf_func_proto bpf_map_delete_elem_proto = { 85const struct bpf_func_proto bpf_map_delete_elem_proto = {
84 .func = bpf_map_delete_elem, 86 .func = bpf_map_delete_elem,
85 .gpl_only = false, 87 .gpl_only = false,
86 .ret_type = RET_INTEGER, 88 .ret_type = RET_INTEGER,
87 .arg1_type = ARG_CONST_MAP_PTR, 89 .arg1_type = ARG_CONST_MAP_PTR,
88 .arg2_type = ARG_PTR_TO_MAP_KEY, 90 .arg2_type = ARG_PTR_TO_MAP_KEY,
89}; 91};
92
93static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
94{
95 return prandom_u32();
96}
97
98const struct bpf_func_proto bpf_get_prandom_u32_proto = {
99 .func = bpf_get_prandom_u32,
100 .gpl_only = false,
101 .ret_type = RET_INTEGER,
102};
103
104static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
105{
106 return raw_smp_processor_id();
107}
108
109const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
110 .func = bpf_get_smp_processor_id,
111 .gpl_only = false,
112 .ret_type = RET_INTEGER,
113};
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 504c10b990ef..3bae6c591914 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -355,10 +355,11 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
355 list_for_each_entry(tl, &bpf_prog_types, list_node) { 355 list_for_each_entry(tl, &bpf_prog_types, list_node) {
356 if (tl->type == type) { 356 if (tl->type == type) {
357 prog->aux->ops = tl->ops; 357 prog->aux->ops = tl->ops;
358 prog->aux->prog_type = type; 358 prog->type = type;
359 return 0; 359 return 0;
360 } 360 }
361 } 361 }
362
362 return -EINVAL; 363 return -EINVAL;
363} 364}
364 365
@@ -419,6 +420,7 @@ void bpf_prog_put(struct bpf_prog *prog)
419 bpf_prog_free(prog); 420 bpf_prog_free(prog);
420 } 421 }
421} 422}
423EXPORT_SYMBOL_GPL(bpf_prog_put);
422 424
423static int bpf_prog_release(struct inode *inode, struct file *filp) 425static int bpf_prog_release(struct inode *inode, struct file *filp)
424{ 426{
@@ -466,6 +468,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
466 fdput(f); 468 fdput(f);
467 return prog; 469 return prog;
468} 470}
471EXPORT_SYMBOL_GPL(bpf_prog_get);
469 472
470/* last field in 'union bpf_attr' used by this command */ 473/* last field in 'union bpf_attr' used by this command */
471#define BPF_PROG_LOAD_LAST_FIELD kern_version 474#define BPF_PROG_LOAD_LAST_FIELD kern_version
@@ -513,7 +516,7 @@ static int bpf_prog_load(union bpf_attr *attr)
513 prog->jited = false; 516 prog->jited = false;
514 517
515 atomic_set(&prog->aux->refcnt, 1); 518 atomic_set(&prog->aux->refcnt, 1);
516 prog->aux->is_gpl_compatible = is_gpl; 519 prog->gpl_compatible = is_gpl;
517 520
518 /* find program type: socket_filter vs tracing_filter */ 521 /* find program type: socket_filter vs tracing_filter */
519 err = find_prog_type(type, prog); 522 err = find_prog_type(type, prog);
@@ -521,8 +524,7 @@ static int bpf_prog_load(union bpf_attr *attr)
521 goto free_prog; 524 goto free_prog;
522 525
523 /* run eBPF verifier */ 526 /* run eBPF verifier */
524 err = bpf_check(prog, attr); 527 err = bpf_check(&prog, attr);
525
526 if (err < 0) 528 if (err < 0)
527 goto free_used_maps; 529 goto free_used_maps;
528 530
@@ -533,7 +535,6 @@ static int bpf_prog_load(union bpf_attr *attr)
533 bpf_prog_select_runtime(prog); 535 bpf_prog_select_runtime(prog);
534 536
535 err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC); 537 err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
536
537 if (err < 0) 538 if (err < 0)
538 /* failed to allocate fd */ 539 /* failed to allocate fd */
539 goto free_used_maps; 540 goto free_used_maps;
diff --git a/kernel/bpf/test_stub.c b/kernel/bpf/test_stub.c
deleted file mode 100644
index 0ceae1e6e8b5..000000000000
--- a/kernel/bpf/test_stub.c
+++ /dev/null
@@ -1,78 +0,0 @@
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/bpf.h>
12
13/* test stubs for BPF_MAP_TYPE_UNSPEC and for BPF_PROG_TYPE_UNSPEC
14 * to be used by user space verifier testsuite
15 */
16struct bpf_context {
17 u64 arg1;
18 u64 arg2;
19};
20
21static const struct bpf_func_proto *test_func_proto(enum bpf_func_id func_id)
22{
23 switch (func_id) {
24 case BPF_FUNC_map_lookup_elem:
25 return &bpf_map_lookup_elem_proto;
26 case BPF_FUNC_map_update_elem:
27 return &bpf_map_update_elem_proto;
28 case BPF_FUNC_map_delete_elem:
29 return &bpf_map_delete_elem_proto;
30 default:
31 return NULL;
32 }
33}
34
35static const struct bpf_context_access {
36 int size;
37 enum bpf_access_type type;
38} test_ctx_access[] = {
39 [offsetof(struct bpf_context, arg1)] = {
40 FIELD_SIZEOF(struct bpf_context, arg1),
41 BPF_READ
42 },
43 [offsetof(struct bpf_context, arg2)] = {
44 FIELD_SIZEOF(struct bpf_context, arg2),
45 BPF_READ
46 },
47};
48
49static bool test_is_valid_access(int off, int size, enum bpf_access_type type)
50{
51 const struct bpf_context_access *access;
52
53 if (off < 0 || off >= ARRAY_SIZE(test_ctx_access))
54 return false;
55
56 access = &test_ctx_access[off];
57 if (access->size == size && (access->type & type))
58 return true;
59
60 return false;
61}
62
63static struct bpf_verifier_ops test_ops = {
64 .get_func_proto = test_func_proto,
65 .is_valid_access = test_is_valid_access,
66};
67
68static struct bpf_prog_type_list tl_prog = {
69 .ops = &test_ops,
70 .type = BPF_PROG_TYPE_UNSPEC,
71};
72
73static int __init register_test_ops(void)
74{
75 bpf_register_prog_type(&tl_prog);
76 return 0;
77}
78late_initcall(register_test_ops);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a28e09c7825d..630a7bac1e51 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
755 enum bpf_reg_type expected_type; 755 enum bpf_reg_type expected_type;
756 int err = 0; 756 int err = 0;
757 757
758 if (arg_type == ARG_ANYTHING) 758 if (arg_type == ARG_DONTCARE)
759 return 0; 759 return 0;
760 760
761 if (reg->type == NOT_INIT) { 761 if (reg->type == NOT_INIT) {
@@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
763 return -EACCES; 763 return -EACCES;
764 } 764 }
765 765
766 if (arg_type == ARG_ANYTHING)
767 return 0;
768
766 if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY || 769 if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
767 arg_type == ARG_PTR_TO_MAP_VALUE) { 770 arg_type == ARG_PTR_TO_MAP_VALUE) {
768 expected_type = PTR_TO_STACK; 771 expected_type = PTR_TO_STACK;
@@ -770,6 +773,8 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
770 expected_type = CONST_IMM; 773 expected_type = CONST_IMM;
771 } else if (arg_type == ARG_CONST_MAP_PTR) { 774 } else if (arg_type == ARG_CONST_MAP_PTR) {
772 expected_type = CONST_PTR_TO_MAP; 775 expected_type = CONST_PTR_TO_MAP;
776 } else if (arg_type == ARG_PTR_TO_CTX) {
777 expected_type = PTR_TO_CTX;
773 } else { 778 } else {
774 verbose("unsupported arg_type %d\n", arg_type); 779 verbose("unsupported arg_type %d\n", arg_type);
775 return -EFAULT; 780 return -EFAULT;
@@ -852,7 +857,7 @@ static int check_call(struct verifier_env *env, int func_id)
852 } 857 }
853 858
854 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 859 /* eBPF programs must be GPL compatible to use GPL-ed functions */
855 if (!env->prog->aux->is_gpl_compatible && fn->gpl_only) { 860 if (!env->prog->gpl_compatible && fn->gpl_only) {
856 verbose("cannot call GPL only function from proprietary program\n"); 861 verbose("cannot call GPL only function from proprietary program\n");
857 return -EINVAL; 862 return -EINVAL;
858 } 863 }
@@ -1172,6 +1177,18 @@ static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
1172 return 0; 1177 return 0;
1173} 1178}
1174 1179
1180static bool may_access_skb(enum bpf_prog_type type)
1181{
1182 switch (type) {
1183 case BPF_PROG_TYPE_SOCKET_FILTER:
1184 case BPF_PROG_TYPE_SCHED_CLS:
1185 case BPF_PROG_TYPE_SCHED_ACT:
1186 return true;
1187 default:
1188 return false;
1189 }
1190}
1191
1175/* verify safety of LD_ABS|LD_IND instructions: 1192/* verify safety of LD_ABS|LD_IND instructions:
1176 * - they can only appear in the programs where ctx == skb 1193 * - they can only appear in the programs where ctx == skb
1177 * - since they are wrappers of function calls, they scratch R1-R5 registers, 1194 * - since they are wrappers of function calls, they scratch R1-R5 registers,
@@ -1194,8 +1211,8 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
1194 struct reg_state *reg; 1211 struct reg_state *reg;
1195 int i, err; 1212 int i, err;
1196 1213
1197 if (env->prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) { 1214 if (!may_access_skb(env->prog->type)) {
1198 verbose("BPF_LD_ABS|IND instructions are only allowed in socket filters\n"); 1215 verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
1199 return -EINVAL; 1216 return -EINVAL;
1200 } 1217 }
1201 1218
@@ -1606,11 +1623,10 @@ static int do_check(struct verifier_env *env)
1606 return err; 1623 return err;
1607 1624
1608 } else if (class == BPF_LDX) { 1625 } else if (class == BPF_LDX) {
1609 if (BPF_MODE(insn->code) != BPF_MEM || 1626 enum bpf_reg_type src_reg_type;
1610 insn->imm != 0) { 1627
1611 verbose("BPF_LDX uses reserved fields\n"); 1628 /* check for reserved fields is already done */
1612 return -EINVAL; 1629
1613 }
1614 /* check src operand */ 1630 /* check src operand */
1615 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1631 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1616 if (err) 1632 if (err)
@@ -1629,6 +1645,29 @@ static int do_check(struct verifier_env *env)
1629 if (err) 1645 if (err)
1630 return err; 1646 return err;
1631 1647
1648 src_reg_type = regs[insn->src_reg].type;
1649
1650 if (insn->imm == 0 && BPF_SIZE(insn->code) == BPF_W) {
1651 /* saw a valid insn
1652 * dst_reg = *(u32 *)(src_reg + off)
1653 * use reserved 'imm' field to mark this insn
1654 */
1655 insn->imm = src_reg_type;
1656
1657 } else if (src_reg_type != insn->imm &&
1658 (src_reg_type == PTR_TO_CTX ||
1659 insn->imm == PTR_TO_CTX)) {
1660 /* ABuser program is trying to use the same insn
1661 * dst_reg = *(u32*) (src_reg + off)
1662 * with different pointer types:
1663 * src_reg == ctx in one branch and
1664 * src_reg == stack|map in some other branch.
1665 * Reject it.
1666 */
1667 verbose("same insn cannot be used with different pointers\n");
1668 return -EINVAL;
1669 }
1670
1632 } else if (class == BPF_STX) { 1671 } else if (class == BPF_STX) {
1633 if (BPF_MODE(insn->code) == BPF_XADD) { 1672 if (BPF_MODE(insn->code) == BPF_XADD) {
1634 err = check_xadd(env, insn); 1673 err = check_xadd(env, insn);
@@ -1776,6 +1815,13 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
1776 int i, j; 1815 int i, j;
1777 1816
1778 for (i = 0; i < insn_cnt; i++, insn++) { 1817 for (i = 0; i < insn_cnt; i++, insn++) {
1818 if (BPF_CLASS(insn->code) == BPF_LDX &&
1819 (BPF_MODE(insn->code) != BPF_MEM ||
1820 insn->imm != 0)) {
1821 verbose("BPF_LDX uses reserved fields\n");
1822 return -EINVAL;
1823 }
1824
1779 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 1825 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
1780 struct bpf_map *map; 1826 struct bpf_map *map;
1781 struct fd f; 1827 struct fd f;
@@ -1867,6 +1913,92 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env)
1867 insn->src_reg = 0; 1913 insn->src_reg = 0;
1868} 1914}
1869 1915
1916static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
1917{
1918 struct bpf_insn *insn = prog->insnsi;
1919 int insn_cnt = prog->len;
1920 int i;
1921
1922 for (i = 0; i < insn_cnt; i++, insn++) {
1923 if (BPF_CLASS(insn->code) != BPF_JMP ||
1924 BPF_OP(insn->code) == BPF_CALL ||
1925 BPF_OP(insn->code) == BPF_EXIT)
1926 continue;
1927
1928 /* adjust offset of jmps if necessary */
1929 if (i < pos && i + insn->off + 1 > pos)
1930 insn->off += delta;
1931 else if (i > pos && i + insn->off + 1 < pos)
1932 insn->off -= delta;
1933 }
1934}
1935
1936/* convert load instructions that access fields of 'struct __sk_buff'
1937 * into sequence of instructions that access fields of 'struct sk_buff'
1938 */
1939static int convert_ctx_accesses(struct verifier_env *env)
1940{
1941 struct bpf_insn *insn = env->prog->insnsi;
1942 int insn_cnt = env->prog->len;
1943 struct bpf_insn insn_buf[16];
1944 struct bpf_prog *new_prog;
1945 u32 cnt;
1946 int i;
1947
1948 if (!env->prog->aux->ops->convert_ctx_access)
1949 return 0;
1950
1951 for (i = 0; i < insn_cnt; i++, insn++) {
1952 if (insn->code != (BPF_LDX | BPF_MEM | BPF_W))
1953 continue;
1954
1955 if (insn->imm != PTR_TO_CTX) {
1956 /* clear internal mark */
1957 insn->imm = 0;
1958 continue;
1959 }
1960
1961 cnt = env->prog->aux->ops->
1962 convert_ctx_access(insn->dst_reg, insn->src_reg,
1963 insn->off, insn_buf);
1964 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
1965 verbose("bpf verifier is misconfigured\n");
1966 return -EINVAL;
1967 }
1968
1969 if (cnt == 1) {
1970 memcpy(insn, insn_buf, sizeof(*insn));
1971 continue;
1972 }
1973
1974 /* several new insns need to be inserted. Make room for them */
1975 insn_cnt += cnt - 1;
1976 new_prog = bpf_prog_realloc(env->prog,
1977 bpf_prog_size(insn_cnt),
1978 GFP_USER);
1979 if (!new_prog)
1980 return -ENOMEM;
1981
1982 new_prog->len = insn_cnt;
1983
1984 memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1,
1985 sizeof(*insn) * (insn_cnt - i - cnt));
1986
1987 /* copy substitute insns in place of load instruction */
1988 memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);
1989
1990 /* adjust branches in the whole program */
1991 adjust_branches(new_prog, i, cnt - 1);
1992
1993 /* keep walking new program and skip insns we just inserted */
1994 env->prog = new_prog;
1995 insn = new_prog->insnsi + i + cnt - 1;
1996 i += cnt - 1;
1997 }
1998
1999 return 0;
2000}
2001
1870static void free_states(struct verifier_env *env) 2002static void free_states(struct verifier_env *env)
1871{ 2003{
1872 struct verifier_state_list *sl, *sln; 2004 struct verifier_state_list *sl, *sln;
@@ -1889,13 +2021,13 @@ static void free_states(struct verifier_env *env)
1889 kfree(env->explored_states); 2021 kfree(env->explored_states);
1890} 2022}
1891 2023
1892int bpf_check(struct bpf_prog *prog, union bpf_attr *attr) 2024int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
1893{ 2025{
1894 char __user *log_ubuf = NULL; 2026 char __user *log_ubuf = NULL;
1895 struct verifier_env *env; 2027 struct verifier_env *env;
1896 int ret = -EINVAL; 2028 int ret = -EINVAL;
1897 2029
1898 if (prog->len <= 0 || prog->len > BPF_MAXINSNS) 2030 if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
1899 return -E2BIG; 2031 return -E2BIG;
1900 2032
1901 /* 'struct verifier_env' can be global, but since it's not small, 2033 /* 'struct verifier_env' can be global, but since it's not small,
@@ -1905,7 +2037,7 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
1905 if (!env) 2037 if (!env)
1906 return -ENOMEM; 2038 return -ENOMEM;
1907 2039
1908 env->prog = prog; 2040 env->prog = *prog;
1909 2041
1910 /* grab the mutex to protect few globals used by verifier */ 2042 /* grab the mutex to protect few globals used by verifier */
1911 mutex_lock(&bpf_verifier_lock); 2043 mutex_lock(&bpf_verifier_lock);
@@ -1937,7 +2069,7 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
1937 if (ret < 0) 2069 if (ret < 0)
1938 goto skip_full_check; 2070 goto skip_full_check;
1939 2071
1940 env->explored_states = kcalloc(prog->len, 2072 env->explored_states = kcalloc(env->prog->len,
1941 sizeof(struct verifier_state_list *), 2073 sizeof(struct verifier_state_list *),
1942 GFP_USER); 2074 GFP_USER);
1943 ret = -ENOMEM; 2075 ret = -ENOMEM;
@@ -1954,6 +2086,10 @@ skip_full_check:
1954 while (pop_stack(env, NULL) >= 0); 2086 while (pop_stack(env, NULL) >= 0);
1955 free_states(env); 2087 free_states(env);
1956 2088
2089 if (ret == 0)
2090 /* program is valid, convert *(u32*)(ctx + off) accesses */
2091 ret = convert_ctx_accesses(env);
2092
1957 if (log_level && log_len >= log_size - 1) { 2093 if (log_level && log_len >= log_size - 1) {
1958 BUG_ON(log_len >= log_size); 2094 BUG_ON(log_len >= log_size);
1959 /* verifier log exceeded user supplied buffer */ 2095 /* verifier log exceeded user supplied buffer */
@@ -1969,18 +2105,18 @@ skip_full_check:
1969 2105
1970 if (ret == 0 && env->used_map_cnt) { 2106 if (ret == 0 && env->used_map_cnt) {
1971 /* if program passed verifier, update used_maps in bpf_prog_info */ 2107 /* if program passed verifier, update used_maps in bpf_prog_info */
1972 prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 2108 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
1973 sizeof(env->used_maps[0]), 2109 sizeof(env->used_maps[0]),
1974 GFP_KERNEL); 2110 GFP_KERNEL);
1975 2111
1976 if (!prog->aux->used_maps) { 2112 if (!env->prog->aux->used_maps) {
1977 ret = -ENOMEM; 2113 ret = -ENOMEM;
1978 goto free_log_buf; 2114 goto free_log_buf;
1979 } 2115 }
1980 2116
1981 memcpy(prog->aux->used_maps, env->used_maps, 2117 memcpy(env->prog->aux->used_maps, env->used_maps,
1982 sizeof(env->used_maps[0]) * env->used_map_cnt); 2118 sizeof(env->used_maps[0]) * env->used_map_cnt);
1983 prog->aux->used_map_cnt = env->used_map_cnt; 2119 env->prog->aux->used_map_cnt = env->used_map_cnt;
1984 2120
1985 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 2121 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
1986 * bpf_ld_imm64 instructions 2122 * bpf_ld_imm64 instructions
@@ -1992,11 +2128,12 @@ free_log_buf:
1992 if (log_level) 2128 if (log_level)
1993 vfree(log_buf); 2129 vfree(log_buf);
1994free_env: 2130free_env:
1995 if (!prog->aux->used_maps) 2131 if (!env->prog->aux->used_maps)
1996 /* if we didn't copy map pointers into bpf_prog_info, release 2132 /* if we didn't copy map pointers into bpf_prog_info, release
1997 * them now. Otherwise free_bpf_prog_info() will release them. 2133 * them now. Otherwise free_bpf_prog_info() will release them.
1998 */ 2134 */
1999 release_maps(env); 2135 release_maps(env);
2136 *prog = env->prog;
2000 kfree(env); 2137 kfree(env);
2001 mutex_unlock(&bpf_verifier_lock); 2138 mutex_unlock(&bpf_verifier_lock);
2002 return ret; 2139 return ret;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 06917d537302..81aa3a4ece9f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6729,7 +6729,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
6729 if (IS_ERR(prog)) 6729 if (IS_ERR(prog))
6730 return PTR_ERR(prog); 6730 return PTR_ERR(prog);
6731 6731
6732 if (prog->aux->prog_type != BPF_PROG_TYPE_KPROBE) { 6732 if (prog->type != BPF_PROG_TYPE_KPROBE) {
6733 /* valid fd, but invalid bpf program type */ 6733 /* valid fd, but invalid bpf program type */
6734 bpf_prog_put(prog); 6734 bpf_prog_put(prog);
6735 return -EINVAL; 6735 return -EINVAL;