author    David S. Miller <davem@davemloft.net>  2018-12-05 19:30:30 -0500
committer David S. Miller <davem@davemloft.net>  2018-12-05 19:30:30 -0500
commit    e37d05a538a6656e108f7704ad66e1ccc13d6f68
tree      139001db1e7f6bc6a19c6766ee3285d9a8d8de0b /net
parent    22f6bbb7bcfcef0b373b0502a7ff390275c575dd
parent    a92a72a24d48080f6c49bb514c082fbb1e5bf8fc
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2018-12-05

The following pull request contains BPF updates for your *net* tree.

The main changes are:

1) Fix bpf uapi pointers for 32-bit architectures, from Daniel.

2) Improve verifier ability to handle progs with a lot of branches, from Alexei.

3) Strict BTF checks, from Yonghong.

4) bpf_sk_lookup API cleanup, from Joe.

5) Other miscellaneous fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
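For the bpf_sk_lookup cleanup (item 4), the convention visible in the net/core/filter.c hunk below is that a netns_id that is negative when interpreted as s32 selects the caller's own network namespace, while a non-negative id is resolved through get_net_ns_by_id(). A minimal standalone sketch of that selection logic; select_ns(), lookup_by_id(), and the netns struct are hypothetical stand-ins for the kernel helpers, not the kernel API itself:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the kernel's namespace handles. */
    struct netns { int id; };

    static struct netns caller_ns = { .id = 0 };
    static struct netns other_ns  = { .id = 7 };

    /* Stand-in for get_net_ns_by_id(); only id 7 "exists" here. */
    static struct netns *lookup_by_id(uint32_t id)
    {
        return id == 7 ? &other_ns : NULL;
    }

    /* Mirrors the reworked bpf_sk_lookup() convention: a negative id
     * (viewed as s32, as in the kernel's cast) means "use the caller's
     * namespace"; otherwise the id is resolved explicitly. */
    static struct netns *select_ns(uint64_t netns_id)
    {
        if ((int32_t)netns_id < 0)
            return &caller_ns;
        return lookup_by_id((uint32_t)netns_id);
    }

    int main(void)
    {
        printf("-1 -> ns %d\n", select_ns((uint64_t)-1)->id); /* caller's ns */
        printf(" 7 -> ns %d\n", select_ns(7)->id);            /* looked up  */
        return 0;
    }

This matches the diff below, where the old "netns_id == 0 means current namespace" special case is replaced by the signed-id check, so id 0 becomes a valid explicit namespace id.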
Diffstat (limited to 'net')

 net/bpf/test_run.c | 21
 net/core/filter.c  | 27
 2 files changed, 29 insertions(+), 19 deletions(-)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index c89c22c49015..25001913d03b 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -28,12 +28,13 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
 	return ret;
 }
 
-static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
+			u32 *time)
 {
 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
 	enum bpf_cgroup_storage_type stype;
 	u64 time_start, time_spent = 0;
-	u32 ret = 0, i;
+	u32 i;
 
 	for_each_cgroup_storage_type(stype) {
 		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@ -49,7 +50,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
 	repeat = 1;
 	time_start = ktime_get_ns();
 	for (i = 0; i < repeat; i++) {
-		ret = bpf_test_run_one(prog, ctx, storage);
+		*ret = bpf_test_run_one(prog, ctx, storage);
 		if (need_resched()) {
 			if (signal_pending(current))
 				break;
@@ -65,7 +66,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
65 for_each_cgroup_storage_type(stype) 66 for_each_cgroup_storage_type(stype)
66 bpf_cgroup_storage_free(storage[stype]); 67 bpf_cgroup_storage_free(storage[stype]);
67 68
68 return ret; 69 return 0;
69} 70}
70 71
71static int bpf_test_finish(const union bpf_attr *kattr, 72static int bpf_test_finish(const union bpf_attr *kattr,
@@ -165,7 +166,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 		__skb_push(skb, hh_len);
 	if (is_direct_pkt_access)
 		bpf_compute_data_pointers(skb);
-	retval = bpf_test_run(prog, skb, repeat, &duration);
+	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
+	if (ret) {
+		kfree_skb(skb);
+		kfree(sk);
+		return ret;
+	}
 	if (!is_l2) {
 		if (skb_headroom(skb) < hh_len) {
 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -212,11 +218,14 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
 	xdp.rxq = &rxqueue->xdp_rxq;
 
-	retval = bpf_test_run(prog, &xdp, repeat, &duration);
+	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
+	if (ret)
+		goto out;
 	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
 	    xdp.data_end != xdp.data + size)
 		size = xdp.data_end - xdp.data;
 	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
+out:
 	kfree(data);
 	return ret;
 }
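The test_run.c changes above convert bpf_test_run() from returning the program's verdict to returning an error code, with the verdict moved to an out-parameter, so call sites can distinguish "the run itself failed" from "the program returned this value". A minimal sketch of that calling convention; run_once() and test_run() are hypothetical stand-ins illustrating the shape of the change, not the kernel functions:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for one program invocation. */
    static uint32_t run_once(void)
    {
        return 2; /* e.g. an XDP verdict */
    }

    /* Same shape as the reworked bpf_test_run(): 0 or an error as the
     * function result, the program's return value via *retval. */
    static int test_run(uint32_t repeat, uint32_t *retval)
    {
        uint32_t i;

        if (!repeat)
            repeat = 1;
        for (i = 0; i < repeat; i++) {
            *retval = run_once();
            /* The kernel version checks need_resched()/signal_pending()
             * here, which is what makes a distinct error path useful. */
        }
        return 0;
    }

    int main(void)
    {
        uint32_t retval;
        int err = test_run(3, &retval);

        if (err)
            fprintf(stderr, "run failed: %d\n", err);
        else
            printf("prog returned %u\n", retval);
        return 0;
    }

The design point is that a u32 return value cannot carry both a verdict and a -errno style failure without ambiguity; splitting them lets the skb and xdp callers free their resources and bail out early, as the new "if (ret)" branches show.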
diff --git a/net/core/filter.c b/net/core/filter.c
index 9a1327eb25fa..8d2c629501e2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4890,22 +4890,23 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 	struct net *net;
 
 	family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
-	if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
+	if (unlikely(family == AF_UNSPEC || flags ||
+		     !((s32)netns_id < 0 || netns_id <= S32_MAX)))
 		goto out;
 
 	if (skb->dev)
 		caller_net = dev_net(skb->dev);
 	else
 		caller_net = sock_net(skb->sk);
-	if (netns_id) {
+	if ((s32)netns_id < 0) {
+		net = caller_net;
+		sk = sk_lookup(net, tuple, skb, family, proto);
+	} else {
 		net = get_net_ns_by_id(caller_net, netns_id);
 		if (unlikely(!net))
 			goto out;
 		sk = sk_lookup(net, tuple, skb, family, proto);
 		put_net(net);
-	} else {
-		net = caller_net;
-		sk = sk_lookup(net, tuple, skb, family, proto);
 	}
 
 	if (sk)
@@ -5435,8 +5436,8 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
 		if (size != size_default)
 			return false;
 		break;
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
-		if (size != sizeof(struct bpf_flow_keys *))
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+		if (size != sizeof(__u64))
 			return false;
 		break;
 	default:
@@ -5464,7 +5465,7 @@ static bool sk_filter_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
 	case bpf_ctx_range(struct __sk_buff, data_end):
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 		return false;
 	}
@@ -5489,7 +5490,7 @@ static bool cg_skb_is_valid_access(int off, int size,
 	switch (off) {
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 		return false;
 	case bpf_ctx_range(struct __sk_buff, data):
 	case bpf_ctx_range(struct __sk_buff, data_end):
@@ -5530,7 +5531,7 @@ static bool lwt_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 		return false;
 	}
 
@@ -5756,7 +5757,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data_end):
 		info->reg_type = PTR_TO_PACKET_END;
 		break;
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 		return false;
 	}
@@ -5958,7 +5959,7 @@ static bool sk_skb_is_valid_access(int off, int size,
 	switch (off) {
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 		return false;
 	}
 
@@ -6039,7 +6040,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data_end):
 		info->reg_type = PTR_TO_PACKET_END;
 		break;
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 		info->reg_type = PTR_TO_FLOW_KEYS;
 		break;
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
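The repeated bpf_ctx_range() to bpf_ctx_range_ptr() switch, together with the sizeof(struct bpf_flow_keys *) to sizeof(__u64) change, is the 32-bit uapi fix from the pull request summary: a kernel pointer exposed through the __sk_buff context must occupy a fixed 8 bytes so field offsets agree between 32-bit and 64-bit architectures. A standalone sketch of the underlying layout problem; the toy structs are illustrations, not the kernel's actual __sk_buff layout, and the real fix differs in mechanical detail:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Toy context with a raw pointer: the field's size, and therefore
     * the offset of every later field, depends on sizeof(void *). */
    struct ctx_broken {
        uint32_t len;
        void *flow_keys;   /* 4 bytes on 32-bit, 8 bytes on 64-bit */
        uint32_t family;
    };

    /* Fixed-width variant in the spirit of the fix: the slot is always
     * 8 bytes and 8-byte aligned (aligned() is a GCC/Clang extension),
     * so the layout is identical across ABIs. */
    struct ctx_fixed {
        uint32_t len;
        uint64_t flow_keys __attribute__((aligned(8)));
        uint32_t family;
    };

    int main(void)
    {
        printf("broken: family at offset %zu (ABI-dependent)\n",
               offsetof(struct ctx_broken, family));
        printf("fixed:  family at offset %zu (always 16)\n",
               offsetof(struct ctx_fixed, family));
        return 0;
    }

This is also why the validity check above now compares the access size against sizeof(__u64) rather than against a host pointer size: the verifier-visible width of the flow_keys slot no longer varies with the architecture.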