summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
author Roman Gushchin <guro@fb.com> 2018-12-01 13:39:44 -0500
committer Alexei Starovoitov <ast@kernel.org> 2018-12-01 15:33:58 -0500
commit dcb40590e69e306030e944a39d0e4bf54247fb68 (patch)
tree 0029074dcb0e8738360e4713f26158140419d34e /net
parent d74286d2c25ad29dbf9e342955dd8dc31f21653b (diff)
bpf: refactor bpf_test_run() to separate own failures and test program result
After commit f42ee093be29 ("bpf/test_run: support cgroup local storage") the bpf_test_run() function may fail with -ENOMEM, if it's not possible to allocate memory for a cgroup local storage. This error shouldn't be mixed with the return value of the testing program. Let's add an additional argument with a pointer where to store the testing program's result; and make bpf_test_run() return either 0 or -ENOMEM. Fixes: f42ee093be29 ("bpf/test_run: support cgroup local storage") Reported-by: Dan Carpenter <dan.carpenter@oracle.com> Suggested-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Roman Gushchin <guro@fb.com> Cc: Daniel Borkmann <daniel@iogearbox.net> Cc: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'net')
-rw-r--r-- net/bpf/test_run.c 21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index c89c22c49015..25001913d03b 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -28,12 +28,13 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
28 return ret; 28 return ret;
29} 29}
30 30
31static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) 31static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
32 u32 *time)
32{ 33{
33 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; 34 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
34 enum bpf_cgroup_storage_type stype; 35 enum bpf_cgroup_storage_type stype;
35 u64 time_start, time_spent = 0; 36 u64 time_start, time_spent = 0;
36 u32 ret = 0, i; 37 u32 i;
37 38
38 for_each_cgroup_storage_type(stype) { 39 for_each_cgroup_storage_type(stype) {
39 storage[stype] = bpf_cgroup_storage_alloc(prog, stype); 40 storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@ -49,7 +50,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
49 repeat = 1; 50 repeat = 1;
50 time_start = ktime_get_ns(); 51 time_start = ktime_get_ns();
51 for (i = 0; i < repeat; i++) { 52 for (i = 0; i < repeat; i++) {
52 ret = bpf_test_run_one(prog, ctx, storage); 53 *ret = bpf_test_run_one(prog, ctx, storage);
53 if (need_resched()) { 54 if (need_resched()) {
54 if (signal_pending(current)) 55 if (signal_pending(current))
55 break; 56 break;
@@ -65,7 +66,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
65 for_each_cgroup_storage_type(stype) 66 for_each_cgroup_storage_type(stype)
66 bpf_cgroup_storage_free(storage[stype]); 67 bpf_cgroup_storage_free(storage[stype]);
67 68
68 return ret; 69 return 0;
69} 70}
70 71
71static int bpf_test_finish(const union bpf_attr *kattr, 72static int bpf_test_finish(const union bpf_attr *kattr,
@@ -165,7 +166,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
165 __skb_push(skb, hh_len); 166 __skb_push(skb, hh_len);
166 if (is_direct_pkt_access) 167 if (is_direct_pkt_access)
167 bpf_compute_data_pointers(skb); 168 bpf_compute_data_pointers(skb);
168 retval = bpf_test_run(prog, skb, repeat, &duration); 169 ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
170 if (ret) {
171 kfree_skb(skb);
172 kfree(sk);
173 return ret;
174 }
169 if (!is_l2) { 175 if (!is_l2) {
170 if (skb_headroom(skb) < hh_len) { 176 if (skb_headroom(skb) < hh_len) {
171 int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); 177 int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -212,11 +218,14 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
212 rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); 218 rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
213 xdp.rxq = &rxqueue->xdp_rxq; 219 xdp.rxq = &rxqueue->xdp_rxq;
214 220
215 retval = bpf_test_run(prog, &xdp, repeat, &duration); 221 ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
222 if (ret)
223 goto out;
216 if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN || 224 if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
217 xdp.data_end != xdp.data + size) 225 xdp.data_end != xdp.data + size)
218 size = xdp.data_end - xdp.data; 226 size = xdp.data_end - xdp.data;
219 ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration); 227 ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
228out:
220 kfree(data); 229 kfree(data);
221 return ret; 230 return ret;
222} 231}