author		David S. Miller <davem@davemloft.net>	2018-08-10 02:18:29 -0400
committer	David S. Miller <davem@davemloft.net>	2018-08-10 02:18:29 -0400
commit		e91e21894684cfff30e3a1a04e3d99af687dbb30
tree		e88240eedaadaf4292468284284a27345e36bdba
parent		112cbae26d18e75098d95cc234cfa5059de8d479
parent		9c95420117393ed5f76de373e3c6479c21e3e380
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2018-08-10

The following pull request contains BPF updates for your *net* tree.

The main changes are:

1) Fix cpumap and devmap on teardown: they run in RCU context there and
   cannot make the same assumptions as when running under NAPI
   protection, from Jesper.

2) Fix various sockmap bugs in the bpf_tcp_sendmsg() code, e.g. a bug
   where a socket error was not propagated correctly, from Daniel.

3) Fix an incompatible libbpf header license for the BTF code and match
   it with the rest of libbpf, which is LGPL-2.1, before it gets
   officially released, from Martin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
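For orientation, here is a minimal standalone sketch of the pattern behind
fix 1): the flush helpers gain a bool in_napi_ctx argument so that teardown
paths, which run from RCU callbacks rather than under NAPI, fall back to the
generic frame-return helper. The struct and the two return helpers below are
illustrative stubs, not the kernel implementations:

	#include <stdbool.h>
	#include <stdio.h>

	struct xdp_frame { int id; };	/* stub for the kernel's xdp_frame */

	/* Stub: fast-path return, only safe while under NAPI protection. */
	static void xdp_return_frame_rx_napi(struct xdp_frame *f)
	{
		printf("frame %d: NAPI-protected fast path\n", f->id);
	}

	/* Stub: generic return, safe from any context, e.g. RCU callbacks. */
	static void xdp_return_frame(struct xdp_frame *f)
	{
		printf("frame %d: generic path\n", f->id);
	}

	/* Drop one frame, picking the return helper by calling context. */
	static void drop_frame(struct xdp_frame *f, bool in_napi_ctx)
	{
		if (in_napi_ctx)
			xdp_return_frame_rx_napi(f);
		else
			xdp_return_frame(f);
	}

	int main(void)
	{
		struct xdp_frame a = { .id = 1 }, b = { .id = 2 };

		drop_frame(&a, true);	/* e.g. bq_enqueue() under NAPI */
		drop_frame(&b, false);	/* e.g. __cpu_map_entry_free() via RCU */
		return 0;
	}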
-rw-r--r--	kernel/bpf/cpumap.c	15
-rw-r--r--	kernel/bpf/devmap.c	14
-rw-r--r--	kernel/bpf/sockmap.c	9
-rw-r--r--	samples/bpf/xdp_redirect_cpu_kern.c	2
-rw-r--r--	samples/bpf/xdp_redirect_cpu_user.c	4
-rw-r--r--	tools/lib/bpf/btf.c	2
-rw-r--r--	tools/lib/bpf/btf.h	2
-rw-r--r--	tools/testing/selftests/bpf/test_sockmap.c	2
8 files changed, 30 insertions, 20 deletions
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index e0918d180f08..46f5f29605d4 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-			     struct xdp_bulk_queue *bq);
+			     struct xdp_bulk_queue *bq, bool in_napi_ctx);
 
 static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
 {
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
 		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
 
 		/* No concurrent bq_enqueue can run at this point */
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, false);
 	}
 	free_percpu(rcpu->bulkq);
 	/* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-			     struct xdp_bulk_queue *bq)
+			     struct xdp_bulk_queue *bq, bool in_napi_ctx)
 {
 	unsigned int processed = 0, drops = 0;
 	const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
 		err = __ptr_ring_produce(q, xdpf);
 		if (err) {
 			drops++;
-			xdp_return_frame_rx_napi(xdpf);
+			if (likely(in_napi_ctx))
+				xdp_return_frame_rx_napi(xdpf);
+			else
+				xdp_return_frame(xdpf);
 		}
 		processed++;
 	}
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
 	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, true);
 
 	/* Notice, xdp_buff/page MUST be queued here, long enough for
 	 * driver to code invoking us to finished, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
 
 		/* Flush all frames in bulkq to real queue */
 		bq = this_cpu_ptr(rcpu->bulkq);
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, true);
 
 		/* If already running, costs spin_lock_irqsave + smb_mb */
 		wake_up_process(rcpu->kthread);
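The new branch above marks the NAPI case with likely(), since teardown is the
rare path. As a side note, likely()/unlikely() are conventionally built on
__builtin_expect; a small standalone sketch of that convention (GCC/Clang,
not the kernel's own headers):

	#include <stdbool.h>
	#include <stdio.h>

	#define likely(x)   __builtin_expect(!!(x), 1)
	#define unlikely(x) __builtin_expect(!!(x), 0)

	/* The compiler lays out the likely() branch as the fall-through. */
	static const char *pick_return_path(bool in_napi_ctx)
	{
		if (likely(in_napi_ctx))
			return "napi fast path";
		return "generic path";
	}

	int main(void)
	{
		printf("%s\n", pick_return_path(true));
		printf("%s\n", pick_return_path(false));
		return 0;
	}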
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d361fc1e3bf3..750d45edae79 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq, u32 flags)
+		       struct xdp_bulk_queue *bq, u32 flags,
+		       bool in_napi_ctx)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ error:
 		struct xdp_frame *xdpf = bq->q[i];
 
 		/* RX path under NAPI protection, can return frames faster */
-		xdp_return_frame_rx_napi(xdpf);
+		if (likely(in_napi_ctx))
+			xdp_return_frame_rx_napi(xdpf);
+		else
+			xdp_return_frame(xdpf);
 		drops++;
 	}
 	goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
 	}
 }
 
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq, 0);
+		bq_xmit_all(obj, bq, 0, true);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 			__clear_bit(dev->bit, bitmap);
 
 			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
 		}
 	}
 }
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 98fb7938beea..c4d75c52b4fc 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1048,12 +1048,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
 	while (msg_data_left(msg)) {
-		struct sk_msg_buff *m;
+		struct sk_msg_buff *m = NULL;
 		bool enospc = false;
 		int copy;
 
 		if (sk->sk_err) {
-			err = sk->sk_err;
+			err = -sk->sk_err;
 			goto out_err;
 		}
 
@@ -1116,8 +1116,11 @@ wait_for_sndbuf:
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
 		err = sk_stream_wait_memory(sk, &timeo);
-		if (err)
+		if (err) {
+			if (m && m != psock->cork)
+				free_start_sg(sk, m);
 			goto out_err;
+		}
 	}
out_err:
 	if (err < 0)
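The sign flip above matters because sk->sk_err stores a positive errno value
while kernel callers expect failures as negative errno. A standalone sketch of
that convention, using simplified stand-ins rather than the kernel's types:

	#include <errno.h>
	#include <stdio.h>

	struct fake_sock { int sk_err; };	/* positive errno, as in the kernel */

	/* Return bytes copied on success, -errno on a pending socket error. */
	static int sendmsg_result(struct fake_sock *sk, int bytes_copied)
	{
		if (sk->sk_err)
			return -sk->sk_err;	/* the fix: negate before returning */
		return bytes_copied;
	}

	int main(void)
	{
		struct fake_sock ok = { 0 }, failed = { EPIPE };

		printf("ok:     %d\n", sendmsg_result(&ok, 128));	/* 128 */
		printf("failed: %d\n", sendmsg_result(&failed, 0));	/* -EPIPE */
		return 0;
	}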
diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
index 303e9e7161f3..4938dcbaecbf 100644
--- a/samples/bpf/xdp_redirect_cpu_kern.c
+++ b/samples/bpf/xdp_redirect_cpu_kern.c
@@ -14,7 +14,7 @@
 #include <uapi/linux/bpf.h>
 #include "bpf_helpers.h"
 
-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */
 
 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct bpf_map_def SEC("maps") cpu_map = {
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index f6efaefd485b..4b4d78fffe30 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include <arpa/inet.h>
 #include <linux/if_link.h>
 
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
 
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
 	 * procedure.
 	 */
 	create_cpu_entry(1, 1024, 0, false);
-	create_cpu_entry(1, 128, 0, false);
+	create_cpu_entry(1, 8, 0, false);
 	create_cpu_entry(1, 16000, 0, false);
 }
 
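Both samples carry the "WARNING - sync with ..." comment because MAX_CPUS is
defined twice. One conventional way to avoid such drift, sketched here under
the assumption of a new shared header (hypothetical, not part of this patch),
is to define the constant once and include it from both _kern.c and _user.c:

	/* xdp_redirect_cpu_common.h (hypothetical shared header) */
	#ifndef XDP_REDIRECT_CPU_COMMON_H
	#define XDP_REDIRECT_CPU_COMMON_H

	/* Single definition shared by the BPF and userspace sides. */
	#define MAX_CPUS 64

	#endif /* XDP_REDIRECT_CPU_COMMON_H */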
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 2d270c560df3..c36a3a76986a 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
 /* Copyright (c) 2018 Facebook */
 
 #include <stdlib.h>
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index e2a09a155f84..caac3a404dc5 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
 /* Copyright (c) 2018 Facebook */
 
 #ifndef __BPF_BTF_H
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 9e78df207919..0c7d9e556b47 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
 	while (s->bytes_recvd < total_bytes) {
 		if (txmsg_cork) {
 			timeout.tv_sec = 0;
-			timeout.tv_usec = 1000;
+			timeout.tv_usec = 300000;
 		} else {
 			timeout.tv_sec = 1;
 			timeout.tv_usec = 0;
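The selftest tweak above raises the corked-test poll window from 1 ms
(tv_usec = 1000) to 300 ms (tv_usec = 300000), giving corked sends time to
accumulate before the receive loop gives up. A standalone sketch of how such
a struct timeval drives select(), with stdin standing in for the test socket:

	#include <stdio.h>
	#include <sys/select.h>

	int main(void)
	{
		struct timeval timeout = { .tv_sec = 0, .tv_usec = 300000 }; /* 300 ms */
		fd_set rfds;
		int ret;

		FD_ZERO(&rfds);
		FD_SET(0, &rfds);	/* stdin as a stand-in for the socket fd */

		ret = select(1, &rfds, NULL, NULL, &timeout);
		if (ret == 0)
			printf("timed out after 300 ms with no data\n");
		else if (ret > 0)
			printf("fd became readable\n");
		return 0;
	}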