 kernel/bpf/cpumap.c                        | 15 +++++++++------
 kernel/bpf/devmap.c                        | 14 +++++++++-----
 kernel/bpf/sockmap.c                       |  9 ++++++---
 samples/bpf/xdp_redirect_cpu_kern.c        |  2 +-
 samples/bpf/xdp_redirect_cpu_user.c        |  4 ++--
 tools/lib/bpf/btf.c                        |  2 +-
 tools/lib/bpf/btf.h                        |  2 +-
 tools/testing/selftests/bpf/test_sockmap.c |  2 +-
 8 files changed, 30 insertions(+), 20 deletions(-)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index e0918d180f08..46f5f29605d4 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                             struct xdp_bulk_queue *bq);
+                             struct xdp_bulk_queue *bq, bool in_napi_ctx);
 
 static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
 {
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
 		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
 
 		/* No concurrent bq_enqueue can run at this point */
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, false);
 	}
 	free_percpu(rcpu->bulkq);
 	/* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                             struct xdp_bulk_queue *bq)
+                             struct xdp_bulk_queue *bq, bool in_napi_ctx)
 {
 	unsigned int processed = 0, drops = 0;
 	const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
 		err = __ptr_ring_produce(q, xdpf);
 		if (err) {
 			drops++;
-			xdp_return_frame_rx_napi(xdpf);
+			if (likely(in_napi_ctx))
+				xdp_return_frame_rx_napi(xdpf);
+			else
+				xdp_return_frame(xdpf);
 		}
 		processed++;
 	}
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
 	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, true);
 
 	/* Notice, xdp_buff/page MUST be queued here, long enough for
 	 * driver to code invoking us to finished, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
 
 		/* Flush all frames in bulkq to real queue */
 		bq = this_cpu_ptr(rcpu->bulkq);
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, true);
 
 		/* If already running, costs spin_lock_irqsave + smb_mb */
 		wake_up_process(rcpu->kthread);
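
Note on the cpumap hunks above: bq_flush_to_queue() now takes an in_napi_ctx flag so frames dropped during a flush are freed with the helper that matches the calling context. xdp_return_frame_rx_napi() is only safe while the CPU is still inside the driver's NAPI poll, so the RX-path callers bq_enqueue() and __cpu_map_flush() pass true, while the teardown path __cpu_map_entry_free(), which runs from an RCU callback with no NAPI protection, passes false and falls back to xdp_return_frame(). Below is a minimal userspace model of that dispatch pattern; all names (frame, bulk_queue, flush_queue, the return helpers) are invented for illustration and are not part of the patch.

#include <stdbool.h>
#include <stdio.h>

#define BULK_SIZE 8

struct frame { int id; };

struct bulk_queue {
	struct frame *q[BULK_SIZE];
	unsigned int count;
};

/* Stand-ins for xdp_return_frame_rx_napi() / xdp_return_frame(). */
static void return_frame_fast(struct frame *f) { printf("napi free %d\n", f->id); }
static void return_frame_slow(struct frame *f) { printf("plain free %d\n", f->id); }

/* Models bq_flush_to_queue(): the caller declares its context, and any
 * frame that has to be dropped is freed with the matching helper. */
static void flush_queue(struct bulk_queue *bq, bool in_napi_ctx)
{
	unsigned int i;

	for (i = 0; i < bq->count; i++) {
		if (in_napi_ctx)
			return_frame_fast(bq->q[i]);	/* hot RX path */
		else
			return_frame_slow(bq->q[i]);	/* map teardown */
	}
	bq->count = 0;
}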
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d361fc1e3bf3..750d45edae79 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-                       struct xdp_bulk_queue *bq, u32 flags)
+                       struct xdp_bulk_queue *bq, u32 flags,
+                       bool in_napi_ctx)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ error:
 		struct xdp_frame *xdpf = bq->q[i];
 
 		/* RX path under NAPI protection, can return frames faster */
-		xdp_return_frame_rx_napi(xdpf);
+		if (likely(in_napi_ctx))
+			xdp_return_frame_rx_napi(xdpf);
+		else
+			xdp_return_frame(xdpf);
 		drops++;
 	}
 	goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
 	}
 }
 
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq, 0);
+		bq_xmit_all(obj, bq, 0, true);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 			__clear_bit(dev->bit, bitmap);
 
 			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
 		}
 	}
 }
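
The devmap hunks apply the same pattern as the cpumap ones: bq_xmit_all() grows a bool in_napi_ctx, the RX-path callers __dev_map_flush() and bq_enqueue() pass true, and dev_map_flush_old(), which drains the per-CPU bulk queues when a map entry is being torn down outside NAPI context, passes false so leftover frames are returned with xdp_return_frame() instead of the NAPI-only fast path.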
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 98fb7938beea..c4d75c52b4fc 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1048,12 +1048,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
 	while (msg_data_left(msg)) {
-		struct sk_msg_buff *m;
+		struct sk_msg_buff *m = NULL;
 		bool enospc = false;
 		int copy;
 
 		if (sk->sk_err) {
-			err = sk->sk_err;
+			err = -sk->sk_err;
 			goto out_err;
 		}
 
@@ -1116,8 +1116,11 @@ wait_for_sndbuf:
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
 		err = sk_stream_wait_memory(sk, &timeo);
-		if (err)
+		if (err) {
+			if (m && m != psock->cork)
+				free_start_sg(sk, m);
 			goto out_err;
+		}
 	}
 out_err:
 	if (err < 0)
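
The sockmap hunks fix two error paths in bpf_tcp_sendmsg(): a pending socket error is now propagated as a negative errno (err = -sk->sk_err), and when sk_stream_wait_memory() fails the partially built sk_msg_buff is released with free_start_sg(), but only if it is not psock->cork, since the cork buffer is shared state that outlives this call; initializing m to NULL keeps that test safe on iterations where no buffer has been picked yet. A small compilable userspace sketch of the same ownership idiom, with invented names and assuming a long-lived "cork" buffer owned by the context:

#include <errno.h>
#include <stdlib.h>

struct buf { char data[64]; };

struct ctx {
	struct buf *cork;	/* long-lived buffer owned by the context */
};

/* stand-in for sk_stream_wait_memory(); always fails to keep the sketch short */
static int wait_for_memory(void) { return -EAGAIN; }

static int send_once(struct ctx *c)
{
	struct buf *m = NULL;	/* NULL until a buffer is actually chosen */
	int err;

	/* reuse the cork buffer if present, otherwise allocate a private one */
	m = c->cork ? c->cork : malloc(sizeof(*m));
	if (!m)
		return -ENOMEM;

	err = wait_for_memory();
	if (err) {
		/* free only what this call owns; the cork buffer survives */
		if (m != c->cork)
			free(m);
		return err;
	}

	if (m != c->cork)
		free(m);
	return 0;
}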
diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
index 303e9e7161f3..4938dcbaecbf 100644
--- a/samples/bpf/xdp_redirect_cpu_kern.c
+++ b/samples/bpf/xdp_redirect_cpu_kern.c
@@ -14,7 +14,7 @@
 #include <uapi/linux/bpf.h>
 #include "bpf_helpers.h"
 
-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */
 
 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct bpf_map_def SEC("maps") cpu_map = {
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index f6efaefd485b..4b4d78fffe30 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include <arpa/inet.h>
 #include <linux/if_link.h>
 
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
 
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
 	 * procedure.
 	 */
 	create_cpu_entry(1, 1024, 0, false);
-	create_cpu_entry(1, 128, 0, false);
+	create_cpu_entry(1, 8, 0, false);
 	create_cpu_entry(1, 16000, 0, false);
 }
 
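
In the samples, MAX_CPUS goes from 12 to 64 in both _kern.c and _user.c (the two defines size the cpumap and related per-CPU maps and must stay in sync), and stress_cpumap() now shrinks the entry down to a queue size of 8 so the add/replace/teardown procedure is exercised with a tiny ptr_ring as well as large ones. A hedged sketch of what such a resize amounts to from userspace, assuming the cpumap value at this point is simply the queue size (as the sample uses it) and that libbpf's bpf_map_update_elem() is available; the helper name is invented:

#include <bpf/bpf.h>		/* assumed install path of libbpf's bpf_map_update_elem() */
#include <linux/types.h>

/* Updating an existing key in a BPF_MAP_TYPE_CPUMAP replaces that CPU's
 * entry: the old kthread/queue is torn down and a new one is created with
 * the requested qsize, which is what stress_cpumap() keeps cycling through. */
static int set_cpumap_qsize(int cpu_map_fd, __u32 cpu, __u32 qsize)
{
	return bpf_map_update_elem(cpu_map_fd, &cpu, &qsize, 0);
}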
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 2d270c560df3..c36a3a76986a 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
 /* Copyright (c) 2018 Facebook */
 
 #include <stdlib.h>
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index e2a09a155f84..caac3a404dc5 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
 /* Copyright (c) 2018 Facebook */
 
 #ifndef __BPF_BTF_H
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 9e78df207919..0c7d9e556b47 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
 	while (s->bytes_recvd < total_bytes) {
 		if (txmsg_cork) {
 			timeout.tv_sec = 0;
-			timeout.tv_usec = 1000;
+			timeout.tv_usec = 300000;
 		} else {
 			timeout.tv_sec = 1;
 			timeout.tv_usec = 0;
