about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-10-14 16:01:20 -0400
committerDavid S. Miller <davem@davemloft.net>2018-10-14 16:01:20 -0400
commit028c99fa912b4ad2174793da97694a9c4b2cd305 (patch)
tree43d8bcccc596a098f490c6e244fd0350750d7886
parentbab5c80b211035739997ebd361a679fa85b39465 (diff)
parentcee271678d0e3177a25d0fcb2fa5e051d48e4262 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says: ==================== pull-request: bpf 2018-10-14 The following pull-request contains BPF updates for your *net* tree. The main changes are: 1) Fix xsk map update and delete operation to not call synchronize_net() but to piggy back on SOCK_RCU_FREE for sockets instead as we are not allowed to sleep under RCU, from Björn. 2) Do not change RLIMIT_MEMLOCK in reuseport_bpf selftest if the process already has unlimited RLIMIT_MEMLOCK, from Eric. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--kernel/bpf/xskmap.c10
-rw-r--r--net/xdp/xsk.c2
-rw-r--r--tools/testing/selftests/net/reuseport_bpf.c13
3 files changed, 13 insertions, 12 deletions
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 9f8463afda9c..47147c9e184d 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -192,11 +192,8 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
192 sock_hold(sock->sk); 192 sock_hold(sock->sk);
193 193
194 old_xs = xchg(&m->xsk_map[i], xs); 194 old_xs = xchg(&m->xsk_map[i], xs);
195 if (old_xs) { 195 if (old_xs)
196 /* Make sure we've flushed everything. */
197 synchronize_net();
198 sock_put((struct sock *)old_xs); 196 sock_put((struct sock *)old_xs);
199 }
200 197
201 sockfd_put(sock); 198 sockfd_put(sock);
202 return 0; 199 return 0;
@@ -212,11 +209,8 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
212 return -EINVAL; 209 return -EINVAL;
213 210
214 old_xs = xchg(&m->xsk_map[k], NULL); 211 old_xs = xchg(&m->xsk_map[k], NULL);
215 if (old_xs) { 212 if (old_xs)
216 /* Make sure we've flushed everything. */
217 synchronize_net();
218 sock_put((struct sock *)old_xs); 213 sock_put((struct sock *)old_xs);
219 }
220 214
221 return 0; 215 return 0;
222} 216}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 4e937cd7c17d..661504042d30 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -744,6 +744,8 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
744 sk->sk_destruct = xsk_destruct; 744 sk->sk_destruct = xsk_destruct;
745 sk_refcnt_debug_inc(sk); 745 sk_refcnt_debug_inc(sk);
746 746
747 sock_set_flag(sk, SOCK_RCU_FREE);
748
747 xs = xdp_sk(sk); 749 xs = xdp_sk(sk);
748 mutex_init(&xs->mutex); 750 mutex_init(&xs->mutex);
749 spin_lock_init(&xs->tx_completion_lock); 751 spin_lock_init(&xs->tx_completion_lock);
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index cad14cd0ea92..b5277106df1f 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -437,14 +437,19 @@ void enable_fastopen(void)
437 } 437 }
438} 438}
439 439
440static struct rlimit rlim_old, rlim_new; 440static struct rlimit rlim_old;
441 441
442static __attribute__((constructor)) void main_ctor(void) 442static __attribute__((constructor)) void main_ctor(void)
443{ 443{
444 getrlimit(RLIMIT_MEMLOCK, &rlim_old); 444 getrlimit(RLIMIT_MEMLOCK, &rlim_old);
445 rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20); 445
446 rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20); 446 if (rlim_old.rlim_cur != RLIM_INFINITY) {
447 setrlimit(RLIMIT_MEMLOCK, &rlim_new); 447 struct rlimit rlim_new;
448
449 rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
450 rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
451 setrlimit(RLIMIT_MEMLOCK, &rlim_new);
452 }
448} 453}
449 454
450static __attribute__((destructor)) void main_dtor(void) 455static __attribute__((destructor)) void main_dtor(void)