diff options
author | David S. Miller <davem@davemloft.net> | 2017-12-13 17:30:04 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-12-13 17:30:04 -0500 |
commit | 8c8f67a46f2bf33556ad12a1971734047b60831a (patch) | |
tree | 8953ffb18b57e8b9806ae14e03771b1df2da6be5 | |
parent | f6e168b4a147e169c1df9a9fc2158b974e3195b4 (diff) | |
parent | 9147efcbe0b7cc96b18eb64b1a3f0d4bba81443c (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:
====================
pull-request: bpf 2017-12-13
The following pull-request contains BPF updates for your *net* tree.
The main changes are:
1) Addition of explicit scheduling points to map alloc/free
in order to avoid having to hold the CPU for too long,
from Eric.
2) Fixing of a corruption in overlapping perf_event_output
calls from different BPF prog types on the same CPU out
of different contexts, from Daniel.
3) Fallout fixes for the recent correction of the broken uapi for
BPF_PROG_TYPE_PERF_EVENT. The um architecture had a missing asm
header that needed to be pulled in from asm-generic, and for the
BPF selftests the asm-generic include did not work, so a similar
asm include scheme was adopted for that problematic header — the
same scheme perf already uses for other header files under
tools, from Daniel.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | arch/um/include/asm/Kbuild | 1 | ||||
-rw-r--r-- | kernel/bpf/hashtab.c | 2 | ||||
-rw-r--r-- | kernel/trace/bpf_trace.c | 19 | ||||
-rw-r--r-- | tools/include/uapi/asm/bpf_perf_event.h | 7 | ||||
-rw-r--r-- | tools/testing/selftests/bpf/Makefile | 13 |
5 files changed, 23 insertions(+), 19 deletions(-)
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 50a32c33d729..73c57f614c9e 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild | |||
@@ -1,4 +1,5 @@ | |||
1 | generic-y += barrier.h | 1 | generic-y += barrier.h |
2 | generic-y += bpf_perf_event.h | ||
2 | generic-y += bug.h | 3 | generic-y += bug.h |
3 | generic-y += clkdev.h | 4 | generic-y += clkdev.h |
4 | generic-y += current.h | 5 | generic-y += current.h |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index e469e05c8e83..3905d4bc5b80 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -114,6 +114,7 @@ static void htab_free_elems(struct bpf_htab *htab) | |||
114 | pptr = htab_elem_get_ptr(get_htab_elem(htab, i), | 114 | pptr = htab_elem_get_ptr(get_htab_elem(htab, i), |
115 | htab->map.key_size); | 115 | htab->map.key_size); |
116 | free_percpu(pptr); | 116 | free_percpu(pptr); |
117 | cond_resched(); | ||
117 | } | 118 | } |
118 | free_elems: | 119 | free_elems: |
119 | bpf_map_area_free(htab->elems); | 120 | bpf_map_area_free(htab->elems); |
@@ -159,6 +160,7 @@ static int prealloc_init(struct bpf_htab *htab) | |||
159 | goto free_elems; | 160 | goto free_elems; |
160 | htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, | 161 | htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, |
161 | pptr); | 162 | pptr); |
163 | cond_resched(); | ||
162 | } | 164 | } |
163 | 165 | ||
164 | skip_percpu_elems: | 166 | skip_percpu_elems: |
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0ce99c379c30..40207c2a4113 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
@@ -343,14 +343,13 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = { | |||
343 | .arg4_type = ARG_CONST_SIZE, | 343 | .arg4_type = ARG_CONST_SIZE, |
344 | }; | 344 | }; |
345 | 345 | ||
346 | static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd); | 346 | static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd); |
347 | 347 | ||
348 | static __always_inline u64 | 348 | static __always_inline u64 |
349 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | 349 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, |
350 | u64 flags, struct perf_raw_record *raw) | 350 | u64 flags, struct perf_sample_data *sd) |
351 | { | 351 | { |
352 | struct bpf_array *array = container_of(map, struct bpf_array, map); | 352 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
353 | struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd); | ||
354 | unsigned int cpu = smp_processor_id(); | 353 | unsigned int cpu = smp_processor_id(); |
355 | u64 index = flags & BPF_F_INDEX_MASK; | 354 | u64 index = flags & BPF_F_INDEX_MASK; |
356 | struct bpf_event_entry *ee; | 355 | struct bpf_event_entry *ee; |
@@ -373,8 +372,6 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | |||
373 | if (unlikely(event->oncpu != cpu)) | 372 | if (unlikely(event->oncpu != cpu)) |
374 | return -EOPNOTSUPP; | 373 | return -EOPNOTSUPP; |
375 | 374 | ||
376 | perf_sample_data_init(sd, 0, 0); | ||
377 | sd->raw = raw; | ||
378 | perf_event_output(event, sd, regs); | 375 | perf_event_output(event, sd, regs); |
379 | return 0; | 376 | return 0; |
380 | } | 377 | } |
@@ -382,6 +379,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | |||
382 | BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, | 379 | BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, |
383 | u64, flags, void *, data, u64, size) | 380 | u64, flags, void *, data, u64, size) |
384 | { | 381 | { |
382 | struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd); | ||
385 | struct perf_raw_record raw = { | 383 | struct perf_raw_record raw = { |
386 | .frag = { | 384 | .frag = { |
387 | .size = size, | 385 | .size = size, |
@@ -392,7 +390,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, | |||
392 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) | 390 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) |
393 | return -EINVAL; | 391 | return -EINVAL; |
394 | 392 | ||
395 | return __bpf_perf_event_output(regs, map, flags, &raw); | 393 | perf_sample_data_init(sd, 0, 0); |
394 | sd->raw = &raw; | ||
395 | |||
396 | return __bpf_perf_event_output(regs, map, flags, sd); | ||
396 | } | 397 | } |
397 | 398 | ||
398 | static const struct bpf_func_proto bpf_perf_event_output_proto = { | 399 | static const struct bpf_func_proto bpf_perf_event_output_proto = { |
@@ -407,10 +408,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = { | |||
407 | }; | 408 | }; |
408 | 409 | ||
409 | static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); | 410 | static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); |
411 | static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd); | ||
410 | 412 | ||
411 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, | 413 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, |
412 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) | 414 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) |
413 | { | 415 | { |
416 | struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd); | ||
414 | struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); | 417 | struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); |
415 | struct perf_raw_frag frag = { | 418 | struct perf_raw_frag frag = { |
416 | .copy = ctx_copy, | 419 | .copy = ctx_copy, |
@@ -428,8 +431,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, | |||
428 | }; | 431 | }; |
429 | 432 | ||
430 | perf_fetch_caller_regs(regs); | 433 | perf_fetch_caller_regs(regs); |
434 | perf_sample_data_init(sd, 0, 0); | ||
435 | sd->raw = &raw; | ||
431 | 436 | ||
432 | return __bpf_perf_event_output(regs, map, flags, &raw); | 437 | return __bpf_perf_event_output(regs, map, flags, sd); |
433 | } | 438 | } |
434 | 439 | ||
435 | BPF_CALL_0(bpf_get_current_task) | 440 | BPF_CALL_0(bpf_get_current_task) |
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000000..13a58531e6fa --- /dev/null +++ b/tools/include/uapi/asm/bpf_perf_event.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #if defined(__aarch64__) | ||
2 | #include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h" | ||
3 | #elif defined(__s390__) | ||
4 | #include "../../arch/s390/include/uapi/asm/bpf_perf_event.h" | ||
5 | #else | ||
6 | #include <uapi/asm-generic/bpf_perf_event.h> | ||
7 | #endif | ||
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 21a2d76b67dc..792af7c3b74f 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile | |||
@@ -1,19 +1,8 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | 2 | ||
3 | ifeq ($(srctree),) | ||
4 | srctree := $(patsubst %/,%,$(dir $(CURDIR))) | ||
5 | srctree := $(patsubst %/,%,$(dir $(srctree))) | ||
6 | srctree := $(patsubst %/,%,$(dir $(srctree))) | ||
7 | srctree := $(patsubst %/,%,$(dir $(srctree))) | ||
8 | endif | ||
9 | include $(srctree)/tools/scripts/Makefile.arch | ||
10 | |||
11 | $(call detected_var,SRCARCH) | ||
12 | |||
13 | LIBDIR := ../../../lib | 3 | LIBDIR := ../../../lib |
14 | BPFDIR := $(LIBDIR)/bpf | 4 | BPFDIR := $(LIBDIR)/bpf |
15 | APIDIR := ../../../include/uapi | 5 | APIDIR := ../../../include/uapi |
16 | ASMDIR:= ../../../arch/$(ARCH)/include/uapi | ||
17 | GENDIR := ../../../../include/generated | 6 | GENDIR := ../../../../include/generated |
18 | GENHDR := $(GENDIR)/autoconf.h | 7 | GENHDR := $(GENDIR)/autoconf.h |
19 | 8 | ||
@@ -21,7 +10,7 @@ ifneq ($(wildcard $(GENHDR)),) | |||
21 | GENFLAGS := -DHAVE_GENHDR | 10 | GENFLAGS := -DHAVE_GENHDR |
22 | endif | 11 | endif |
23 | 12 | ||
24 | CFLAGS += -Wall -O2 -I$(APIDIR) -I$(ASMDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include | 13 | CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include |
25 | LDLIBS += -lcap -lelf | 14 | LDLIBS += -lcap -lelf |
26 | 15 | ||
27 | TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ | 16 | TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ |