 kernel/bpf/btf.c                         |    2
 kernel/bpf/cgroup.c                      |    1
 kernel/bpf/map_in_map.c                  |   17
 kernel/bpf/stackmap.c                    |    9
 net/core/filter.c                        |   32
 net/core/lwt_bpf.c                       |    1
 net/xdp/xdp_umem.c                       |   16
 samples/bpf/Makefile                     |    1
 samples/bpf/asm_goto_workaround.h        |   16
 tools/bpf/bpftool/Makefile               |    9
 tools/bpf/bpftool/json_writer.c          |    7
 tools/bpf/bpftool/json_writer.h          |    5
 tools/include/uapi/linux/pkt_sched.h     | 1163
 tools/lib/bpf/bpf.c                      |   19
 tools/testing/selftests/bpf/Makefile     |    1
 tools/testing/selftests/bpf/test_progs.c |   30
 16 files changed, 1293 insertions(+), 36 deletions(-)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index a2f53642592b..befe570be5ba 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -467,7 +467,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
 	return kind_ops[BTF_INFO_KIND(t->info)];
 }
 
-bool btf_name_offset_valid(const struct btf *btf, u32 offset)
+static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
 {
 	return BTF_STR_OFFSET_VALID(offset) &&
 		offset < btf->hdr.str_len;
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 9425c2fb872f..ab612fe9862f 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -718,6 +718,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_trace_printk:
 		if (capable(CAP_SYS_ADMIN))
 			return bpf_get_trace_printk_proto();
+		/* fall through */
 	default:
 		return NULL;
 	}
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 99d243e1ad6e..52378d3e34b3 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -12,6 +12,7 @@
 struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 {
 	struct bpf_map *inner_map, *inner_map_meta;
+	u32 inner_map_meta_size;
 	struct fd f;
 
 	f = fdget(inner_map_ufd);
@@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 		return ERR_PTR(-EINVAL);
 	}
 
-	inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
+	inner_map_meta_size = sizeof(*inner_map_meta);
+	/* In some cases verifier needs to access beyond just base map. */
+	if (inner_map->ops == &array_map_ops)
+		inner_map_meta_size = sizeof(struct bpf_array);
+
+	inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
 	if (!inner_map_meta) {
 		fdput(f);
 		return ERR_PTR(-ENOMEM);
@@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 	inner_map_meta->key_size = inner_map->key_size;
 	inner_map_meta->value_size = inner_map->value_size;
 	inner_map_meta->map_flags = inner_map->map_flags;
-	inner_map_meta->ops = inner_map->ops;
 	inner_map_meta->max_entries = inner_map->max_entries;
 
+	/* Misc members not needed in bpf_map_meta_equal() check. */
+	inner_map_meta->ops = inner_map->ops;
+	if (inner_map->ops == &array_map_ops) {
+		inner_map_meta->unpriv_array = inner_map->unpriv_array;
+		container_of(inner_map_meta, struct bpf_array, map)->index_mask =
+		     container_of(inner_map, struct bpf_array, map)->index_mask;
+	}
+
 	fdput(f);
 	return inner_map_meta;
 }
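The masking mirrored above only matters when the inner map of a map-in-map is an array. As a rough illustration of the setup this protects (a sketch, not part of the patch; it assumes libbpf's bpf_create_map() and bpf_create_map_in_map() helpers from tools/lib/bpf):

#include <unistd.h>
#include <bpf/bpf.h>

/* Create an array-of-maps whose inner template is a plain array; the
 * kernel copies unpriv_array/index_mask from this template into the
 * verifier's inner map meta, so lookups into any inner map inserted
 * later get the same Spectre index masking.
 */
int create_outer_array_of_maps(void)
{
	int inner_fd, outer_fd;

	inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				  sizeof(long long), 256, 0);
	if (inner_fd < 0)
		return -1;

	outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
					 "outer_arr", sizeof(int),
					 inner_fd, 16, 0);
	close(inner_fd);	/* the template fd is only needed at create time */
	return outer_fd;
}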
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index d9e2483669d0..d43b14535827 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
 
 		if (nhdr->n_type == BPF_BUILD_ID &&
 		    nhdr->n_namesz == sizeof("GNU") &&
-		    nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
+		    nhdr->n_descsz > 0 &&
+		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
 			memcpy(build_id,
 			       note_start + note_offs +
 			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
-			       BPF_BUILD_ID_SIZE);
+			       nhdr->n_descsz);
+			memset(build_id + nhdr->n_descsz, 0,
+			       BPF_BUILD_ID_SIZE - nhdr->n_descsz);
 			return 0;
 		}
 		new_offs = note_offs + sizeof(Elf32_Nhdr) +
@@ -311,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 		for (i = 0; i < trace_nr; i++) {
 			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
 			id_offs[i].ip = ips[i];
+			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
 		}
 		return;
 	}
@@ -321,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 			/* per entry fall back to ips */
 			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
 			id_offs[i].ip = ips[i];
+			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
 			continue;
 		}
 		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
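With the zero-padding above, user space reading BPF_F_STACK_BUILD_ID stack maps never sees stale bytes in build_id[], whether the ELF note is shorter than BPF_BUILD_ID_SIZE or the entry falls back to a raw IP. A hypothetical consumer sketch (not from the patch; the stack depth and output format are assumptions):

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

#define MAX_DEPTH 127	/* assumed: must match the stack map's value size */

static void dump_stack(int stackmap_fd, __u32 stack_id)
{
	struct bpf_stack_build_id frames[MAX_DEPTH] = {};
	int i, j;

	if (bpf_map_lookup_elem(stackmap_fd, &stack_id, frames))
		return;

	for (i = 0; i < MAX_DEPTH; i++) {
		if (frames[i].status == BPF_STACK_BUILD_ID_VALID) {
			printf("#%d build_id ", i);
			for (j = 0; j < BPF_BUILD_ID_SIZE; j++)
				printf("%02x", frames[i].build_id[j]);
			printf(" +0x%llx\n",
			       (unsigned long long)frames[i].offset);
		} else if (frames[i].status == BPF_STACK_BUILD_ID_IP) {
			/* fallback entries now carry an all-zero build_id */
			printf("#%d ip 0x%llx\n", i,
			       (unsigned long long)frames[i].ip);
		}
	}
}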
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b3b436ef545..7559d6835ecb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2020,18 +2020,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
 				 u32 flags)
 {
-	/* skb->mac_len is not set on normal egress */
-	unsigned int mlen = skb->network_header - skb->mac_header;
+	unsigned int mlen = skb_network_offset(skb);
 
-	__skb_pull(skb, mlen);
+	if (mlen) {
+		__skb_pull(skb, mlen);
 
-	/* At ingress, the mac header has already been pulled once.
-	 * At egress, skb_pospull_rcsum has to be done in case that
-	 * the skb is originated from ingress (i.e. a forwarded skb)
-	 * to ensure that rcsum starts at net header.
-	 */
-	if (!skb_at_tc_ingress(skb))
-		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+		/* At ingress, the mac header has already been pulled once.
+		 * At egress, skb_pospull_rcsum has to be done in case that
+		 * the skb is originated from ingress (i.e. a forwarded skb)
+		 * to ensure that rcsum starts at net header.
+		 */
+		if (!skb_at_tc_ingress(skb))
+			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+	}
 	skb_pop_mac_header(skb);
 	skb_reset_mac_len(skb);
 	return flags & BPF_F_INGRESS ?
@@ -4119,6 +4120,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
 			break;
 		case SO_MAX_PACING_RATE: /* 32bit version */
+			if (val != ~0U)
+				cmpxchg(&sk->sk_pacing_status,
+					SK_PACING_NONE,
+					SK_PACING_NEEDED);
 			sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
 			sk->sk_pacing_rate = min(sk->sk_pacing_rate,
 						 sk->sk_max_pacing_rate);
@@ -4132,7 +4137,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 			sk->sk_rcvlowat = val ? : 1;
 			break;
 		case SO_MARK:
-			sk->sk_mark = val;
+			if (sk->sk_mark != val) {
+				sk->sk_mark = val;
+				sk_dst_reset(sk);
+			}
 			break;
 		default:
 			ret = -EINVAL;
@@ -5309,7 +5317,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_trace_printk:
 		if (capable(CAP_SYS_ADMIN))
 			return bpf_get_trace_printk_proto();
-		/* else: fall through */
+		/* else, fall through */
 	default:
 		return NULL;
 	}
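For the SO_MAX_PACING_RATE hunk: flipping sk_pacing_status to SK_PACING_NEEDED means a cap set from BPF now actually takes effect on the TCP-internal pacing path as well. A minimal sock_ops-style sketch of a program exercising this code path (illustrative only; the section name, bpf_helpers.h include and header set follow selftests conventions and may need adjusting for your build):

#include <linux/bpf.h>
#include <sys/socket.h>
#include "bpf_helpers.h"

SEC("sockops")
int cap_pacing(struct bpf_sock_ops *skops)
{
	int rate = 1000000;	/* bytes/sec, 32-bit SO_MAX_PACING_RATE */

	/* Apply the cap once the connection is established. */
	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	    skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
		bpf_setsockopt(skops, SOL_SOCKET, SO_MAX_PACING_RATE,
			       &rate, sizeof(rate));
	return 1;
}

char _license[] SEC("license") = "GPL";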
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 3e85437f7106..a648568c5e8f 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
 			     lwt->name ? : "<unknown>");
 		ret = BPF_OK;
 	} else {
+		skb_reset_mac_header(skb);
 		ret = skb_do_redirect(skb);
 		if (ret == 0)
 			ret = BPF_REDIRECT;
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index a264cf2accd0..d4de871e7d4d 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
  * not know if the device has more tx queues than rx, or the opposite.
  * This might also change during run time.
  */
-static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
-				u16 queue_id)
+static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
+			       u16 queue_id)
 {
+	if (queue_id >= max_t(unsigned int,
+			      dev->real_num_rx_queues,
+			      dev->real_num_tx_queues))
+		return -EINVAL;
+
 	if (queue_id < dev->real_num_rx_queues)
 		dev->_rx[queue_id].umem = umem;
 	if (queue_id < dev->real_num_tx_queues)
 		dev->_tx[queue_id].umem = umem;
+
+	return 0;
 }
 
 struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
@@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 		goto out_rtnl_unlock;
 	}
 
-	xdp_reg_umem_at_qid(dev, umem, queue_id);
+	err = xdp_reg_umem_at_qid(dev, umem, queue_id);
+	if (err)
+		goto out_rtnl_unlock;
+
 	umem->dev = dev;
 	umem->queue_id = queue_id;
 	if (force_copy)
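With the queue-id bounds check above, registering a UMEM against a queue the device does not actually expose is now rejected instead of silently registering nothing; user space sees the failure at bind() time. A hypothetical user-space sketch (not part of the patch):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

#ifndef AF_XDP
#define AF_XDP 44	/* in case libc headers predate AF_XDP */
#endif

static int bind_xsk(int xsk_fd, int ifindex, unsigned int queue_id)
{
	struct sockaddr_xdp sxdp = {};
	int err;

	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = ifindex;
	sxdp.sxdp_queue_id = queue_id;	/* e.g. 64 on a 4-queue NIC -> EINVAL */

	if (bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp))) {
		err = -errno;
		fprintf(stderr, "AF_XDP bind failed: %s\n", strerror(errno));
		return err;
	}
	return 0;
}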
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 66ae15f27c70..db1a91dfa702 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -279,6 +279,7 @@ $(obj)/%.o: $(src)/%.c
 		-Wno-gnu-variable-sized-type-not-at-end \
 		-Wno-address-of-packed-member -Wno-tautological-compare \
 		-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+		-I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
 		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
 ifeq ($(DWARF2BTF),y)
 	$(BTF_PAHOLE) -J $@
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
new file mode 100644
index 000000000000..5cd7c1d1a5d5
--- /dev/null
+++ b/samples/bpf/asm_goto_workaround.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Facebook */
+#ifndef __ASM_GOTO_WORKAROUND_H
+#define __ASM_GOTO_WORKAROUND_H
+
+/* this will bring in asm_volatile_goto macro definition
+ * if enabled by compiler and config options.
+ */
+#include <linux/types.h>
+
+#ifdef asm_volatile_goto
+#undef asm_volatile_goto
+#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
+#endif
+
+#endif
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 492f0f24e2d3..4ad1f0894d53 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -93,9 +93,16 @@ BFD_SRCS = jit_disasm.c
 SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c))
 
 ifeq ($(feature-libbfd),1)
+  LIBS += -lbfd -ldl -lopcodes
+else ifeq ($(feature-libbfd-liberty),1)
+  LIBS += -lbfd -ldl -lopcodes -liberty
+else ifeq ($(feature-libbfd-liberty-z),1)
+  LIBS += -lbfd -ldl -lopcodes -liberty -lz
+endif
+
+ifneq ($(filter -lbfd,$(LIBS)),)
 CFLAGS += -DHAVE_LIBBFD_SUPPORT
 SRCS += $(BFD_SRCS)
-LIBS += -lbfd -lopcodes
 endif
 
 OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index bff7ee026680..6046dcab51cc 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -1,15 +1,10 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
 /*
  * Simple streaming JSON writer
  *
  * This takes care of the annoying bits of JSON syntax like the commas
  * after elements
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
  * Authors: Stephen Hemminger <stephen@networkplumber.org>
  */
 
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
index c1ab51aed99c..cb9a1993681c 100644
--- a/tools/bpf/bpftool/json_writer.h
+++ b/tools/bpf/bpftool/json_writer.h
@@ -5,11 +5,6 @@
  * This takes care of the annoying bits of JSON syntax like the commas
  * after elements
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
  * Authors: Stephen Hemminger <stephen@networkplumber.org>
  */
 
diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h
new file mode 100644
index 000000000000..0d18b1d1fbbc
--- /dev/null
+++ b/tools/include/uapi/linux/pkt_sched.h
@@ -0,0 +1,1163 @@
| 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | ||
| 2 | #ifndef __LINUX_PKT_SCHED_H | ||
| 3 | #define __LINUX_PKT_SCHED_H | ||
| 4 | |||
| 5 | #include <linux/types.h> | ||
| 6 | |||
| 7 | /* Logical priority bands not depending on specific packet scheduler. | ||
| 8 | Every scheduler will map them to real traffic classes, if it has | ||
| 9 | no more precise mechanism to classify packets. | ||
| 10 | |||
| 11 | These numbers have no special meaning, though their coincidence | ||
| 12 | with obsolete IPv6 values is not occasional :-). New IPv6 drafts | ||
| 13 | preferred full anarchy inspired by diffserv group. | ||
| 14 | |||
| 15 | Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy | ||
| 16 | class, actually, as rule it will be handled with more care than | ||
| 17 | filler or even bulk. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #define TC_PRIO_BESTEFFORT 0 | ||
| 21 | #define TC_PRIO_FILLER 1 | ||
| 22 | #define TC_PRIO_BULK 2 | ||
| 23 | #define TC_PRIO_INTERACTIVE_BULK 4 | ||
| 24 | #define TC_PRIO_INTERACTIVE 6 | ||
| 25 | #define TC_PRIO_CONTROL 7 | ||
| 26 | |||
| 27 | #define TC_PRIO_MAX 15 | ||
| 28 | |||
| 29 | /* Generic queue statistics, available for all the elements. | ||
| 30 | Particular schedulers may have also their private records. | ||
| 31 | */ | ||
| 32 | |||
| 33 | struct tc_stats { | ||
| 34 | __u64 bytes; /* Number of enqueued bytes */ | ||
| 35 | __u32 packets; /* Number of enqueued packets */ | ||
| 36 | __u32 drops; /* Packets dropped because of lack of resources */ | ||
| 37 | __u32 overlimits; /* Number of throttle events when this | ||
| 38 | * flow goes out of allocated bandwidth */ | ||
| 39 | __u32 bps; /* Current flow byte rate */ | ||
| 40 | __u32 pps; /* Current flow packet rate */ | ||
| 41 | __u32 qlen; | ||
| 42 | __u32 backlog; | ||
| 43 | }; | ||
| 44 | |||
| 45 | struct tc_estimator { | ||
| 46 | signed char interval; | ||
| 47 | unsigned char ewma_log; | ||
| 48 | }; | ||
| 49 | |||
| 50 | /* "Handles" | ||
| 51 | --------- | ||
| 52 | |||
| 53 | All the traffic control objects have 32bit identifiers, or "handles". | ||
| 54 | |||
| 55 | They can be considered as opaque numbers from user API viewpoint, | ||
| 56 | but actually they always consist of two fields: major and | ||
| 57 | minor numbers, which are interpreted by kernel specially, | ||
| 58 | that may be used by applications, though not recommended. | ||
| 59 | |||
| 60 | F.e. qdisc handles always have minor number equal to zero, | ||
| 61 | classes (or flows) have major equal to parent qdisc major, and | ||
| 62 | minor uniquely identifying class inside qdisc. | ||
| 63 | |||
| 64 | Macros to manipulate handles: | ||
| 65 | */ | ||
| 66 | |||
| 67 | #define TC_H_MAJ_MASK (0xFFFF0000U) | ||
| 68 | #define TC_H_MIN_MASK (0x0000FFFFU) | ||
| 69 | #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK) | ||
| 70 | #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK) | ||
| 71 | #define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK)) | ||
| 72 | |||
| 73 | #define TC_H_UNSPEC (0U) | ||
| 74 | #define TC_H_ROOT (0xFFFFFFFFU) | ||
| 75 | #define TC_H_INGRESS (0xFFFFFFF1U) | ||
| 76 | #define TC_H_CLSACT TC_H_INGRESS | ||
| 77 | |||
| 78 | #define TC_H_MIN_PRIORITY 0xFFE0U | ||
| 79 | #define TC_H_MIN_INGRESS 0xFFF2U | ||
| 80 | #define TC_H_MIN_EGRESS 0xFFF3U | ||
| 81 | |||
| 82 | /* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */ | ||
| 83 | enum tc_link_layer { | ||
| 84 | TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ | ||
| 85 | TC_LINKLAYER_ETHERNET, | ||
| 86 | TC_LINKLAYER_ATM, | ||
| 87 | }; | ||
| 88 | #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ | ||
| 89 | |||
| 90 | struct tc_ratespec { | ||
| 91 | unsigned char cell_log; | ||
| 92 | __u8 linklayer; /* lower 4 bits */ | ||
| 93 | unsigned short overhead; | ||
| 94 | short cell_align; | ||
| 95 | unsigned short mpu; | ||
| 96 | __u32 rate; | ||
| 97 | }; | ||
| 98 | |||
| 99 | #define TC_RTAB_SIZE 1024 | ||
| 100 | |||
| 101 | struct tc_sizespec { | ||
| 102 | unsigned char cell_log; | ||
| 103 | unsigned char size_log; | ||
| 104 | short cell_align; | ||
| 105 | int overhead; | ||
| 106 | unsigned int linklayer; | ||
| 107 | unsigned int mpu; | ||
| 108 | unsigned int mtu; | ||
| 109 | unsigned int tsize; | ||
| 110 | }; | ||
| 111 | |||
| 112 | enum { | ||
| 113 | TCA_STAB_UNSPEC, | ||
| 114 | TCA_STAB_BASE, | ||
| 115 | TCA_STAB_DATA, | ||
| 116 | __TCA_STAB_MAX | ||
| 117 | }; | ||
| 118 | |||
| 119 | #define TCA_STAB_MAX (__TCA_STAB_MAX - 1) | ||
| 120 | |||
| 121 | /* FIFO section */ | ||
| 122 | |||
| 123 | struct tc_fifo_qopt { | ||
| 124 | __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */ | ||
| 125 | }; | ||
| 126 | |||
| 127 | /* SKBPRIO section */ | ||
| 128 | |||
| 129 | /* | ||
| 130 | * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1). | ||
| 131 | * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able | ||
| 132 | * to map one to one the DS field of IPV4 and IPV6 headers. | ||
| 133 | * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY. | ||
| 134 | */ | ||
| 135 | |||
| 136 | #define SKBPRIO_MAX_PRIORITY 64 | ||
| 137 | |||
| 138 | struct tc_skbprio_qopt { | ||
| 139 | __u32 limit; /* Queue length in packets. */ | ||
| 140 | }; | ||
| 141 | |||
| 142 | /* PRIO section */ | ||
| 143 | |||
| 144 | #define TCQ_PRIO_BANDS 16 | ||
| 145 | #define TCQ_MIN_PRIO_BANDS 2 | ||
| 146 | |||
| 147 | struct tc_prio_qopt { | ||
| 148 | int bands; /* Number of bands */ | ||
| 149 | __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ | ||
| 150 | }; | ||
| 151 | |||
| 152 | /* MULTIQ section */ | ||
| 153 | |||
| 154 | struct tc_multiq_qopt { | ||
| 155 | __u16 bands; /* Number of bands */ | ||
| 156 | __u16 max_bands; /* Maximum number of queues */ | ||
| 157 | }; | ||
| 158 | |||
| 159 | /* PLUG section */ | ||
| 160 | |||
| 161 | #define TCQ_PLUG_BUFFER 0 | ||
| 162 | #define TCQ_PLUG_RELEASE_ONE 1 | ||
| 163 | #define TCQ_PLUG_RELEASE_INDEFINITE 2 | ||
| 164 | #define TCQ_PLUG_LIMIT 3 | ||
| 165 | |||
| 166 | struct tc_plug_qopt { | ||
| 167 | /* TCQ_PLUG_BUFFER: Inset a plug into the queue and | ||
| 168 | * buffer any incoming packets | ||
| 169 | * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head | ||
| 170 | * to beginning of the next plug. | ||
| 171 | * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. | ||
| 172 | * Stop buffering packets until the next TCQ_PLUG_BUFFER | ||
| 173 | * command is received (just act as a pass-thru queue). | ||
| 174 | * TCQ_PLUG_LIMIT: Increase/decrease queue size | ||
| 175 | */ | ||
| 176 | int action; | ||
| 177 | __u32 limit; | ||
| 178 | }; | ||
| 179 | |||
| 180 | /* TBF section */ | ||
| 181 | |||
| 182 | struct tc_tbf_qopt { | ||
| 183 | struct tc_ratespec rate; | ||
| 184 | struct tc_ratespec peakrate; | ||
| 185 | __u32 limit; | ||
| 186 | __u32 buffer; | ||
| 187 | __u32 mtu; | ||
| 188 | }; | ||
| 189 | |||
| 190 | enum { | ||
| 191 | TCA_TBF_UNSPEC, | ||
| 192 | TCA_TBF_PARMS, | ||
| 193 | TCA_TBF_RTAB, | ||
| 194 | TCA_TBF_PTAB, | ||
| 195 | TCA_TBF_RATE64, | ||
| 196 | TCA_TBF_PRATE64, | ||
| 197 | TCA_TBF_BURST, | ||
| 198 | TCA_TBF_PBURST, | ||
| 199 | TCA_TBF_PAD, | ||
| 200 | __TCA_TBF_MAX, | ||
| 201 | }; | ||
| 202 | |||
| 203 | #define TCA_TBF_MAX (__TCA_TBF_MAX - 1) | ||
| 204 | |||
| 205 | |||
| 206 | /* TEQL section */ | ||
| 207 | |||
| 208 | /* TEQL does not require any parameters */ | ||
| 209 | |||
| 210 | /* SFQ section */ | ||
| 211 | |||
| 212 | struct tc_sfq_qopt { | ||
| 213 | unsigned quantum; /* Bytes per round allocated to flow */ | ||
| 214 | int perturb_period; /* Period of hash perturbation */ | ||
| 215 | __u32 limit; /* Maximal packets in queue */ | ||
| 216 | unsigned divisor; /* Hash divisor */ | ||
| 217 | unsigned flows; /* Maximal number of flows */ | ||
| 218 | }; | ||
| 219 | |||
| 220 | struct tc_sfqred_stats { | ||
| 221 | __u32 prob_drop; /* Early drops, below max threshold */ | ||
| 222 | __u32 forced_drop; /* Early drops, after max threshold */ | ||
| 223 | __u32 prob_mark; /* Marked packets, below max threshold */ | ||
| 224 | __u32 forced_mark; /* Marked packets, after max threshold */ | ||
| 225 | __u32 prob_mark_head; /* Marked packets, below max threshold */ | ||
| 226 | __u32 forced_mark_head;/* Marked packets, after max threshold */ | ||
| 227 | }; | ||
| 228 | |||
| 229 | struct tc_sfq_qopt_v1 { | ||
| 230 | struct tc_sfq_qopt v0; | ||
| 231 | unsigned int depth; /* max number of packets per flow */ | ||
| 232 | unsigned int headdrop; | ||
| 233 | /* SFQRED parameters */ | ||
| 234 | __u32 limit; /* HARD maximal flow queue length (bytes) */ | ||
| 235 | __u32 qth_min; /* Min average length threshold (bytes) */ | ||
| 236 | __u32 qth_max; /* Max average length threshold (bytes) */ | ||
| 237 | unsigned char Wlog; /* log(W) */ | ||
| 238 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
| 239 | unsigned char Scell_log; /* cell size for idle damping */ | ||
| 240 | unsigned char flags; | ||
| 241 | __u32 max_P; /* probability, high resolution */ | ||
| 242 | /* SFQRED stats */ | ||
| 243 | struct tc_sfqred_stats stats; | ||
| 244 | }; | ||
| 245 | |||
| 246 | |||
| 247 | struct tc_sfq_xstats { | ||
| 248 | __s32 allot; | ||
| 249 | }; | ||
| 250 | |||
| 251 | /* RED section */ | ||
| 252 | |||
| 253 | enum { | ||
| 254 | TCA_RED_UNSPEC, | ||
| 255 | TCA_RED_PARMS, | ||
| 256 | TCA_RED_STAB, | ||
| 257 | TCA_RED_MAX_P, | ||
| 258 | __TCA_RED_MAX, | ||
| 259 | }; | ||
| 260 | |||
| 261 | #define TCA_RED_MAX (__TCA_RED_MAX - 1) | ||
| 262 | |||
| 263 | struct tc_red_qopt { | ||
| 264 | __u32 limit; /* HARD maximal queue length (bytes) */ | ||
| 265 | __u32 qth_min; /* Min average length threshold (bytes) */ | ||
| 266 | __u32 qth_max; /* Max average length threshold (bytes) */ | ||
| 267 | unsigned char Wlog; /* log(W) */ | ||
| 268 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
| 269 | unsigned char Scell_log; /* cell size for idle damping */ | ||
| 270 | unsigned char flags; | ||
| 271 | #define TC_RED_ECN 1 | ||
| 272 | #define TC_RED_HARDDROP 2 | ||
| 273 | #define TC_RED_ADAPTATIVE 4 | ||
| 274 | }; | ||
| 275 | |||
| 276 | struct tc_red_xstats { | ||
| 277 | __u32 early; /* Early drops */ | ||
| 278 | __u32 pdrop; /* Drops due to queue limits */ | ||
| 279 | __u32 other; /* Drops due to drop() calls */ | ||
| 280 | __u32 marked; /* Marked packets */ | ||
| 281 | }; | ||
| 282 | |||
| 283 | /* GRED section */ | ||
| 284 | |||
| 285 | #define MAX_DPs 16 | ||
| 286 | |||
| 287 | enum { | ||
| 288 | TCA_GRED_UNSPEC, | ||
| 289 | TCA_GRED_PARMS, | ||
| 290 | TCA_GRED_STAB, | ||
| 291 | TCA_GRED_DPS, | ||
| 292 | TCA_GRED_MAX_P, | ||
| 293 | TCA_GRED_LIMIT, | ||
| 294 | TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */ | ||
| 295 | __TCA_GRED_MAX, | ||
| 296 | }; | ||
| 297 | |||
| 298 | #define TCA_GRED_MAX (__TCA_GRED_MAX - 1) | ||
| 299 | |||
| 300 | enum { | ||
| 301 | TCA_GRED_VQ_ENTRY_UNSPEC, | ||
| 302 | TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */ | ||
| 303 | __TCA_GRED_VQ_ENTRY_MAX, | ||
| 304 | }; | ||
| 305 | #define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1) | ||
| 306 | |||
| 307 | enum { | ||
| 308 | TCA_GRED_VQ_UNSPEC, | ||
| 309 | TCA_GRED_VQ_PAD, | ||
| 310 | TCA_GRED_VQ_DP, /* u32 */ | ||
| 311 | TCA_GRED_VQ_STAT_BYTES, /* u64 */ | ||
| 312 | TCA_GRED_VQ_STAT_PACKETS, /* u32 */ | ||
| 313 | TCA_GRED_VQ_STAT_BACKLOG, /* u32 */ | ||
| 314 | TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */ | ||
| 315 | TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */ | ||
| 316 | TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */ | ||
| 317 | TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */ | ||
| 318 | TCA_GRED_VQ_STAT_PDROP, /* u32 */ | ||
| 319 | TCA_GRED_VQ_STAT_OTHER, /* u32 */ | ||
| 320 | TCA_GRED_VQ_FLAGS, /* u32 */ | ||
| 321 | __TCA_GRED_VQ_MAX | ||
| 322 | }; | ||
| 323 | |||
| 324 | #define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1) | ||
| 325 | |||
| 326 | struct tc_gred_qopt { | ||
| 327 | __u32 limit; /* HARD maximal queue length (bytes) */ | ||
| 328 | __u32 qth_min; /* Min average length threshold (bytes) */ | ||
| 329 | __u32 qth_max; /* Max average length threshold (bytes) */ | ||
| 330 | __u32 DP; /* up to 2^32 DPs */ | ||
| 331 | __u32 backlog; | ||
| 332 | __u32 qave; | ||
| 333 | __u32 forced; | ||
| 334 | __u32 early; | ||
| 335 | __u32 other; | ||
| 336 | __u32 pdrop; | ||
| 337 | __u8 Wlog; /* log(W) */ | ||
| 338 | __u8 Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
| 339 | __u8 Scell_log; /* cell size for idle damping */ | ||
| 340 | __u8 prio; /* prio of this VQ */ | ||
| 341 | __u32 packets; | ||
| 342 | __u32 bytesin; | ||
| 343 | }; | ||
| 344 | |||
| 345 | /* gred setup */ | ||
| 346 | struct tc_gred_sopt { | ||
| 347 | __u32 DPs; | ||
| 348 | __u32 def_DP; | ||
| 349 | __u8 grio; | ||
| 350 | __u8 flags; | ||
| 351 | __u16 pad1; | ||
| 352 | }; | ||
| 353 | |||
| 354 | /* CHOKe section */ | ||
| 355 | |||
| 356 | enum { | ||
| 357 | TCA_CHOKE_UNSPEC, | ||
| 358 | TCA_CHOKE_PARMS, | ||
| 359 | TCA_CHOKE_STAB, | ||
| 360 | TCA_CHOKE_MAX_P, | ||
| 361 | __TCA_CHOKE_MAX, | ||
| 362 | }; | ||
| 363 | |||
| 364 | #define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1) | ||
| 365 | |||
| 366 | struct tc_choke_qopt { | ||
| 367 | __u32 limit; /* Hard queue length (packets) */ | ||
| 368 | __u32 qth_min; /* Min average threshold (packets) */ | ||
| 369 | __u32 qth_max; /* Max average threshold (packets) */ | ||
| 370 | unsigned char Wlog; /* log(W) */ | ||
| 371 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
| 372 | unsigned char Scell_log; /* cell size for idle damping */ | ||
| 373 | unsigned char flags; /* see RED flags */ | ||
| 374 | }; | ||
| 375 | |||
| 376 | struct tc_choke_xstats { | ||
| 377 | __u32 early; /* Early drops */ | ||
| 378 | __u32 pdrop; /* Drops due to queue limits */ | ||
| 379 | __u32 other; /* Drops due to drop() calls */ | ||
| 380 | __u32 marked; /* Marked packets */ | ||
| 381 | __u32 matched; /* Drops due to flow match */ | ||
| 382 | }; | ||
| 383 | |||
| 384 | /* HTB section */ | ||
| 385 | #define TC_HTB_NUMPRIO 8 | ||
| 386 | #define TC_HTB_MAXDEPTH 8 | ||
| 387 | #define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */ | ||
| 388 | |||
| 389 | struct tc_htb_opt { | ||
| 390 | struct tc_ratespec rate; | ||
| 391 | struct tc_ratespec ceil; | ||
| 392 | __u32 buffer; | ||
| 393 | __u32 cbuffer; | ||
| 394 | __u32 quantum; | ||
| 395 | __u32 level; /* out only */ | ||
| 396 | __u32 prio; | ||
| 397 | }; | ||
| 398 | struct tc_htb_glob { | ||
| 399 | __u32 version; /* to match HTB/TC */ | ||
| 400 | __u32 rate2quantum; /* bps->quantum divisor */ | ||
| 401 | __u32 defcls; /* default class number */ | ||
| 402 | __u32 debug; /* debug flags */ | ||
| 403 | |||
| 404 | /* stats */ | ||
| 405 | __u32 direct_pkts; /* count of non shaped packets */ | ||
| 406 | }; | ||
| 407 | enum { | ||
| 408 | TCA_HTB_UNSPEC, | ||
| 409 | TCA_HTB_PARMS, | ||
| 410 | TCA_HTB_INIT, | ||
| 411 | TCA_HTB_CTAB, | ||
| 412 | TCA_HTB_RTAB, | ||
| 413 | TCA_HTB_DIRECT_QLEN, | ||
| 414 | TCA_HTB_RATE64, | ||
| 415 | TCA_HTB_CEIL64, | ||
| 416 | TCA_HTB_PAD, | ||
| 417 | __TCA_HTB_MAX, | ||
| 418 | }; | ||
| 419 | |||
| 420 | #define TCA_HTB_MAX (__TCA_HTB_MAX - 1) | ||
| 421 | |||
| 422 | struct tc_htb_xstats { | ||
| 423 | __u32 lends; | ||
| 424 | __u32 borrows; | ||
| 425 | __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */ | ||
| 426 | __s32 tokens; | ||
| 427 | __s32 ctokens; | ||
| 428 | }; | ||
| 429 | |||
| 430 | /* HFSC section */ | ||
| 431 | |||
| 432 | struct tc_hfsc_qopt { | ||
| 433 | __u16 defcls; /* default class */ | ||
| 434 | }; | ||
| 435 | |||
| 436 | struct tc_service_curve { | ||
| 437 | __u32 m1; /* slope of the first segment in bps */ | ||
| 438 | __u32 d; /* x-projection of the first segment in us */ | ||
| 439 | __u32 m2; /* slope of the second segment in bps */ | ||
| 440 | }; | ||
| 441 | |||
| 442 | struct tc_hfsc_stats { | ||
| 443 | __u64 work; /* total work done */ | ||
| 444 | __u64 rtwork; /* work done by real-time criteria */ | ||
| 445 | __u32 period; /* current period */ | ||
| 446 | __u32 level; /* class level in hierarchy */ | ||
| 447 | }; | ||
| 448 | |||
| 449 | enum { | ||
| 450 | TCA_HFSC_UNSPEC, | ||
| 451 | TCA_HFSC_RSC, | ||
| 452 | TCA_HFSC_FSC, | ||
| 453 | TCA_HFSC_USC, | ||
| 454 | __TCA_HFSC_MAX, | ||
| 455 | }; | ||
| 456 | |||
| 457 | #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1) | ||
| 458 | |||
| 459 | |||
| 460 | /* CBQ section */ | ||
| 461 | |||
| 462 | #define TC_CBQ_MAXPRIO 8 | ||
| 463 | #define TC_CBQ_MAXLEVEL 8 | ||
| 464 | #define TC_CBQ_DEF_EWMA 5 | ||
| 465 | |||
| 466 | struct tc_cbq_lssopt { | ||
| 467 | unsigned char change; | ||
| 468 | unsigned char flags; | ||
| 469 | #define TCF_CBQ_LSS_BOUNDED 1 | ||
| 470 | #define TCF_CBQ_LSS_ISOLATED 2 | ||
| 471 | unsigned char ewma_log; | ||
| 472 | unsigned char level; | ||
| 473 | #define TCF_CBQ_LSS_FLAGS 1 | ||
| 474 | #define TCF_CBQ_LSS_EWMA 2 | ||
| 475 | #define TCF_CBQ_LSS_MAXIDLE 4 | ||
| 476 | #define TCF_CBQ_LSS_MINIDLE 8 | ||
| 477 | #define TCF_CBQ_LSS_OFFTIME 0x10 | ||
| 478 | #define TCF_CBQ_LSS_AVPKT 0x20 | ||
| 479 | __u32 maxidle; | ||
| 480 | __u32 minidle; | ||
| 481 | __u32 offtime; | ||
| 482 | __u32 avpkt; | ||
| 483 | }; | ||
| 484 | |||
| 485 | struct tc_cbq_wrropt { | ||
| 486 | unsigned char flags; | ||
| 487 | unsigned char priority; | ||
| 488 | unsigned char cpriority; | ||
| 489 | unsigned char __reserved; | ||
| 490 | __u32 allot; | ||
| 491 | __u32 weight; | ||
| 492 | }; | ||
| 493 | |||
| 494 | struct tc_cbq_ovl { | ||
| 495 | unsigned char strategy; | ||
| 496 | #define TC_CBQ_OVL_CLASSIC 0 | ||
| 497 | #define TC_CBQ_OVL_DELAY 1 | ||
| 498 | #define TC_CBQ_OVL_LOWPRIO 2 | ||
| 499 | #define TC_CBQ_OVL_DROP 3 | ||
| 500 | #define TC_CBQ_OVL_RCLASSIC 4 | ||
| 501 | unsigned char priority2; | ||
| 502 | __u16 pad; | ||
| 503 | __u32 penalty; | ||
| 504 | }; | ||
| 505 | |||
| 506 | struct tc_cbq_police { | ||
| 507 | unsigned char police; | ||
| 508 | unsigned char __res1; | ||
| 509 | unsigned short __res2; | ||
| 510 | }; | ||
| 511 | |||
| 512 | struct tc_cbq_fopt { | ||
| 513 | __u32 split; | ||
| 514 | __u32 defmap; | ||
| 515 | __u32 defchange; | ||
| 516 | }; | ||
| 517 | |||
| 518 | struct tc_cbq_xstats { | ||
| 519 | __u32 borrows; | ||
| 520 | __u32 overactions; | ||
| 521 | __s32 avgidle; | ||
| 522 | __s32 undertime; | ||
| 523 | }; | ||
| 524 | |||
| 525 | enum { | ||
| 526 | TCA_CBQ_UNSPEC, | ||
| 527 | TCA_CBQ_LSSOPT, | ||
| 528 | TCA_CBQ_WRROPT, | ||
| 529 | TCA_CBQ_FOPT, | ||
| 530 | TCA_CBQ_OVL_STRATEGY, | ||
| 531 | TCA_CBQ_RATE, | ||
| 532 | TCA_CBQ_RTAB, | ||
| 533 | TCA_CBQ_POLICE, | ||
| 534 | __TCA_CBQ_MAX, | ||
| 535 | }; | ||
| 536 | |||
| 537 | #define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1) | ||
| 538 | |||
| 539 | /* dsmark section */ | ||
| 540 | |||
| 541 | enum { | ||
| 542 | TCA_DSMARK_UNSPEC, | ||
| 543 | TCA_DSMARK_INDICES, | ||
| 544 | TCA_DSMARK_DEFAULT_INDEX, | ||
| 545 | TCA_DSMARK_SET_TC_INDEX, | ||
| 546 | TCA_DSMARK_MASK, | ||
| 547 | TCA_DSMARK_VALUE, | ||
| 548 | __TCA_DSMARK_MAX, | ||
| 549 | }; | ||
| 550 | |||
| 551 | #define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1) | ||
| 552 | |||
| 553 | /* ATM section */ | ||
| 554 | |||
| 555 | enum { | ||
| 556 | TCA_ATM_UNSPEC, | ||
| 557 | TCA_ATM_FD, /* file/socket descriptor */ | ||
| 558 | TCA_ATM_PTR, /* pointer to descriptor - later */ | ||
| 559 | TCA_ATM_HDR, /* LL header */ | ||
| 560 | TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */ | ||
| 561 | TCA_ATM_ADDR, /* PVC address (for output only) */ | ||
| 562 | TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */ | ||
| 563 | __TCA_ATM_MAX, | ||
| 564 | }; | ||
| 565 | |||
| 566 | #define TCA_ATM_MAX (__TCA_ATM_MAX - 1) | ||
| 567 | |||
| 568 | /* Network emulator */ | ||
| 569 | |||
| 570 | enum { | ||
| 571 | TCA_NETEM_UNSPEC, | ||
| 572 | TCA_NETEM_CORR, | ||
| 573 | TCA_NETEM_DELAY_DIST, | ||
| 574 | TCA_NETEM_REORDER, | ||
| 575 | TCA_NETEM_CORRUPT, | ||
| 576 | TCA_NETEM_LOSS, | ||
| 577 | TCA_NETEM_RATE, | ||
| 578 | TCA_NETEM_ECN, | ||
| 579 | TCA_NETEM_RATE64, | ||
| 580 | TCA_NETEM_PAD, | ||
| 581 | TCA_NETEM_LATENCY64, | ||
| 582 | TCA_NETEM_JITTER64, | ||
| 583 | TCA_NETEM_SLOT, | ||
| 584 | TCA_NETEM_SLOT_DIST, | ||
| 585 | __TCA_NETEM_MAX, | ||
| 586 | }; | ||
| 587 | |||
| 588 | #define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1) | ||
| 589 | |||
| 590 | struct tc_netem_qopt { | ||
| 591 | __u32 latency; /* added delay (us) */ | ||
| 592 | __u32 limit; /* fifo limit (packets) */ | ||
| 593 | __u32 loss; /* random packet loss (0=none ~0=100%) */ | ||
| 594 | __u32 gap; /* re-ordering gap (0 for none) */ | ||
| 595 | __u32 duplicate; /* random packet dup (0=none ~0=100%) */ | ||
| 596 | __u32 jitter; /* random jitter in latency (us) */ | ||
| 597 | }; | ||
| 598 | |||
| 599 | struct tc_netem_corr { | ||
| 600 | __u32 delay_corr; /* delay correlation */ | ||
| 601 | __u32 loss_corr; /* packet loss correlation */ | ||
| 602 | __u32 dup_corr; /* duplicate correlation */ | ||
| 603 | }; | ||
| 604 | |||
| 605 | struct tc_netem_reorder { | ||
| 606 | __u32 probability; | ||
| 607 | __u32 correlation; | ||
| 608 | }; | ||
| 609 | |||
| 610 | struct tc_netem_corrupt { | ||
| 611 | __u32 probability; | ||
| 612 | __u32 correlation; | ||
| 613 | }; | ||
| 614 | |||
| 615 | struct tc_netem_rate { | ||
| 616 | __u32 rate; /* byte/s */ | ||
| 617 | __s32 packet_overhead; | ||
| 618 | __u32 cell_size; | ||
| 619 | __s32 cell_overhead; | ||
| 620 | }; | ||
| 621 | |||
| 622 | struct tc_netem_slot { | ||
| 623 | __s64 min_delay; /* nsec */ | ||
| 624 | __s64 max_delay; | ||
| 625 | __s32 max_packets; | ||
| 626 | __s32 max_bytes; | ||
| 627 | __s64 dist_delay; /* nsec */ | ||
| 628 | __s64 dist_jitter; /* nsec */ | ||
| 629 | }; | ||
| 630 | |||
| 631 | enum { | ||
| 632 | NETEM_LOSS_UNSPEC, | ||
| 633 | NETEM_LOSS_GI, /* General Intuitive - 4 state model */ | ||
| 634 | NETEM_LOSS_GE, /* Gilbert Elliot models */ | ||
| 635 | __NETEM_LOSS_MAX | ||
| 636 | }; | ||
| 637 | #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) | ||
| 638 | |||
| 639 | /* State transition probabilities for 4 state model */ | ||
| 640 | struct tc_netem_gimodel { | ||
| 641 | __u32 p13; | ||
| 642 | __u32 p31; | ||
| 643 | __u32 p32; | ||
| 644 | __u32 p14; | ||
| 645 | __u32 p23; | ||
| 646 | }; | ||
| 647 | |||
| 648 | /* Gilbert-Elliot models */ | ||
| 649 | struct tc_netem_gemodel { | ||
| 650 | __u32 p; | ||
| 651 | __u32 r; | ||
| 652 | __u32 h; | ||
| 653 | __u32 k1; | ||
| 654 | }; | ||
| 655 | |||
| 656 | #define NETEM_DIST_SCALE 8192 | ||
| 657 | #define NETEM_DIST_MAX 16384 | ||
| 658 | |||
| 659 | /* DRR */ | ||
| 660 | |||
| 661 | enum { | ||
| 662 | TCA_DRR_UNSPEC, | ||
| 663 | TCA_DRR_QUANTUM, | ||
| 664 | __TCA_DRR_MAX | ||
| 665 | }; | ||
| 666 | |||
| 667 | #define TCA_DRR_MAX (__TCA_DRR_MAX - 1) | ||
| 668 | |||
| 669 | struct tc_drr_stats { | ||
| 670 | __u32 deficit; | ||
| 671 | }; | ||
| 672 | |||
| 673 | /* MQPRIO */ | ||
| 674 | #define TC_QOPT_BITMASK 15 | ||
| 675 | #define TC_QOPT_MAX_QUEUE 16 | ||
| 676 | |||
| 677 | enum { | ||
| 678 | TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */ | ||
| 679 | TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */ | ||
| 680 | __TC_MQPRIO_HW_OFFLOAD_MAX | ||
| 681 | }; | ||
| 682 | |||
| 683 | #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1) | ||
| 684 | |||
| 685 | enum { | ||
| 686 | TC_MQPRIO_MODE_DCB, | ||
| 687 | TC_MQPRIO_MODE_CHANNEL, | ||
| 688 | __TC_MQPRIO_MODE_MAX | ||
| 689 | }; | ||
| 690 | |||
| 691 | #define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1) | ||
| 692 | |||
| 693 | enum { | ||
| 694 | TC_MQPRIO_SHAPER_DCB, | ||
| 695 | TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */ | ||
| 696 | __TC_MQPRIO_SHAPER_MAX | ||
| 697 | }; | ||
| 698 | |||
| 699 | #define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1) | ||
| 700 | |||
| 701 | struct tc_mqprio_qopt { | ||
| 702 | __u8 num_tc; | ||
| 703 | __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; | ||
| 704 | __u8 hw; | ||
| 705 | __u16 count[TC_QOPT_MAX_QUEUE]; | ||
| 706 | __u16 offset[TC_QOPT_MAX_QUEUE]; | ||
| 707 | }; | ||
| 708 | |||
| 709 | #define TC_MQPRIO_F_MODE 0x1 | ||
| 710 | #define TC_MQPRIO_F_SHAPER 0x2 | ||
| 711 | #define TC_MQPRIO_F_MIN_RATE 0x4 | ||
| 712 | #define TC_MQPRIO_F_MAX_RATE 0x8 | ||
| 713 | |||
| 714 | enum { | ||
| 715 | TCA_MQPRIO_UNSPEC, | ||
| 716 | TCA_MQPRIO_MODE, | ||
| 717 | TCA_MQPRIO_SHAPER, | ||
| 718 | TCA_MQPRIO_MIN_RATE64, | ||
| 719 | TCA_MQPRIO_MAX_RATE64, | ||
| 720 | __TCA_MQPRIO_MAX, | ||
| 721 | }; | ||
| 722 | |||
| 723 | #define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1) | ||
| 724 | |||
| 725 | /* SFB */ | ||
| 726 | |||
| 727 | enum { | ||
| 728 | TCA_SFB_UNSPEC, | ||
| 729 | TCA_SFB_PARMS, | ||
| 730 | __TCA_SFB_MAX, | ||
| 731 | }; | ||
| 732 | |||
| 733 | #define TCA_SFB_MAX (__TCA_SFB_MAX - 1) | ||
| 734 | |||
| 735 | /* | ||
| 736 | * Note: increment, decrement are Q0.16 fixed-point values. | ||
| 737 | */ | ||
| 738 | struct tc_sfb_qopt { | ||
| 739 | __u32 rehash_interval; /* delay between hash move, in ms */ | ||
| 740 | __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */ | ||
| 741 | __u32 max; /* max len of qlen_min */ | ||
| 742 | __u32 bin_size; /* maximum queue length per bin */ | ||
| 743 | __u32 increment; /* probability increment, (d1 in Blue) */ | ||
| 744 | __u32 decrement; /* probability decrement, (d2 in Blue) */ | ||
| 745 | __u32 limit; /* max SFB queue length */ | ||
| 746 | __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */ | ||
| 747 | __u32 penalty_burst; | ||
| 748 | }; | ||
| 749 | |||
| 750 | struct tc_sfb_xstats { | ||
| 751 | __u32 earlydrop; | ||
| 752 | __u32 penaltydrop; | ||
| 753 | __u32 bucketdrop; | ||
| 754 | __u32 queuedrop; | ||
| 755 | __u32 childdrop; /* drops in child qdisc */ | ||
| 756 | __u32 marked; | ||
| 757 | __u32 maxqlen; | ||
| 758 | __u32 maxprob; | ||
| 759 | __u32 avgprob; | ||
| 760 | }; | ||
| 761 | |||
| 762 | #define SFB_MAX_PROB 0xFFFF | ||
| 763 | |||
| 764 | /* QFQ */ | ||
| 765 | enum { | ||
| 766 | TCA_QFQ_UNSPEC, | ||
| 767 | TCA_QFQ_WEIGHT, | ||
| 768 | TCA_QFQ_LMAX, | ||
| 769 | __TCA_QFQ_MAX | ||
| 770 | }; | ||
| 771 | |||
| 772 | #define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1) | ||
| 773 | |||
| 774 | struct tc_qfq_stats { | ||
| 775 | __u32 weight; | ||
| 776 | __u32 lmax; | ||
| 777 | }; | ||
| 778 | |||
| 779 | /* CODEL */ | ||
| 780 | |||
| 781 | enum { | ||
| 782 | TCA_CODEL_UNSPEC, | ||
| 783 | TCA_CODEL_TARGET, | ||
| 784 | TCA_CODEL_LIMIT, | ||
| 785 | TCA_CODEL_INTERVAL, | ||
| 786 | TCA_CODEL_ECN, | ||
| 787 | TCA_CODEL_CE_THRESHOLD, | ||
| 788 | __TCA_CODEL_MAX | ||
| 789 | }; | ||
| 790 | |||
| 791 | #define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1) | ||
| 792 | |||
| 793 | struct tc_codel_xstats { | ||
| 794 | __u32 maxpacket; /* largest packet we've seen so far */ | ||
| 795 | __u32 count; /* how many drops we've done since the last time we | ||
| 796 | * entered dropping state | ||
| 797 | */ | ||
| 798 | __u32 lastcount; /* count at entry to dropping state */ | ||
| 799 | __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */ | ||
| 800 | __s32 drop_next; /* time to drop next packet */ | ||
| 801 | __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */ | ||
| 802 | __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */ | ||
| 803 | __u32 dropping; /* are we in dropping state ? */ | ||
| 804 | __u32 ce_mark; /* number of CE marked packets because of ce_threshold */ | ||
| 805 | }; | ||
| 806 | |||
| 807 | /* FQ_CODEL */ | ||
| 808 | |||
| 809 | enum { | ||
| 810 | TCA_FQ_CODEL_UNSPEC, | ||
| 811 | TCA_FQ_CODEL_TARGET, | ||
| 812 | TCA_FQ_CODEL_LIMIT, | ||
| 813 | TCA_FQ_CODEL_INTERVAL, | ||
| 814 | TCA_FQ_CODEL_ECN, | ||
| 815 | TCA_FQ_CODEL_FLOWS, | ||
| 816 | TCA_FQ_CODEL_QUANTUM, | ||
| 817 | TCA_FQ_CODEL_CE_THRESHOLD, | ||
| 818 | TCA_FQ_CODEL_DROP_BATCH_SIZE, | ||
| 819 | TCA_FQ_CODEL_MEMORY_LIMIT, | ||
| 820 | __TCA_FQ_CODEL_MAX | ||
| 821 | }; | ||
| 822 | |||
| 823 | #define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1) | ||
| 824 | |||
| 825 | enum { | ||
| 826 | TCA_FQ_CODEL_XSTATS_QDISC, | ||
| 827 | TCA_FQ_CODEL_XSTATS_CLASS, | ||
| 828 | }; | ||
| 829 | |||
| 830 | struct tc_fq_codel_qd_stats { | ||
| 831 | __u32 maxpacket; /* largest packet we've seen so far */ | ||
| 832 | __u32 drop_overlimit; /* number of time max qdisc | ||
| 833 | * packet limit was hit | ||
| 834 | */ | ||
| 835 | __u32 ecn_mark; /* number of packets we ECN marked | ||
| 836 | * instead of being dropped | ||
| 837 | */ | ||
| 838 | __u32 new_flow_count; /* number of time packets | ||
| 839 | * created a 'new flow' | ||
| 840 | */ | ||
| 841 | __u32 new_flows_len; /* count of flows in new list */ | ||
| 842 | __u32 old_flows_len; /* count of flows in old list */ | ||
| 843 | __u32 ce_mark; /* packets above ce_threshold */ | ||
| 844 | __u32 memory_usage; /* in bytes */ | ||
| 845 | __u32 drop_overmemory; | ||
| 846 | }; | ||
| 847 | |||
| 848 | struct tc_fq_codel_cl_stats { | ||
| 849 | __s32 deficit; | ||
| 850 | __u32 ldelay; /* in-queue delay seen by most recently | ||
| 851 | * dequeued packet | ||
| 852 | */ | ||
| 853 | __u32 count; | ||
| 854 | __u32 lastcount; | ||
| 855 | __u32 dropping; | ||
| 856 | __s32 drop_next; | ||
| 857 | }; | ||
| 858 | |||
| 859 | struct tc_fq_codel_xstats { | ||
| 860 | __u32 type; | ||
| 861 | union { | ||
| 862 | struct tc_fq_codel_qd_stats qdisc_stats; | ||
| 863 | struct tc_fq_codel_cl_stats class_stats; | ||
| 864 | }; | ||
| 865 | }; | ||
| 866 | |||
| 867 | /* FQ */ | ||
| 868 | |||
| 869 | enum { | ||
| 870 | TCA_FQ_UNSPEC, | ||
| 871 | |||
| 872 | TCA_FQ_PLIMIT, /* limit of total number of packets in queue */ | ||
| 873 | |||
| 874 | TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */ | ||
| 875 | |||
| 876 | TCA_FQ_QUANTUM, /* RR quantum */ | ||
| 877 | |||
| 878 | TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */ | ||
| 879 | |||
| 880 | TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */ | ||
| 881 | |||
| 882 | TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */ | ||
| 883 | |||
| 884 | TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */ | ||
| 885 | |||
| 886 | TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */ | ||
| 887 | |||
| 888 | TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */ | ||
| 889 | |||
| 890 | TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */ | ||
| 891 | |||
| 892 | TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */ | ||
| 893 | |||
| 894 | TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */ | ||
| 895 | |||
| 896 | __TCA_FQ_MAX | ||
| 897 | }; | ||
| 898 | |||
| 899 | #define TCA_FQ_MAX (__TCA_FQ_MAX - 1) | ||
| 900 | |||
| 901 | struct tc_fq_qd_stats { | ||
| 902 | __u64 gc_flows; | ||
| 903 | __u64 highprio_packets; | ||
| 904 | __u64 tcp_retrans; | ||
| 905 | __u64 throttled; | ||
| 906 | __u64 flows_plimit; | ||
| 907 | __u64 pkts_too_long; | ||
| 908 | __u64 allocation_errors; | ||
| 909 | __s64 time_next_delayed_flow; | ||
| 910 | __u32 flows; | ||
| 911 | __u32 inactive_flows; | ||
| 912 | __u32 throttled_flows; | ||
| 913 | __u32 unthrottle_latency_ns; | ||
| 914 | __u64 ce_mark; /* packets above ce_threshold */ | ||
| 915 | }; | ||
| 916 | |||
| 917 | /* Heavy-Hitter Filter */ | ||
| 918 | |||
| 919 | enum { | ||
| 920 | TCA_HHF_UNSPEC, | ||
| 921 | TCA_HHF_BACKLOG_LIMIT, | ||
| 922 | TCA_HHF_QUANTUM, | ||
| 923 | TCA_HHF_HH_FLOWS_LIMIT, | ||
| 924 | TCA_HHF_RESET_TIMEOUT, | ||
| 925 | TCA_HHF_ADMIT_BYTES, | ||
| 926 | TCA_HHF_EVICT_TIMEOUT, | ||
| 927 | TCA_HHF_NON_HH_WEIGHT, | ||
| 928 | __TCA_HHF_MAX | ||
| 929 | }; | ||
| 930 | |||
| 931 | #define TCA_HHF_MAX (__TCA_HHF_MAX - 1) | ||
| 932 | |||
| 933 | struct tc_hhf_xstats { | ||
| 934 | __u32 drop_overlimit; /* number of times max qdisc packet limit | ||
| 935 | * was hit | ||
| 936 | */ | ||
| 937 | __u32 hh_overlimit; /* number of times max heavy-hitters was hit */ | ||
| 938 | __u32 hh_tot_count; /* number of captured heavy-hitters so far */ | ||
| 939 | __u32 hh_cur_count; /* number of current heavy-hitters */ | ||
| 940 | }; | ||
| 941 | |||
| 942 | /* PIE */ | ||
| 943 | enum { | ||
| 944 | TCA_PIE_UNSPEC, | ||
| 945 | TCA_PIE_TARGET, | ||
| 946 | TCA_PIE_LIMIT, | ||
| 947 | TCA_PIE_TUPDATE, | ||
| 948 | TCA_PIE_ALPHA, | ||
| 949 | TCA_PIE_BETA, | ||
| 950 | TCA_PIE_ECN, | ||
| 951 | TCA_PIE_BYTEMODE, | ||
| 952 | __TCA_PIE_MAX | ||
| 953 | }; | ||
| 954 | #define TCA_PIE_MAX (__TCA_PIE_MAX - 1) | ||
| 955 | |||
| 956 | struct tc_pie_xstats { | ||
| 957 | __u32 prob; /* current probability */ | ||
| 958 | __u32 delay; /* current delay in ms */ | ||
| 959 | __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */ | ||
| 960 | __u32 packets_in; /* total number of packets enqueued */ | ||
| 961 | __u32 dropped; /* packets dropped due to pie_action */ | ||
| 962 | __u32 overlimit; /* dropped due to lack of space in queue */ | ||
| 963 | __u32 maxq; /* maximum queue size */ | ||
| 964 | __u32 ecn_mark; /* packets marked with ecn*/ | ||
| 965 | }; | ||
| 966 | |||
| 967 | /* CBS */ | ||
| 968 | struct tc_cbs_qopt { | ||
| 969 | __u8 offload; | ||
| 970 | __u8 _pad[3]; | ||
| 971 | __s32 hicredit; | ||
| 972 | __s32 locredit; | ||
| 973 | __s32 idleslope; | ||
| 974 | __s32 sendslope; | ||
| 975 | }; | ||
| 976 | |||
| 977 | enum { | ||
| 978 | TCA_CBS_UNSPEC, | ||
| 979 | TCA_CBS_PARMS, | ||
| 980 | __TCA_CBS_MAX, | ||
| 981 | }; | ||
| 982 | |||
| 983 | #define TCA_CBS_MAX (__TCA_CBS_MAX - 1) | ||
| 984 | |||
| 985 | |||
| 986 | /* ETF */ | ||
| 987 | struct tc_etf_qopt { | ||
| 988 | __s32 delta; | ||
| 989 | __s32 clockid; | ||
| 990 | __u32 flags; | ||
| 991 | #define TC_ETF_DEADLINE_MODE_ON BIT(0) | ||
| 992 | #define TC_ETF_OFFLOAD_ON BIT(1) | ||
| 993 | }; | ||
| 994 | |||
| 995 | enum { | ||
| 996 | TCA_ETF_UNSPEC, | ||
| 997 | TCA_ETF_PARMS, | ||
| 998 | __TCA_ETF_MAX, | ||
| 999 | }; | ||
| 1000 | |||
| 1001 | #define TCA_ETF_MAX (__TCA_ETF_MAX - 1) | ||
| 1002 | |||
| 1003 | |||
| 1004 | /* CAKE */ | ||
| 1005 | enum { | ||
| 1006 | TCA_CAKE_UNSPEC, | ||
| 1007 | TCA_CAKE_PAD, | ||
| 1008 | TCA_CAKE_BASE_RATE64, | ||
| 1009 | TCA_CAKE_DIFFSERV_MODE, | ||
| 1010 | TCA_CAKE_ATM, | ||
| 1011 | TCA_CAKE_FLOW_MODE, | ||
| 1012 | TCA_CAKE_OVERHEAD, | ||
| 1013 | TCA_CAKE_RTT, | ||
| 1014 | TCA_CAKE_TARGET, | ||
| 1015 | TCA_CAKE_AUTORATE, | ||
| 1016 | TCA_CAKE_MEMORY, | ||
| 1017 | TCA_CAKE_NAT, | ||
| 1018 | TCA_CAKE_RAW, | ||
| 1019 | TCA_CAKE_WASH, | ||
| 1020 | TCA_CAKE_MPU, | ||
| 1021 | TCA_CAKE_INGRESS, | ||
| 1022 | TCA_CAKE_ACK_FILTER, | ||
| 1023 | TCA_CAKE_SPLIT_GSO, | ||
| 1024 | __TCA_CAKE_MAX | ||
| 1025 | }; | ||
| 1026 | #define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1) | ||
| 1027 | |||
| 1028 | enum { | ||
| 1029 | __TCA_CAKE_STATS_INVALID, | ||
| 1030 | TCA_CAKE_STATS_PAD, | ||
| 1031 | TCA_CAKE_STATS_CAPACITY_ESTIMATE64, | ||
| 1032 | TCA_CAKE_STATS_MEMORY_LIMIT, | ||
| 1033 | TCA_CAKE_STATS_MEMORY_USED, | ||
| 1034 | TCA_CAKE_STATS_AVG_NETOFF, | ||
| 1035 | TCA_CAKE_STATS_MIN_NETLEN, | ||
| 1036 | TCA_CAKE_STATS_MAX_NETLEN, | ||
| 1037 | TCA_CAKE_STATS_MIN_ADJLEN, | ||
| 1038 | TCA_CAKE_STATS_MAX_ADJLEN, | ||
| 1039 | TCA_CAKE_STATS_TIN_STATS, | ||
| 1040 | TCA_CAKE_STATS_DEFICIT, | ||
| 1041 | TCA_CAKE_STATS_COBALT_COUNT, | ||
| 1042 | TCA_CAKE_STATS_DROPPING, | ||
| 1043 | TCA_CAKE_STATS_DROP_NEXT_US, | ||
| 1044 | TCA_CAKE_STATS_P_DROP, | ||
| 1045 | TCA_CAKE_STATS_BLUE_TIMER_US, | ||
| 1046 | __TCA_CAKE_STATS_MAX | ||
| 1047 | }; | ||
| 1048 | #define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1) | ||
| 1049 | |||
| 1050 | enum { | ||
| 1051 | __TCA_CAKE_TIN_STATS_INVALID, | ||
| 1052 | TCA_CAKE_TIN_STATS_PAD, | ||
| 1053 | TCA_CAKE_TIN_STATS_SENT_PACKETS, | ||
| 1054 | TCA_CAKE_TIN_STATS_SENT_BYTES64, | ||
| 1055 | TCA_CAKE_TIN_STATS_DROPPED_PACKETS, | ||
| 1056 | TCA_CAKE_TIN_STATS_DROPPED_BYTES64, | ||
| 1057 | TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS, | ||
| 1058 | TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64, | ||
| 1059 | TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS, | ||
| 1060 | TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64, | ||
| 1061 | TCA_CAKE_TIN_STATS_BACKLOG_PACKETS, | ||
| 1062 | TCA_CAKE_TIN_STATS_BACKLOG_BYTES, | ||
| 1063 | TCA_CAKE_TIN_STATS_THRESHOLD_RATE64, | ||
| 1064 | TCA_CAKE_TIN_STATS_TARGET_US, | ||
| 1065 | TCA_CAKE_TIN_STATS_INTERVAL_US, | ||
| 1066 | TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS, | ||
| 1067 | TCA_CAKE_TIN_STATS_WAY_MISSES, | ||
| 1068 | TCA_CAKE_TIN_STATS_WAY_COLLISIONS, | ||
| 1069 | TCA_CAKE_TIN_STATS_PEAK_DELAY_US, | ||
| 1070 | TCA_CAKE_TIN_STATS_AVG_DELAY_US, | ||
| 1071 | TCA_CAKE_TIN_STATS_BASE_DELAY_US, | ||
| 1072 | TCA_CAKE_TIN_STATS_SPARSE_FLOWS, | ||
| 1073 | TCA_CAKE_TIN_STATS_BULK_FLOWS, | ||
| 1074 | TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS, | ||
| 1075 | TCA_CAKE_TIN_STATS_MAX_SKBLEN, | ||
| 1076 | TCA_CAKE_TIN_STATS_FLOW_QUANTUM, | ||
| 1077 | __TCA_CAKE_TIN_STATS_MAX | ||
| 1078 | }; | ||
| 1079 | #define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1) | ||
| 1080 | #define TC_CAKE_MAX_TINS (8) | ||
| 1081 | |||
| 1082 | enum { | ||
| 1083 | CAKE_FLOW_NONE = 0, | ||
| 1084 | CAKE_FLOW_SRC_IP, | ||
| 1085 | CAKE_FLOW_DST_IP, | ||
| 1086 | CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */ | ||
| 1087 | CAKE_FLOW_FLOWS, | ||
| 1088 | CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */ | ||
| 1089 | CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */ | ||
| 1090 | CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */ | ||
| 1091 | CAKE_FLOW_MAX, | ||
| 1092 | }; | ||
| 1093 | |||
| 1094 | enum { | ||
| 1095 | CAKE_DIFFSERV_DIFFSERV3 = 0, | ||
| 1096 | CAKE_DIFFSERV_DIFFSERV4, | ||
| 1097 | CAKE_DIFFSERV_DIFFSERV8, | ||
| 1098 | CAKE_DIFFSERV_BESTEFFORT, | ||
| 1099 | CAKE_DIFFSERV_PRECEDENCE, | ||
| 1100 | CAKE_DIFFSERV_MAX | ||
| 1101 | }; | ||
| 1102 | |||
| 1103 | enum { | ||
| 1104 | CAKE_ACK_NONE = 0, | ||
| 1105 | CAKE_ACK_FILTER, | ||
| 1106 | CAKE_ACK_AGGRESSIVE, | ||
| 1107 | CAKE_ACK_MAX | ||
| 1108 | }; | ||
| 1109 | |||
| 1110 | enum { | ||
| 1111 | CAKE_ATM_NONE = 0, | ||
| 1112 | CAKE_ATM_ATM, | ||
| 1113 | CAKE_ATM_PTM, | ||
| 1114 | CAKE_ATM_MAX | ||
| 1115 | }; | ||
| 1116 | |||
| 1117 | |||
| 1118 | /* TAPRIO */ | ||
| 1119 | enum { | ||
| 1120 | TC_TAPRIO_CMD_SET_GATES = 0x00, | ||
| 1121 | TC_TAPRIO_CMD_SET_AND_HOLD = 0x01, | ||
| 1122 | TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02, | ||
| 1123 | }; | ||
| 1124 | |||
| 1125 | enum { | ||
| 1126 | TCA_TAPRIO_SCHED_ENTRY_UNSPEC, | ||
| 1127 | TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */ | ||
| 1128 | TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */ | ||
| 1129 | TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */ | ||
| 1130 | TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */ | ||
| 1131 | __TCA_TAPRIO_SCHED_ENTRY_MAX, | ||
| 1132 | }; | ||
| 1133 | #define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1) | ||
| 1134 | |||
| 1135 | /* The format for schedule entry list is: | ||
| 1136 | * [TCA_TAPRIO_SCHED_ENTRY_LIST] | ||
| 1137 | * [TCA_TAPRIO_SCHED_ENTRY] | ||
| 1138 | * [TCA_TAPRIO_SCHED_ENTRY_CMD] | ||
| 1139 | * [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] | ||
| 1140 | * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] | ||
| 1141 | */ | ||
| 1142 | enum { | ||
| 1143 | TCA_TAPRIO_SCHED_UNSPEC, | ||
| 1144 | TCA_TAPRIO_SCHED_ENTRY, | ||
| 1145 | __TCA_TAPRIO_SCHED_MAX, | ||
| 1146 | }; | ||
| 1147 | |||
| 1148 | #define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1) | ||
| 1149 | |||
| 1150 | enum { | ||
| 1151 | TCA_TAPRIO_ATTR_UNSPEC, | ||
| 1152 | TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */ | ||
| 1153 | TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */ | ||
| 1154 | TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */ | ||
| 1155 | TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */ | ||
| 1156 | TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */ | ||
| 1157 | TCA_TAPRIO_PAD, | ||
| 1158 | __TCA_TAPRIO_ATTR_MAX, | ||
| 1159 | }; | ||
| 1160 | |||
| 1161 | #define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1) | ||
| 1162 | |||
| 1163 | #endif | ||
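The schedule entry list comment above describes a two-level netlink nesting: a TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST container holding one TCA_TAPRIO_SCHED_ENTRY nest per entry, each carrying the CMD, GATE_MASK and INTERVAL attributes. As a rough illustration of that layout only — iproute2's tc uses its own addattr helpers, and the libmnl calls, the put_sched_entry() name, and the omission of TCA_TAPRIO_SCHED_ENTRY_INDEX here are assumptions, not part of this commit — a userspace configurator might pack one entry like this:

/* Hypothetical sketch: packing one taprio schedule entry with libmnl.
 * Assumes the caller has already put the nlmsghdr/tcmsg for the qdisc
 * change into nlh; only the attribute nesting is shown.
 */
#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>

static void put_sched_entry(struct nlmsghdr *nlh, __u8 cmd,
			    __u32 gate_mask, __u32 interval_ns)
{
	struct nlattr *list, *entry;

	list = mnl_attr_nest_start(nlh, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	entry = mnl_attr_nest_start(nlh, TCA_TAPRIO_SCHED_ENTRY);

	/* cmd is one of TC_TAPRIO_CMD_SET_GATES and friends */
	mnl_attr_put_u8(nlh, TCA_TAPRIO_SCHED_ENTRY_CMD, cmd);
	mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, gate_mask);
	mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_INTERVAL, interval_ns);

	mnl_attr_nest_end(nlh, entry);
	mnl_attr_nest_end(nlh, list);
}

The interval is expressed in nanoseconds, and the gate mask is a bitmap selecting which traffic classes have their gates open for that interval.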
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 3caaa3428774..88cbd110ae58 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c | |||
| @@ -65,6 +65,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, | |||
| 65 | return syscall(__NR_bpf, cmd, attr, size); | 65 | return syscall(__NR_bpf, cmd, attr, size); |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size) | ||
| 69 | { | ||
| 70 | int fd; | ||
| 71 | |||
| 72 | do { | ||
| 73 | fd = sys_bpf(BPF_PROG_LOAD, attr, size); | ||
| 74 | } while (fd < 0 && errno == EAGAIN); | ||
| 75 | |||
| 76 | return fd; | ||
| 77 | } | ||
| 78 | |||
| 68 | int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) | 79 | int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) |
| 69 | { | 80 | { |
| 70 | __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0; | 81 | __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0; |
| @@ -232,7 +243,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, | |||
| 232 | memcpy(attr.prog_name, load_attr->name, | 243 | memcpy(attr.prog_name, load_attr->name, |
| 233 | min(name_len, BPF_OBJ_NAME_LEN - 1)); | 244 | min(name_len, BPF_OBJ_NAME_LEN - 1)); |
| 234 | 245 | ||
| 235 | fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 246 | fd = sys_bpf_prog_load(&attr, sizeof(attr)); |
| 236 | if (fd >= 0) | 247 | if (fd >= 0) |
| 237 | return fd; | 248 | return fd; |
| 238 | 249 | ||
| @@ -269,7 +280,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, | |||
| 269 | break; | 280 | break; |
| 270 | } | 281 | } |
| 271 | 282 | ||
| 272 | fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 283 | fd = sys_bpf_prog_load(&attr, sizeof(attr)); |
| 273 | 284 | ||
| 274 | if (fd >= 0) | 285 | if (fd >= 0) |
| 275 | goto done; | 286 | goto done; |
| @@ -283,7 +294,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, | |||
| 283 | attr.log_size = log_buf_sz; | 294 | attr.log_size = log_buf_sz; |
| 284 | attr.log_level = 1; | 295 | attr.log_level = 1; |
| 285 | log_buf[0] = 0; | 296 | log_buf[0] = 0; |
| 286 | fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 297 | fd = sys_bpf_prog_load(&attr, sizeof(attr)); |
| 287 | done: | 298 | done: |
| 288 | free(finfo); | 299 | free(finfo); |
| 289 | free(linfo); | 300 | free(linfo); |
| @@ -328,7 +339,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, | |||
| 328 | attr.kern_version = kern_version; | 339 | attr.kern_version = kern_version; |
| 329 | attr.prog_flags = prog_flags; | 340 | attr.prog_flags = prog_flags; |
| 330 | 341 | ||
| 331 | return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 342 | return sys_bpf_prog_load(&attr, sizeof(attr)); |
| 332 | } | 343 | } |
| 333 | 344 | ||
| 334 | int bpf_map_update_elem(int fd, const void *key, const void *value, | 345 | int bpf_map_update_elem(int fd, const void *key, const void *value, |
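The new sys_bpf_prog_load() wrapper simply retries the BPF_PROG_LOAD command while the syscall fails with EAGAIN, so every load path above (bpf_load_program_xattr() and bpf_verify_program()) now rides out that transient failure without any change on the caller's side. A minimal caller sketch follows — the two-instruction "return 0" filter and the <bpf/bpf.h> install path are illustrative assumptions, not part of this commit:

/* Minimal sketch: loading a trivial socket filter through libbpf's
 * bpf_load_program(), which now retries internally on EAGAIN.
 */
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct bpf_insn prog[] = {
		/* r0 = 0 */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
		/* exit */
		{ .code = BPF_JMP | BPF_EXIT },
	};
	char log[4096];
	int fd;

	fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
			      sizeof(prog) / sizeof(prog[0]), "GPL",
			      0, log, sizeof(log));
	if (fd < 0) {
		fprintf(stderr, "load failed: %s\n", log);
		return 1;
	}
	close(fd);
	return 0;
}

Centralizing the retry in one helper keeps the three BPF_PROG_LOAD call sites identical instead of sprinkling errno checks through bpf_load_program_xattr().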
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 70229de510f5..41ab7a3668b3 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile | |||
| @@ -56,6 +56,7 @@ TEST_PROGS := test_kmod.sh \ | |||
| 56 | test_xdp_vlan.sh | 56 | test_xdp_vlan.sh |
| 57 | 57 | ||
| 58 | TEST_PROGS_EXTENDED := with_addr.sh \ | 58 | TEST_PROGS_EXTENDED := with_addr.sh \ |
| 59 | with_tunnels.sh \ | ||
| 59 | tcp_client.py \ | 60 | tcp_client.py \ |
| 60 | tcp_server.py | 61 | tcp_server.py |
| 61 | 62 | ||
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 126fc624290d..25f0083a9b2e 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c | |||
| @@ -1188,7 +1188,9 @@ static void test_stacktrace_build_id(void) | |||
| 1188 | int i, j; | 1188 | int i, j; |
| 1189 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; | 1189 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; |
| 1190 | int build_id_matches = 0; | 1190 | int build_id_matches = 0; |
| 1191 | int retry = 1; | ||
| 1191 | 1192 | ||
| 1193 | retry: | ||
| 1192 | err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); | 1194 | err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); |
| 1193 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) | 1195 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) |
| 1194 | goto out; | 1196 | goto out; |
| @@ -1301,6 +1303,19 @@ static void test_stacktrace_build_id(void) | |||
| 1301 | previous_key = key; | 1303 | previous_key = key; |
| 1302 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); | 1304 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); |
| 1303 | 1305 | ||
| 1306 | /* stack_map_get_build_id_offset() is racy and sometimes can return | ||
| 1307 | * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; | ||
| 1308 | * try it one more time. | ||
| 1309 | */ | ||
| 1310 | if (build_id_matches < 1 && retry--) { | ||
| 1311 | ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); | ||
| 1312 | close(pmu_fd); | ||
| 1313 | bpf_object__close(obj); | ||
| 1314 | printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", | ||
| 1315 | __func__); | ||
| 1316 | goto retry; | ||
| 1317 | } | ||
| 1318 | |||
| 1304 | if (CHECK(build_id_matches < 1, "build id match", | 1319 | if (CHECK(build_id_matches < 1, "build id match", |
| 1305 | "Didn't find expected build ID from the map\n")) | 1320 | "Didn't find expected build ID from the map\n")) |
| 1306 | goto disable_pmu; | 1321 | goto disable_pmu; |
| @@ -1341,7 +1356,9 @@ static void test_stacktrace_build_id_nmi(void) | |||
| 1341 | int i, j; | 1356 | int i, j; |
| 1342 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; | 1357 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; |
| 1343 | int build_id_matches = 0; | 1358 | int build_id_matches = 0; |
| 1359 | int retry = 1; | ||
| 1344 | 1360 | ||
| 1361 | retry: | ||
| 1345 | err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); | 1362 | err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); |
| 1346 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) | 1363 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) |
| 1347 | return; | 1364 | return; |
| @@ -1436,6 +1453,19 @@ static void test_stacktrace_build_id_nmi(void) | |||
| 1436 | previous_key = key; | 1453 | previous_key = key; |
| 1437 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); | 1454 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); |
| 1438 | 1455 | ||
| 1456 | /* stack_map_get_build_id_offset() is racy and sometimes can return | ||
| 1457 | * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; | ||
| 1458 | * try it one more time. | ||
| 1459 | */ | ||
| 1460 | if (build_id_matches < 1 && retry--) { | ||
| 1461 | ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); | ||
| 1462 | close(pmu_fd); | ||
| 1463 | bpf_object__close(obj); | ||
| 1464 | printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", | ||
| 1465 | __func__); | ||
| 1466 | goto retry; | ||
| 1467 | } | ||
| 1468 | |||
| 1439 | if (CHECK(build_id_matches < 1, "build id match", | 1469 | if (CHECK(build_id_matches < 1, "build id match", |
| 1440 | "Didn't find expected build ID from the map\n")) | 1470 | "Didn't find expected build ID from the map\n")) |
| 1441 | goto disable_pmu; | 1471 | goto disable_pmu; |
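Both tests count stackmap frames whose struct bpf_stack_build_id reports BPF_STACK_BUILD_ID_VALID; because stack_map_get_build_id_offset() can racily report BPF_STACK_BUILD_ID_IP instead, the count can legitimately come out zero once, which is what the single retry papers over. A reduced sketch of that status check is below — the helper name is hypothetical, and the real tests additionally memcmp() the reported build ID against the one extracted from the test binary:

/* Hypothetical helper, not from the commit: count how many frames of one
 * stackmap entry resolved to a valid build ID. BPF_STACK_BUILD_ID_IP means
 * only the raw instruction pointer was recorded (the racy case).
 */
#include <linux/bpf.h>

static int count_valid_build_ids(const struct bpf_stack_build_id *id_offs,
				 int depth)
{
	int i, matches = 0;

	for (i = 0; i < depth; i++)
		if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID)
			matches++;	/* build_id[] and offset are usable */

	return matches;
}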
