author		Daniel Borkmann <daniel@iogearbox.net>	2016-09-08 20:45:29 -0400
committer	David S. Miller <davem@davemloft.net>	2016-09-09 22:36:04 -0400
commit		f035a51536af9802f55d8c79bd87f184ebffb093
tree		b10ca650031a03f3752a1ea9f7178282e8eb0a75
parent		6088b5823b4cb132a838878747384cbfb5ce6646
bpf: add BPF_SIZEOF and BPF_FIELD_SIZEOF macros
Add BPF_SIZEOF() and BPF_FIELD_SIZEOF() macros to improve code that
otherwise often results in overly long bytes_to_bpf_size(sizeof()) and
bytes_to_bpf_size(FIELD_SIZEOF()) lines; place them into a macro helper
instead. Moreover, we currently have a
BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF()) < 0) check in
convert_bpf_extensions(), but we should rather make that generic as well
and add a BUILD_BUG_ON() test to all BPF_SIZEOF()/BPF_FIELD_SIZEOF() users
to detect any rewriter size issues at compile time. Note that there are
currently none, but we want to assert that it stays this way.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/linux/filter.h		14
-rw-r--r--	kernel/trace/bpf_trace.c	12
-rw-r--r--	net/core/filter.c		15
3 files changed, 27 insertions(+), 14 deletions(-)
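For illustration only, here is a small self-contained userspace sketch (GNU C, gcc/clang) of what the new macros provide: map a C object size onto one of the BPF access widths (BPF_B/BPF_H/BPF_W/BPF_DW) and turn any size without a BPF encoding into a compile-time failure. This is not the kernel code: struct demo, main() and the macro form of bytes_to_bpf_size() (a static inline helper in the kernel) are stand-ins added for the example, and BUILD_BUG_ON() is approximated with _Static_assert.

/*
 * Illustrative userspace sketch, not the kernel implementation.
 */
#include <stdio.h>

/* BPF access-width encodings as in the UAPI headers. */
#define BPF_W	0x00	/* 4 bytes */
#define BPF_H	0x08	/* 2 bytes */
#define BPF_B	0x10	/* 1 byte  */
#define BPF_DW	0x18	/* 8 bytes */

#define FIELD_SIZEOF(t, f)	(sizeof(((t *)0)->f))

/* Stand-in for the kernel helper; -1 means "no BPF size for this width". */
#define bytes_to_bpf_size(bytes)		\
	((bytes) == 1 ? BPF_B :			\
	 (bytes) == 2 ? BPF_H :			\
	 (bytes) == 4 ? BPF_W :			\
	 (bytes) == 8 ? BPF_DW : -1)

/* Compile-time assertion in the spirit of the kernel's BUILD_BUG_ON(). */
#define BUILD_BUG_ON(cond)	_Static_assert(!(cond), #cond)

/* Same shape as the macros added by this patch: reject unsupported sizes
 * at build time, otherwise evaluate to the BPF width constant. */
#define BPF_SIZEOF(type)						\
	({								\
		BUILD_BUG_ON(bytes_to_bpf_size(sizeof(type)) < 0);	\
		bytes_to_bpf_size(sizeof(type));			\
	})

#define BPF_FIELD_SIZEOF(type, field)					\
	({								\
		BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(type, field)) < 0); \
		bytes_to_bpf_size(FIELD_SIZEOF(type, field));		\
	})

struct demo {
	void *dev;	/* 8 bytes on a 64-bit build -> BPF_DW */
	char  tag[3];	/* 3 bytes -> no BPF width, would not build */
};

int main(void)
{
	printf("BPF_SIZEOF(long)            = %d\n", BPF_SIZEOF(long));
	printf("BPF_FIELD_SIZEOF(demo, dev) = %d\n",
	       BPF_FIELD_SIZEOF(struct demo, dev));
	/* BPF_FIELD_SIZEOF(struct demo, tag) would trip BUILD_BUG_ON(). */
	return 0;
}

On a typical 64-bit build this prints 24 (BPF_DW, 0x18) for both lines, while adding a BPF_FIELD_SIZEOF(struct demo, tag) call aborts the compile, which is the same property the patch wants from every ctx rewriter that now uses these macros.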
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a16439b99fd9..7fabad8dc3fc 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -314,6 +314,20 @@ struct bpf_prog_aux;
 	bpf_size;						\
 })
 
+#define BPF_SIZEOF(type)					\
+	({							\
+		const int __size = bytes_to_bpf_size(sizeof(type)); \
+		BUILD_BUG_ON(__size < 0);			\
+		__size;						\
+	})
+
+#define BPF_FIELD_SIZEOF(type, field)				\
+	({							\
+		const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+		BUILD_BUG_ON(__size < 0);			\
+		__size;						\
+	})
+
 #ifdef CONFIG_COMPAT
 /* A struct sock_filter is architecture independent. */
 struct compat_sock_fprog {
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d3869b03d9fe..e63d7d435796 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -583,18 +583,18 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 	switch (ctx_off) {
 	case offsetof(struct bpf_perf_event_data, sample_period):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));
-		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct bpf_perf_event_data_kern, data)),
-				      dst_reg, src_reg,
+
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
+						       data), dst_reg, src_reg,
 				      offsetof(struct bpf_perf_event_data_kern, data));
 		*insn++ = BPF_LDX_MEM(BPF_DW, dst_reg, dst_reg,
 				      offsetof(struct perf_sample_data, period));
 		break;
 	default:
-		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct bpf_perf_event_data_kern, regs)),
-				      dst_reg, src_reg,
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
+						       regs), dst_reg, src_reg,
 				      offsetof(struct bpf_perf_event_data_kern, regs));
-		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(long)),
-				      dst_reg, dst_reg, ctx_off);
+		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), dst_reg, dst_reg, ctx_off);
 		break;
 	}
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 628ed8c7d38d..120c813ef030 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -233,9 +233,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 	case SKF_AD_OFF + SKF_AD_HATYPE:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
 		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
-		BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
 
-		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
 				      BPF_REG_TMP, BPF_REG_CTX,
 				      offsetof(struct sk_buff, dev));
 		/* if (tmp != 0) goto pc + 1 */
@@ -2685,7 +2684,7 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 	case offsetof(struct __sk_buff, ifindex):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
 
-		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
 				      dst_reg, src_reg,
 				      offsetof(struct sk_buff, dev));
 		*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
@@ -2750,7 +2749,7 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 		break;
 
 	case offsetof(struct __sk_buff, data):
-		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)),
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
 				      dst_reg, src_reg,
 				      offsetof(struct sk_buff, data));
 		break;
@@ -2759,8 +2758,8 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 		ctx_off -= offsetof(struct __sk_buff, data_end);
 		ctx_off += offsetof(struct sk_buff, cb);
 		ctx_off += offsetof(struct bpf_skb_data_end, data_end);
-		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)),
-				      dst_reg, src_reg, ctx_off);
+		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), dst_reg, src_reg,
+				      ctx_off);
 		break;
 
 	case offsetof(struct __sk_buff, tc_index):
@@ -2795,12 +2794,12 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 
 	switch (ctx_off) {
 	case offsetof(struct xdp_md, data):
-		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct xdp_buff, data)),
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
 				      dst_reg, src_reg,
 				      offsetof(struct xdp_buff, data));
 		break;
 	case offsetof(struct xdp_md, data_end):
-		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct xdp_buff, data_end)),
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
 				      dst_reg, src_reg,
 				      offsetof(struct xdp_buff, data_end));
 		break;