aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/bpf_trace.c
diff options
context:
space:
mode:
authorYonghong Song <yhs@fb.com>2017-06-22 18:07:39 -0400
committerDavid S. Miller <davem@davemloft.net>2017-06-23 14:04:11 -0400
commit239946314e57711d7da546b67964d0b387a3ee42 (patch)
tree958d35fbbbc439b561832c75de22f5fdfa825f7c /kernel/trace/bpf_trace.c
parent72de46556f8a291b2c72ea1fa22275ffef85e4f9 (diff)
bpf: possibly avoid extra masking for narrower load in verifier
Commit 31fd85816dbe ("bpf: permits narrower load from bpf program context fields") permits narrower load for certain ctx fields. The commit however will already generate a masking even if the prog-specific ctx conversion produces the result with narrower size. For example, for __sk_buff->protocol, the ctx conversion loads the data into register with 2-byte load. A narrower 2-byte load should not generate masking. For __sk_buff->vlan_present, the conversion function set the result as either 0 or 1, essentially a byte. The narrower 2-byte or 1-byte load should not generate masking. To avoid unnecessary masking, prog-specific *_is_valid_access now passes converted_op_size back to verifier, which indicates the valid data width after perceived future conversion. Based on this information, verifier is able to avoid unnecessary masking. Since we want more information back from prog-specific *_is_valid_access checking, all of them are packed into one data structure for more clarity. Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Yonghong Song <yhs@fb.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--kernel/trace/bpf_trace.c17
1 files changed, 11 insertions, 6 deletions
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9d3ec8253131..97c46b440cd6 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -479,7 +479,7 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
479 479
480/* bpf+kprobe programs can access fields of 'struct pt_regs' */ 480/* bpf+kprobe programs can access fields of 'struct pt_regs' */
481static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 481static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
482 enum bpf_reg_type *reg_type, int *ctx_field_size) 482 struct bpf_insn_access_aux *info)
483{ 483{
484 if (off < 0 || off >= sizeof(struct pt_regs)) 484 if (off < 0 || off >= sizeof(struct pt_regs))
485 return false; 485 return false;
@@ -562,7 +562,7 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
562} 562}
563 563
564static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, 564static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
565 enum bpf_reg_type *reg_type, int *ctx_field_size) 565 struct bpf_insn_access_aux *info)
566{ 566{
567 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) 567 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
568 return false; 568 return false;
@@ -581,7 +581,7 @@ const struct bpf_verifier_ops tracepoint_prog_ops = {
581}; 581};
582 582
583static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 583static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
584 enum bpf_reg_type *reg_type, int *ctx_field_size) 584 struct bpf_insn_access_aux *info)
585{ 585{
586 int sample_period_off; 586 int sample_period_off;
587 587
@@ -595,12 +595,17 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
595 /* permit 1, 2, 4 byte narrower and 8 normal read access to sample_period */ 595 /* permit 1, 2, 4 byte narrower and 8 normal read access to sample_period */
596 sample_period_off = offsetof(struct bpf_perf_event_data, sample_period); 596 sample_period_off = offsetof(struct bpf_perf_event_data, sample_period);
597 if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) { 597 if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) {
598 *ctx_field_size = 8; 598 int allowed;
599
599#ifdef __LITTLE_ENDIAN 600#ifdef __LITTLE_ENDIAN
600 return (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0; 601 allowed = (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
601#else 602#else
602 return ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0; 603 allowed = ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
603#endif 604#endif
605 if (!allowed)
606 return false;
607 info->ctx_field_size = 8;
608 info->converted_op_size = 8;
604 } else { 609 } else {
605 if (size != sizeof(long)) 610 if (size != sizeof(long))
606 return false; 611 return false;