 include/linux/bpf.h          | 11
 include/linux/bpf_verifier.h |  3
 kernel/bpf/verifier.c        | 29
 kernel/trace/bpf_trace.c     | 17
 net/core/filter.c            | 92
 5 files changed, 97 insertions(+), 55 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 1bcbf0a71f75..deca4e7f2845 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -149,6 +149,15 @@ enum bpf_reg_type {
 
 struct bpf_prog;
 
+/* The information passed from prog-specific *_is_valid_access
+ * back to the verifier.
+ */
+struct bpf_insn_access_aux {
+        enum bpf_reg_type reg_type;
+        int ctx_field_size;
+        int converted_op_size;
+};
+
 struct bpf_verifier_ops {
         /* return eBPF function prototype for verification */
         const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
@@ -157,7 +166,7 @@ struct bpf_verifier_ops {
          * with 'type' (read or write) is allowed
          */
         bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
-                                enum bpf_reg_type *reg_type, int *ctx_field_size);
+                                struct bpf_insn_access_aux *info);
         int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
                             const struct bpf_prog *prog);
         u32 (*convert_ctx_access)(enum bpf_access_type type,
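
The new struct replaces the reg_type and ctx_field_size out-parameters of is_valid_access with a single aux structure that the callback fills in. As a minimal sketch (illustrative only, assuming <linux/bpf.h>; the offsets, sizes and the callback name are made up), a prog-type callback under the new signature might look like:

static bool example_prog_is_valid_access(int off, int size,
                                         enum bpf_access_type type,
                                         struct bpf_insn_access_aux *info)
{
        /* hypothetical 64-byte context, read-only for the program */
        if (off < 0 || off + size > 64)
                return false;
        if (type == BPF_WRITE)
                return false;
        /* this field only supports whole-field access in convert_ctx_access,
         * so report both the exposed field size and the width of the load
         * that the conversion will actually emit
         */
        info->ctx_field_size = 4;
        info->converted_op_size = 4;
        return true;
}
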
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 189741c0da85..621076f56251 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -73,7 +73,8 @@ struct bpf_insn_aux_data {
                 enum bpf_reg_type ptr_type;     /* pointer type for load/store insns */
                 struct bpf_map *map_ptr;        /* pointer for call insn into lookup_elem */
         };
-        int ctx_field_size; /* the ctx field size for load/store insns, maybe 0 */
+        int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
+        int converted_op_size; /* the valid value width after perceived conversion */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 44b97d958fb7..74ea96ea391b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -761,22 +761,34 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
                             enum bpf_access_type t, enum bpf_reg_type *reg_type)
 {
-        int ctx_field_size = 0;
+        struct bpf_insn_access_aux info = { .reg_type = *reg_type };
 
         /* for analyzer ctx accesses are already validated and converted */
         if (env->analyzer_ops)
                 return 0;
 
         if (env->prog->aux->ops->is_valid_access &&
-            env->prog->aux->ops->is_valid_access(off, size, t, reg_type, &ctx_field_size)) {
-                /* a non zero ctx_field_size indicates:
+            env->prog->aux->ops->is_valid_access(off, size, t, &info)) {
+                /* a non zero info.ctx_field_size indicates:
                  * . For this field, the prog type specific ctx conversion algorithm
                  *   only supports whole field access.
                  * . This ctx access is a candiate for later verifier transformation
                  *   to load the whole field and then apply a mask to get correct result.
+                 * a non zero info.converted_op_size indicates perceived actual converted
+                 * value width in convert_ctx_access.
                  */
-                if (ctx_field_size)
-                        env->insn_aux_data[insn_idx].ctx_field_size = ctx_field_size;
+                if ((info.ctx_field_size && !info.converted_op_size) ||
+                    (!info.ctx_field_size && info.converted_op_size)) {
+                        verbose("verifier bug in is_valid_access prog type=%u off=%d size=%d\n",
+                                env->prog->type, off, size);
+                        return -EACCES;
+                }
+
+                if (info.ctx_field_size) {
+                        env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
+                        env->insn_aux_data[insn_idx].converted_op_size = info.converted_op_size;
+                }
+                *reg_type = info.reg_type;
 
                 /* remember the offset of last byte accessed in ctx */
                 if (env->prog->aux->max_ctx_offset < off + size)
@@ -3388,7 +3400,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
         struct bpf_insn insn_buf[16], *insn;
         struct bpf_prog *new_prog;
         enum bpf_access_type type;
-        int i, cnt, off, size, ctx_field_size, is_narrower_load, delta = 0;
+        int i, cnt, off, size, ctx_field_size, converted_op_size, is_narrower_load, delta = 0;
 
         if (ops->gen_prologue) {
                 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
@@ -3431,7 +3443,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                 off = insn->off;
                 size = bpf_size_to_bytes(BPF_SIZE(insn->code));
                 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
-                is_narrower_load = (type == BPF_READ && size < ctx_field_size);
+                converted_op_size = env->insn_aux_data[i + delta].converted_op_size;
+                is_narrower_load = type == BPF_READ && size < ctx_field_size;
 
                 /* If the read access is a narrower load of the field,
                  * convert to a 4/8-byte load, to minimum program type specific
@@ -3453,7 +3466,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                         verbose("bpf verifier is misconfigured\n");
                         return -EINVAL;
                 }
-                if (is_narrower_load) {
+                if (is_narrower_load && size < converted_op_size) {
                         if (ctx_field_size <= 4)
                                 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
                                                                 (1 << size * 8) - 1);
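
To make the narrow-load handling concrete: for a 1-byte context read where convert_ctx_access emits a full 4-byte load, the verifier keeps the AND mask above; where the conversion already emits a 1-byte load (converted_op_size == 1), size is not smaller than converted_op_size and the mask is skipped. A hedged sketch of the two resulting instruction shapes, assuming <linux/filter.h> and a placeholder kernel field offset:

/* 1-byte read of a field whose conversion emits a 4-byte load:
 * size (1) < converted_op_size (4), so a mask is appended.
 */
struct bpf_insn narrow_load_masked[] = {
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0 /* placeholder kernel offset */),
        BPF_ALU32_IMM(BPF_AND, BPF_REG_0, (1 << 8) - 1),        /* keep low byte */
};

/* 1-byte read of a field whose conversion already emits a 1-byte load
 * (e.g. pkt_type): size == converted_op_size, no mask is needed.
 */
struct bpf_insn narrow_load_unmasked[] = {
        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0 /* placeholder kernel offset */),
};
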
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9d3ec8253131..97c46b440cd6 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -479,7 +479,7 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
 
 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-                                        enum bpf_reg_type *reg_type, int *ctx_field_size)
+                                        struct bpf_insn_access_aux *info)
 {
         if (off < 0 || off >= sizeof(struct pt_regs))
                 return false;
@@ -562,7 +562,7 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 }
 
 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-                                    enum bpf_reg_type *reg_type, int *ctx_field_size)
+                                    struct bpf_insn_access_aux *info)
 {
         if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
                 return false;
@@ -581,7 +581,7 @@ const struct bpf_verifier_ops tracepoint_prog_ops = {
 };
 
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-                                    enum bpf_reg_type *reg_type, int *ctx_field_size)
+                                    struct bpf_insn_access_aux *info)
 {
         int sample_period_off;
 
@@ -595,12 +595,17 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
         /* permit 1, 2, 4 byte narrower and 8 normal read access to sample_period */
         sample_period_off = offsetof(struct bpf_perf_event_data, sample_period);
         if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) {
-                *ctx_field_size = 8;
+                int allowed;
+
 #ifdef __LITTLE_ENDIAN
-                return (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
+                allowed = (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
 #else
-                return ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
+                allowed = ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
 #endif
+                if (!allowed)
+                        return false;
+                info->ctx_field_size = 8;
+                info->converted_op_size = 8;
         } else {
                 if (size != sizeof(long))
                         return false;
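
With ctx_field_size and converted_op_size both set to 8 here, a 1/2/4-byte aligned read of sample_period is accepted and later rewritten by the verifier into an 8-byte load plus a mask. A hypothetical perf_event program exercising such a narrow read (the includes, SEC macro and function name are assumptions in the usual BPF C style):

#include <linux/types.h>
#include <linux/bpf_perf_event.h>

#ifndef SEC
#define SEC(name) __attribute__((section(name), used))
#endif

SEC("perf_event")
int sample_period_low_bits(struct bpf_perf_event_data *ctx)
{
        /* 4-byte narrow load at the start of the 8-byte sample_period field;
         * this layout assumes little-endian (big-endian would have to read
         * the high half).  The verifier rewrites it to an 8-byte load plus
         * a 32-bit mask.
         */
        __u32 lo = *(__u32 *)&ctx->sample_period;

        return lo & 1;
}
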
diff --git a/net/core/filter.c b/net/core/filter.c
index 60ed6f343a63..4b788007415f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2856,8 +2856,37 @@ lwt_xmit_func_proto(enum bpf_func_id func_id)
         }
 }
 
+static void __set_access_aux_info(int off, struct bpf_insn_access_aux *info)
+{
+        info->ctx_field_size = 4;
+        switch (off) {
+        case offsetof(struct __sk_buff, pkt_type) ...
+             offsetof(struct __sk_buff, pkt_type) + sizeof(__u32) - 1:
+        case offsetof(struct __sk_buff, vlan_present) ...
+             offsetof(struct __sk_buff, vlan_present) + sizeof(__u32) - 1:
+                info->converted_op_size = 1;
+                break;
+        case offsetof(struct __sk_buff, queue_mapping) ...
+             offsetof(struct __sk_buff, queue_mapping) + sizeof(__u32) - 1:
+        case offsetof(struct __sk_buff, protocol) ...
+             offsetof(struct __sk_buff, protocol) + sizeof(__u32) - 1:
+        case offsetof(struct __sk_buff, vlan_tci) ...
+             offsetof(struct __sk_buff, vlan_tci) + sizeof(__u32) - 1:
+        case offsetof(struct __sk_buff, vlan_proto) ...
+             offsetof(struct __sk_buff, vlan_proto) + sizeof(__u32) - 1:
+        case offsetof(struct __sk_buff, tc_index) ...
+             offsetof(struct __sk_buff, tc_index) + sizeof(__u32) - 1:
+        case offsetof(struct __sk_buff, tc_classid) ...
+             offsetof(struct __sk_buff, tc_classid) + sizeof(__u32) - 1:
+                info->converted_op_size = 2;
+                break;
+        default:
+                info->converted_op_size = 4;
+        }
+}
+
 static bool __is_valid_access(int off, int size, enum bpf_access_type type,
-                              int *ctx_field_size)
+                              struct bpf_insn_access_aux *info)
 {
         if (off < 0 || off >= sizeof(struct __sk_buff))
                 return false;
@@ -2875,24 +2904,32 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type,
                 break;
         case offsetof(struct __sk_buff, data) ...
              offsetof(struct __sk_buff, data) + sizeof(__u32) - 1:
+                if (size != sizeof(__u32))
+                        return false;
+                info->reg_type = PTR_TO_PACKET;
+                break;
         case offsetof(struct __sk_buff, data_end) ...
              offsetof(struct __sk_buff, data_end) + sizeof(__u32) - 1:
                 if (size != sizeof(__u32))
                         return false;
+                info->reg_type = PTR_TO_PACKET_END;
                 break;
         default:
-                /* permit narrower load for not cb/data/data_end fields */
-                *ctx_field_size = 4;
                 if (type == BPF_WRITE) {
                         if (size != sizeof(__u32))
                                 return false;
                 } else {
-                        if (size != sizeof(__u32))
+                        int allowed;
+
+                        /* permit narrower load for not cb/data/data_end fields */
 #ifdef __LITTLE_ENDIAN
-                                return (off & 0x3) == 0 && (size == 1 || size == 2);
+                        allowed = (off & 0x3) == 0 && size <= 4 && (size & (size - 1)) == 0;
 #else
-                                return (off & 0x3) + size == 4 && (size == 1 || size == 2);
+                        allowed = (off & 0x3) + size == 4 && size <= 4 && (size & (size - 1)) == 0;
 #endif
+                        if (!allowed)
+                                return false;
+                        __set_access_aux_info(off, info);
                 }
         }
 
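
The converted_op_size chosen above mirrors how wide a load convert_ctx_access actually emits for each __sk_buff field: 1 byte for pkt_type and vlan_present, 2 bytes for protocol, queue_mapping, vlan_tci, vlan_proto, tc_index and tc_classid, and 4 bytes otherwise, so a narrow read that already matches that width needs no extra mask. A hypothetical socket-filter program doing such a 2-byte read of vlan_tci (includes, SEC macro and names are assumptions):

#include <linux/types.h>
#include <linux/bpf.h>

#ifndef SEC
#define SEC(name) __attribute__((section(name), used))
#endif

SEC("socket")
int vlan_tci_nonzero(struct __sk_buff *skb)
{
        /* 2-byte narrow read at the start of the 4-byte vlan_tci context
         * field (little-endian layout assumed; big-endian would read the
         * other half).  convert_ctx_access already emits a 2-byte load for
         * vlan_tci, so the verifier adds no extra mask here.
         */
        __u16 tci = *(__u16 *)&skb->vlan_tci;

        return tci != 0;
}
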
@@ -2901,8 +2938,7 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type,
 
 static bool sk_filter_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
-                                      enum bpf_reg_type *reg_type,
-                                      int *ctx_field_size)
+                                      struct bpf_insn_access_aux *info)
 {
         switch (off) {
         case offsetof(struct __sk_buff, tc_classid) ...
@@ -2924,13 +2960,12 @@ static bool sk_filter_is_valid_access(int off, int size,
                 }
         }
 
-        return __is_valid_access(off, size, type, ctx_field_size);
+        return __is_valid_access(off, size, type, info);
 }
 
 static bool lwt_is_valid_access(int off, int size,
                                 enum bpf_access_type type,
-                                enum bpf_reg_type *reg_type,
-                                int *ctx_field_size)
+                                struct bpf_insn_access_aux *info)
 {
         switch (off) {
         case offsetof(struct __sk_buff, tc_classid) ...
@@ -2950,22 +2985,12 @@ static bool lwt_is_valid_access(int off, int size,
                 }
         }
 
-        switch (off) {
-        case offsetof(struct __sk_buff, data):
-                *reg_type = PTR_TO_PACKET;
-                break;
-        case offsetof(struct __sk_buff, data_end):
-                *reg_type = PTR_TO_PACKET_END;
-                break;
-        }
-
-        return __is_valid_access(off, size, type, ctx_field_size);
+        return __is_valid_access(off, size, type, info);
 }
 
 static bool sock_filter_is_valid_access(int off, int size,
                                         enum bpf_access_type type,
-                                        enum bpf_reg_type *reg_type,
-                                        int *ctx_field_size)
+                                        struct bpf_insn_access_aux *info)
 {
         if (type == BPF_WRITE) {
                 switch (off) {
@@ -3028,8 +3053,7 @@ static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
 
 static bool tc_cls_act_is_valid_access(int off, int size,
                                        enum bpf_access_type type,
-                                       enum bpf_reg_type *reg_type,
-                                       int *ctx_field_size)
+                                       struct bpf_insn_access_aux *info)
 {
         if (type == BPF_WRITE) {
                 switch (off) {
@@ -3045,16 +3069,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
                 }
         }
 
-        switch (off) {
-        case offsetof(struct __sk_buff, data):
-                *reg_type = PTR_TO_PACKET;
-                break;
-        case offsetof(struct __sk_buff, data_end):
-                *reg_type = PTR_TO_PACKET_END;
-                break;
-        }
-
-        return __is_valid_access(off, size, type, ctx_field_size);
+        return __is_valid_access(off, size, type, info);
 }
 
 static bool __is_valid_xdp_access(int off, int size)
@@ -3071,18 +3086,17 @@ static bool __is_valid_xdp_access(int off, int size)
 
 static bool xdp_is_valid_access(int off, int size,
                                 enum bpf_access_type type,
-                                enum bpf_reg_type *reg_type,
-                                int *ctx_field_size)
+                                struct bpf_insn_access_aux *info)
 {
         if (type == BPF_WRITE)
                 return false;
 
         switch (off) {
         case offsetof(struct xdp_md, data):
-                *reg_type = PTR_TO_PACKET;
+                info->reg_type = PTR_TO_PACKET;
                 break;
         case offsetof(struct xdp_md, data_end):
-                *reg_type = PTR_TO_PACKET_END;
+                info->reg_type = PTR_TO_PACKET_END;
                 break;
         }
 
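
Reporting PTR_TO_PACKET and PTR_TO_PACKET_END through info->reg_type is what lets the verifier enforce the usual bounds check before direct packet access in XDP. A minimal sketch of that pattern (standard uapi definitions assumed; the program name and SEC macro are illustrative):

#include <linux/bpf.h>

#ifndef SEC
#define SEC(name) __attribute__((section(name), used))
#endif

SEC("xdp")
int drop_short_frames(struct xdp_md *ctx)
{
        /* loads of data/data_end are typed PTR_TO_PACKET / PTR_TO_PACKET_END */
        void *data     = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;

        /* bounds check required before dereferencing packet bytes */
        if (data + 14 > data_end)
                return XDP_DROP;

        return XDP_PASS;
}
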