Diffstat (limited to 'kernel'):

 kernel/bpf/core.c     |   3
 kernel/bpf/verifier.c | 203
 2 files changed, 173 insertions(+), 33 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 2a81b8af3748..1e443ba97310 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -362,7 +362,8 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
 			insn = prog->insnsi + end_old;
 		}
 		code = insn->code;
-		if (BPF_CLASS(code) != BPF_JMP ||
+		if ((BPF_CLASS(code) != BPF_JMP &&
+		     BPF_CLASS(code) != BPF_JMP32) ||
 		    BPF_OP(code) == BPF_EXIT)
 			continue;
 		/* Adjust offset of jmps if we cross patch boundaries. */
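Every check in this patch follows the same pattern: a test that used to accept only the BPF_JMP instruction class must now also accept BPF_JMP32. The class and the operation live in separate bit fields of the single insn->code byte. Below is a minimal standalone sketch of the decoding; the macro and constant values mirror the real UAPI definitions (include/uapi/linux/bpf_common.h and include/uapi/linux/bpf.h), while the program itself is only for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors of the UAPI opcode-field macros. */
    #define BPF_CLASS(code) ((code) & 0x07)
    #define BPF_OP(code)    ((code) & 0xf0)

    #define BPF_JMP   0x05  /* 64-bit jump class */
    #define BPF_JMP32 0x06  /* new class: compares only the low 32 bits */
    #define BPF_JEQ   0x10

    int main(void)
    {
            uint8_t code = BPF_JMP32 | BPF_JEQ;     /* a "jeq32" opcode byte */

            printf("class=%#x op=%#x\n", BPF_CLASS(code), BPF_OP(code));
            return 0;
    }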
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index eae6cb1fe653..8c1c21cd50b4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1095,7 +1095,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
 	for (i = 0; i < insn_cnt; i++) {
 		u8 code = insn[i].code;
 
-		if (BPF_CLASS(code) != BPF_JMP)
+		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
 			goto next;
 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
 			goto next;
@@ -4031,14 +4031,49 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
  * 0 - branch will not be taken and fall-through to next insn
  * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
  */
-static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
+			   bool is_jmp32)
 {
+	struct bpf_reg_state reg_lo;
 	s64 sval;
 
 	if (__is_pointer_value(false, reg))
 		return -1;
 
-	sval = (s64)val;
+	if (is_jmp32) {
+		reg_lo = *reg;
+		reg = &reg_lo;
+		/* For JMP32 only the low 32 bits are compared;
+		 * coerce_reg_to_size() truncates the high bits and updates
+		 * umin/umax from the information in the low bits.
+		 */
+		coerce_reg_to_size(reg, 4);
+		/* smin/smax need special handling. For example, after coercion,
+		 * if smin_value is 0x00000000ffffffffLL, the value is -1 when
+		 * used as an operand to JMP32. It is a negative number from
+		 * s32's point of view, while it is a positive number when seen
+		 * as s64. The smin/smax are kept as s64, so when used with
+		 * JMP32 they need to be cast to s32, then sign extended back
+		 * to s64.
+		 *
+		 * Also, smin/smax were copied from umin/umax. If umin/umax
+		 * differ in sign bit, the min/max relationship doesn't hold
+		 * after casting into s32; in that case, set smin/smax to the
+		 * safest range.
+		 */
+		if ((reg->umax_value ^ reg->umin_value) &
+		    (1ULL << 31)) {
+			reg->smin_value = S32_MIN;
+			reg->smax_value = S32_MAX;
+		}
+		reg->smin_value = (s64)(s32)reg->smin_value;
+		reg->smax_value = (s64)(s32)reg->smax_value;
+
+		val = (u32)val;
+		sval = (s64)(s32)val;
+	} else {
+		sval = (s64)val;
+	}
 
 	switch (opcode) {
 	case BPF_JEQ:
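The sign-extension subtlety described in the new comment is easy to reproduce in isolation. A minimal user-space demonstration, plain C and independent of the kernel sources:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t smin = 0x00000000ffffffffLL;    /* positive as s64 */

            /* Viewed through a 32-bit comparison this value is -1, which is
             * why the verifier casts smin/smax to s32 and sign extends back
             * to s64 before reusing them for JMP32 reasoning.
             */
            printf("as s64:      %lld\n", (long long)smin);
            printf("as s32->s64: %lld\n", (long long)(int64_t)(int32_t)smin);
            return 0;                               /* 4294967295 vs -1 */
    }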
@@ -4108,6 +4143,29 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 	return -1;
 }
 
+/* Generate the min value of the high 32 bits from the tnum info. */
+static u64 gen_hi_min(struct tnum var)
+{
+	return var.value & ~0xffffffffULL;
+}
+
+/* Generate the max value of the high 32 bits from the tnum info. */
+static u64 gen_hi_max(struct tnum var)
+{
+	return (var.value | var.mask) & ~0xffffffffULL;
+}
+
+/* Return true if VAL is compared with an s64 sign extended from s32, and
+ * they have the same signedness.
+ */
+static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
+{
+	return ((s32)sval >= 0 &&
+		reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
+	       ((s32)sval < 0 &&
+		reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
+}
+
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
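A tnum tracks per-bit knowledge of a register: var.value holds the bits known to be 1, var.mask holds the bits whose state is unknown. The two new helpers project that knowledge onto the high 32 bits alone: gen_hi_min() treats every unknown high bit as 0, gen_hi_max() treats it as 1. A standalone sketch with a worked example; the struct below is a simplified stand-in for the kernel's struct tnum:

    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value; uint64_t mask; };

    static uint64_t gen_hi_min(struct tnum var)
    {
            return var.value & ~0xffffffffULL;              /* unknown -> 0 */
    }

    static uint64_t gen_hi_max(struct tnum var)
    {
            return (var.value | var.mask) & ~0xffffffffULL; /* unknown -> 1 */
    }

    int main(void)
    {
            /* Bit 32 known set, bit 33 unknown, low word fully unknown. */
            struct tnum t = { .value = 1ULL << 32,
                              .mask  = (1ULL << 33) | 0xffffffffULL };

            printf("hi min = %#llx\n", (unsigned long long)gen_hi_min(t));
            printf("hi max = %#llx\n", (unsigned long long)gen_hi_max(t));
            return 0;       /* prints 0x100000000 and 0x300000000 */
    }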
@@ -4115,7 +4173,7 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
  */
 static void reg_set_min_max(struct bpf_reg_state *true_reg,
 			    struct bpf_reg_state *false_reg, u64 val,
-			    u8 opcode)
+			    u8 opcode, bool is_jmp32)
 {
 	s64 sval;
 
@@ -4128,7 +4186,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 	if (__is_pointer_value(false, false_reg))
 		return;
 
-	sval = (s64)val;
+	val = is_jmp32 ? (u32)val : val;
+	sval = is_jmp32 ? (s64)(s32)val : (s64)val;
 
 	switch (opcode) {
 	case BPF_JEQ:
@@ -4141,7 +4200,15 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		 * if it is true we know the value for sure. Likewise for
 		 * BPF_JNE.
 		 */
-		__mark_reg_known(reg, val);
+		if (is_jmp32) {
+			u64 old_v = reg->var_off.value;
+			u64 hi_mask = ~0xffffffffULL;
+
+			reg->var_off.value = (old_v & hi_mask) | val;
+			reg->var_off.mask &= hi_mask;
+		} else {
+			__mark_reg_known(reg, val);
+		}
 		break;
 	}
 	case BPF_JSET:
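Note that a 32-bit equality cannot simply call __mark_reg_known(): only the low half of the register becomes known, and whatever tnum knowledge already existed about the high half must survive. A hypothetical standalone helper performing the same partial update as the is_jmp32 branch above (illustration only, not a kernel API):

    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value; uint64_t mask; };  /* simplified, as before */

    static void mark_lo32_known(struct tnum *t, uint32_t val)
    {
            uint64_t hi_mask = ~0xffffffffULL;

            t->value = (t->value & hi_mask) | val;  /* low bits known to be val */
            t->mask &= hi_mask;                     /* low bits no longer unknown */
    }

    int main(void)
    {
            struct tnum r0 = { .value = 0, .mask = ~0ULL }; /* fully unknown */

            mark_lo32_known(&r0, 42);
            printf("value=%#llx mask=%#llx\n",      /* 0x2a, 0xffffffff00000000 */
                   (unsigned long long)r0.value, (unsigned long long)r0.mask);
            return 0;
    }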
@@ -4157,6 +4224,10 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		u64 false_umax = opcode == BPF_JGT ? val : val - 1;
 		u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
 
+		if (is_jmp32) {
+			false_umax += gen_hi_max(false_reg->var_off);
+			true_umin += gen_hi_min(true_reg->var_off);
+		}
 		false_reg->umax_value = min(false_reg->umax_value, false_umax);
 		true_reg->umin_value = max(true_reg->umin_value, true_umin);
 		break;
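Because a JMP32 comparison constrains only the low 32 bits, the tightest 64-bit unsigned bound is the 32-bit bound plus the extreme value the high bits can still take, which is exactly what gen_hi_min()/gen_hi_max() supply. A small worked example of that composition (the numbers are invented for illustration):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            /* Say "if w0 > 10" was NOT taken, so low32(r0) <= 10, and the
             * tnum says the high word is at most 2 (gen_hi_max = 0x200000000).
             * The tightest 64-bit umax is the sum of the two parts.
             */
            uint64_t lo32_umax = 10;
            uint64_t hi_max = 0x200000000ULL;

            assert(lo32_umax + hi_max == 0x20000000aULL);
            return 0;
    }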
@@ -4167,6 +4238,11 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
 		s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
 
+		/* If the full s64 was not sign-extended from s32 then don't
+		 * deduce further info.
+		 */
+		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+			break;
 		false_reg->smax_value = min(false_reg->smax_value, false_smax);
 		true_reg->smin_value = max(true_reg->smin_value, true_smin);
 		break;
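The guard is needed because a signed 32-bit comparison only carries over to the 64-bit smin/smax when both the immediate and the register's range sit in a region where the s32 and s64 orderings agree. A standalone adaptation of the helper, taking the bounds directly instead of through a bpf_reg_state, with one accepted and one rejected case:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define S32_MAX 0x7fffffffLL
    #define S32_MIN (-S32_MAX - 1)

    static bool cmp_val_with_extended_s64(int64_t sval, int64_t smin, int64_t smax)
    {
            return ((int32_t)sval >= 0 && smin >= 0 && smax <= S32_MAX) ||
                   ((int32_t)sval < 0 && smax <= 0 && smin >= S32_MIN);
    }

    int main(void)
    {
            /* Accepted: immediate and range are small non-negative values. */
            printf("%d\n", cmp_val_with_extended_s64(5, 0, 100));          /* 1 */
            /* Rejected: smax = 2^32 - 1 is positive as s64 but -1 as s32. */
            printf("%d\n", cmp_val_with_extended_s64(5, 0, 0xffffffffLL)); /* 0 */
            return 0;
    }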
@@ -4177,6 +4253,10 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		u64 false_umin = opcode == BPF_JLT ? val : val + 1;
 		u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
 
+		if (is_jmp32) {
+			false_umin += gen_hi_min(false_reg->var_off);
+			true_umax += gen_hi_max(true_reg->var_off);
+		}
 		false_reg->umin_value = max(false_reg->umin_value, false_umin);
 		true_reg->umax_value = min(true_reg->umax_value, true_umax);
 		break;
@@ -4187,6 +4267,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
 		s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
 
+		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+			break;
 		false_reg->smin_value = max(false_reg->smin_value, false_smin);
 		true_reg->smax_value = min(true_reg->smax_value, true_smax);
 		break;
@@ -4213,14 +4295,15 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
  */
 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 				struct bpf_reg_state *false_reg, u64 val,
-				u8 opcode)
+				u8 opcode, bool is_jmp32)
 {
 	s64 sval;
 
 	if (__is_pointer_value(false, false_reg))
 		return;
 
-	sval = (s64)val;
+	val = is_jmp32 ? (u32)val : val;
+	sval = is_jmp32 ? (s64)(s32)val : (s64)val;
 
 	switch (opcode) {
 	case BPF_JEQ:
@@ -4229,7 +4312,15 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		struct bpf_reg_state *reg =
 			opcode == BPF_JEQ ? true_reg : false_reg;
 
-		__mark_reg_known(reg, val);
+		if (is_jmp32) {
+			u64 old_v = reg->var_off.value;
+			u64 hi_mask = ~0xffffffffULL;
+
+			reg->var_off.value = (old_v & hi_mask) | val;
+			reg->var_off.mask &= hi_mask;
+		} else {
+			__mark_reg_known(reg, val);
+		}
 		break;
 	}
 	case BPF_JSET:
@@ -4245,6 +4336,10 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		u64 false_umin = opcode == BPF_JGT ? val : val + 1;
 		u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
 
+		if (is_jmp32) {
+			false_umin += gen_hi_min(false_reg->var_off);
+			true_umax += gen_hi_max(true_reg->var_off);
+		}
 		false_reg->umin_value = max(false_reg->umin_value, false_umin);
 		true_reg->umax_value = min(true_reg->umax_value, true_umax);
 		break;
@@ -4255,6 +4350,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1;
 		s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
 
+		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+			break;
 		false_reg->smin_value = max(false_reg->smin_value, false_smin);
 		true_reg->smax_value = min(true_reg->smax_value, true_smax);
 		break;
@@ -4265,6 +4362,10 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		u64 false_umax = opcode == BPF_JLT ? val : val - 1;
 		u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
 
+		if (is_jmp32) {
+			false_umax += gen_hi_max(false_reg->var_off);
+			true_umin += gen_hi_min(true_reg->var_off);
+		}
 		false_reg->umax_value = min(false_reg->umax_value, false_umax);
 		true_reg->umin_value = max(true_reg->umin_value, true_umin);
 		break;
@@ -4275,6 +4376,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1;
 		s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
 
+		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+			break;
 		false_reg->smax_value = min(false_reg->smax_value, false_smax);
 		true_reg->smin_value = max(true_reg->smin_value, true_smin);
 		break;
@@ -4416,6 +4519,10 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 	if (BPF_SRC(insn->code) != BPF_X)
 		return false;
 
+	/* Pointers are always 64-bit. */
+	if (BPF_CLASS(insn->code) == BPF_JMP32)
+		return false;
+
 	switch (BPF_OP(insn->code)) {
 	case BPF_JGT:
 		if ((dst_reg->type == PTR_TO_PACKET &&
@@ -4508,16 +4615,18 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
 	struct bpf_reg_state *dst_reg, *other_branch_regs;
 	u8 opcode = BPF_OP(insn->code);
+	bool is_jmp32;
 	int err;
 
-	if (opcode > BPF_JSLE) {
-		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
+	/* Only conditional jumps are expected to reach here. */
+	if (opcode == BPF_JA || opcode > BPF_JSLE) {
+		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
 		return -EINVAL;
 	}
 
 	if (BPF_SRC(insn->code) == BPF_X) {
 		if (insn->imm != 0) {
-			verbose(env, "BPF_JMP uses reserved fields\n");
+			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
 			return -EINVAL;
 		}
 
@@ -4533,7 +4642,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		}
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
-			verbose(env, "BPF_JMP uses reserved fields\n");
+			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
 			return -EINVAL;
 		}
 	}
@@ -4544,9 +4653,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		return err;
 
 	dst_reg = &regs[insn->dst_reg];
+	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
 
 	if (BPF_SRC(insn->code) == BPF_K) {
-		int pred = is_branch_taken(dst_reg, insn->imm, opcode);
+		int pred = is_branch_taken(dst_reg, insn->imm, opcode,
+					   is_jmp32);
 
 		if (pred == 1) {
 			 /* only follow the goto, ignore fall-through */
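With the extra argument wired through, a constant JMP32 comparison can be statically resolved just like its 64-bit counterpart. A hedged illustration, assuming the BPF_JMP32_IMM() instruction-building macro that this patch series adds to include/linux/filter.h (the macro comes from the series, not from this particular diff):

    #include <linux/filter.h>

    /* After "w0 = 1" the verifier knows the low 32 bits of r0 exactly, so
     * is_branch_taken(..., is_jmp32 = true) returns 1 for the jeq32 below
     * and only the taken path needs to be explored.
     */
    static const struct bpf_insn snippet[] = {
            BPF_MOV32_IMM(BPF_REG_0, 1),             /* w0 = 1             */
            BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 1, 1), /* if w0 == 1 goto +1 */
            BPF_MOV64_IMM(BPF_REG_0, 0),             /* pruned branch      */
            BPF_EXIT_INSN(),
    };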
@@ -4574,30 +4685,51 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	 * comparable.
 	 */
 	if (BPF_SRC(insn->code) == BPF_X) {
+		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
+		struct bpf_reg_state lo_reg0 = *dst_reg;
+		struct bpf_reg_state lo_reg1 = *src_reg;
+		struct bpf_reg_state *src_lo, *dst_lo;
+
+		dst_lo = &lo_reg0;
+		src_lo = &lo_reg1;
+		coerce_reg_to_size(dst_lo, 4);
+		coerce_reg_to_size(src_lo, 4);
+
 		if (dst_reg->type == SCALAR_VALUE &&
-		    regs[insn->src_reg].type == SCALAR_VALUE) {
-			if (tnum_is_const(regs[insn->src_reg].var_off))
+		    src_reg->type == SCALAR_VALUE) {
+			if (tnum_is_const(src_reg->var_off) ||
+			    (is_jmp32 && tnum_is_const(src_lo->var_off)))
 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
-						dst_reg, regs[insn->src_reg].var_off.value,
-						opcode);
-			else if (tnum_is_const(dst_reg->var_off))
+						dst_reg,
+						is_jmp32
+						? src_lo->var_off.value
+						: src_reg->var_off.value,
+						opcode, is_jmp32);
+			else if (tnum_is_const(dst_reg->var_off) ||
+				 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
-						    &regs[insn->src_reg],
-						    dst_reg->var_off.value, opcode);
-			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
+						    src_reg,
+						    is_jmp32
+						    ? dst_lo->var_off.value
+						    : dst_reg->var_off.value,
+						    opcode, is_jmp32);
+			else if (!is_jmp32 &&
+				 (opcode == BPF_JEQ || opcode == BPF_JNE))
 				/* Comparing for equality, we can combine knowledge */
 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
 						    &other_branch_regs[insn->dst_reg],
-						    &regs[insn->src_reg],
-						    &regs[insn->dst_reg], opcode);
+						    src_reg, dst_reg, opcode);
 		}
 	} else if (dst_reg->type == SCALAR_VALUE) {
 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
-				dst_reg, insn->imm, opcode);
+				dst_reg, insn->imm, opcode, is_jmp32);
 	}
 
-	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
-	if (BPF_SRC(insn->code) == BPF_K &&
+	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
+	 * NOTE: the optimizations below relate to pointer comparison, which
+	 *       will never be JMP32.
+	 */
+	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
 	    reg_type_may_be_null(dst_reg->type)) {
 		/* Mark all identical registers in each branch as either
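The temporary lo_reg0/lo_reg1 copies exist because a register can be constant in its low 32 bits without being constant as a full 64-bit value; coercing a copy to 4 bytes lets the existing tnum_is_const() test catch that case. A standalone sketch, using the same simplified tnum as the earlier examples:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value; uint64_t mask; };

    static bool tnum_is_const(struct tnum a)
    {
            return !a.mask;         /* constant iff no bit is unknown */
    }

    int main(void)
    {
            /* r1: high word unknown, low word exactly 7. */
            struct tnum r1 = { .value = 7, .mask = 0xffff00000000ULL };
            /* What coerce_reg_to_size(reg, 4) leaves behind: the low 32 bits. */
            struct tnum r1_lo = { .value = (uint32_t)r1.value,
                                  .mask  = (uint32_t)r1.mask };

            printf("64-bit const? %d  low-32 const? %d\n",
                   tnum_is_const(r1), tnum_is_const(r1_lo));    /* 0  1 */
            return 0;
    }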
@@ -4926,7 +5058,8 @@ peek_stack:
 		goto check_state;
 	t = insn_stack[cur_stack - 1];
 
-	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
+	if (BPF_CLASS(insns[t].code) == BPF_JMP ||
+	    BPF_CLASS(insns[t].code) == BPF_JMP32) {
 		u8 opcode = BPF_OP(insns[t].code);
 
 		if (opcode == BPF_EXIT) {
@@ -6082,7 +6215,7 @@ static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;
 
-		} else if (class == BPF_JMP) {
+		} else if (class == BPF_JMP || class == BPF_JMP32) {
 			u8 opcode = BPF_OP(insn->code);
 
 			if (opcode == BPF_CALL) {
@@ -6090,7 +6223,8 @@ static int do_check(struct bpf_verifier_env *env)
 				    insn->off != 0 ||
 				    (insn->src_reg != BPF_REG_0 &&
 				     insn->src_reg != BPF_PSEUDO_CALL) ||
-				    insn->dst_reg != BPF_REG_0) {
+				    insn->dst_reg != BPF_REG_0 ||
+				    class == BPF_JMP32) {
 					verbose(env, "BPF_CALL uses reserved fields\n");
 					return -EINVAL;
 				}
@@ -6106,7 +6240,8 @@ static int do_check(struct bpf_verifier_env *env)
 				if (BPF_SRC(insn->code) != BPF_K ||
 				    insn->imm != 0 ||
 				    insn->src_reg != BPF_REG_0 ||
-				    insn->dst_reg != BPF_REG_0) {
+				    insn->dst_reg != BPF_REG_0 ||
+				    class == BPF_JMP32) {
 					verbose(env, "BPF_JA uses reserved fields\n");
 					return -EINVAL;
 				}
@@ -6118,7 +6253,8 @@ static int do_check(struct bpf_verifier_env *env)
 				if (BPF_SRC(insn->code) != BPF_K ||
 				    insn->imm != 0 ||
 				    insn->src_reg != BPF_REG_0 ||
-				    insn->dst_reg != BPF_REG_0) {
+				    insn->dst_reg != BPF_REG_0 ||
+				    class == BPF_JMP32) {
 					verbose(env, "BPF_EXIT uses reserved fields\n");
 					return -EINVAL;
 				}
@@ -6635,6 +6771,9 @@ static bool insn_is_cond_jump(u8 code)
 {
 	u8 op;
 
+	if (BPF_CLASS(code) == BPF_JMP32)
+		return true;
+
 	if (BPF_CLASS(code) != BPF_JMP)
 		return false;
 