author	David S. Miller <davem@davemloft.net>	2018-10-05 13:53:13 -0400
committer	David S. Miller <davem@davemloft.net>	2018-10-05 13:53:13 -0400
commit	b8d5b7cec43618c8f91a9fbe80067ef2dcbc4d35 (patch)
tree	02adf7f3451ecf1a7a397a86f705a9022d27ae09 /kernel
parent	7e4183752735deb7543e179a44f4f4b44917cd6f (diff)
parent	b799207e1e1816b09e7a5920fbb2d5fcf6edd681 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2018-10-05

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix to truncate input on ALU operations in 32 bit mode, from Jann.

2) Fixes for cgroup local storage to reject reserved flags on element
   update and rejection of map allocation with zero-sized value,
   from Roman.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/local_storage.c	5
-rw-r--r--	kernel/bpf/verifier.c	10
2 files changed, 13 insertions, 2 deletions
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 22ad967d1e5f..830d7f095748 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -129,7 +129,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
 	struct bpf_cgroup_storage *storage;
 	struct bpf_storage_buffer *new;
 
-	if (flags & BPF_NOEXIST)
+	if (flags != BPF_ANY && flags != BPF_EXIST)
 		return -EINVAL;
 
 	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
@@ -195,6 +195,9 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
 		return ERR_PTR(-EINVAL);
 
+	if (attr->value_size == 0)
+		return ERR_PTR(-EINVAL);
+
 	if (attr->value_size > PAGE_SIZE)
 		return ERR_PTR(-E2BIG);
 
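From userspace, the effect of the local_storage.c change is that bpf_map_update_elem() on a cgroup storage map now accepts only BPF_ANY and BPF_EXIST; BPF_NOEXIST and any reserved flag bits fail with -EINVAL, and creating such a map with a zero value_size is refused. A minimal illustrative sketch, not part of the patch (map_fd, key and value are hypothetical placeholders for an already created BPF_MAP_TYPE_CGROUP_STORAGE map):

#include <errno.h>
#include <bpf/bpf.h>        /* libbpf wrapper for bpf_map_update_elem() */
#include <linux/bpf.h>      /* BPF_ANY, BPF_EXIST, BPF_NOEXIST */

/* key/value must match the map's key_size/value_size. */
static int update_cgroup_storage(int map_fd, const void *key, const void *value)
{
	/* Accepted: the per-cgroup entry already exists once the program is attached. */
	if (bpf_map_update_elem(map_fd, key, value, BPF_EXIST))
		return -errno;

	/* Rejected after this fix: BPF_NOEXIST (or reserved bits) returns -1 with errno == EINVAL. */
	return bpf_map_update_elem(map_fd, key, value, BPF_NOEXIST);
}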
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bb07e74b34a2..465952a8e465 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2896,6 +2896,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	u64 umin_val, umax_val;
 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
+	if (insn_bitness == 32) {
+		/* Relevant for 32-bit RSH: Information can propagate towards
+		 * LSB, so it isn't sufficient to only truncate the output to
+		 * 32 bits.
+		 */
+		coerce_reg_to_size(dst_reg, 4);
+		coerce_reg_to_size(&src_reg, 4);
+	}
+
 	smin_val = src_reg.smin_value;
 	smax_val = src_reg.smax_value;
 	umin_val = src_reg.umin_value;
@@ -3131,7 +3140,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
 		/* 32-bit ALU ops are (32,32)->32 */
 		coerce_reg_to_size(dst_reg, 4);
-		coerce_reg_to_size(&src_reg, 4);
 	}
 
 	__reg_deduce_bounds(dst_reg);
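The verifier change matters because a 32-bit BPF_RSH truncates both operands to 32 bits before shifting, while the old model shifted the full 64-bit tracked value and only truncated the result; for a right shift, bits above bit 31 can propagate into the low word, so the tracked bounds could disagree with the runtime value. A small user-space sketch of the arithmetic, illustrative only and not kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t reg = 0x100000000ULL;	/* bit 32 set, lower 32 bits zero */

	/* What a 32-bit BPF_RSH actually computes: truncate the input first. */
	uint32_t actual  = (uint32_t)reg >> 1;		/* 0x0 */

	/* What the old verifier model assumed: shift 64 bits, truncate the output. */
	uint32_t modeled = (uint32_t)(reg >> 1);	/* 0x80000000 */

	/* The mismatch is what the added coerce_reg_to_size() calls close. */
	printf("actual=%#x modeled=%#x\n", actual, modeled);
	return 0;
}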