author     Martin KaFai Lau <kafai@fb.com>          2017-03-22 13:00:32 -0400
committer  David S. Miller <davem@davemloft.net>    2017-03-22 18:45:45 -0400
commit     fad73a1a35ea61f13607a391aca669caad8c04ca (patch)
tree       1070c51ee7354ee35ef1e23f17b7ce483c316144 /kernel/bpf/arraymap.c
parent     b4f0a66155564aaf7e98492e027efad9f797c244 (diff)
bpf: Fix and simplifications on inline map lookup
Fix in verifier:
For the same bpf_map_lookup_elem() instruction (i.e. "call 1"), a broken
case is "a different type of map could be used for the same lookup
instruction". For example, an array in one case and a hashmap in another.
We have to resort to the old dynamic call behavior in this case. The fix
is to check for collision on insn_aux->map_ptr. If there is a collision,
don't inline the map lookup.

Please see "do_reg_lookup()" in test_map_in_map_kern.c in the later patch
for how to trigger the above case.

Simplifications on array_map_gen_lookup():
1. Calculate elem_size from map->value_size. This removes the need for
   'struct bpf_array', which makes the later map-in-map implementation
   easier.
2. Remove the 'elem_size == 1' test.

Fixes: 81ed18ab3098 ("bpf: add helper inlining infra and optimize map_array lookup")
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
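As a rough illustration of the collision check described above, here is a
minimal C sketch. The names lookup_aux, record_lookup_map, can_inline_lookup
and MAP_PTR_POISON are hypothetical stand-ins, not the verifier's actual
structures or API.

/*
 * Illustrative sketch only (not verifier code): track, per
 * bpf_map_lookup_elem() call instruction, which map it was verified
 * with, and poison the slot on collision so the lookup is not inlined.
 */
struct bpf_map;				/* opaque here */

#define MAP_PTR_POISON	((struct bpf_map *)-1L)

struct lookup_aux {
	struct bpf_map *map_ptr;	/* map seen for this "call 1" insn */
};

/* remember the map used by a given lookup instruction */
static void record_lookup_map(struct lookup_aux *aux, struct bpf_map *map)
{
	if (!aux->map_ptr)
		aux->map_ptr = map;		/* first map seen: remember it */
	else if (aux->map_ptr != map)
		aux->map_ptr = MAP_PTR_POISON;	/* collision: different maps */
}

/* inline only if exactly one map ever reached this instruction */
static int can_inline_lookup(const struct lookup_aux *aux)
{
	return aux->map_ptr && aux->map_ptr != MAP_PTR_POISON;
}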
Diffstat (limited to 'kernel/bpf/arraymap.c')
-rw-r--r--  kernel/bpf/arraymap.c  |  11
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index bcf9955fac95..4d7d5d0ed76a 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -117,20 +117,17 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
-	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_insn *insn = insn_buf;
-	u32 elem_size = array->elem_size;
+	u32 elem_size = round_up(map->value_size, 8);
 	const int ret = BPF_REG_0;
 	const int map_ptr = BPF_REG_1;
 	const int index = BPF_REG_2;
 
 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
-	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, array->map.max_entries,
-			      elem_size == 1 ? 2 : 3);
-	if (elem_size == 1) {
-		/* nop */
-	} else if (is_power_of_2(elem_size)) {
+	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+
+	if (is_power_of_2(elem_size)) {
 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
 	} else {
 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
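For reference, a rough C equivalent of the instruction sequence emitted above.
This is a sketch only; the function and parameter names are illustrative and
'values' stands for the value area that starts at offsetof(struct bpf_array,
value). It is not kernel code.

#include <stddef.h>
#include <stdint.h>

static void *inlined_array_lookup(void *values, uint32_t max_entries,
				  uint32_t value_size, const uint32_t *key)
{
	/* elem_size = round_up(map->value_size, 8), as in the patch */
	uint32_t elem_size = (value_size + 7u) & ~7u;
	uint32_t index = *key;			/* BPF_LDX_MEM(BPF_W, ...)   */

	if (index >= max_entries)		/* BPF_JMP_IMM(BPF_JGE, ...) */
		return NULL;

	/* LSH by ilog2(elem_size) when it is a power of two, else MUL */
	return (char *)values + (uint64_t)index * elem_size;
}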