author     Daniel Borkmann <daniel@iogearbox.net>     2018-02-15 19:10:29 -0500
committer  Alexei Starovoitov <ast@kernel.org>        2018-02-16 00:34:33 -0500
commit     9c2d63b843a5c8a8d0559cc067b5398aa5ec3ffc (patch)
tree       c1696b8aa7d31d16a505006a102ca3381161c109 /kernel/bpf/arraymap.c
parent     67e3a172eba577f16ceae712d835b835b9718d61 (diff)
bpf: fix mlock precharge on arraymaps
syzkaller recently triggered an OOM during percpu map allocation. While
there is work in progress by Dennis Zhou to add __GFP_NORETRY semantics
for the percpu allocator under pressure, there also appears to be a
missing bpf_map_precharge_memlock() check in array map allocation.

Given that today the actual bpf_map_charge_memlock() happens after
find_and_alloc_map() in the syscall path, bpf_map_precharge_memlock()
is there to bail out early, before we go and do the map setup work,
when we find that we would hit the limits anyway. Therefore, add this
check for array maps as well.

Fixes: 6c9059817432 ("bpf: pre-allocate hash map elements")
Fixes: a10423b87a7e ("bpf: introduce BPF_MAP_TYPE_PERCPU_ARRAY map")
Reported-by: syzbot+adb03f3f0bb57ce3acda@syzkaller.appspotmail.com
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dennis Zhou <dennisszhou@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
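For context, the precharge helper this patch wires into array map allocation
only compares the map's prospective page cost against the caller's
RLIMIT_MEMLOCK budget; the actual charge still happens later via
bpf_map_charge_memlock() in the syscall path. A minimal sketch of that check,
paraphrasing bpf_map_precharge_memlock() from kernel/bpf/syscall.c of this
era (illustrative, not the verbatim source):

	int bpf_map_precharge_memlock(u32 pages)
	{
		struct user_struct *user = get_current_user();
		unsigned long memlock_limit, cur;

		memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		cur = atomic_long_read(&user->locked_vm);
		free_uid(user);

		/* nothing is charged here; we only bail out early if the
		 * later bpf_map_charge_memlock() would fail anyway
		 */
		if (cur + pages > memlock_limit)
			return -EPERM;
		return 0;
	}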
Diffstat (limited to 'kernel/bpf/arraymap.c')
 kernel/bpf/arraymap.c | 28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index b1f66480135b..a364c408f25a 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -73,11 +73,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
 
@@ -109,8 +109,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +132,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
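To make the charging math above concrete, here is a standalone userspace
sketch with hypothetical parameters: a BPF_MAP_TYPE_PERCPU_ARRAY with
max_entries = 1M and value_size = 8 on a 64-CPU box; the 8192-byte header
value stands in for sizeof(struct bpf_array) plus alignment and is made up
for illustration.

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096ULL

	int main(void)
	{
		uint64_t max_entries = 1ULL << 20;  /* 1M entries */
		uint64_t elem_size   = 8;           /* round_up(value_size, 8) */
		uint64_t ncpus       = 64;          /* num_possible_cpus() */
		uint64_t array_size  = 8192;        /* map header, illustrative */

		/* base cost: the array itself */
		uint64_t cost = array_size + max_entries * elem_size;

		/* percpu maps additionally charge one value per possible CPU */
		cost += max_entries * elem_size * ncpus;

		/* bytes to pages, mirroring round_up(cost, PAGE_SIZE) >> PAGE_SHIFT */
		cost = (cost + PAGE_SIZE - 1) / PAGE_SIZE;

		/* prints 133122 pages (~520 MiB), far beyond a typical default
		 * RLIMIT_MEMLOCK, so the new precharge fails early with -EPERM
		 * instead of hammering the percpu allocator first */
		printf("pages to precharge: %llu\n", (unsigned long long)cost);
		return 0;
	}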