summaryrefslogtreecommitdiffstats
path: root/kernel/bpf/arraymap.c
diff options
context:
space:
mode:
authorRoman Gushchin <guro@fb.com>2019-05-29 21:03:59 -0400
committerAlexei Starovoitov <ast@kernel.org>2019-05-31 19:52:56 -0400
commitc85d69135a9175c50a823d04d62d932312d037b3 (patch)
treeedd6ec707ebbf68a89fc1c3fc2b2d06364978ad3 /kernel/bpf/arraymap.c
parentb936ca643ade11f265fa10e5fb71c20d9c5243f1 (diff)
bpf: move memory size checks to bpf_map_charge_init()
Most bpf map types do similar checks and bytes-to-pages conversion during memory allocation and charging. Let's unify these checks by moving them into bpf_map_charge_init().

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/arraymap.c')
-rw-r--r--kernel/bpf/arraymap.c8
1 file changed, 1 insertion, 7 deletions
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3552da4407d9..0349cbf23cdb 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -117,14 +117,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* make sure there is no u32 overflow later in round_up() */
 	cost = array_size;
-	if (cost >= U32_MAX - PAGE_SIZE)
-		return ERR_PTR(-ENOMEM);
-	if (percpu) {
+	if (percpu)
 		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
-		if (cost >= U32_MAX - PAGE_SIZE)
-			return ERR_PTR(-ENOMEM);
-	}
-	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	ret = bpf_map_charge_init(&mem, cost);
 	if (ret < 0)