Diffstat (limited to 'kernel/bpf/arraymap.c')
-rw-r--r--  kernel/bpf/arraymap.c | 28 ++++++++++++++++------------
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index b1f66480135b..a364c408f25a 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -73,11 +73,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
 
@@ -109,8 +109,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +132,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
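
The net effect of the change is that the full map cost, including the per-CPU portion, is computed, overflow-checked, and precharged via bpf_map_precharge_memlock() before bpf_map_area_alloc() runs, and the same page count is stored in array->map.pages. As a rough illustration of that arithmetic only, here is a standalone C sketch. PAGE_SIZE_SK, PAGE_SHIFT_SK, struct fake_attr, and array_map_cost_pages() are made-up stand-ins, not kernel definitions, and the sketch omits the struct bpf_array header bytes that the kernel folds into array_size.

/* Standalone sketch of the patch's cost calculation: accumulate the byte
 * cost, guard the u32 overflow the same way the kernel does, then convert
 * to pages. All names here are illustrative, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SK   4096ULL          /* assumed 4 KiB pages */
#define PAGE_SHIFT_SK  12
#define U32_MAX_SK     0xffffffffULL

struct fake_attr {
	uint32_t max_entries;
	uint32_t value_size;
	int      percpu;
};

/* same rounding the kernel's round_up() performs for power-of-two aligns */
static uint64_t round_up_u64(uint64_t n, uint64_t align)
{
	return (n + align - 1) / align * align;
}

/* Returns the page count to charge, or 0 if the size checks would fail. */
static uint64_t array_map_cost_pages(const struct fake_attr *attr,
				     unsigned int ncpus)
{
	uint64_t elem_size = round_up_u64(attr->value_size, 8);
	uint64_t cost = (uint64_t)attr->max_entries * elem_size;

	if (cost >= U32_MAX_SK - PAGE_SIZE_SK)
		return 0;               /* would overflow u32 in round_up() */
	if (attr->percpu) {
		/* per-CPU value storage is charged up front as well */
		cost += (uint64_t)attr->max_entries * elem_size * ncpus;
		if (cost >= U32_MAX_SK - PAGE_SIZE_SK)
			return 0;
	}
	/* bytes -> pages, as done before the memlock precharge */
	return round_up_u64(cost, PAGE_SIZE_SK) >> PAGE_SHIFT_SK;
}

int main(void)
{
	struct fake_attr attr = {
		.max_entries = 1000, .value_size = 20, .percpu = 1,
	};

	/* value_size 20 rounds to 24; 1000 * 24 * (1 + 8 CPUs) = 216000 B,
	 * which rounds up to 53 pages at 4 KiB per page */
	printf("pages charged: %llu\n",
	       (unsigned long long)array_map_cost_pages(&attr, 8));
	return 0;
}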