Diffstat (limited to 'kernel/bpf/arraymap.c')
-rw-r--r--	kernel/bpf/arraymap.c	33
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index b1f66480135b..14750e7c5ee4 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
 {
 	int i;
 
-	for (i = 0; i < array->map.max_entries; i++)
+	for (i = 0; i < array->map.max_entries; i++) {
 		free_percpu(array->pptrs[i]);
+		cond_resched();
+	}
 }
 
 static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 			return -ENOMEM;
 		}
 		array->pptrs[i] = ptr;
+		cond_resched();
 	}
 
 	return 0;
@@ -73,11 +76,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
 
@@ -109,8 +112,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +135,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
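
The change above moves the memlock accounting in front of the allocations: the full map cost, including the per-CPU region for BPF_MAP_TYPE_PERCPU_ARRAY, is computed with overflow checks and charged via bpf_map_precharge_memlock() before bpf_map_area_alloc() runs, and the per-CPU alloc/free loops gain cond_resched() so walking a large map cannot monopolize the CPU. Below is a minimal userspace C sketch of that precharge-then-allocate ordering; precharge_memlock(), its fixed budget, and map_alloc() are hypothetical stand-ins for the kernel's RLIMIT_MEMLOCK accounting, not the real implementation.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE  4096ULL
#define PAGE_SHIFT 12

/* Hypothetical stand-in for bpf_map_precharge_memlock(): checks the page
 * cost against a fixed budget instead of the caller's RLIMIT_MEMLOCK. */
static int precharge_memlock(uint64_t pages)
{
	const uint64_t budget_pages = 16384;	/* assumed 64 MiB budget */

	return pages <= budget_pages ? 0 : -EPERM;
}

static void *map_alloc(uint32_t max_entries, uint32_t elem_size,
		       int percpu, int ncpus)
{
	uint64_t cost = (uint64_t)max_entries * elem_size;

	/* Guard against u32 overflow, as the patched code does. */
	if (cost >= UINT32_MAX - PAGE_SIZE)
		return NULL;
	if (percpu) {
		/* Per-CPU storage is part of the cost *before* charging. */
		cost += (uint64_t)max_entries * elem_size * ncpus;
		if (cost >= UINT32_MAX - PAGE_SIZE)
			return NULL;
	}

	/* Charge first; if the budget is exceeded there is nothing to undo. */
	if (precharge_memlock((cost + PAGE_SIZE - 1) >> PAGE_SHIFT) < 0)
		return NULL;

	/* Only now allocate (the per-CPU region is elided in this sketch). */
	return calloc(max_entries, elem_size);
}

int main(void)
{
	void *m = map_alloc(1024, 64, 1, 8);

	printf("map_alloc %s\n", m ? "succeeded" : "failed");
	free(m);
	return 0;
}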