Diffstat (limited to 'kernel/bpf/xskmap.c')
-rw-r--r--  kernel/bpf/xskmap.c  9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index f816ee1a0fa0..a329dab7c7a4 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -40,10 +40,9 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_m;
 
-	m->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
 	/* Notice returns -EPERM on if map size is larger than memlock limit */
-	err = bpf_map_precharge_memlock(m->map.memory.pages);
+	err = bpf_map_charge_init(&m->map.memory,
+				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
 	if (err)
 		goto free_m;
 
@@ -51,7 +50,7 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 
 	m->flush_list = alloc_percpu(struct list_head);
 	if (!m->flush_list)
-		goto free_m;
+		goto free_charge;
 
 	for_each_possible_cpu(cpu)
 		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
@@ -65,6 +64,8 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 
 free_percpu:
 	free_percpu(m->flush_list);
+free_charge:
+	bpf_map_charge_finish(&m->map.memory);
 free_m:
 	kfree(m);
 	return ERR_PTR(err);
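
For reference, here is a sketch of how the allocation and error-unwind path of xsk_map_alloc() reads after this change, reconstructed from the hunks above. Code outside the shown hunks is elided ("...") and assumed unchanged; the extra comments are added here for explanation and are not part of the patch.

static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	...
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_m;

	/* Notice returns -EPERM on if map size is larger than memlock limit */
	err = bpf_map_charge_init(&m->map.memory,
				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
	if (err)
		goto free_m;
	...
	m->flush_list = alloc_percpu(struct list_head);
	if (!m->flush_list)
		goto free_charge;	/* charge already taken, must be undone */

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
	...
free_percpu:
	free_percpu(m->flush_list);
free_charge:
	/* release the memlock charge taken by bpf_map_charge_init() */
	bpf_map_charge_finish(&m->map.memory);
free_m:
	kfree(m);
	return ERR_PTR(err);
}

The new free_charge label sits between free_percpu and free_m so that any failure after the charge has been taken, but before the map is fully set up, releases the charged pages before the map structure itself is freed.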