path: root/kernel/bpf/hashtab.c
Diffstat (limited to 'kernel/bpf/hashtab.c')
 kernel/bpf/hashtab.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 192d32e77db3..0f2708fde5f7 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -527,18 +527,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 	return insn - insn_buf;
 }
 
-static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
+							void *key, const bool mark)
 {
 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 
 	if (l) {
-		bpf_lru_node_set_ref(&l->lru_node);
+		if (mark)
+			bpf_lru_node_set_ref(&l->lru_node);
 		return l->key + round_up(map->key_size, 8);
 	}
 
 	return NULL;
 }
 
+static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+{
+	return __htab_lru_map_lookup_elem(map, key, true);
+}
+
+static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
+{
+	return __htab_lru_map_lookup_elem(map, key, false);
+}
+
 static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
 				   struct bpf_insn *insn_buf)
 {
@@ -1250,6 +1262,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
 	.map_lookup_elem = htab_lru_map_lookup_elem,
+	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
 	.map_update_elem = htab_lru_map_update_elem,
 	.map_delete_elem = htab_lru_map_delete_elem,
 	.map_gen_lookup = htab_lru_map_gen_lookup,
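
Note: the .map_lookup_elem_sys_only callback wired up above only takes effect if the syscall-side lookup prefers it over .map_lookup_elem. That dispatch is not part of this diff; the sketch below (the helper name map_lookup_for_syscall is made up for illustration) shows the assumed selection, falling back to the regular lookup when a map type does not provide the syscall-only variant.

/* Illustrative sketch, not part of this diff: how the syscall-side lookup
 * is assumed to prefer the non-marking callback when a map type provides
 * one. The actual selection belongs to the bpf syscall code, not hashtab.c.
 */
static void *map_lookup_for_syscall(struct bpf_map *map, void *key)
{
	if (map->ops->map_lookup_elem_sys_only)
		/* lookup triggered via bpf(2): skip LRU ref marking */
		return map->ops->map_lookup_elem_sys_only(map, key);

	/* same callback that BPF program lookups use */
	return map->ops->map_lookup_elem(map, key);
}
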
@@ -1281,7 +1294,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
 
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 {
-	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l;
 	void __percpu *pptr;
 	int ret = -ENOENT;
@@ -1297,8 +1309,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 	l = __htab_map_lookup_elem(map, key);
 	if (!l)
 		goto out;
-	if (htab_is_lru(htab))
-		bpf_lru_node_set_ref(&l->lru_node);
+	/* We do not mark LRU map element here in order to not mess up
+	 * eviction heuristics when user space does a map walk.
+	 */
 	pptr = htab_elem_get_ptr(l, map->key_size);
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(value + off,
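
For context, the comment added above refers to user-space map walks such as the one sketched below with libbpf. The map fd, key/value types, and the helper name dump_lru_map are assumptions for illustration; the point is that these syscall-side lookups no longer set LRU reference bits, so dumping a BPF_MAP_TYPE_LRU_HASH does not skew which elements get evicted next.

/* Sketch only: user-space walk of an LRU hash map via libbpf. Assumes
 * map_fd refers to a BPF_MAP_TYPE_LRU_HASH with u32 keys and u64 values.
 */
#include <bpf/bpf.h>
#include <stdio.h>

static void dump_lru_map(int map_fd)
{
	__u32 cur, next;
	__u64 value;
	void *prev = NULL;	/* NULL key starts the iteration */

	while (bpf_map_get_next_key(map_fd, prev, &next) == 0) {
		if (bpf_map_lookup_elem(map_fd, &next, &value) == 0)
			printf("key %u -> value %llu\n", next,
			       (unsigned long long)value);
		cur = next;
		prev = &cur;
	}
}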