 tools/testing/selftests/bpf/Makefile         |   2 +-
 tools/testing/selftests/bpf/bpf_helpers.h    |   4 ++
 tools/testing/selftests/bpf/test_progs.c     |  43 ++++++++-
 tools/testing/selftests/bpf/test_spin_lock.c | 108 ++++++++++++++++
 4 files changed, 155 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 8993e9c8f410..302b8e70dec9 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -35,7 +35,7 @@ BPF_OBJ_FILES = \
 	sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
 	get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
 	test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o test_xdp_vlan.o \
-	xdp_dummy.o test_map_in_map.o
+	xdp_dummy.o test_map_in_map.o test_spin_lock.o
 
 # Objects are built with default compilation flags and with sub-register
 # code-gen enabled.
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 6c77cf7bedce..6a0ce0f055c5 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -172,6 +172,10 @@ static int (*bpf_skb_vlan_pop)(void *ctx) =
 	(void *) BPF_FUNC_skb_vlan_pop;
 static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
 	(void *) BPF_FUNC_rc_pointer_rel;
+static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
+	(void *) BPF_FUNC_spin_lock;
+static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
+	(void *) BPF_FUNC_spin_unlock;
 
 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
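
Note: the two helper declarations above follow the existing bpf_helpers.h convention of casting the BPF_FUNC_* id to a function pointer. For context, a minimal BPF C program using them would look roughly like the sketch below; the struct, map, section and program names are illustrative only, and, as in the new test_spin_lock.c added further down, the spin lock must be a member of a map value whose layout is described by BTF (hence the BPF_ANNOTATE_KV_PAIR annotation):

	/* illustrative sketch, not part of this patch */
	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	struct counter_elem {
		struct bpf_spin_lock lock;	/* must live inside the map value */
		int cnt;
	};

	struct bpf_map_def SEC("maps") counters = {
		.type = BPF_MAP_TYPE_ARRAY,
		.key_size = sizeof(int),
		.value_size = sizeof(struct counter_elem),
		.max_entries = 1,
	};

	BPF_ANNOTATE_KV_PAIR(counters, int, struct counter_elem);

	SEC("spin_lock_example")	/* program type supplied by the loader */
	int spin_lock_example(struct __sk_buff *skb)
	{
		struct counter_elem *val;
		int key = 0;

		val = bpf_map_lookup_elem(&counters, &key);
		if (!val)
			return 0;
		bpf_spin_lock(&val->lock);	/* serializes updates across CPUs */
		val->cnt++;
		bpf_spin_unlock(&val->lock);	/* must be released before returning */
		return 0;
	}

	char _license[] SEC("license") = "GPL";
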
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index d8940b8b2f8d..d2e71d697340 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -28,7 +28,7 @@ typedef __u16 __sum16;
 #include <sys/wait.h>
 #include <sys/types.h>
 #include <fcntl.h>
-
+#include <pthread.h>
 #include <linux/bpf.h>
 #include <linux/err.h>
 #include <bpf/bpf.h>
@@ -1985,6 +1985,46 @@ static void test_flow_dissector(void)
 	bpf_object__close(obj);
 }
 
+static void *test_spin_lock(void *arg)
+{
+	__u32 duration, retval;
+	int err, prog_fd = *(u32 *) arg;
+
+	err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4),
+				NULL, NULL, &retval, &duration);
+	CHECK(err || retval, "",
+	      "err %d errno %d retval %d duration %d\n",
+	      err, errno, retval, duration);
+	pthread_exit(arg);
+}
+
+static void test_spinlock(void)
+{
+	const char *file = "./test_spin_lock.o";
+	pthread_t thread_id[4];
+	struct bpf_object *obj;
+	int prog_fd;
+	int err = 0, i;
+	void *ret;
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+	if (err) {
+		printf("test_spin_lock:bpf_prog_load errno %d\n", errno);
+		goto close_prog;
+	}
+	for (i = 0; i < 4; i++)
+		assert(pthread_create(&thread_id[i], NULL,
+				      &test_spin_lock, &prog_fd) == 0);
+	for (i = 0; i < 4; i++)
+		assert(pthread_join(thread_id[i], &ret) == 0 &&
+		       ret == (void *)&prog_fd);
+	goto close_prog_noerr;
+close_prog:
+	error_cnt++;
+close_prog_noerr:
+	bpf_object__close(obj);
+}
+
 int main(void)
 {
 	srand(time(NULL));
@@ -2013,6 +2053,7 @@ int main(void)
 	test_queue_stack_map(QUEUE);
 	test_queue_stack_map(STACK);
 	test_flow_dissector();
+	test_spinlock();
 
 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_spin_lock.c b/tools/testing/selftests/bpf/test_spin_lock.c
new file mode 100644
index 000000000000..40f904312090
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_spin_lock.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+struct hmap_elem {
+	volatile int cnt;
+	struct bpf_spin_lock lock;
+	int test_padding;
+};
+
+struct bpf_map_def SEC("maps") hmap = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(int),
+	.value_size = sizeof(struct hmap_elem),
+	.max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(hmap, int, struct hmap_elem);
+
+
+struct cls_elem {
+	struct bpf_spin_lock lock;
+	volatile int cnt;
+};
+
+struct bpf_map_def SEC("maps") cls_map = {
+	.type = BPF_MAP_TYPE_CGROUP_STORAGE,
+	.key_size = sizeof(struct bpf_cgroup_storage_key),
+	.value_size = sizeof(struct cls_elem),
+};
+
+BPF_ANNOTATE_KV_PAIR(cls_map, struct bpf_cgroup_storage_key,
+		     struct cls_elem);
+
+struct bpf_vqueue {
+	struct bpf_spin_lock lock;
+	/* 4 byte hole */
+	unsigned long long lasttime;
+	int credit;
+	unsigned int rate;
+};
+
+struct bpf_map_def SEC("maps") vqueue = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(int),
+	.value_size = sizeof(struct bpf_vqueue),
+	.max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(vqueue, int, struct bpf_vqueue);
+#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
+
+SEC("spin_lock_demo")
+int bpf_spin_lock_test(struct __sk_buff *skb)
+{
+	volatile int credit = 0, max_credit = 100, pkt_len = 64;
+	struct hmap_elem zero = {}, *val;
+	unsigned long long curtime;
+	struct bpf_vqueue *q;
+	struct cls_elem *cls;
+	int key = 0;
+	int err = 0;
+
+	val = bpf_map_lookup_elem(&hmap, &key);
+	if (!val) {
+		bpf_map_update_elem(&hmap, &key, &zero, 0);
+		val = bpf_map_lookup_elem(&hmap, &key);
+		if (!val) {
+			err = 1;
+			goto err;
+		}
+	}
+	/* spin_lock in hash map run time test */
+	bpf_spin_lock(&val->lock);
+	if (val->cnt)
+		val->cnt--;
+	else
+		val->cnt++;
+	if (val->cnt != 0 && val->cnt != 1)
+		err = 1;
+	bpf_spin_unlock(&val->lock);
+
+	/* spin_lock in array. virtual queue demo */
+	q = bpf_map_lookup_elem(&vqueue, &key);
+	if (!q)
+		goto err;
+	curtime = bpf_ktime_get_ns();
+	bpf_spin_lock(&q->lock);
+	q->credit += CREDIT_PER_NS(curtime - q->lasttime, q->rate);
+	q->lasttime = curtime;
+	if (q->credit > max_credit)
+		q->credit = max_credit;
+	q->credit -= pkt_len;
+	credit = q->credit;
+	bpf_spin_unlock(&q->lock);
+
+	/* spin_lock in cgroup local storage */
+	cls = bpf_get_local_storage(&cls_map, 0);
+	bpf_spin_lock(&cls->lock);
+	cls->cnt++;
+	bpf_spin_unlock(&cls->lock);
+
+err:
+	return err;
+}
+char _license[] SEC("license") = "GPL";
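
Note: the virtual-queue part of the program above is a small token bucket: credit accrues in proportion to the time elapsed since lasttime, is capped at max_credit, and each simulated 64-byte packet spends pkt_len credit, all while holding the per-element spin lock. The CREDIT_PER_NS() macro uses ">> 20" as a cheap substitute for dividing by roughly one million, so rate behaves approximately as "credit per millisecond". A stand-alone arithmetic check of that scaling (illustrative user-space C, not part of the patch; the delta and rate values are made up):

	/* illustrative sketch, not part of this patch */
	#include <stdio.h>

	#define CREDIT_PER_NS(delta, rate) (((delta) * (rate)) >> 20)

	int main(void)
	{
		unsigned long long delta_ns = 2000000ULL;	/* 2 ms since the last packet */
		unsigned long long rate = 1000;			/* assumed: ~1000 credit per ms */

		/* 2000000 * 1000 = 2e9; 2e9 >> 20 = 2e9 / 1048576 ~= 1907,
		 * close to the "exact" 2 ms * 1000 = 2000. */
		printf("credit earned: %llu\n", CREDIT_PER_NS(delta_ns, rate));
		return 0;
	}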