about summary refs log tree commit diff stats
path: root/kernel/bpf/helpers.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/bpf/helpers.c')
-rw-r--r--	kernel/bpf/helpers.c	80
1 file changed, 80 insertions(+), 0 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index a74972b07e74..fbe544761628 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -221,6 +221,86 @@ const struct bpf_func_proto bpf_get_current_comm_proto = {
221 .arg2_type = ARG_CONST_SIZE, 221 .arg2_type = ARG_CONST_SIZE,
222}; 222};
223 223
224#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
225
/*
 * Acquire the lock using the architecture's native arch_spinlock_t
 * (this branch is selected when CONFIG_QUEUED_SPINLOCKS or
 * CONFIG_BPF_ARCH_SPINLOCK is defined).
 *
 * BPF programs see struct bpf_spin_lock as a zero-initialized 4-byte
 * field, so the casts below are only valid if (a) arch_spinlock_t and
 * bpf_spin_lock are both exactly 32 bits and (b) the arch "unlocked"
 * value is the all-zero bit pattern. Both are checked at compile time.
 */
226static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
227{
228	arch_spinlock_t *l = (void *)lock;
229	union {
230		__u32 val;
231		arch_spinlock_t lock;
232	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
233
	/* The unlocked arch state must match BPF's zeroed lock field. */
234	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
235	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
236	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
237	arch_spin_lock(l);
238}
239
/* Release the lock via the architecture's native unlock primitive. */
240static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
241{
242	arch_spinlock_t *l = (void *)lock;
243
244	arch_spin_unlock(l);
245}
246
247#else
248
/*
 * Generic fallback: a simple test-and-set spinlock built on atomic_t,
 * used when no suitable 32-bit arch spinlock implementation exists.
 */
249static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
250{
251	atomic_t *l = (void *)lock;
252
253	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
254	do {
		/* Spin with relaxed reads until the lock word reads 0... */
255		atomic_cond_read_relaxed(l, !VAL);
	/* ...then try to claim it; a non-zero xchg result means another
	 * CPU won the race, so go back to spinning. */
256	} while (atomic_xchg(l, 1));
257}
258
/*
 * Release the test-and-set lock: store 0 with release ordering so
 * accesses inside the critical section cannot be reordered past the
 * unlock.
 */
259static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
260{
261	atomic_t *l = (void *)lock;
262
263	atomic_set_release(l, 0);
264}
265
266#endif
267
/*
 * Per-CPU slot for the IRQ flags saved by bpf_spin_lock() and restored
 * by bpf_spin_unlock(). A single slot per CPU is sufficient because the
 * lock is held with local IRQs disabled, so lock/unlock pairs cannot
 * nest on the same CPU between save and restore.
 */
268static DEFINE_PER_CPU(unsigned long, irqsave_flags);
269
/*
 * BPF helper bpf_spin_lock(lock): disable local IRQs, take the lock,
 * then stash the saved flags for the matching bpf_spin_unlock().
 * notrace: presumably to avoid tracer recursion while the lock is held
 * with IRQs off — confirm against the commit rationale.
 */
270notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
271{
272	unsigned long flags;
273
274	local_irq_save(flags);
275	__bpf_spin_lock(lock);
	/* Written only after the lock is held and with IRQs off, so
	 * nothing else on this CPU can clobber the slot before unlock. */
276	__this_cpu_write(irqsave_flags, flags);
277	return 0;
278}
279
/*
 * Verifier descriptor for bpf_spin_lock(): no return value, one
 * argument that must point at a struct bpf_spin_lock field; not
 * restricted to GPL-only programs.
 */
280const struct bpf_func_proto bpf_spin_lock_proto = {
281	.func		= bpf_spin_lock,
282	.gpl_only	= false,
283	.ret_type	= RET_VOID,
284	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
285};
286
/*
 * BPF helper bpf_spin_unlock(lock): read back the IRQ flags saved by
 * the matching bpf_spin_lock() on this CPU, release the lock, then
 * restore IRQ state. The flags are read *before* unlocking so that a
 * subsequent lock/unlock pair on this CPU cannot overwrite the per-CPU
 * slot first.
 */
287notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
288{
289	unsigned long flags;
290
291	flags = __this_cpu_read(irqsave_flags);
292	__bpf_spin_unlock(lock);
293	local_irq_restore(flags);
294	return 0;
295}
296
/*
 * Verifier descriptor for bpf_spin_unlock(): mirrors
 * bpf_spin_lock_proto — void return, single ARG_PTR_TO_SPIN_LOCK
 * argument, available to non-GPL programs.
 */
297const struct bpf_func_proto bpf_spin_unlock_proto = {
298	.func		= bpf_spin_unlock,
299	.gpl_only	= false,
300	.ret_type	= RET_VOID,
301	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
302};
303
224#ifdef CONFIG_CGROUPS 304#ifdef CONFIG_CGROUPS
225BPF_CALL_0(bpf_get_current_cgroup_id) 305BPF_CALL_0(bpf_get_current_cgroup_id)
226{ 306{