blob: d48cc77ba70db09707444d42ffa12affa691a99f (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
|
#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H
#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>
#include <trace/events/bkl.h>
/* True iff the current task holds the Big Kernel Lock (depth is -1 when unheld). */
#define kernel_locked() (current->lock_depth >= 0)
/* Slow paths, defined in lib/kernel_lock.c: re-take/drop the BKL across a schedule. */
extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);
/*
* Release/re-acquire global kernel lock for the scheduler
*/
/*
 * Drop the BKL on behalf of @tsk before it sleeps in the scheduler.
 * The common case is lock_depth < 0 (BKL not held), hence unlikely().
 */
#define release_kernel_lock(tsk) do { \
if (unlikely((tsk)->lock_depth >= 0)) \
__release_kernel_lock(); \
} while (0)
/*
 * Re-take the BKL for @task after it has been scheduled back in.
 * Returns 0 when the task did not hold the BKL (the common case);
 * otherwise returns the result of the slow-path reacquire.
 */
static inline int reacquire_kernel_lock(struct task_struct *task)
{
	if (likely(task->lock_depth < 0))
		return 0;
	return __reacquire_kernel_lock();
}
extern void __lockfunc _lock_kernel(void) __acquires(kernel_lock);
extern void __lockfunc _unlock_kernel(void) __releases(kernel_lock);

/*
 * lock_kernel()/unlock_kernel(): emit a BKL trace event recording the
 * caller's location, then acquire/release the Big Kernel Lock.
 *
 * Wrapped in do { ... } while (0) so each macro expands to exactly one
 * statement.  The previous bare { ... } form broke callers such as
 *	if (cond) lock_kernel(); else ...
 * because the ';' after the block terminated the if statement.
 */
#define lock_kernel() do { \
	trace_lock_kernel(__func__, __FILE__, __LINE__); \
	_lock_kernel(); \
} while (0)

#define unlock_kernel() do { \
	trace_unlock_kernel(__func__, __FILE__, __LINE__); \
	_unlock_kernel(); \
} while (0)
/*
* Various legacy drivers don't really need the BKL in a specific
* function, but they *do* need to know that the BKL became available.
* This function just avoids wrapping a bunch of lock/unlock pairs
* around code which doesn't really need it.
*/
/*
 * Briefly take and drop the BKL.  Legacy drivers use this when they do
 * not need the lock in a particular function but must wait until the
 * BKL becomes available (e.g. to serialize against init paths), without
 * wrapping their code in explicit lock/unlock pairs.
 */
static inline void cycle_kernel_lock(void)
{
lock_kernel();
unlock_kernel();
}
#else

/*
 * !CONFIG_LOCK_KERNEL: the BKL compiles away.  The lock/unlock wrappers
 * still emit the trace event but take no lock.
 *
 * NOTE(review): <trace/events/bkl.h> is only included in the
 * CONFIG_LOCK_KERNEL branch above, so trace_lock_kernel() may be
 * undeclared here — TODO confirm the trace header is visible in this
 * configuration.
 *
 * All function-like stubs are single statements (do { } while (0));
 * the previous definitions carried a trailing ';' inside the macro,
 * which produced double semicolons and broke if/else at call sites.
 */
#define lock_kernel() do { \
	trace_lock_kernel(__func__, __FILE__, __LINE__); \
} while (0)
#define unlock_kernel() do { \
	trace_unlock_kernel(__func__, __FILE__, __LINE__); \
} while (0)
#define release_kernel_lock(task)	do { } while (0)
#define cycle_kernel_lock()		do { } while (0)
#define reacquire_kernel_lock(task)	(0)
#define kernel_locked()			1
#endif /* CONFIG_LOCK_KERNEL */
#endif /* __LINUX_SMPLOCK_H */
|