Diffstat (limited to 'include/linux/percpu-rwsem.h')
-rw-r--r--  include/linux/percpu-rwsem.h  89
1 file changed, 89 insertions, 0 deletions
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
new file mode 100644
index 000000000000..cf80f7e5277f
--- /dev/null
+++ b/include/linux/percpu-rwsem.h
@@ -0,0 +1,89 @@
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>

struct percpu_rw_semaphore {
	unsigned __percpu *counters;
	bool locked;
	struct mutex mtx;
};

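/*
 * Reader fast path: under rcu_read_lock(), if no writer holds the
 * semaphore, taking it is just a per-CPU counter increment. If a
 * writer has already set ->locked, fall back to the slow path and
 * queue behind the writer on ->mtx.
 */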
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
	rcu_read_lock();
	if (unlikely(p->locked)) {
		rcu_read_unlock();
		mutex_lock(&p->mtx);
		this_cpu_inc(*p->counters);
		mutex_unlock(&p->mtx);
		return;
	}
	this_cpu_inc(*p->counters);
	rcu_read_unlock();
}

static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
	/*
	 * On x86, the write operation in this_cpu_dec serves as a memory
	 * unlock barrier (i.e. memory accesses may be moved before the
	 * write, but no memory accesses are moved past the write).
	 * On other architectures this may not be the case, so we need
	 * smp_mb() there.
	 */
#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
	barrier();
#else
	smp_mb();
#endif
	this_cpu_dec(*p->counters);
}

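/*
 * Sum all per-CPU reader counters. ACCESS_ONCE forces a fresh read of
 * each counter on every pass; the total only stops changing once
 * ->locked has diverted new readers to the slow path.
 */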
static inline unsigned __percpu_count(unsigned __percpu *counters)
{
	unsigned total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));

	return total;
}

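/*
 * Writer side: ->mtx excludes other writers, ->locked diverts new
 * readers to the slow path, synchronize_rcu() waits until every
 * reader that might not yet see ->locked has left its RCU read-side
 * critical section, and the polling loop waits for the remaining
 * in-flight readers to drain.
 */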
static inline void percpu_down_write(struct percpu_rw_semaphore *p)
{
	mutex_lock(&p->mtx);
	p->locked = true;
	synchronize_rcu();
	while (__percpu_count(p->counters))
		msleep(1);
	smp_rmb(); /* paired with smp_mb() in percpu_up_read() */
}

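/*
 * Clearing ->locked before dropping ->mtx re-enables the reader fast
 * path as soon as the writer is done.
 */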
static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
	p->locked = false;
	mutex_unlock(&p->mtx);
}

static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
{
	p->counters = alloc_percpu(unsigned);
	if (unlikely(!p->counters))
		return -ENOMEM;
	p->locked = false;
	mutex_init(&p->mtx);
	return 0;
}

static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
{
	free_percpu(p->counters);
	p->counters = NULL; /* catch use after free bugs */
}

#endif
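
For readers unfamiliar with the API this patch introduces, here is a minimal usage sketch. It is not part of the patch: the example_* functions and the shared_state variable are invented for illustration. Readers bracket their critical sections with percpu_down_read()/percpu_up_read(); the (rare) writer uses percpu_down_write()/percpu_up_write() after a successful percpu_init_rwsem().

/* Hypothetical usage, not part of this patch. */
static struct percpu_rw_semaphore state_sem;
static int shared_state;

static int example_init(void)
{
	return percpu_init_rwsem(&state_sem);	/* -ENOMEM on failure */
}

static void example_reader(void)
{
	percpu_down_read(&state_sem);	/* cheap: per-CPU increment */
	/* shared_state is stable here; the writer is excluded */
	percpu_up_read(&state_sem);
}

static void example_writer(void)
{
	percpu_down_write(&state_sem);	/* waits for all readers to drain */
	shared_state++;			/* exclusive access */
	percpu_up_write(&state_sem);
}

The asymmetry is the point of the primitive: the read-side cost is a per-CPU increment with no shared-cacheline traffic, while the write side pays a synchronize_rcu() plus a polling loop, so it suits read-mostly, write-rarely data.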