Diffstat (limited to 'include/linux/percpu-rwsem.h')
 include/linux/percpu-rwsem.h | 91 +++++++-----------------------
 1 file changed, 21 insertions(+), 70 deletions(-)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index bd1e86071e57..3e88c9a7d57f 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -1,83 +1,34 @@
 #ifndef _LINUX_PERCPU_RWSEM_H
 #define _LINUX_PERCPU_RWSEM_H
 
-#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/rwsem.h>
 #include <linux/percpu.h>
-#include <linux/rcupdate.h>
-#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/lockdep.h>
 
 struct percpu_rw_semaphore {
-	unsigned __percpu *counters;
-	bool locked;
-	struct mutex mtx;
+	unsigned int __percpu *fast_read_ctr;
+	atomic_t write_ctr;
+	struct rw_semaphore rw_sem;
+	atomic_t slow_read_ctr;
+	wait_queue_head_t write_waitq;
 };
 
-#define light_mb() barrier()
-#define heavy_mb() synchronize_sched_expedited()
+extern void percpu_down_read(struct percpu_rw_semaphore *);
+extern void percpu_up_read(struct percpu_rw_semaphore *);
 
-static inline void percpu_down_read(struct percpu_rw_semaphore *p)
-{
-	rcu_read_lock_sched();
-	if (unlikely(p->locked)) {
-		rcu_read_unlock_sched();
-		mutex_lock(&p->mtx);
-		this_cpu_inc(*p->counters);
-		mutex_unlock(&p->mtx);
-		return;
-	}
-	this_cpu_inc(*p->counters);
-	rcu_read_unlock_sched();
-	light_mb(); /* A, between read of p->locked and read of data, paired with D */
-}
+extern void percpu_down_write(struct percpu_rw_semaphore *);
+extern void percpu_up_write(struct percpu_rw_semaphore *);
 
-static inline void percpu_up_read(struct percpu_rw_semaphore *p)
-{
-	light_mb(); /* B, between read of the data and write to p->counter, paired with C */
-	this_cpu_dec(*p->counters);
-}
+extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
+			       const char *, struct lock_class_key *);
+extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
 
-static inline unsigned __percpu_count(unsigned __percpu *counters)
-{
-	unsigned total = 0;
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));
-
-	return total;
-}
-
-static inline void percpu_down_write(struct percpu_rw_semaphore *p)
-{
-	mutex_lock(&p->mtx);
-	p->locked = true;
-	synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
-	while (__percpu_count(p->counters))
-		msleep(1);
-	heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
-}
-
-static inline void percpu_up_write(struct percpu_rw_semaphore *p)
-{
-	heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
-	p->locked = false;
-	mutex_unlock(&p->mtx);
-}
-
-static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
-{
-	p->counters = alloc_percpu(unsigned);
-	if (unlikely(!p->counters))
-		return -ENOMEM;
-	p->locked = false;
-	mutex_init(&p->mtx);
-	return 0;
-}
-
-static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
-{
-	free_percpu(p->counters);
-	p->counters = NULL; /* catch use after free bugs */
-}
+#define percpu_init_rwsem(brw) \
+({ \
+	static struct lock_class_key rwsem_key; \
+	__percpu_init_rwsem(brw, #brw, &rwsem_key); \
+})
 
 #endif
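
Note: for context, a minimal usage sketch of the interface declared by the new header. Only the percpu_rw_semaphore type and the percpu_*() calls come from this patch; "my_data_lock" and the surrounding functions are hypothetical names chosen for illustration.

/*
 * Usage sketch (not part of the patch). percpu_init_rwsem() is a macro
 * so that each user gets its own static lock_class_key for lockdep.
 */
#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore my_data_lock;

static int my_data_init(void)
{
	/* returns 0, or a negative errno if the per-CPU counter allocation fails */
	return percpu_init_rwsem(&my_data_lock);
}

static void my_data_read(void)
{
	percpu_down_read(&my_data_lock);	/* fast path: per-CPU counter, no shared lock */
	/* ... read the protected data ... */
	percpu_up_read(&my_data_lock);
}

static void my_data_update(void)
{
	percpu_down_write(&my_data_lock);	/* excludes readers; waits for them to drain */
	/* ... modify the protected data ... */
	percpu_up_write(&my_data_lock);
}

As the struct suggests, readers normally touch only fast_read_ctr and fall back to the embedded rw_sem/slow_read_ctr path while a writer (tracked by write_ctr) is pending; the writer sleeps on write_waitq until the reader counts drain.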