author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-21 15:54:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-21 15:54:12 -0400
commit		888a6f77e0418b049f83d37547c209b904d30af4 (patch)
tree		42cdb9f781d2177e6b380e69a66a27ec7705f51f /include
parent		31b7eab27a314b153d8fa07ba9e9ec00a98141e1 (diff)
parent		6506cf6ce68d78a5470a8360c965dafe8e4b78e3 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (52 commits)
sched: fix RCU lockdep splat from task_group()
rcu: using ACCESS_ONCE() to observe the jiffies_stall/rnp->qsmask value
sched: suppress RCU lockdep splat in task_fork_fair
net: suppress RCU lockdep false positive in sock_update_classid
rcu: move check from rcu_dereference_bh to rcu_read_lock_bh_held
rcu: Add advice to PROVE_RCU_REPEATEDLY kernel config parameter
rcu: Add tracing data to support queueing models
rcu: fix sparse errors in rcutorture.c
rcu: only one evaluation of arg in rcu_dereference_check() unless sparse
kernel: Remove undead ifdef CONFIG_DEBUG_LOCK_ALLOC
rcu: fix _oddness handling of verbose stall warnings
rcu: performance fixes to TINY_PREEMPT_RCU callback checking
rcu: upgrade stallwarn.txt documentation for CPU-bound RT processes
vhost: add __rcu annotations
rcu: add comment stating that list_empty() applies to RCU-protected lists
rcu: apply TINY_PREEMPT_RCU read-side speedup to TREE_PREEMPT_RCU
rcu: combine duplicate code, courtesy of CONFIG_PREEMPT_RCU
rcu: Upgrade srcu_read_lock() docbook about SRCU grace periods
rcu: document ways of stalling updates in low-memory situations
rcu: repair code-duplication FIXMEs
...
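
The unifying theme of this series is the new __rcu sparse annotation: pointers handed between readers and updaters via RCU are now declared __rcu, and the rcu_dereference() family both strips and checks that annotation. As a purely illustrative sketch (the conf structure and function names below are invented for this note, not taken from the patch), the usage pattern the annotation enforces looks like this:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct conf {
        int threshold;
};

static struct conf __rcu *cur_conf;     /* RCU-protected: annotated __rcu */

/* Reader: fetches the pointer through rcu_dereference() inside a
 * read-side critical section; a plain load would now be flagged. */
static int get_threshold(void)
{
        struct conf *c;
        int t = 0;

        rcu_read_lock();
        c = rcu_dereference(cur_conf);
        if (c)
                t = c->threshold;
        rcu_read_unlock();
        return t;
}

/* Updater (assumed serialized by the caller): publishes the new
 * version with rcu_assign_pointer() and frees the old one only after
 * a grace period has elapsed. */
static int set_threshold(int t)
{
        struct conf *newc = kmalloc(sizeof(*newc), GFP_KERNEL);
        struct conf *oldc;

        if (!newc)
                return -ENOMEM;
        newc->threshold = t;
        oldc = rcu_dereference_protected(cur_conf, 1);
        rcu_assign_pointer(cur_conf, newc);
        synchronize_rcu();      /* wait for pre-existing readers */
        kfree(oldc);
        return 0;
}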
Diffstat (limited to 'include')
 include/linux/cgroup.h               |   4
 include/linux/compiler.h             |   4
 include/linux/cred.h                 |   2
 include/linux/fdtable.h              |   6
 include/linux/fs.h                   |   2
 include/linux/genhd.h                |   6
 include/linux/hardirq.h              |   2
 include/linux/idr.h                  |   4
 include/linux/init_task.h            |  14
 include/linux/input.h                |   2
 include/linux/iocontext.h            |   2
 include/linux/key.h                  |   3
 include/linux/kvm_host.h             |   2
 include/linux/mm_types.h             |   2
 include/linux/nfs_fs.h               |   2
 include/linux/notifier.h             |  10
 include/linux/radix-tree.h           |   4
 include/linux/rculist.h              |  62
 include/linux/rculist_nulls.h        |  16
 include/linux/rcupdate.h             | 490
 include/linux/rcutiny.h              | 104
 include/linux/rcutree.h              |  57
 include/linux/sched.h                |  16
 include/linux/srcu.h                 |  34
 include/linux/sunrpc/auth_gss.h      |   4
 include/net/cls_cgroup.h             |   3
 include/net/netfilter/nf_conntrack.h |   2
 27 files changed, 527 insertions(+), 332 deletions(-)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0c991023ee4..709dfb901d1 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -75,7 +75,7 @@ struct cgroup_subsys_state {
 
 	unsigned long flags;
 	/* ID for this css, if possible */
-	struct css_id *id;
+	struct css_id __rcu *id;
 };
 
 /* bits in struct cgroup_subsys_state flags field */
@@ -205,7 +205,7 @@ struct cgroup {
 	struct list_head children;	/* my children */
 
 	struct cgroup *parent;	/* my parent */
-	struct dentry *dentry;	/* cgroup fs entry, RCU protected */
+	struct dentry __rcu *dentry;	/* cgroup fs entry, RCU protected */
 
 	/* Private pointers for each registered subsystem */
 	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c1a62c56a66..320d6c94ff8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -16,7 +16,11 @@
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu	__attribute__((noderef, address_space(3)))
+#ifdef CONFIG_SPARSE_RCU_POINTER
+# define __rcu		__attribute__((noderef, address_space(4)))
+#else
 # define __rcu
+#endif
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
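
With CONFIG_SPARSE_RCU_POINTER=y, sparse places __rcu pointers in its address space 4 and marks them noderef, so any access that does not go through an RCU accessor (which casts the annotation away with __force) draws a warning. A hypothetical fragment, for illustration only:

struct foo {
        int a;
};

struct bar {
        struct foo __rcu *f;
};

static int broken_read(struct bar *b)
{
        return b->f->a;         /* sparse: dereference of noderef expression */
}

static int proper_read(struct bar *b)
{
        int ret;

        rcu_read_lock();
        ret = rcu_dereference(b->f)->a; /* internal __force cast strips __rcu;
                                           assumes b->f is never NULL */
        rcu_read_unlock();
        return ret;
}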
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4d2c39573f3..4aaeab37644 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -84,7 +84,7 @@ struct thread_group_cred {
 	atomic_t	usage;
 	pid_t		tgid;			/* thread group process ID */
 	spinlock_t	lock;
-	struct key	*session_keyring;	/* keyring inherited over fork */
+	struct key __rcu *session_keyring;	/* keyring inherited over fork */
 	struct key	*process_keyring;	/* keyring private to this process */
 	struct rcu_head	rcu;			/* RCU deletion hook */
 };
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index f59ed297b66..133c0ba25e3 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -31,7 +31,7 @@ struct embedded_fd_set {
 
 struct fdtable {
 	unsigned int max_fds;
-	struct file ** fd;      /* current fd array */
+	struct file __rcu **fd;      /* current fd array */
 	fd_set *close_on_exec;
 	fd_set *open_fds;
 	struct rcu_head rcu;
@@ -46,7 +46,7 @@ struct files_struct {
   * read mostly part
   */
 	atomic_t count;
-	struct fdtable *fdt;
+	struct fdtable __rcu *fdt;
 	struct fdtable fdtab;
   /*
   * written part on a separate cache line in SMP
@@ -55,7 +55,7 @@ struct files_struct {
 	int next_fd;
 	struct embedded_fd_set close_on_exec_init;
 	struct embedded_fd_set open_fds_init;
-	struct file * fd_array[NR_OPEN_DEFAULT];
+	struct file __rcu * fd_array[NR_OPEN_DEFAULT];
 };
 
 #define rcu_dereference_check_fdtable(files, fdtfd) \
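
Both the table pointer and the array elements are now annotated, so a descriptor lookup dereferences __rcu twice. A simplified, hypothetical reader (the in-tree lookup helpers additionally pass lockdep conditions via rcu_dereference_check_fdtable()):

#include <linux/fdtable.h>
#include <linux/rcupdate.h>

static struct file *peek_file(struct files_struct *files, unsigned int fd)
{
        struct fdtable *fdt;
        struct file *file = NULL;

        rcu_read_lock();
        fdt = rcu_dereference(files->fdt);              /* __rcu table pointer */
        if (fd < fdt->max_fds)
                file = rcu_dereference(fdt->fd[fd]);    /* __rcu array slot */
        rcu_read_unlock();
        return file;    /* only valid under a scheme that pins its lifetime */
}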
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 63d069bd80b..3168dcfb94f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1384,7 +1384,7 @@ struct super_block {
 	 * Saved mount options for lazy filesystems using
 	 * generic_show_options()
 	 */
-	char *s_options;
+	char __rcu *s_options;
 };
 
 extern struct timespec current_fs_time(struct super_block *sb);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 5f2f4c4d8fb..af3f06b41dc 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -129,8 +129,8 @@ struct blk_scsi_cmd_filter {
 struct disk_part_tbl {
 	struct rcu_head rcu_head;
 	int len;
-	struct hd_struct *last_lookup;
-	struct hd_struct *part[];
+	struct hd_struct __rcu *last_lookup;
+	struct hd_struct __rcu *part[];
 };
 
 struct gendisk {
@@ -149,7 +149,7 @@ struct gendisk {
 	 * non-critical accesses use RCU.  Always access through
 	 * helpers.
 	 */
-	struct disk_part_tbl *part_tbl;
+	struct disk_part_tbl __rcu *part_tbl;
 	struct hd_struct part0;
 
 	const struct block_device_operations *fops;
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index d5b387669da..1f4517d55b1 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,7 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
 #endif
 
 #if defined(CONFIG_NO_HZ)
-#if defined(CONFIG_TINY_RCU)
+#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
 extern void rcu_enter_nohz(void);
 extern void rcu_exit_nohz(void);
 
diff --git a/include/linux/idr.h b/include/linux/idr.h
index e968db71e33..cdb715e58e3 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -50,14 +50,14 @@
 
 struct idr_layer {
 	unsigned long		 bitmap; /* A zero bit means "space here" */
-	struct idr_layer	*ary[1<<IDR_BITS];
+	struct idr_layer __rcu	*ary[1<<IDR_BITS];
 	int			 count;	 /* When zero, we can release it */
 	int			 layer;	 /* distance from leaf */
 	struct rcu_head		 rcu_head;
 };
 
 struct idr {
-	struct idr_layer *top;
+	struct idr_layer __rcu *top;
 	struct idr_layer *id_free;
 	int		  layers; /* only valid without concurrent changes */
 	int		  id_free_cnt;
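
Lookups descend these __rcu child pointers with rcu_dereference() internally, which is why idr_find() can run locklessly. A hypothetical caller (struct my_obj is invented for this note) might look like:

#include <linux/idr.h>
#include <linux/rcupdate.h>

struct my_obj {
        int id;
};

static DEFINE_IDR(obj_idr);     /* ->top is now an __rcu pointer */

/* idr_find() performs the rcu_dereference() calls itself, so the
 * caller only supplies the read-side critical section and must
 * guarantee the object's lifetime (e.g. via RCU-deferred freeing). */
static struct my_obj *obj_lookup(int id)
{
        struct my_obj *obj;

        rcu_read_lock();
        obj = idr_find(&obj_idr, id);
        rcu_read_unlock();
        return obj;
}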
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1f43fa56f60..2fea6c8ef6b 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -82,11 +82,17 @@ extern struct group_info init_groups;
 # define CAP_INIT_BSET  CAP_FULL_SET
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
+#define INIT_TASK_RCU_TREE_PREEMPT() \
+	.rcu_blocked_node = NULL,
+#else
+#define INIT_TASK_RCU_TREE_PREEMPT(tsk)
+#endif
+#ifdef CONFIG_PREEMPT_RCU
 #define INIT_TASK_RCU_PREEMPT(tsk)					\
 	.rcu_read_lock_nesting = 0,					\
 	.rcu_read_unlock_special = 0,					\
-	.rcu_blocked_node = NULL,					\
-	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
+	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),		\
+	INIT_TASK_RCU_TREE_PREEMPT()
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
@@ -137,8 +143,8 @@ extern struct cred init_cred;
 	.children	= LIST_HEAD_INIT(tsk.children),			\
 	.sibling	= LIST_HEAD_INIT(tsk.sibling),			\
 	.group_leader	= &tsk,						\
-	.real_cred	= &init_cred,					\
-	.cred		= &init_cred,					\
+	RCU_INIT_POINTER(.real_cred, &init_cred),			\
+	RCU_INIT_POINTER(.cred, &init_cred),				\
 	.cred_guard_mutex =						\
 		 __MUTEX_INITIALIZER(tsk.cred_guard_mutex),		\
 	.comm		= "swapper",					\
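
RCU_INIT_POINTER() assigns an __rcu pointer without the memory barrier that rcu_assign_pointer() implies, which is legitimate in initializers and other contexts where no reader can yet see the pointer. A hypothetical module-init sketch (names invented for this note):

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
        int val;
};

static struct cfg __rcu *active_cfg;

static int __init cfg_setup(void)
{
        struct cfg *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return -ENOMEM;
        c->val = 42;
        /* No readers exist yet, so no memory barrier is required. */
        RCU_INIT_POINTER(active_cfg, c);
        return 0;
}

Once readers may be running, later updates must publish with rcu_assign_pointer() so that the stores initializing the structure are ordered before the pointer becomes visible.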
diff --git a/include/linux/input.h b/include/linux/input.h
index 896a92227bc..d6ae1761be9 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1196,7 +1196,7 @@ struct input_dev {
 	int (*flush)(struct input_dev *dev, struct file *file);
 	int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
 
-	struct input_handle *grab;
+	struct input_handle __rcu *grab;
 
 	spinlock_t event_lock;
 	struct mutex mutex;
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 64d52913303..3e70b21884a 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -53,7 +53,7 @@ struct io_context {
 
 	struct radix_tree_root radix_root;
 	struct hlist_head cic_list;
-	void *ioc_data;
+	void __rcu *ioc_data;
 };
 
 static inline struct io_context *ioc_task_link(struct io_context *ioc)
diff --git a/include/linux/key.h b/include/linux/key.h
index cd50dfa1d4c..3db0adce1fd 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -178,8 +178,9 @@ struct key {
 	 */
 	union {
 		unsigned long		value;
+		void __rcu		*rcudata;
 		void			*data;
-		struct keyring_list	*subscriptions;
+		struct keyring_list __rcu *subscriptions;
 	} payload;
 };
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c13cc48697a..ac740b26eb1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -205,7 +205,7 @@ struct kvm {
 
 	struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
-	struct kvm_irq_routing_table *irq_routing;
+	struct kvm_irq_routing_table __rcu *irq_routing;
 	struct hlist_head mask_notifier_list;
 	struct hlist_head irq_ack_notifier_list;
 #endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ee7e258627f..cb57d657ce4 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -299,7 +299,7 @@ struct mm_struct {
 	 * new_owner->mm == mm
 	 * new_owner->alloc_lock is held
 	 */
-	struct task_struct *owner;
+	struct task_struct __rcu *owner;
 #endif
 
 #ifdef CONFIG_PROC_FS
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 508f8cf6da3..d0edf7d823a 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -185,7 +185,7 @@ struct nfs_inode {
 	struct nfs4_cached_acl	*nfs4_acl;
         /* NFSv4 state */
 	struct list_head	open_states;
-	struct nfs_delegation	*delegation;
+	struct nfs_delegation __rcu *delegation;
 	fmode_t			 delegation_state;
 	struct rw_semaphore	rwsem;
 #endif /* CONFIG_NFS_V4*/
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index b2f1a4d8355..2026f9e1ceb 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -49,28 +49,28 @@
 
 struct notifier_block {
 	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
-	struct notifier_block *next;
+	struct notifier_block __rcu *next;
 	int priority;
 };
 
 struct atomic_notifier_head {
 	spinlock_t lock;
-	struct notifier_block *head;
+	struct notifier_block __rcu *head;
 };
 
 struct blocking_notifier_head {
 	struct rw_semaphore rwsem;
-	struct notifier_block *head;
+	struct notifier_block __rcu *head;
 };
 
 struct raw_notifier_head {
-	struct notifier_block *head;
+	struct notifier_block __rcu *head;
};
 
 struct srcu_notifier_head {
 	struct mutex mutex;
 	struct srcu_struct srcu;
-	struct notifier_block *head;
+	struct notifier_block __rcu *head;
 };
 
 #define ATOMIC_INIT_NOTIFIER_HEAD(name) do {	\
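
Annotating every ->next and ->head link documents that atomic notifier chains are walked under RCU while registration holds the head's lock. A hypothetical sketch of the read side (the in-tree notifier_call_chain() is more general, with call-count accounting):

#include <linux/notifier.h>
#include <linux/rcupdate.h>

static int walk_chain(struct atomic_notifier_head *nh,
                      unsigned long val, void *v)
{
        struct notifier_block *nb;
        int ret = NOTIFY_DONE;

        rcu_read_lock();
        nb = rcu_dereference(nh->head);
        while (nb) {
                ret = nb->notifier_call(nb, val, v);
                if (ret & NOTIFY_STOP_MASK)     /* callee ended the walk */
                        break;
                nb = rcu_dereference(nb->next); /* __rcu link */
        }
        rcu_read_unlock();
        return ret;
}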
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 634b8e674ac..a39cbed9ee1 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -47,6 +47,8 @@ static inline void *radix_tree_indirect_to_ptr(void *ptr)
 {
 	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
 }
+#define radix_tree_indirect_to_ptr(ptr) \
+	radix_tree_indirect_to_ptr((void __force *)(ptr))
 
 static inline int radix_tree_is_indirect_ptr(void *ptr)
 {
@@ -61,7 +63,7 @@ static inline int radix_tree_is_indirect_ptr(void *ptr)
 struct radix_tree_root {
 	unsigned int		height;
 	gfp_t			gfp_mask;
-	struct radix_tree_node	__rcu *rnode;
+	struct radix_tree_node	__rcu *rnode;
 };
 
 #define RADIX_TREE_INIT(mask)	{					\
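
The macro wrapper strips the new __rcu annotation with __force because the underlying helper takes a plain void *. For callers, nothing changes: lookups remain lockless because the tree's internal descent already uses rcu_dereference(). A hypothetical reader, for illustration only:

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static RADIX_TREE(my_tree, GFP_ATOMIC); /* root with an __rcu rnode */

/* radix_tree_lookup() does the rcu_dereference() calls internally;
 * the caller supplies only the read-side critical section, and the
 * returned item is safe to use only while its lifetime is otherwise
 * guaranteed (e.g. RCU-deferred freeing). */
static void *lookup_item(unsigned long index)
{
        void *item;

        rcu_read_lock();
        item = radix_tree_lookup(&my_tree, index);
        rcu_read_unlock();
        return item;
}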
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4ec3b38ce9c..f31ef61f1c6 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -10,6 +10,21 @@
 #include <linux/rcupdate.h>
 
 /*
+ * Why is there no list_empty_rcu()?  Because list_empty() serves this
+ * purpose.  The list_empty() function fetches the RCU-protected pointer
+ * and compares it to the address of the list head, but neither dereferences
+ * this pointer itself nor provides this pointer to the caller.  Therefore,
+ * it is not necessary to use rcu_dereference(), so that list_empty() can
+ * be used anywhere you would want to use a list_empty_rcu().
+ */
+
+/*
+ * return the ->next pointer of a list_head in an rcu safe
+ * way, we must not access it directly
+ */
+#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))
+
+/*
  * Insert a new entry between two known consecutive entries.
  *
  * This is only for internal list manipulation where we know
@@ -20,7 +35,7 @@ static inline void __list_add_rcu(struct list_head *new,
 {
 	new->next = next;
 	new->prev = prev;
-	rcu_assign_pointer(prev->next, new);
+	rcu_assign_pointer(list_next_rcu(prev), new);
 	next->prev = new;
 }
 
@@ -138,7 +153,7 @@ static inline void list_replace_rcu(struct list_head *old,
 {
 	new->next = old->next;
 	new->prev = old->prev;
-	rcu_assign_pointer(new->prev->next, new);
+	rcu_assign_pointer(list_next_rcu(new->prev), new);
 	new->next->prev = new;
 	old->prev = LIST_POISON2;
 }
@@ -193,7 +208,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	 */
 
 	last->next = at;
-	rcu_assign_pointer(head->next, first);
+	rcu_assign_pointer(list_next_rcu(head), first);
 	first->prev = head;
 	at->prev = last;
 }
@@ -208,7 +223,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-	container_of(rcu_dereference_raw(ptr), type, member)
+	({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
+	 container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
+	})
 
 /**
  * list_first_entry_rcu - get the first element from a list
@@ -225,9 +242,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	list_entry_rcu((ptr)->next, type, member)
 
 #define __list_for_each_rcu(pos, head) \
-	for (pos = rcu_dereference_raw((head)->next); \
+	for (pos = rcu_dereference_raw(list_next_rcu(head)); \
 		pos != (head); \
-		pos = rcu_dereference_raw(pos->next))
+		pos = rcu_dereference_raw(list_next_rcu((pos)))
 
 /**
  * list_for_each_entry_rcu - iterate over rcu list of given type
@@ -257,9 +274,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * as long as the traversal is guarded by rcu_read_lock().
  */
 #define list_for_each_continue_rcu(pos, head) \
-	for ((pos) = rcu_dereference_raw((pos)->next); \
+	for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
 		prefetch((pos)->next), (pos) != (head); \
-		(pos) = rcu_dereference_raw((pos)->next))
+		(pos) = rcu_dereference_raw(list_next_rcu(pos)))
 
 /**
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
@@ -314,12 +331,19 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
 
 	new->next = next;
 	new->pprev = old->pprev;
-	rcu_assign_pointer(*new->pprev, new);
+	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
 	if (next)
 		new->next->pprev = &new->next;
 	old->pprev = LIST_POISON2;
 }
 
+/*
+ * return the first or the next element in an RCU protected hlist
+ */
+#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
+#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
+#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))
+
 /**
  * hlist_add_head_rcu
  * @n: the element to add to the hash list.
@@ -346,7 +370,7 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
 
 	n->next = first;
 	n->pprev = &h->first;
-	rcu_assign_pointer(h->first, n);
+	rcu_assign_pointer(hlist_first_rcu(h), n);
 	if (first)
 		first->pprev = &n->next;
 }
@@ -374,7 +398,7 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
 {
 	n->pprev = next->pprev;
 	n->next = next;
-	rcu_assign_pointer(*(n->pprev), n);
+	rcu_assign_pointer(hlist_pprev_rcu(n), n);
 	next->pprev = &n->next;
 }
 
@@ -401,15 +425,15 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
 {
 	n->next = prev->next;
 	n->pprev = &prev->next;
-	rcu_assign_pointer(prev->next, n);
+	rcu_assign_pointer(hlist_next_rcu(prev), n);
 	if (n->next)
 		n->next->pprev = &n->next;
 }
 
 #define __hlist_for_each_rcu(pos, head)				\
-	for (pos = rcu_dereference((head)->first);		\
+	for (pos = rcu_dereference(hlist_first_rcu(head));	\
 		pos && ({ prefetch(pos->next); 1; });		\
-		pos = rcu_dereference(pos->next))
+		pos = rcu_dereference(hlist_next_rcu(pos)))
 
 /**
  * hlist_for_each_entry_rcu - iterate over rcu list of given type
@@ -422,11 +446,11 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
 #define hlist_for_each_entry_rcu(tpos, pos, head, member)		\
-	for (pos = rcu_dereference_raw((head)->first);			\
+	for (pos = rcu_dereference_raw(hlist_first_rcu(head));		\
 		pos && ({ prefetch(pos->next); 1; }) &&			\
 		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference_raw(pos->next))
+		pos = rcu_dereference_raw(hlist_next_rcu(pos)))
 
 /**
  * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
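
Callers are unaffected by the list_next_rcu()/hlist_*_rcu() helpers: the iterators now expand to rcu_dereference_raw() on an explicitly __rcu-typed lvalue, so each ->next load stays ACCESS_ONCE()'d and sparse sees the annotation. A canonical reader, sketched with invented names for this note:

#include <linux/errno.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct item {
        int key;
        int value;
        struct list_head link;  /* linked via list_add_rcu() */
};

static LIST_HEAD(items);        /* updates serialized by a caller-held lock */

static int find_value(int key, int *val)
{
        struct item *it;
        int ret = -ENOENT;

        rcu_read_lock();
        list_for_each_entry_rcu(it, &items, link) {
                if (it->key == key) {
                        *val = it->value;       /* copy out under the read lock */
                        ret = 0;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}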
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index b70ffe53cb9..2ae13714828 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -37,6 +37,12 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
 	}
 }
 
+#define hlist_nulls_first_rcu(head) \
+	(*((struct hlist_nulls_node __rcu __force **)&(head)->first))
+
+#define hlist_nulls_next_rcu(node) \
+	(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
+
 /**
  * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
  * @n: the element to delete from the hash list.
@@ -88,7 +94,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
 
 	n->next = first;
 	n->pprev = &h->first;
-	rcu_assign_pointer(h->first, n);
+	rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
 	if (!is_a_nulls(first))
 		first->pprev = &n->next;
 }
@@ -100,11 +106,11 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
  * @member: the name of the hlist_nulls_node within the struct.
  *
  */
 #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)		\
-	for (pos = rcu_dereference_raw((head)->first);				\
+	for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));		\
 		(!is_a_nulls(pos)) &&						\
 		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference_raw(pos->next))
+		pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
 
 #endif
 #endif
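
Nulls lists pair RCU traversal with an end marker that encodes which chain terminated the walk. A hypothetical lookup in that style (names invented for this note): because an entry can be freed and reused on another chain mid-walk in SLAB_DESTROY_BY_RCU schemes, the reader checks the nulls value at the end and restarts if it landed on the wrong chain.

#include <linux/rculist_nulls.h>
#include <linux/rcupdate.h>

struct entry {
        int                     key;
        struct hlist_nulls_node node;
};

/* Caller holds rcu_read_lock(); bucket_nr is the nulls value stored
 * at this bucket's chain end. */
static struct entry *lookup(struct hlist_nulls_head *bucket,
                            unsigned long bucket_nr, int key)
{
        struct entry *e;
        struct hlist_nulls_node *pos;

begin:
        hlist_nulls_for_each_entry_rcu(e, pos, bucket, node) {
                if (e->key == key)
                        return e;
        }
        if (get_nulls_value(pos) != bucket_nr)
                goto begin;     /* walked onto another chain; retry */
        return NULL;
}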
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 83af1f8d8b7..03cda7bed98 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -41,11 +41,15 @@ | |||
41 | #include <linux/lockdep.h> | 41 | #include <linux/lockdep.h> |
42 | #include <linux/completion.h> | 42 | #include <linux/completion.h> |
43 | #include <linux/debugobjects.h> | 43 | #include <linux/debugobjects.h> |
44 | #include <linux/compiler.h> | ||
44 | 45 | ||
45 | #ifdef CONFIG_RCU_TORTURE_TEST | 46 | #ifdef CONFIG_RCU_TORTURE_TEST |
46 | extern int rcutorture_runnable; /* for sysctl */ | 47 | extern int rcutorture_runnable; /* for sysctl */ |
47 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 48 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
48 | 49 | ||
50 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) | ||
51 | #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) | ||
52 | |||
49 | /** | 53 | /** |
50 | * struct rcu_head - callback structure for use with RCU | 54 | * struct rcu_head - callback structure for use with RCU |
51 | * @next: next update requests in a list | 55 | * @next: next update requests in a list |
@@ -57,29 +61,94 @@ struct rcu_head { | |||
57 | }; | 61 | }; |
58 | 62 | ||
59 | /* Exported common interfaces */ | 63 | /* Exported common interfaces */ |
60 | extern void rcu_barrier(void); | 64 | extern void call_rcu_sched(struct rcu_head *head, |
65 | void (*func)(struct rcu_head *rcu)); | ||
66 | extern void synchronize_sched(void); | ||
61 | extern void rcu_barrier_bh(void); | 67 | extern void rcu_barrier_bh(void); |
62 | extern void rcu_barrier_sched(void); | 68 | extern void rcu_barrier_sched(void); |
63 | extern void synchronize_sched_expedited(void); | 69 | extern void synchronize_sched_expedited(void); |
64 | extern int sched_expedited_torture_stats(char *page); | 70 | extern int sched_expedited_torture_stats(char *page); |
65 | 71 | ||
72 | static inline void __rcu_read_lock_bh(void) | ||
73 | { | ||
74 | local_bh_disable(); | ||
75 | } | ||
76 | |||
77 | static inline void __rcu_read_unlock_bh(void) | ||
78 | { | ||
79 | local_bh_enable(); | ||
80 | } | ||
81 | |||
82 | #ifdef CONFIG_PREEMPT_RCU | ||
83 | |||
84 | extern void __rcu_read_lock(void); | ||
85 | extern void __rcu_read_unlock(void); | ||
86 | void synchronize_rcu(void); | ||
87 | |||
88 | /* | ||
89 | * Defined as a macro as it is a very low level header included from | ||
90 | * areas that don't even know about current. This gives the rcu_read_lock() | ||
91 | * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other | ||
92 | * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. | ||
93 | */ | ||
94 | #define rcu_preempt_depth() (current->rcu_read_lock_nesting) | ||
95 | |||
96 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | ||
97 | |||
98 | static inline void __rcu_read_lock(void) | ||
99 | { | ||
100 | preempt_disable(); | ||
101 | } | ||
102 | |||
103 | static inline void __rcu_read_unlock(void) | ||
104 | { | ||
105 | preempt_enable(); | ||
106 | } | ||
107 | |||
108 | static inline void synchronize_rcu(void) | ||
109 | { | ||
110 | synchronize_sched(); | ||
111 | } | ||
112 | |||
113 | static inline int rcu_preempt_depth(void) | ||
114 | { | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | ||
119 | |||
66 | /* Internal to kernel */ | 120 | /* Internal to kernel */ |
67 | extern void rcu_init(void); | 121 | extern void rcu_init(void); |
122 | extern void rcu_sched_qs(int cpu); | ||
123 | extern void rcu_bh_qs(int cpu); | ||
124 | extern void rcu_check_callbacks(int cpu, int user); | ||
125 | struct notifier_block; | ||
126 | |||
127 | #ifdef CONFIG_NO_HZ | ||
128 | |||
129 | extern void rcu_enter_nohz(void); | ||
130 | extern void rcu_exit_nohz(void); | ||
131 | |||
132 | #else /* #ifdef CONFIG_NO_HZ */ | ||
133 | |||
134 | static inline void rcu_enter_nohz(void) | ||
135 | { | ||
136 | } | ||
137 | |||
138 | static inline void rcu_exit_nohz(void) | ||
139 | { | ||
140 | } | ||
141 | |||
142 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
68 | 143 | ||
69 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 144 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
70 | #include <linux/rcutree.h> | 145 | #include <linux/rcutree.h> |
71 | #elif defined(CONFIG_TINY_RCU) | 146 | #elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) |
72 | #include <linux/rcutiny.h> | 147 | #include <linux/rcutiny.h> |
73 | #else | 148 | #else |
74 | #error "Unknown RCU implementation specified to kernel configuration" | 149 | #error "Unknown RCU implementation specified to kernel configuration" |
75 | #endif | 150 | #endif |
76 | 151 | ||
77 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } | ||
78 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT | ||
79 | #define INIT_RCU_HEAD(ptr) do { \ | ||
80 | (ptr)->next = NULL; (ptr)->func = NULL; \ | ||
81 | } while (0) | ||
82 | |||
83 | /* | 152 | /* |
84 | * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic | 153 | * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic |
85 | * initialization and destruction of rcu_head on the stack. rcu_head structures | 154 | * initialization and destruction of rcu_head on the stack. rcu_head structures |
@@ -120,14 +189,15 @@ extern struct lockdep_map rcu_sched_lock_map; | |||
120 | extern int debug_lockdep_rcu_enabled(void); | 189 | extern int debug_lockdep_rcu_enabled(void); |
121 | 190 | ||
122 | /** | 191 | /** |
123 | * rcu_read_lock_held - might we be in RCU read-side critical section? | 192 | * rcu_read_lock_held() - might we be in RCU read-side critical section? |
124 | * | 193 | * |
125 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU | 194 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU |
126 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, | 195 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
127 | * this assumes we are in an RCU read-side critical section unless it can | 196 | * this assumes we are in an RCU read-side critical section unless it can |
128 | * prove otherwise. | 197 | * prove otherwise. This is useful for debug checks in functions that |
198 | * require that they be called within an RCU read-side critical section. | ||
129 | * | 199 | * |
130 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot | 200 | * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot |
131 | * and while lockdep is disabled. | 201 | * and while lockdep is disabled. |
132 | */ | 202 | */ |
133 | static inline int rcu_read_lock_held(void) | 203 | static inline int rcu_read_lock_held(void) |
@@ -144,14 +214,16 @@ static inline int rcu_read_lock_held(void) | |||
144 | extern int rcu_read_lock_bh_held(void); | 214 | extern int rcu_read_lock_bh_held(void); |
145 | 215 | ||
146 | /** | 216 | /** |
147 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? | 217 | * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? |
148 | * | 218 | * |
149 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an | 219 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an |
150 | * RCU-sched read-side critical section. In absence of | 220 | * RCU-sched read-side critical section. In absence of |
151 | * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side | 221 | * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side |
152 | * critical section unless it can prove otherwise. Note that disabling | 222 | * critical section unless it can prove otherwise. Note that disabling |
153 | * of preemption (including disabling irqs) counts as an RCU-sched | 223 | * of preemption (including disabling irqs) counts as an RCU-sched |
154 | * read-side critical section. | 224 | * read-side critical section. This is useful for debug checks in functions |
225 | * that required that they be called within an RCU-sched read-side | ||
226 | * critical section. | ||
155 | * | 227 | * |
156 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot | 228 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
157 | * and while lockdep is disabled. | 229 | * and while lockdep is disabled. |
@@ -211,7 +283,11 @@ static inline int rcu_read_lock_sched_held(void) | |||
211 | 283 | ||
212 | extern int rcu_my_thread_group_empty(void); | 284 | extern int rcu_my_thread_group_empty(void); |
213 | 285 | ||
214 | #define __do_rcu_dereference_check(c) \ | 286 | /** |
287 | * rcu_lockdep_assert - emit lockdep splat if specified condition not met | ||
288 | * @c: condition to check | ||
289 | */ | ||
290 | #define rcu_lockdep_assert(c) \ | ||
215 | do { \ | 291 | do { \ |
216 | static bool __warned; \ | 292 | static bool __warned; \ |
217 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ | 293 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ |
@@ -220,41 +296,163 @@ extern int rcu_my_thread_group_empty(void); | |||
220 | } \ | 296 | } \ |
221 | } while (0) | 297 | } while (0) |
222 | 298 | ||
299 | #else /* #ifdef CONFIG_PROVE_RCU */ | ||
300 | |||
301 | #define rcu_lockdep_assert(c) do { } while (0) | ||
302 | |||
303 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | ||
304 | |||
305 | /* | ||
306 | * Helper functions for rcu_dereference_check(), rcu_dereference_protected() | ||
307 | * and rcu_assign_pointer(). Some of these could be folded into their | ||
308 | * callers, but they are left separate in order to ease introduction of | ||
309 | * multiple flavors of pointers to match the multiple flavors of RCU | ||
310 | * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in | ||
311 | * the future. | ||
312 | */ | ||
313 | |||
314 | #ifdef __CHECKER__ | ||
315 | #define rcu_dereference_sparse(p, space) \ | ||
316 | ((void)(((typeof(*p) space *)p) == p)) | ||
317 | #else /* #ifdef __CHECKER__ */ | ||
318 | #define rcu_dereference_sparse(p, space) | ||
319 | #endif /* #else #ifdef __CHECKER__ */ | ||
320 | |||
321 | #define __rcu_access_pointer(p, space) \ | ||
322 | ({ \ | ||
323 | typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ | ||
324 | rcu_dereference_sparse(p, space); \ | ||
325 | ((typeof(*p) __force __kernel *)(_________p1)); \ | ||
326 | }) | ||
327 | #define __rcu_dereference_check(p, c, space) \ | ||
328 | ({ \ | ||
329 | typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ | ||
330 | rcu_lockdep_assert(c); \ | ||
331 | rcu_dereference_sparse(p, space); \ | ||
332 | smp_read_barrier_depends(); \ | ||
333 | ((typeof(*p) __force __kernel *)(_________p1)); \ | ||
334 | }) | ||
335 | #define __rcu_dereference_protected(p, c, space) \ | ||
336 | ({ \ | ||
337 | rcu_lockdep_assert(c); \ | ||
338 | rcu_dereference_sparse(p, space); \ | ||
339 | ((typeof(*p) __force __kernel *)(p)); \ | ||
340 | }) | ||
341 | |||
342 | #define __rcu_dereference_index_check(p, c) \ | ||
343 | ({ \ | ||
344 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
345 | rcu_lockdep_assert(c); \ | ||
346 | smp_read_barrier_depends(); \ | ||
347 | (_________p1); \ | ||
348 | }) | ||
349 | #define __rcu_assign_pointer(p, v, space) \ | ||
350 | ({ \ | ||
351 | if (!__builtin_constant_p(v) || \ | ||
352 | ((v) != NULL)) \ | ||
353 | smp_wmb(); \ | ||
354 | (p) = (typeof(*v) __force space *)(v); \ | ||
355 | }) | ||
356 | |||
357 | |||
358 | /** | ||
359 | * rcu_access_pointer() - fetch RCU pointer with no dereferencing | ||
360 | * @p: The pointer to read | ||
361 | * | ||
362 | * Return the value of the specified RCU-protected pointer, but omit the | ||
363 | * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful | ||
364 | * when the value of this pointer is accessed, but the pointer is not | ||
365 | * dereferenced, for example, when testing an RCU-protected pointer against | ||
366 | * NULL. Although rcu_access_pointer() may also be used in cases where | ||
367 | * update-side locks prevent the value of the pointer from changing, you | ||
368 | * should instead use rcu_dereference_protected() for this use case. | ||
369 | */ | ||
370 | #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) | ||
371 | |||
223 | /** | 372 | /** |
224 | * rcu_dereference_check - rcu_dereference with debug checking | 373 | * rcu_dereference_check() - rcu_dereference with debug checking |
225 | * @p: The pointer to read, prior to dereferencing | 374 | * @p: The pointer to read, prior to dereferencing |
226 | * @c: The conditions under which the dereference will take place | 375 | * @c: The conditions under which the dereference will take place |
227 | * | 376 | * |
228 | * Do an rcu_dereference(), but check that the conditions under which the | 377 | * Do an rcu_dereference(), but check that the conditions under which the |
229 | * dereference will take place are correct. Typically the conditions indicate | 378 | * dereference will take place are correct. Typically the conditions |
230 | * the various locking conditions that should be held at that point. The check | 379 | * indicate the various locking conditions that should be held at that |
231 | * should return true if the conditions are satisfied. | 380 | * point. The check should return true if the conditions are satisfied. |
381 | * An implicit check for being in an RCU read-side critical section | ||
382 | * (rcu_read_lock()) is included. | ||
232 | * | 383 | * |
233 | * For example: | 384 | * For example: |
234 | * | 385 | * |
235 | * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || | 386 | * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); |
236 | * lockdep_is_held(&foo->lock)); | ||
237 | * | 387 | * |
238 | * could be used to indicate to lockdep that foo->bar may only be dereferenced | 388 | * could be used to indicate to lockdep that foo->bar may only be dereferenced |
239 | * if either the RCU read lock is held, or that the lock required to replace | 389 | * if either rcu_read_lock() is held, or that the lock required to replace |
240 | * the bar struct at foo->bar is held. | 390 | * the bar struct at foo->bar is held. |
241 | * | 391 | * |
242 | * Note that the list of conditions may also include indications of when a lock | 392 | * Note that the list of conditions may also include indications of when a lock |
243 | * need not be held, for example during initialisation or destruction of the | 393 | * need not be held, for example during initialisation or destruction of the |
244 | * target struct: | 394 | * target struct: |
245 | * | 395 | * |
246 | * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || | 396 | * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || |
247 | * lockdep_is_held(&foo->lock) || | ||
248 | * atomic_read(&foo->usage) == 0); | 397 | * atomic_read(&foo->usage) == 0); |
398 | * | ||
399 | * Inserts memory barriers on architectures that require them | ||
400 | * (currently only the Alpha), prevents the compiler from refetching | ||
401 | * (and from merging fetches), and, more importantly, documents exactly | ||
402 | * which pointers are protected by RCU and checks that the pointer is | ||
403 | * annotated as __rcu. | ||
249 | */ | 404 | */ |
250 | #define rcu_dereference_check(p, c) \ | 405 | #define rcu_dereference_check(p, c) \ |
251 | ({ \ | 406 | __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu) |
252 | __do_rcu_dereference_check(c); \ | 407 | |
253 | rcu_dereference_raw(p); \ | 408 | /** |
254 | }) | 409 | * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking |
410 | * @p: The pointer to read, prior to dereferencing | ||
411 | * @c: The conditions under which the dereference will take place | ||
412 | * | ||
413 | * This is the RCU-bh counterpart to rcu_dereference_check(). | ||
414 | */ | ||
415 | #define rcu_dereference_bh_check(p, c) \ | ||
416 | __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu) | ||
255 | 417 | ||
256 | /** | 418 | /** |
257 | * rcu_dereference_protected - fetch RCU pointer when updates prevented | 419 | * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking |
420 | * @p: The pointer to read, prior to dereferencing | ||
421 | * @c: The conditions under which the dereference will take place | ||
422 | * | ||
423 | * This is the RCU-sched counterpart to rcu_dereference_check(). | ||
424 | */ | ||
425 | #define rcu_dereference_sched_check(p, c) \ | ||
426 | __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \ | ||
427 | __rcu) | ||
428 | |||
429 | #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ | ||
430 | |||
431 | /** | ||
432 | * rcu_dereference_index_check() - rcu_dereference for indices with debug checking | ||
433 | * @p: The pointer to read, prior to dereferencing | ||
434 | * @c: The conditions under which the dereference will take place | ||
435 | * | ||
436 | * Similar to rcu_dereference_check(), but omits the sparse checking. | ||
437 | * This allows rcu_dereference_index_check() to be used on integers, | ||
438 | * which can then be used as array indices. Attempting to use | ||
439 | * rcu_dereference_check() on an integer will give compiler warnings | ||
440 | * because the sparse address-space mechanism relies on dereferencing | ||
441 | * the RCU-protected pointer. Dereferencing integers is not something | ||
442 | * that even gcc will put up with. | ||
443 | * | ||
444 | * Note that this function does not implicitly check for RCU read-side | ||
445 | * critical sections. If this function gains lots of uses, it might | ||
446 | * make sense to provide versions for each flavor of RCU, but it does | ||
447 | * not make sense as of early 2010. | ||
448 | */ | ||
449 | #define rcu_dereference_index_check(p, c) \ | ||
450 | __rcu_dereference_index_check((p), (c)) | ||
451 | |||
452 | /** | ||
453 | * rcu_dereference_protected() - fetch RCU pointer when updates prevented | ||
454 | * @p: The pointer to read, prior to dereferencing | ||
455 | * @c: The conditions under which the dereference will take place | ||
258 | * | 456 | * |
259 | * Return the value of the specified RCU-protected pointer, but omit | 457 | * Return the value of the specified RCU-protected pointer, but omit |
260 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This | 458 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This |
@@ -263,35 +461,61 @@ extern int rcu_my_thread_group_empty(void); | |||
263 | * prevent the compiler from repeating this reference or combining it | 461 | * prevent the compiler from repeating this reference or combining it |
264 | * with other references, so it should not be used without protection | 462 | * with other references, so it should not be used without protection |
265 | * of appropriate locks. | 463 | * of appropriate locks. |
464 | * | ||
465 | * This function is only for update-side use. Using this function | ||
466 | * when protected only by rcu_read_lock() will result in infrequent | ||
467 | * but very ugly failures. | ||
266 | */ | 468 | */ |
267 | #define rcu_dereference_protected(p, c) \ | 469 | #define rcu_dereference_protected(p, c) \ |
268 | ({ \ | 470 | __rcu_dereference_protected((p), (c), __rcu) |
269 | __do_rcu_dereference_check(c); \ | ||
270 | (p); \ | ||
271 | }) | ||
272 | 471 | ||
273 | #else /* #ifdef CONFIG_PROVE_RCU */ | 472 | /** |
473 | * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented | ||
474 | * @p: The pointer to read, prior to dereferencing | ||
475 | * @c: The conditions under which the dereference will take place | ||
476 | * | ||
477 | * This is the RCU-bh counterpart to rcu_dereference_protected(). | ||
478 | */ | ||
479 | #define rcu_dereference_bh_protected(p, c) \ | ||
480 | __rcu_dereference_protected((p), (c), __rcu) | ||
274 | 481 | ||
275 | #define rcu_dereference_check(p, c) rcu_dereference_raw(p) | 482 | /** |
276 | #define rcu_dereference_protected(p, c) (p) | 483 | * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented |
484 | * @p: The pointer to read, prior to dereferencing | ||
485 | * @c: The conditions under which the dereference will take place | ||
486 | * | ||
487 | * This is the RCU-sched counterpart to rcu_dereference_protected(). | ||
488 | */ | ||
489 | #define rcu_dereference_sched_protected(p, c) \ | ||
490 | __rcu_dereference_protected((p), (c), __rcu) | ||
277 | 491 | ||
278 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | ||
279 | 492 | ||
280 | /** | 493 | /** |
281 | * rcu_access_pointer - fetch RCU pointer with no dereferencing | 494 | * rcu_dereference() - fetch RCU-protected pointer for dereferencing |
495 | * @p: The pointer to read, prior to dereferencing | ||
282 | * | 496 | * |
283 | * Return the value of the specified RCU-protected pointer, but omit the | 497 | * This is a simple wrapper around rcu_dereference_check(). |
284 | * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful | 498 | */ |
285 | * when the value of this pointer is accessed, but the pointer is not | 499 | #define rcu_dereference(p) rcu_dereference_check(p, 0) |
286 | * dereferenced, for example, when testing an RCU-protected pointer against | 500 | |
287 | * NULL. This may also be used in cases where update-side locks prevent | 501 | /** |
288 | * the value of the pointer from changing, but rcu_dereference_protected() | 502 | * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing |
289 | * is a lighter-weight primitive for this use case. | 503 | * @p: The pointer to read, prior to dereferencing |
504 | * | ||
505 | * Makes rcu_dereference_check() do the dirty work. | ||
506 | */ | ||
507 | #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) | ||
508 | |||
509 | /** | ||
510 | * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing | ||
511 | * @p: The pointer to read, prior to dereferencing | ||
512 | * | ||
513 | * Makes rcu_dereference_check() do the dirty work. | ||
290 | */ | 514 | */ |
291 | #define rcu_access_pointer(p) ACCESS_ONCE(p) | 515 | #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) |
292 | 516 | ||
293 | /** | 517 | /** |
294 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. | 518 | * rcu_read_lock() - mark the beginning of an RCU read-side critical section |
295 | * | 519 | * |
296 | * When synchronize_rcu() is invoked on one CPU while other CPUs | 520 | * When synchronize_rcu() is invoked on one CPU while other CPUs |
297 | * are within RCU read-side critical sections, then the | 521 | * are within RCU read-side critical sections, then the |
@@ -302,7 +526,7 @@ extern int rcu_my_thread_group_empty(void); | |||
302 | * until after the all the other CPUs exit their critical sections. | 526 | * until after the all the other CPUs exit their critical sections. |
303 | * | 527 | * |
304 | * Note, however, that RCU callbacks are permitted to run concurrently | 528 | * Note, however, that RCU callbacks are permitted to run concurrently |
305 | * with RCU read-side critical sections. One way that this can happen | 529 | * with new RCU read-side critical sections. One way that this can happen |
306 | * is via the following sequence of events: (1) CPU 0 enters an RCU | 530 | * is via the following sequence of events: (1) CPU 0 enters an RCU |
307 | * read-side critical section, (2) CPU 1 invokes call_rcu() to register | 531 | * read-side critical section, (2) CPU 1 invokes call_rcu() to register |
308 | * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, | 532 | * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, |
@@ -317,7 +541,20 @@ extern int rcu_my_thread_group_empty(void); | |||
317 | * will be deferred until the outermost RCU read-side critical section | 541 | * will be deferred until the outermost RCU read-side critical section |
318 | * completes. | 542 | * completes. |
319 | * | 543 | * |
320 | * It is illegal to block while in an RCU read-side critical section. | 544 | * You can avoid reading and understanding the next paragraph by |
545 | * following this rule: don't put anything in an rcu_read_lock() RCU | ||
546 | * read-side critical section that would block in a !PREEMPT kernel. | ||
547 | * But if you want the full story, read on! | ||
548 | * | ||
549 | * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it | ||
550 | * is illegal to block while in an RCU read-side critical section. In | ||
551 | * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU) | ||
552 | * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may | ||
553 | * be preempted, but explicit blocking is illegal. Finally, in preemptible | ||
554 | * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds, | ||
555 | * RCU read-side critical sections may be preempted and they may also | ||
556 | * block, but only when acquiring spinlocks that are subject to priority | ||
557 | * inheritance. | ||
321 | */ | 558 | */ |
322 | static inline void rcu_read_lock(void) | 559 | static inline void rcu_read_lock(void) |
323 | { | 560 | { |
@@ -337,7 +574,7 @@ static inline void rcu_read_lock(void) | |||
337 | */ | 574 | */ |
338 | 575 | ||
339 | /** | 576 | /** |
340 | * rcu_read_unlock - marks the end of an RCU read-side critical section. | 577 | * rcu_read_unlock() - marks the end of an RCU read-side critical section. |
341 | * | 578 | * |
342 | * See rcu_read_lock() for more information. | 579 | * See rcu_read_lock() for more information. |
343 | */ | 580 | */ |
@@ -349,15 +586,16 @@ static inline void rcu_read_unlock(void) | |||
349 | } | 586 | } |
350 | 587 | ||
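To make the reader-side pattern above concrete, here is a minimal sketch; struct foo, gbl_foo, and read_foo_a() are hypothetical names for illustration, not part of this patch:

struct foo {
	int a;
};

struct foo __rcu *gbl_foo;		/* hypothetical RCU-protected pointer */

int read_foo_a(void)
{
	int ret;

	rcu_read_lock();			/* begin read-side critical section */
	ret = rcu_dereference(gbl_foo)->a;	/* pointer stays valid until unlock */
	rcu_read_unlock();			/* grace period may now complete */
	return ret;
}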
351 | /** | 588 | /** |
352 | * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section | 589 | * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section |
353 | * | 590 | * |
354 | * This is the equivalent of rcu_read_lock(), but to be used when updates | 591 | * This is the equivalent of rcu_read_lock(), but to be used when updates |
355 | * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks | 592 | * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since |
356 | * consider completion of a softirq handler to be a quiescent state, | 593 | * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a |
357 | * a process in an RCU read-side critical section must be protected by | 594 | * softirq handler to be a quiescent state, a process in an RCU read-side |
358 | * disabling softirqs. Read-side critical sections in interrupt context | 595 | * critical section must be protected by disabling softirqs. Read-side |
359 | * can use just rcu_read_lock(). | 596 | * critical sections in interrupt context can use just rcu_read_lock(), |
360 | * | 597 | * though this should at least be commented to avoid confusing people |
598 | * reading the code. | ||
361 | */ | 599 | */ |
362 | static inline void rcu_read_lock_bh(void) | 600 | static inline void rcu_read_lock_bh(void) |
363 | { | 601 | { |
@@ -379,13 +617,12 @@ static inline void rcu_read_unlock_bh(void) | |||
379 | } | 617 | } |
380 | 618 | ||
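A hedged sketch of the RCU-bh reader just described, pairing rcu_read_lock_bh() with rcu_dereference_bh(); struct cfg and gbl_cfg are hypothetical:

struct cfg {
	int val;
};

struct cfg __rcu *gbl_cfg;	/* hypothetical; updated via call_rcu_bh() */

int read_cfg_val(void)
{
	struct cfg *p;
	int ret;

	rcu_read_lock_bh();	/* also disables softirqs */
	p = rcu_dereference_bh(gbl_cfg);
	ret = p ? p->val : -1;
	rcu_read_unlock_bh();
	return ret;
}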
381 | /** | 619 | /** |
382 | * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section | 620 | * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section |
383 | * | 621 | * |
384 | * Should be used with either | 622 | * This is the equivalent of rcu_read_lock(), but to be used when updates |
385 | * - synchronize_sched() | 623 | * are being done using call_rcu_sched() or synchronize_rcu_sched(). |
386 | * or | 624 | * Read-side critical sections can also be introduced by anything that |
387 | * - call_rcu_sched() and rcu_barrier_sched() | 625 | * disables preemption, including local_irq_disable() and friends. |
388 | * on the write-side to ensure proper synchronization. | ||
389 | */ | 626 | */ |
390 | static inline void rcu_read_lock_sched(void) | 627 | static inline void rcu_read_lock_sched(void) |
391 | { | 628 | { |
@@ -420,54 +657,14 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
420 | preempt_enable_notrace(); | 657 | preempt_enable_notrace(); |
421 | } | 658 | } |
422 | 659 | ||
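The RCU-sched flavor reads the same way but pairs with call_rcu_sched() or synchronize_sched() on the update side. A sketch reusing the hypothetical gbl_cfg from the previous example (assumed non-NULL here):

int read_cfg_val_sched(void)
{
	int ret;

	rcu_read_lock_sched();	/* disables preemption */
	ret = rcu_dereference_sched(gbl_cfg)->val;
	rcu_read_unlock_sched();
	return ret;
}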
423 | |||
424 | /** | 660 | /** |
425 | * rcu_dereference_raw - fetch an RCU-protected pointer | 661 | * rcu_assign_pointer() - assign to RCU-protected pointer |
662 | * @p: pointer to assign to | ||
663 | * @v: value to assign (publish) | ||
426 | * | 664 | * |
427 | * The caller must be within some flavor of RCU read-side critical | 665 | * Assigns the specified value to the specified RCU-protected |
428 | * section, or must be otherwise preventing the pointer from changing, | 666 | * pointer, ensuring that any concurrent RCU readers will see |
429 | * for example, by holding an appropriate lock. This pointer may later | 667 | * any prior initialization. Returns the value assigned. |
430 | * be safely dereferenced. It is the caller's responsibility to have | ||
431 | * done the right thing, as this primitive does no checking of any kind. | ||
432 | * | ||
433 | * Inserts memory barriers on architectures that require them | ||
434 | * (currently only the Alpha), and, more importantly, documents | ||
435 | * exactly which pointers are protected by RCU. | ||
436 | */ | ||
437 | #define rcu_dereference_raw(p) ({ \ | ||
438 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
439 | smp_read_barrier_depends(); \ | ||
440 | (_________p1); \ | ||
441 | }) | ||
442 | |||
443 | /** | ||
444 | * rcu_dereference - fetch an RCU-protected pointer, checking for RCU | ||
445 | * | ||
446 | * Makes rcu_dereference_check() do the dirty work. | ||
447 | */ | ||
448 | #define rcu_dereference(p) \ | ||
449 | rcu_dereference_check(p, rcu_read_lock_held()) | ||
450 | |||
451 | /** | ||
452 | * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh | ||
453 | * | ||
454 | * Makes rcu_dereference_check() do the dirty work. | ||
455 | */ | ||
456 | #define rcu_dereference_bh(p) \ | ||
457 | rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled()) | ||
458 | |||
459 | /** | ||
460 | * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched | ||
461 | * | ||
462 | * Makes rcu_dereference_check() do the dirty work. | ||
463 | */ | ||
464 | #define rcu_dereference_sched(p) \ | ||
465 | rcu_dereference_check(p, rcu_read_lock_sched_held()) | ||
466 | |||
467 | /** | ||
468 | * rcu_assign_pointer - assign (publicize) a pointer to a newly | ||
469 | * initialized structure that will be dereferenced by RCU read-side | ||
470 | * critical sections. Returns the value assigned. | ||
471 | * | 668 | * |
472 | * Inserts memory barriers on architectures that require them | 669 | * Inserts memory barriers on architectures that require them |
473 | * (pretty much all of them other than x86), and also prevents | 670 | * (pretty much all of them other than x86), and also prevents |
@@ -476,14 +673,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
476 | * call documents which pointers will be dereferenced by RCU read-side | 673 | * call documents which pointers will be dereferenced by RCU read-side |
477 | * code. | 674 | * code. |
478 | */ | 675 | */ |
479 | |||
480 | #define rcu_assign_pointer(p, v) \ | 676 | #define rcu_assign_pointer(p, v) \ |
481 | ({ \ | 677 | __rcu_assign_pointer((p), (v), __rcu) |
482 | if (!__builtin_constant_p(v) || \ | 678 | |
483 | ((v) != NULL)) \ | 679 | /** |
484 | smp_wmb(); \ | 680 | * RCU_INIT_POINTER() - initialize an RCU protected pointer |
485 | (p) = (v); \ | 681 | * |
486 | }) | 682 | * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep |
683 | * splats. | ||
684 | */ | ||
685 | #define RCU_INIT_POINTER(p, v) \ | ||
686 | p = (typeof(*v) __force __rcu *)(v) | ||
487 | 687 | ||
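The update side then looks something like the following sketch, contrasting the two macros; struct foo and gbl_foo are the hypothetical names from the earlier reader example:

struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);

if (newp) {
	newp->a = 1;				/* fully initialize first... */
	rcu_assign_pointer(gbl_foo, newp);	/* ...then publish to readers */
}

/* At init time, before any reader can run, no barrier is needed: */
RCU_INIT_POINTER(gbl_foo, NULL);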
488 | /* Infrastructure to implement the synchronize_() primitives. */ | 688 | /* Infrastructure to implement the synchronize_() primitives. */ |
489 | 689 | ||
@@ -494,26 +694,37 @@ struct rcu_synchronize { | |||
494 | 694 | ||
495 | extern void wakeme_after_rcu(struct rcu_head *head); | 695 | extern void wakeme_after_rcu(struct rcu_head *head); |
496 | 696 | ||
697 | #ifdef CONFIG_PREEMPT_RCU | ||
698 | |||
497 | /** | 699 | /** |
498 | * call_rcu - Queue an RCU callback for invocation after a grace period. | 700 | * call_rcu() - Queue an RCU callback for invocation after a grace period. |
499 | * @head: structure to be used for queueing the RCU updates. | 701 | * @head: structure to be used for queueing the RCU updates. |
500 | * @func: actual update function to be invoked after the grace period | 702 | * @func: actual callback function to be invoked after the grace period |
501 | * | 703 | * |
502 | * The update function will be invoked some time after a full grace | 704 | * The callback function will be invoked some time after a full grace |
503 | * period elapses, in other words after all currently executing RCU | 705 | * period elapses, in other words after all pre-existing RCU read-side |
504 | * read-side critical sections have completed. RCU read-side critical | 706 | * critical sections have completed. However, the callback function |
707 | * might well execute concurrently with RCU read-side critical sections | ||
708 | * that started after call_rcu() was invoked. RCU read-side critical | ||
505 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | 709 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
506 | * and may be nested. | 710 | * and may be nested. |
507 | */ | 711 | */ |
508 | extern void call_rcu(struct rcu_head *head, | 712 | extern void call_rcu(struct rcu_head *head, |
509 | void (*func)(struct rcu_head *head)); | 713 | void (*func)(struct rcu_head *head)); |
510 | 714 | ||
715 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | ||
716 | |||
717 | /* In classic RCU, call_rcu() is just call_rcu_sched(). */ | ||
718 | #define call_rcu call_rcu_sched | ||
719 | |||
720 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | ||
721 | |||
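The canonical use of call_rcu() is deferred freeing. A sketch, with the hypothetical struct foo gaining an embedded rcu_head; free_foo_rcu() and retire_foo() are illustrative names, and the update-side locking is assumed:

struct foo {
	int a;
	struct rcu_head rcu;
};

static void free_foo_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));	/* after grace period */
}

static void retire_foo(struct foo *oldp)	/* caller holds update-side lock */
{
	call_rcu(&oldp->rcu, free_foo_rcu);	/* returns immediately */
}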
511 | /** | 722 | /** |
512 | * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period. | 722 | * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period. |
513 | * @head: structure to be used for queueing the RCU updates. | 724 | * @head: structure to be used for queueing the RCU updates. |
514 | * @func: actual update function to be invoked after the grace period | 725 | * @func: actual callback function to be invoked after the grace period |
515 | * | 726 | * |
516 | * The update function will be invoked some time after a full grace | 727 | * The callback function will be invoked some time after a full grace |
517 | * period elapses, in other words after all currently executing RCU | 728 | * period elapses, in other words after all currently executing RCU |
518 | * read-side critical sections have completed. call_rcu_bh() assumes | 729 | * read-side critical sections have completed. call_rcu_bh() assumes |
519 | * that the read-side critical sections end on completion of a softirq | 730 | * that the read-side critical sections end on completion of a softirq |
@@ -566,37 +777,4 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head) | |||
566 | } | 777 | } |
567 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 778 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
568 | 779 | ||
569 | #ifndef CONFIG_PROVE_RCU | ||
570 | #define __do_rcu_dereference_check(c) do { } while (0) | ||
571 | #endif /* #ifdef CONFIG_PROVE_RCU */ | ||
572 | |||
573 | #define __rcu_dereference_index_check(p, c) \ | ||
574 | ({ \ | ||
575 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
576 | __do_rcu_dereference_check(c); \ | ||
577 | smp_read_barrier_depends(); \ | ||
578 | (_________p1); \ | ||
579 | }) | ||
580 | |||
581 | /** | ||
582 | * rcu_dereference_index_check() - rcu_dereference for indices with debug checking | ||
583 | * @p: The pointer to read, prior to dereferencing | ||
584 | * @c: The conditions under which the dereference will take place | ||
585 | * | ||
586 | * Similar to rcu_dereference_check(), but omits the sparse checking. | ||
587 | * This allows rcu_dereference_index_check() to be used on integers, | ||
588 | * which can then be used as array indices. Attempting to use | ||
589 | * rcu_dereference_check() on an integer will give compiler warnings | ||
590 | * because the sparse address-space mechanism relies on dereferencing | ||
591 | * the RCU-protected pointer. Dereferencing integers is not something | ||
592 | * that even gcc will put up with. | ||
593 | * | ||
594 | * Note that this function does not implicitly check for RCU read-side | ||
595 | * critical sections. If this function gains lots of uses, it might | ||
596 | * make sense to provide versions for each flavor of RCU, but it does | ||
597 | * not make sense as of early 2010. | ||
598 | */ | ||
599 | #define rcu_dereference_index_check(p, c) \ | ||
600 | __rcu_dereference_index_check((p), (c)) | ||
601 | |||
602 | #endif /* __LINUX_RCUPDATE_H */ | 780 | #endif /* __LINUX_RCUPDATE_H */ |
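rcu_dereference_index_check() itself survives this removal, presumably relocated earlier in the file (the cls_cgroup.h hunk below still uses it). A sketch of the array-index use case it exists for; arr, gbl_idx, and read_slot() are hypothetical:

int arr[16];	/* hypothetical array indexed under RCU protection */
int gbl_idx;	/* hypothetical index, updated by writers */

int read_slot(void)
{
	int idx, ret;

	rcu_read_lock();
	idx = rcu_dereference_index_check(gbl_idx, rcu_read_lock_held());
	ret = arr[idx];	/* an integer, so sparse's __rcu check is bypassed */
	rcu_read_unlock();
	return ret;
}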
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index e2e893144a8..13877cb93a6 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -27,103 +27,101 @@ | |||
27 | 27 | ||
28 | #include <linux/cache.h> | 28 | #include <linux/cache.h> |
29 | 29 | ||
30 | void rcu_sched_qs(int cpu); | 30 | #define rcu_init_sched() do { } while (0) |
31 | void rcu_bh_qs(int cpu); | ||
32 | static inline void rcu_note_context_switch(int cpu) | ||
33 | { | ||
34 | rcu_sched_qs(cpu); | ||
35 | } | ||
36 | 31 | ||
37 | #define __rcu_read_lock() preempt_disable() | 32 | #ifdef CONFIG_TINY_RCU |
38 | #define __rcu_read_unlock() preempt_enable() | ||
39 | #define __rcu_read_lock_bh() local_bh_disable() | ||
40 | #define __rcu_read_unlock_bh() local_bh_enable() | ||
41 | #define call_rcu_sched call_rcu | ||
42 | 33 | ||
43 | #define rcu_init_sched() do { } while (0) | 34 | static inline void synchronize_rcu_expedited(void) |
44 | extern void rcu_check_callbacks(int cpu, int user); | 35 | { |
36 | synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ | ||
37 | } | ||
45 | 38 | ||
46 | static inline int rcu_needs_cpu(int cpu) | 39 | static inline void rcu_barrier(void) |
47 | { | 40 | { |
48 | return 0; | 41 | rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ |
49 | } | 42 | } |
50 | 43 | ||
51 | /* | 44 | #else /* #ifdef CONFIG_TINY_RCU */ |
52 | * Return the number of grace periods. | 45 | |
53 | */ | 46 | void rcu_barrier(void); |
54 | static inline long rcu_batches_completed(void) | 47 | void synchronize_rcu_expedited(void); |
48 | |||
49 | #endif /* #else #ifdef CONFIG_TINY_RCU */ | ||
50 | |||
51 | static inline void synchronize_rcu_bh(void) | ||
55 | { | 52 | { |
56 | return 0; | 53 | synchronize_sched(); |
57 | } | 54 | } |
58 | 55 | ||
59 | /* | 56 | static inline void synchronize_rcu_bh_expedited(void) |
60 | * Return the number of bottom-half grace periods. | ||
61 | */ | ||
62 | static inline long rcu_batches_completed_bh(void) | ||
63 | { | 57 | { |
64 | return 0; | 58 | synchronize_sched(); |
65 | } | 59 | } |
66 | 60 | ||
67 | static inline void rcu_force_quiescent_state(void) | 61 | #ifdef CONFIG_TINY_RCU |
62 | |||
63 | static inline void rcu_preempt_note_context_switch(void) | ||
68 | { | 64 | { |
69 | } | 65 | } |
70 | 66 | ||
71 | static inline void rcu_bh_force_quiescent_state(void) | 67 | static inline void exit_rcu(void) |
72 | { | 68 | { |
73 | } | 69 | } |
74 | 70 | ||
75 | static inline void rcu_sched_force_quiescent_state(void) | 71 | static inline int rcu_needs_cpu(int cpu) |
76 | { | 72 | { |
73 | return 0; | ||
77 | } | 74 | } |
78 | 75 | ||
79 | extern void synchronize_sched(void); | 76 | #else /* #ifdef CONFIG_TINY_RCU */ |
77 | |||
78 | void rcu_preempt_note_context_switch(void); | ||
79 | extern void exit_rcu(void); | ||
80 | int rcu_preempt_needs_cpu(void); | ||
80 | 81 | ||
81 | static inline void synchronize_rcu(void) | 82 | static inline int rcu_needs_cpu(int cpu) |
82 | { | 83 | { |
83 | synchronize_sched(); | 84 | return rcu_preempt_needs_cpu(); |
84 | } | 85 | } |
85 | 86 | ||
86 | static inline void synchronize_rcu_bh(void) | 87 | #endif /* #else #ifdef CONFIG_TINY_RCU */ |
88 | |||
89 | static inline void rcu_note_context_switch(int cpu) | ||
87 | { | 90 | { |
88 | synchronize_sched(); | 91 | rcu_sched_qs(cpu); |
92 | rcu_preempt_note_context_switch(); | ||
89 | } | 93 | } |
90 | 94 | ||
91 | static inline void synchronize_rcu_expedited(void) | 95 | /* |
96 | * Return the number of grace periods. | ||
97 | */ | ||
98 | static inline long rcu_batches_completed(void) | ||
92 | { | 99 | { |
93 | synchronize_sched(); | 100 | return 0; |
94 | } | 101 | } |
95 | 102 | ||
96 | static inline void synchronize_rcu_bh_expedited(void) | 103 | /* |
104 | * Return the number of bottom-half grace periods. | ||
105 | */ | ||
106 | static inline long rcu_batches_completed_bh(void) | ||
97 | { | 107 | { |
98 | synchronize_sched(); | 108 | return 0; |
99 | } | 109 | } |
100 | 110 | ||
101 | struct notifier_block; | 111 | static inline void rcu_force_quiescent_state(void) |
102 | |||
103 | #ifdef CONFIG_NO_HZ | ||
104 | |||
105 | extern void rcu_enter_nohz(void); | ||
106 | extern void rcu_exit_nohz(void); | ||
107 | |||
108 | #else /* #ifdef CONFIG_NO_HZ */ | ||
109 | |||
110 | static inline void rcu_enter_nohz(void) | ||
111 | { | 112 | { |
112 | } | 113 | } |
113 | 114 | ||
114 | static inline void rcu_exit_nohz(void) | 115 | static inline void rcu_bh_force_quiescent_state(void) |
115 | { | 116 | { |
116 | } | 117 | } |
117 | 118 | ||
118 | #endif /* #else #ifdef CONFIG_NO_HZ */ | 119 | static inline void rcu_sched_force_quiescent_state(void) |
119 | |||
120 | static inline void exit_rcu(void) | ||
121 | { | 120 | { |
122 | } | 121 | } |
123 | 122 | ||
124 | static inline int rcu_preempt_depth(void) | 123 | static inline void rcu_cpu_stall_reset(void) |
125 | { | 124 | { |
126 | return 0; | ||
127 | } | 125 | } |
128 | 126 | ||
129 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 127 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
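A classic consumer of the rcu_barrier() defined above for both TINY_RCU and TINY_PREEMPT_RCU is module unload: callbacks queued by call_rcu() must finish before their function text vanishes. A hypothetical sketch (mymod_exit() is not part of this patch):

static void __exit mymod_exit(void)
{
	/* stop queueing new callbacks first (module-specific teardown) */
	rcu_barrier();	/* wait for all already-queued callbacks to finish */
}
module_exit(mymod_exit);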
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index c0ed1c056f2..95518e62879 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -30,64 +30,23 @@ | |||
30 | #ifndef __LINUX_RCUTREE_H | 30 | #ifndef __LINUX_RCUTREE_H |
31 | #define __LINUX_RCUTREE_H | 31 | #define __LINUX_RCUTREE_H |
32 | 32 | ||
33 | struct notifier_block; | ||
34 | |||
35 | extern void rcu_sched_qs(int cpu); | ||
36 | extern void rcu_bh_qs(int cpu); | ||
37 | extern void rcu_note_context_switch(int cpu); | 33 | extern void rcu_note_context_switch(int cpu); |
38 | extern int rcu_needs_cpu(int cpu); | 34 | extern int rcu_needs_cpu(int cpu); |
35 | extern void rcu_cpu_stall_reset(void); | ||
39 | 36 | ||
40 | #ifdef CONFIG_TREE_PREEMPT_RCU | 37 | #ifdef CONFIG_TREE_PREEMPT_RCU |
41 | 38 | ||
42 | extern void __rcu_read_lock(void); | ||
43 | extern void __rcu_read_unlock(void); | ||
44 | extern void synchronize_rcu(void); | ||
45 | extern void exit_rcu(void); | 39 | extern void exit_rcu(void); |
46 | 40 | ||
47 | /* | ||
48 | * Defined as macro as it is a very low level header | ||
49 | * included from areas that don't even know about current | ||
50 | */ | ||
51 | #define rcu_preempt_depth() (current->rcu_read_lock_nesting) | ||
52 | |||
53 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 41 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
54 | 42 | ||
55 | static inline void __rcu_read_lock(void) | ||
56 | { | ||
57 | preempt_disable(); | ||
58 | } | ||
59 | |||
60 | static inline void __rcu_read_unlock(void) | ||
61 | { | ||
62 | preempt_enable(); | ||
63 | } | ||
64 | |||
65 | #define synchronize_rcu synchronize_sched | ||
66 | |||
67 | static inline void exit_rcu(void) | 43 | static inline void exit_rcu(void) |
68 | { | 44 | { |
69 | } | 45 | } |
70 | 46 | ||
71 | static inline int rcu_preempt_depth(void) | ||
72 | { | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | 47 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
77 | 48 | ||
78 | static inline void __rcu_read_lock_bh(void) | ||
79 | { | ||
80 | local_bh_disable(); | ||
81 | } | ||
82 | static inline void __rcu_read_unlock_bh(void) | ||
83 | { | ||
84 | local_bh_enable(); | ||
85 | } | ||
86 | |||
87 | extern void call_rcu_sched(struct rcu_head *head, | ||
88 | void (*func)(struct rcu_head *rcu)); | ||
89 | extern void synchronize_rcu_bh(void); | 49 | extern void synchronize_rcu_bh(void); |
90 | extern void synchronize_sched(void); | ||
91 | extern void synchronize_rcu_expedited(void); | 50 | extern void synchronize_rcu_expedited(void); |
92 | 51 | ||
93 | static inline void synchronize_rcu_bh_expedited(void) | 52 | static inline void synchronize_rcu_bh_expedited(void) |
@@ -95,7 +54,7 @@ static inline void synchronize_rcu_bh_expedited(void) | |||
95 | synchronize_sched_expedited(); | 54 | synchronize_sched_expedited(); |
96 | } | 55 | } |
97 | 56 | ||
98 | extern void rcu_check_callbacks(int cpu, int user); | 57 | extern void rcu_barrier(void); |
99 | 58 | ||
100 | extern long rcu_batches_completed(void); | 59 | extern long rcu_batches_completed(void); |
101 | extern long rcu_batches_completed_bh(void); | 60 | extern long rcu_batches_completed_bh(void); |
@@ -104,18 +63,6 @@ extern void rcu_force_quiescent_state(void); | |||
104 | extern void rcu_bh_force_quiescent_state(void); | 63 | extern void rcu_bh_force_quiescent_state(void); |
105 | extern void rcu_sched_force_quiescent_state(void); | 64 | extern void rcu_sched_force_quiescent_state(void); |
106 | 65 | ||
107 | #ifdef CONFIG_NO_HZ | ||
108 | void rcu_enter_nohz(void); | ||
109 | void rcu_exit_nohz(void); | ||
110 | #else /* CONFIG_NO_HZ */ | ||
111 | static inline void rcu_enter_nohz(void) | ||
112 | { | ||
113 | } | ||
114 | static inline void rcu_exit_nohz(void) | ||
115 | { | ||
116 | } | ||
117 | #endif /* CONFIG_NO_HZ */ | ||
118 | |||
119 | /* A context switch is a grace period for RCU-sched and RCU-bh. */ | 66 | /* A context switch is a grace period for RCU-sched and RCU-bh. */ |
120 | static inline int rcu_blocking_is_gp(void) | 67 | static inline int rcu_blocking_is_gp(void) |
121 | { | 68 | { |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 1e2a6db2d7d..e18473f0eb7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1202,11 +1202,13 @@ struct task_struct { | |||
1202 | unsigned int policy; | 1202 | unsigned int policy; |
1203 | cpumask_t cpus_allowed; | 1203 | cpumask_t cpus_allowed; |
1204 | 1204 | ||
1205 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1205 | #ifdef CONFIG_PREEMPT_RCU |
1206 | int rcu_read_lock_nesting; | 1206 | int rcu_read_lock_nesting; |
1207 | char rcu_read_unlock_special; | 1207 | char rcu_read_unlock_special; |
1208 | struct rcu_node *rcu_blocked_node; | ||
1209 | struct list_head rcu_node_entry; | 1208 | struct list_head rcu_node_entry; |
1209 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | ||
1210 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
1211 | struct rcu_node *rcu_blocked_node; | ||
1210 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 1212 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
1211 | 1213 | ||
1212 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 1214 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
@@ -1288,9 +1290,9 @@ struct task_struct { | |||
1288 | struct list_head cpu_timers[3]; | 1290 | struct list_head cpu_timers[3]; |
1289 | 1291 | ||
1290 | /* process credentials */ | 1292 | /* process credentials */ |
1291 | const struct cred *real_cred; /* objective and real subjective task | 1293 | const struct cred __rcu *real_cred; /* objective and real subjective task |
1292 | * credentials (COW) */ | 1294 | * credentials (COW) */ |
1293 | const struct cred *cred; /* effective (overridable) subjective task | 1295 | const struct cred __rcu *cred; /* effective (overridable) subjective task |
1294 | * credentials (COW) */ | 1296 | * credentials (COW) */ |
1295 | struct mutex cred_guard_mutex; /* guard against foreign influences on | 1297 | struct mutex cred_guard_mutex; /* guard against foreign influences on |
1296 | * credential calculations | 1298 | * credential calculations |
@@ -1418,7 +1420,7 @@ struct task_struct { | |||
1418 | #endif | 1420 | #endif |
1419 | #ifdef CONFIG_CGROUPS | 1421 | #ifdef CONFIG_CGROUPS |
1420 | /* Control Group info protected by css_set_lock */ | 1422 | /* Control Group info protected by css_set_lock */ |
1421 | struct css_set *cgroups; | 1423 | struct css_set __rcu *cgroups; |
1422 | /* cg_list protected by css_set_lock and tsk->alloc_lock */ | 1424 | /* cg_list protected by css_set_lock and tsk->alloc_lock */ |
1423 | struct list_head cg_list; | 1425 | struct list_head cg_list; |
1424 | #endif | 1426 | #endif |
@@ -1740,7 +1742,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1740 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) | 1742 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
1741 | #define used_math() tsk_used_math(current) | 1743 | #define used_math() tsk_used_math(current) |
1742 | 1744 | ||
1743 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1745 | #ifdef CONFIG_PREEMPT_RCU |
1744 | 1746 | ||
1745 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ | 1747 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ |
1746 | #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ | 1748 | #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ |
@@ -1749,7 +1751,9 @@ static inline void rcu_copy_process(struct task_struct *p) | |||
1749 | { | 1751 | { |
1750 | p->rcu_read_lock_nesting = 0; | 1752 | p->rcu_read_lock_nesting = 0; |
1751 | p->rcu_read_unlock_special = 0; | 1753 | p->rcu_read_unlock_special = 0; |
1754 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
1752 | p->rcu_blocked_node = NULL; | 1755 | p->rcu_blocked_node = NULL; |
1756 | #endif | ||
1753 | INIT_LIST_HEAD(&p->rcu_node_entry); | 1757 | INIT_LIST_HEAD(&p->rcu_node_entry); |
1754 | } | 1758 | } |
1755 | 1759 | ||
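The __rcu markings added to real_cred, cred, and cgroups are sparse annotations: with the checker enabled (a CONFIG_SPARSE_RCU_POINTER option accompanies this series, if I recall correctly, run via "make C=1"), a plain dereference of an annotated field warns, while rcu_dereference() and rcu_assign_pointer() are clean. A hypothetical sketch of the mechanics:

struct gizmo {
	int x;
};

struct gizmo __rcu *gp;		/* annotated like the fields above */

static int read_gizmo_x(void)
{
	int ret;

	rcu_read_lock();
	ret = rcu_dereference(gp)->x;	/* sparse-clean access */
	/* ret = gp->x; would draw an address-space warning */
	rcu_read_unlock();
	return ret;
}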
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 4d5d2f546db..58971e891f4 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
@@ -108,19 +108,43 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) | |||
108 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 108 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * srcu_dereference - fetch SRCU-protected pointer with checking | 111 | * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing |
112 | * @p: the pointer to fetch and protect for later dereferencing | ||
113 | * @sp: pointer to the srcu_struct, which is used to check that we | ||
114 | * really are in an SRCU read-side critical section. | ||
115 | * @c: condition to check for update-side use | ||
112 | * | 116 | * |
113 | * Makes rcu_dereference_check() do the dirty work. | 117 | * If PROVE_RCU is enabled, invoking this outside of an RCU read-side |
118 | * critical section will result in an RCU-lockdep splat, unless @c evaluates | ||
119 | * to 1. The @c argument will normally be a logical expression containing | ||
120 | * lockdep_is_held() calls. | ||
114 | */ | 121 | */ |
115 | #define srcu_dereference(p, sp) \ | 122 | #define srcu_dereference_check(p, sp, c) \ |
116 | rcu_dereference_check(p, srcu_read_lock_held(sp)) | 123 | __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu) |
124 | |||
125 | /** | ||
126 | * srcu_dereference - fetch SRCU-protected pointer for later dereferencing | ||
127 | * @p: the pointer to fetch and protect for later dereferencing | ||
128 | * @sp: pointer to the srcu_struct, which is used to check that we | ||
129 | * really are in an SRCU read-side critical section. | ||
130 | * | ||
131 | * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU | ||
132 | * is enabled, invoking this outside of an RCU read-side critical | ||
133 | * section will result in an RCU-lockdep splat. | ||
134 | */ | ||
135 | #define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) | ||
117 | 136 | ||
118 | /** | 137 | /** |
119 | * srcu_read_lock - register a new reader for an SRCU-protected structure. | 138 | * srcu_read_lock - register a new reader for an SRCU-protected structure. |
120 | * @sp: srcu_struct in which to register the new reader. | 139 | * @sp: srcu_struct in which to register the new reader. |
121 | * | 140 | * |
122 | * Enter an SRCU read-side critical section. Note that SRCU read-side | 141 | * Enter an SRCU read-side critical section. Note that SRCU read-side |
123 | * critical sections may be nested. | 142 | * critical sections may be nested. However, it is illegal to |
143 | * call anything that waits on an SRCU grace period for the same | ||
144 | * srcu_struct, whether directly or indirectly. Please note that | ||
145 | * one way to indirectly wait on an SRCU grace period is to acquire | ||
146 | * a mutex that is held elsewhere while calling synchronize_srcu() or | ||
147 | * synchronize_srcu_expedited(). | ||
124 | */ | 148 | */ |
125 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) | 149 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) |
126 | { | 150 | { |
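A sketch of an SRCU reader honoring the new rule documented above; my_srcu and my_ptr are hypothetical, and init_srcu_struct(&my_srcu) is assumed to have run:

static struct srcu_struct my_srcu;
static struct foo __rcu *my_ptr;	/* hypothetical, as in earlier sketches */

static int srcu_reader(void)
{
	int idx, ret;

	idx = srcu_read_lock(&my_srcu);
	ret = srcu_dereference(my_ptr, &my_srcu)->a;
	/*
	 * Must not call synchronize_srcu(&my_srcu) from here, nor take
	 * a mutex that another thread holds across synchronize_srcu().
	 */
	srcu_read_unlock(&my_srcu, idx);
	return ret;
}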
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h index 671538d25bc..8eee9dbbfe7 100644 --- a/include/linux/sunrpc/auth_gss.h +++ b/include/linux/sunrpc/auth_gss.h | |||
@@ -69,7 +69,7 @@ struct gss_cl_ctx { | |||
69 | enum rpc_gss_proc gc_proc; | 69 | enum rpc_gss_proc gc_proc; |
70 | u32 gc_seq; | 70 | u32 gc_seq; |
71 | spinlock_t gc_seq_lock; | 71 | spinlock_t gc_seq_lock; |
72 | struct gss_ctx *gc_gss_ctx; | 72 | struct gss_ctx __rcu *gc_gss_ctx; |
73 | struct xdr_netobj gc_wire_ctx; | 73 | struct xdr_netobj gc_wire_ctx; |
74 | u32 gc_win; | 74 | u32 gc_win; |
75 | unsigned long gc_expiry; | 75 | unsigned long gc_expiry; |
@@ -80,7 +80,7 @@ struct gss_upcall_msg; | |||
80 | struct gss_cred { | 80 | struct gss_cred { |
81 | struct rpc_cred gc_base; | 81 | struct rpc_cred gc_base; |
82 | enum rpc_gss_svc gc_service; | 82 | enum rpc_gss_svc gc_service; |
83 | struct gss_cl_ctx *gc_ctx; | 83 | struct gss_cl_ctx __rcu *gc_ctx; |
84 | struct gss_upcall_msg *gc_upcall; | 84 | struct gss_upcall_msg *gc_upcall; |
85 | unsigned long gc_upcall_timestamp; | 85 | unsigned long gc_upcall_timestamp; |
86 | unsigned char gc_machine_cred : 1; | 86 | unsigned char gc_machine_cred : 1; |
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index ef6c24a529e..a4dc5b027bd 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h | |||
@@ -51,7 +51,8 @@ static inline u32 task_cls_classid(struct task_struct *p) | |||
51 | return 0; | 51 | return 0; |
52 | 52 | ||
53 | rcu_read_lock(); | 53 | rcu_read_lock(); |
54 | id = rcu_dereference(net_cls_subsys_id); | 54 | id = rcu_dereference_index_check(net_cls_subsys_id, |
55 | rcu_read_lock_held()); | ||
55 | if (id >= 0) | 56 | if (id >= 0) |
56 | classid = container_of(task_subsys_state(p, id), | 57 | classid = container_of(task_subsys_state(p, id), |
57 | struct cgroup_cls_state, css)->classid; | 58 | struct cgroup_cls_state, css)->classid; |
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index e624dae54fa..caf17db87db 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h | |||
@@ -75,7 +75,7 @@ struct nf_conntrack_helper; | |||
75 | /* nf_conn feature for connections that have a helper */ | 75 | /* nf_conn feature for connections that have a helper */ |
76 | struct nf_conn_help { | 76 | struct nf_conn_help { |
77 | /* Helper. if any */ | 77 | /* Helper. if any */ |
78 | struct nf_conntrack_helper *helper; | 78 | struct nf_conntrack_helper __rcu *helper; |
79 | 79 | ||
80 | union nf_conntrack_help help; | 80 | union nf_conntrack_help help; |
81 | 81 | ||
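Tying the annotation back to use: a hedged sketch of a reader of the helper field above; the function name and NULL handling are assumptions, not part of this patch:

static void use_helper(struct nf_conn_help *help)	/* hypothetical */
{
	struct nf_conntrack_helper *h;

	rcu_read_lock();
	h = rcu_dereference(help->helper);	/* checked __rcu access */
	if (h != NULL) {
		/* use h; the read-side section keeps it alive */
	}
	rcu_read_unlock();
}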