Diffstat (limited to 'include')
 -rw-r--r--  include/linux/rculist_nulls.h |  2
 -rw-r--r--  include/linux/rcupdate.h      | 29
 -rw-r--r--  include/linux/rcutree.h       |  6
 -rw-r--r--  include/linux/sched.h         |  1
 4 files changed, 12 insertions, 26 deletions
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index f9ddd03961a8..589a40919f01 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -102,7 +102,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
  */
 #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)		\
 	for (pos = rcu_dereference((head)->first);				\
 		(!is_a_nulls(pos)) &&						\
 		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
 		pos = rcu_dereference(pos->next))
 
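
For orientation, here is a minimal usage sketch of the hlist_nulls_for_each_entry_rcu() iterator touched above. It is not part of the patch; struct foo, foo_lookup() and the field names are invented for illustration. Readers walk an RCU-protected, nulls-terminated hash chain entirely inside rcu_read_lock()/rcu_read_unlock():

#include <linux/rculist_nulls.h>
#include <linux/rcupdate.h>

/* Hypothetical object linked on a nulls-terminated RCU hash chain. */
struct foo {
        struct hlist_nulls_node node;
        unsigned int key;
};

/* Illustrative lookup: find @key on @head under RCU protection. */
static struct foo *foo_lookup(struct hlist_nulls_head *head, unsigned int key)
{
        struct hlist_nulls_node *pos;
        struct foo *f;

        rcu_read_lock();
        hlist_nulls_for_each_entry_rcu(f, pos, head, node) {
                if (f->key == key) {
                        rcu_read_unlock();
                        return f;
                }
        }
        rcu_read_unlock();
        return NULL;
}

Real users of nulls lists (for example the socket hash tables in net/) additionally recheck the nulls end marker and take a reference on the object before leaving the read-side critical section, because SLAB_DESTROY_BY_RCU allows the slab to reuse the memory for another object of the same type.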
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 95e0615f4d75..6fe0363724e9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1,5 +1,5 @@
 /*
  * Read-Copy Update mechanism for mutual exclusion
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -18,7 +18,7 @@
  * Copyright IBM Corporation, 2001
  *
  * Author: Dipankar Sarma <dipankar@in.ibm.com>
  *
  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  * Papers:
@@ -26,7 +26,7 @@
  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
  *
  * For detailed explanation of Read-Copy Update mechanism see -
  *		http://lse.sourceforge.net/locking/rcupdate.html
  *
  */
 
@@ -52,8 +52,13 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
+#ifdef CONFIG_TREE_PREEMPT_RCU
 extern void synchronize_rcu(void);
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#define synchronize_rcu synchronize_sched
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 extern void synchronize_rcu_bh(void);
+extern void synchronize_sched(void);
 extern void rcu_barrier(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
@@ -262,24 +267,6 @@ struct rcu_synchronize {
 extern void wakeme_after_rcu(struct rcu_head *head);
 
 /**
- * synchronize_sched - block until all CPUs have exited any non-preemptive
- * kernel code sequences.
- *
- * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns. However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
- *
- * This primitive provides the guarantees made by the (now removed)
- * synchronize_kernel() API. In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
- */
-#define synchronize_sched() __synchronize_sched()
-
-/**
  * call_rcu - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
  * @func: actual update function to be invoked after the grace period
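
To make the synchronize_rcu()/synchronize_sched() rearrangement above concrete, here is a hedged sketch of the update-side pattern these grace-period primitives serve; struct bar, bar_del() and the list names are made up for illustration and do not appear in the patch. With CONFIG_TREE_PREEMPT_RCU=n, the synchronize_rcu() call below now resolves to synchronize_sched() through the new #define.

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical RCU-protected element and its list; names are illustrative. */
struct bar {
        struct list_head list;
        int data;
};

static LIST_HEAD(bar_list);
static DEFINE_SPINLOCK(bar_lock);

/* Unlink @b, wait for pre-existing readers to finish, then free it. */
static void bar_del(struct bar *b)
{
        spin_lock(&bar_lock);
        list_del_rcu(&b->list);
        spin_unlock(&bar_lock);

        synchronize_rcu();      /* grace period: all earlier readers are done */
        kfree(b);
}

Readers would traverse bar_list with rcu_read_lock() and list_for_each_entry_rcu(), which is why the kfree() must be deferred until a grace period has elapsed.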
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index a89307717825..37682770e9d2 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -24,7 +24,7 @@
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  *
  * For detailed explanation of Read-Copy Update mechanism see -
  *	Documentation/RCU
  */
 
 #ifndef __LINUX_RCUTREE_H
@@ -53,6 +53,8 @@ static inline void __rcu_read_unlock(void)
 	preempt_enable();
 }
 
+#define __synchronize_sched() synchronize_rcu()
+
 static inline void exit_rcu(void)
 {
 }
@@ -68,8 +70,6 @@ static inline void __rcu_read_unlock_bh(void)
 	local_bh_enable();
 }
 
-#define __synchronize_sched() synchronize_rcu()
-
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
 
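
As a companion to the call_rcu_sched() declaration visible in the hunk above, here is a hedged sketch of the asynchronous variant of the same idea; struct baz and the callback name are hypothetical. call_rcu_sched() queues a callback that runs only after every CPU has left any preempt_disable()/IRQ-disabled region that was in progress, so it pairs with the sched flavour of the grace-period machinery rather than with rcu_read_lock().

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical structure embedding the rcu_head that call_rcu_sched() needs. */
struct baz {
        int data;
        struct rcu_head rcu;
};

/* Typically invoked from softirq context after a sched grace period. */
static void baz_free_cb(struct rcu_head *head)
{
        struct baz *b = container_of(head, struct baz, rcu);

        kfree(b);
}

/* Defer freeing of @b instead of blocking in synchronize_sched(). */
static void baz_del(struct baz *b)
{
        /* ...unlink @b from whatever structure publishes it... */
        call_rcu_sched(&b->rcu, baz_free_cb);
}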
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 239c8e0dba9f..115af05ecabd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1755,7 +1755,6 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
-#define RCU_READ_UNLOCK_GOT_QS  (1 << 2) /* CPU has responded to RCU core. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {