author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2009-08-22 16:56:53 -0400
committer  Ingo Molnar <mingo@elte.hu>                      2009-08-23 04:32:40 -0400
commit     6b3ef48adf847f7adf11c870e3ffacac150f1564 (patch)
tree       e1403ce515bf00ade99ec806f6ab6b6db999aa0b /include/linux/rcupreempt.h
parent     f41d911f8c49a5d65c86504c19e8204bb605c4fd (diff)
rcu: Remove CONFIG_PREEMPT_RCU
Now that CONFIG_TREE_PREEMPT_RCU is in place, there is no
further need for CONFIG_PREEMPT_RCU. Remove it, along with
whatever subtle bugs it may (or may not) contain.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josht@linux.vnet.ibm.com
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <125097461396-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/rcupreempt.h')
-rw-r--r--   include/linux/rcupreempt.h   140
1 files changed, 0 insertions, 140 deletions
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
deleted file mode 100644
index a42ab88e9210..000000000000
--- a/include/linux/rcupreempt.h
+++ /dev/null
@@ -1,140 +0,0 @@
/*
 * Read-Copy Update mechanism for mutual exclusion (RT implementation)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author:  Paul McKenney <paulmck@us.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * 		Documentation/RCU
 *
 */

#ifndef __LINUX_RCUPREEMPT_H
#define __LINUX_RCUPREEMPT_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

extern void rcu_sched_qs(int cpu);
static inline void rcu_bh_qs(int cpu) { }

/*
 * Someone might want to pass call_rcu_bh as a function pointer.
 * So this needs to just be a rename and not a macro function.
 *  (no parentheses)
 */
#define call_rcu_bh call_rcu

/**
 * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full
 * synchronize_sched()-style grace period elapses, in other words after
 * all currently executing preempt-disabled sections of code (including
 * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
 * completed.
 */
extern void call_rcu_sched(struct rcu_head *head,
			   void (*func)(struct rcu_head *head));

extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
extern int rcu_needs_cpu(int cpu);

#define __rcu_read_lock_bh()	{ rcu_read_lock(); local_bh_disable(); }
#define __rcu_read_unlock_bh()	{ local_bh_enable(); rcu_read_unlock(); }

extern void __synchronize_sched(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();  /* Placeholder for new rcupreempt implementation. */
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_rcu_bh();  /* Placeholder for new rcupreempt impl. */
}

extern void __rcu_init(void);
extern void rcu_init_sched(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
/*
 * Return the number of RCU batches processed thus far.  Useful for debugging
 * and statistics.  The _bh variant is identical to straight RCU.
 */
static inline long rcu_batches_completed_bh(void)
{
	return rcu_batches_completed();
}

static inline void exit_rcu(void)
{
}

#ifdef CONFIG_RCU_TRACE
struct rcupreempt_trace;
extern long *rcupreempt_flipctr(int cpu);
extern long rcupreempt_data_completed(void);
extern int rcupreempt_flip_flag(int cpu);
extern int rcupreempt_mb_flag(int cpu);
extern char *rcupreempt_try_flip_state_name(void);
extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
#endif

struct softirq_action;

#ifdef CONFIG_NO_HZ
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);
#else
# define rcu_enter_nohz()	do { } while (0)
# define rcu_exit_nohz()	do { } while (0)
#endif

/*
 * A context switch is a grace period for rcupreempt synchronize_rcu()
 * only during early boot, before the scheduler has been initialized.
 * So, how the heck do we get a context switch?  Well, if the caller
 * invokes synchronize_rcu(), they are willing to accept a context
 * switch, so we simply pretend that one happened.
 *
 * After boot, there might be a blocked or preempted task in an RCU
 * read-side critical section, so we cannot then take the fastpath.
 */
static inline int rcu_blocking_is_gp(void)
{
	return num_online_cpus() == 1 && !rcu_scheduler_active;
}

#endif /* __LINUX_RCUPREEMPT_H */
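
Editorial note on the call_rcu_bh rename in the removed header: because call_rcu_bh is defined as a plain rename of call_rcu rather than a function-like macro, the bare name can still be taken as a function pointer, which is exactly the case the comment calls out. A minimal sketch (not part of this commit; the deferred_free pointer is hypothetical):

#include <linux/rcupdate.h>

/*
 * Under the rcupreempt configuration, call_rcu_bh expands to the
 * function name call_rcu, so its address can be stored and the
 * callback-queueing function invoked later through the pointer.
 */
static void (*deferred_free)(struct rcu_head *head,
			     void (*func)(struct rcu_head *head)) = call_rcu_bh;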
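
For the call_rcu_sched() interface documented in the removed header, a minimal usage sketch follows (editorial, not part of this commit). The struct foo, foo_table, foo_lock, foo_read(), foo_update(), and foo_reclaim() names are hypothetical; the sketch only illustrates the documented contract that the callback runs after all currently executing preempt-disabled sections have completed:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int data;
	struct rcu_head rcu;		/* used to queue the deferred free */
};

static struct foo *foo_table;		/* RCU-protected pointer (hypothetical) */
static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters */

/* Reader: runs concurrently with updaters, takes no sleeping locks. */
static int foo_read(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock_sched();		/* sched read side: preemption disabled */
	p = rcu_dereference(foo_table);
	if (p)
		val = p->data;
	rcu_read_unlock_sched();
	return val;
}

/* Invoked once a synchronize_sched()-style grace period has elapsed. */
static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Updater: publish a new version, defer freeing of the old one. */
static void foo_update(int data)
{
	struct foo *new_p = kmalloc(sizeof(*new_p), GFP_KERNEL);
	struct foo *old_p;

	if (!new_p)
		return;
	new_p->data = data;

	spin_lock(&foo_lock);
	old_p = foo_table;		/* updates serialized by foo_lock */
	rcu_assign_pointer(foo_table, new_p);
	spin_unlock(&foo_lock);

	if (old_p)
		call_rcu_sched(&old_p->rcu, foo_reclaim);
}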
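
The comment above rcu_blocking_is_gp() describes the early-boot fastpath: before the scheduler is initialized, a caller of synchronize_rcu() is by definition willing to context-switch, so the grace period can be treated as already elapsed; once the scheduler is running, a preempted or blocked reader may exist and the slowpath must be taken. A sketch of the intended use (editorial, not part of this commit; wait_for_grace_period() is a hypothetical stand-in for the real slowpath):

#include <linux/rcupdate.h>

static void wait_for_grace_period(void);	/* hypothetical slowpath */

static void example_synchronize_rcu(void)
{
	/*
	 * Single CPU online and scheduler not yet active: the caller's
	 * willingness to block stands in for the context switch, so
	 * return immediately.
	 */
	if (rcu_blocking_is_gp())
		return;

	wait_for_grace_period();
}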