 include/linux/rcupdate.h |  12 -
 include/linux/rcutiny.h  |  11 +
 include/linux/rcutree.h  |   4 +-
 kernel/rcupdate.c        | 104 -
 kernel/rcutree.c         |  80 +
 kernel/rcutree_plugin.h  |  24 +
 6 files changed, 118 insertions(+), 117 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2f1bc42a3b82..24440f4bf476 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,6 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
-#ifdef CONFIG_TREE_PREEMPT_RCU
-extern void synchronize_rcu(void);
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#define synchronize_rcu synchronize_sched
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 extern void synchronize_rcu_bh(void);
 extern void synchronize_sched(void);
 extern void rcu_barrier(void);
@@ -67,13 +62,6 @@ extern int sched_expedited_torture_stats(char *page);
 
 /* Internal to kernel */
 extern void rcu_init(void);
-extern void rcu_scheduler_starting(void);
-#ifndef CONFIG_TINY_RCU
-extern int rcu_needs_cpu(int cpu);
-#else
-static inline int rcu_needs_cpu(int cpu) { return 0; }
-#endif
-extern int rcu_scheduler_active;
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
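
With these hunks, the common header no longer decides which implementation backs synchronize_rcu(); each RCU flavor's own header now makes that choice. A summary of where the definitions land after this patch, assembled from the header hunks below:

	/*
	 * synchronize_rcu() after this patch, per configuration:
	 *
	 *   CONFIG_TINY_RCU:          #define synchronize_rcu synchronize_sched
	 *   CONFIG_TREE_RCU:          #define synchronize_rcu synchronize_sched
	 *   CONFIG_TREE_PREEMPT_RCU:  extern void synchronize_rcu(void);
	 *                             (definition in kernel/rcutree_plugin.h)
	 */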
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index a3b6272af2dd..c4ba9a78721e 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -39,6 +39,11 @@ void rcu_bh_qs(int cpu);
 #define rcu_init_sched() do { } while (0)
 extern void rcu_check_callbacks(int cpu, int user);
 
+static inline int rcu_needs_cpu(int cpu)
+{
+	return 0;
+}
+
 /*
  * Return the number of grace periods.
  */
@@ -57,6 +62,8 @@ static inline long rcu_batches_completed_bh(void)
 
 extern int rcu_expedited_torture_stats(char *page);
 
+#define synchronize_rcu synchronize_sched
+
 static inline void synchronize_rcu_expedited(void)
 {
 	synchronize_sched();
@@ -86,6 +93,10 @@ static inline void rcu_exit_nohz(void)
 
 #endif /* #else #ifdef CONFIG_NO_HZ */
 
+static inline void rcu_scheduler_starting(void)
+{
+}
+
 static inline void exit_rcu(void)
 {
 }
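
TINY_RCU's rcu_needs_cpu() unconditionally returns zero, telling the dynticks code that RCU has no reason to keep the scheduling-clock tick alive; this was already the behavior before the patch, only its location changes. A simplified sketch of the sort of check the nohz code performs before stopping the tick (not verbatim tick-sched.c; keep_tick() and stop_tick() are hypothetical helpers):

	int cpu = smp_processor_id();

	if (rcu_needs_cpu(cpu))
		keep_tick();	/* RCU still has work for this CPU */
	else
		stop_tick();	/* RCU does not object to stopping the tick */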
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 111a65257350..c93eee5911b0 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -35,12 +35,14 @@ struct notifier_block;
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 extern int rcu_needs_cpu(int cpu);
+extern void rcu_scheduler_starting(void);
 extern int rcu_expedited_torture_stats(char *page);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
 extern void __rcu_read_lock(void);
 extern void __rcu_read_unlock(void);
+extern void synchronize_rcu(void);
 extern void exit_rcu(void);
 
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
@@ -55,7 +57,7 @@ static inline void __rcu_read_unlock(void)
 	preempt_enable();
 }
 
-#define __synchronize_sched() synchronize_rcu()
+#define synchronize_rcu synchronize_sched
 
 static inline void exit_rcu(void)
 {
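
The non-preemptible branch now aliases synchronize_rcu() itself to synchronize_sched(). The alias is exact in this configuration because, as the surrounding context shows, __rcu_read_lock() is just preempt_disable(): a reader can never be preempted, so waiting for every CPU to pass through a context switch (an rcu-sched grace period) necessarily waits out all RCU readers as well. A minimal reader-side sketch under this configuration (gp, struct foo, and do_something() are hypothetical):

	struct foo *p;

	rcu_read_lock();		/* preempt_disable() in this config */
	p = rcu_dereference(gp);	/* fetch RCU-protected pointer */
	if (p)
		do_something(p->a);
	rcu_read_unlock();		/* preempt_enable() */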
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index eb6b534db318..9b7fd4723878 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,7 +44,6 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
-#include <linux/kernel_stat.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -53,8 +52,6 @@ struct lockdep_map rcu_lock_map =
 EXPORT_SYMBOL_GPL(rcu_lock_map);
 #endif
 
-int rcu_scheduler_active __read_mostly;
-
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -66,104 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
 	rcu = container_of(head, struct rcu_synchronize, head);
 	complete(&rcu->completion);
 }
-
-#ifndef CONFIG_TINY_RCU
-
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-/**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed.  RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-void synchronize_rcu(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (!rcu_scheduler_active)
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu);
-
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-/**
- * synchronize_sched - wait until an rcu-sched grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu-sched
- * grace period has elapsed, in other words after all currently executing
- * rcu-sched read-side critical sections have completed.  These read-side
- * critical sections are delimited by rcu_read_lock_sched() and
- * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
- * local_irq_disable(), and so on may be used in place of
- * rcu_read_lock_sched().
- *
- * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns.  However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
- *
- * This primitive provides the guarantees made by the (now removed)
- * synchronize_kernel() API.  In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
- */
-void synchronize_sched(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (rcu_blocking_is_gp())
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu_sched(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_sched);
-
-/**
- * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu_bh grace
- * period has elapsed, in other words after all currently executing rcu_bh
- * read-side critical sections have completed.  RCU read-side critical
- * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
- * and may be nested.
- */
-void synchronize_rcu_bh(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (rcu_blocking_is_gp())
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu_bh(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-
-#endif /* #ifndef CONFIG_TINY_RCU */
-
-void rcu_scheduler_starting(void)
-{
-	WARN_ON(num_online_cpus() != 1);
-	WARN_ON(nr_context_switches() > 0);
-	rcu_scheduler_active = 1;
-}
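
Everything removed above reappears, essentially unchanged, in kernel/rcutree.c and kernel/rcutree_plugin.h below; kernel/rcupdate.c keeps only wakeme_after_rcu(). All three relocated wrappers share one shape: enqueue a callback that completes a completion after a grace period, then block until it fires. A hedged generic sketch of that shape (synchronize_generic() is a hypothetical helper, not part of this patch; struct rcu_synchronize and wakeme_after_rcu() are the real ones from rcupdate.[ch]):

	static void synchronize_generic(void (*post)(struct rcu_head *,
						     void (*)(struct rcu_head *)))
	{
		struct rcu_synchronize rcu;

		init_completion(&rcu.completion);
		post(&rcu.head, wakeme_after_rcu);	/* wake us after a GP */
		wait_for_completion(&rcu.completion);	/* block until then */
	}

Each real wrapper is this pattern with post equal to call_rcu, call_rcu_sched, or call_rcu_bh, plus its flavor's early-boot fast path.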
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e3d3bbddbcd5..4ca7e0292fd8 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,6 +46,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/time.h>
+#include <linux/kernel_stat.h>
 
 #include "rcutree.h"
 
@@ -79,6 +80,8 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+static int rcu_scheduler_active __read_mostly;
+
 
 /*
  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
@@ -1396,6 +1399,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
 
+/**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-sched
+ * grace period has elapsed, in other words after all currently executing
+ * rcu-sched read-side critical sections have completed.  These read-side
+ * critical sections are delimited by rcu_read_lock_sched() and
+ * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
+ * local_irq_disable(), and so on may be used in place of
+ * rcu_read_lock_sched().
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns.  However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels, these
+ * handlers can run in process context, and can block.
+ *
+ * This primitive provides the guarantees made by the (now removed)
+ * synchronize_kernel() API.  In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ * In "classic RCU", these two guarantees happen to be one and
+ * the same, but can differ in realtime RCU implementations.
+ */
+void synchronize_sched(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_sched);
+
+/**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu_bh grace
+ * period has elapsed, in other words after all currently executing rcu_bh
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ * and may be nested.
+ */
+void synchronize_rcu_bh(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_bh(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
 /*
  * Check to see if there is any immediate RCU-related work to be done
  * by the current CPU, for the specified type of RCU, returning 1 if so.
@@ -1480,6 +1545,21 @@ int rcu_needs_cpu(int cpu)
 		rcu_preempt_needs_cpu(cpu);
 }
 
+/*
+ * This function is invoked towards the end of the scheduler's initialization
+ * process.  Before this is called, the idle task might contain
+ * RCU read-side critical sections (during which time, this idle
+ * task is booting the system).  After this function is called, the
+ * idle tasks are prohibited from containing RCU read-side critical
+ * sections.
+ */
+void rcu_scheduler_starting(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+	WARN_ON(nr_context_switches() > 0);
+	rcu_scheduler_active = 1;
+}
+
 static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
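
Note that rcu_scheduler_active moves here and becomes static; the two WARN_ONs in rcu_scheduler_starting() encode exactly the conditions that make the early-boot fast paths safe. An informal argument, written as a comment (not code from this patch):

	/*
	 * Before rcu_scheduler_starting():
	 *   - exactly one CPU is online (first WARN_ON), and
	 *   - no context switches have occurred (second WARN_ON),
	 * so no task other than the current one can be partway through
	 * an RCU read-side critical section.  Any grace period the
	 * caller could wait for has therefore already, vacuously,
	 * elapsed, and the synchronize_*() fast paths may return
	 * immediately rather than blocking before the scheduler is able
	 * to run the grace-period machinery.
	 */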
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0bdb592eee66..1d295c789d3d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -425,6 +425,30 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
+/**
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void synchronize_rcu(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (!rcu_scheduler_active)
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
 /*
  * Wait for an rcu-preempt grace period.  We are supposed to expedite the
  * grace period, but this is the crude slow compatability hack, so just
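
With synchronize_rcu() now defined next to call_rcu() in the preemptible-RCU code, updaters read the same at every call site. A minimal usage sketch of the classic remove-wait-free pattern this primitive supports, assuming a hypothetical RCU-protected list of struct foo guarded by foo_lock:

	struct foo {
		struct list_head list;
		int a;
	};

	static LIST_HEAD(foo_head);
	static DEFINE_SPINLOCK(foo_lock);

	static void foo_del(struct foo *fp)
	{
		spin_lock(&foo_lock);
		list_del_rcu(&fp->list);	/* hide fp from new readers */
		spin_unlock(&foo_lock);
		synchronize_rcu();		/* wait out pre-existing readers */
		kfree(fp);			/* no reader can still hold fp */
	}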