author     Frederic Weisbecker <fweisbec@gmail.com>    2011-11-17 12:48:14 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2011-12-11 13:31:57 -0500
commit     1268fbc746ea1cd279886a740dcbad4ba5232225
tree       dc0ff36b4114992a3f67479e25132f5e99f36b9e
parent     b58bdccaa8d908e0f71dae396468a0d3f7bb3125
nohz: Remove tick_nohz_idle_enter_norcu() / tick_nohz_idle_exit_norcu()
Those two APIs were provided to optimize the calls to tick_nohz_idle_enter()
and rcu_idle_enter() into a single irq-disabled section, so that no interrupt
happening in between would needlessly process any RCU work.

This is an optimization whose benefit has yet to be measured. Let's start
simple and completely decouple the idle RCU and dyntick-idle logics.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
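The per-architecture conversion below is mechanical and identical everywhere: the combined _norcu helpers are replaced by the plain tick calls plus an explicit rcu_idle_enter()/rcu_idle_exit() pair. A minimal sketch of the resulting idle-loop shape (generic, not taken verbatim from any file below; arch_idle_wait() is a hypothetical stand-in for whatever low-power wait the architecture really uses):

#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/rcupdate.h>

/* Sketch only: the generic pattern every cpu_idle() below follows after this patch. */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();		/* was tick_nohz_idle_enter_norcu() */
		rcu_idle_enter();		/* now the arch's explicit responsibility */

		while (!need_resched())
			arch_idle_wait();	/* hypothetical low-power wait */

		rcu_idle_exit();		/* before the first use of RCU after wakeup */
		tick_nohz_idle_exit();		/* was tick_nohz_idle_exit_norcu() */

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}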
-rw-r--r--  arch/arm/kernel/process.c               6
-rw-r--r--  arch/avr32/kernel/process.c             6
-rw-r--r--  arch/blackfin/kernel/process.c          6
-rw-r--r--  arch/microblaze/kernel/process.c        6
-rw-r--r--  arch/mips/kernel/process.c              6
-rw-r--r--  arch/openrisc/kernel/idle.c             6
-rw-r--r--  arch/powerpc/kernel/idle.c             15
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c 12
-rw-r--r--  arch/s390/kernel/process.c              6
-rw-r--r--  arch/sh/kernel/idle.c                   6
-rw-r--r--  arch/sparc/kernel/process_64.c          6
-rw-r--r--  arch/tile/kernel/process.c              6
-rw-r--r--  arch/um/kernel/process.c                6
-rw-r--r--  arch/unicore32/kernel/process.c         6
-rw-r--r--  arch/x86/kernel/process_32.c            6
-rw-r--r--  include/linux/tick.h                   47
-rw-r--r--  kernel/time/tick-sched.c               15
17 files changed, 76 insertions, 91 deletions
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 47e34c091276..e8e8fe505df1 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -183,7 +183,8 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		leds_event(led_idle_start);
 		while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
@@ -213,7 +214,8 @@ void cpu_idle(void)
 			}
 		}
 		leds_event(led_idle_end);
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 34c8c703bb16..ea3395750324 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -34,10 +34,12 @@ void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched())
 			cpu_idle_sleep();
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 57e07498a0e7..8dd0416673cb 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -88,10 +88,12 @@ void cpu_idle(void)
 #endif
 		if (!idle)
 			idle = default_idle;
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched())
 			idle();
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 13d59f34b94e..7dcb5bfffb75 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -103,10 +103,12 @@ void cpu_idle(void)
 		if (!idle)
 			idle = default_idle;
 
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched())
 			idle();
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 
 		preempt_enable_no_resched();
 		schedule();
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 17fb3a270160..7955409051c4 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -56,7 +56,8 @@ void __noreturn cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched() && cpu_online(cpu)) {
 #ifdef CONFIG_MIPS_MT_SMTC
 			extern void smtc_idle_loop_hook(void);
@@ -77,7 +78,8 @@ void __noreturn cpu_idle(void)
 		     system_state == SYSTEM_BOOTING))
 			play_dead();
 #endif
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
index 2e82cd0fa5e1..e5fc78877830 100644
--- a/arch/openrisc/kernel/idle.c
+++ b/arch/openrisc/kernel/idle.c
@@ -51,7 +51,8 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 
 		while (!need_resched()) {
 			check_pgt_cache();
@@ -69,7 +70,8 @@ void cpu_idle(void)
 			set_thread_flag(TIF_POLLING_NRFLAG);
 		}
 
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 3cd73d1fc427..9c3cd490b1bd 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -62,10 +62,10 @@ void cpu_idle(void)
 
 	set_thread_flag(TIF_POLLING_NRFLAG);
 	while (1) {
-		if (idle_uses_rcu)
-			tick_nohz_idle_enter();
-		else
-			tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		if (!idle_uses_rcu)
+			rcu_idle_enter();
+
 		while (!need_resched() && !cpu_should_die()) {
 			ppc64_runlatch_off();
 
@@ -102,10 +102,9 @@ void cpu_idle(void)
 
 		HMT_medium();
 		ppc64_runlatch_on();
-		if (idle_uses_rcu)
-			tick_nohz_idle_exit();
-		else
-			tick_nohz_idle_exit_norcu();
+		if (!idle_uses_rcu)
+			rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		if (cpu_should_die())
 			cpu_die();
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index d69d3d185e89..8fc62586a973 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -563,7 +563,8 @@ static void yield_shared_processor(void)
 static void iseries_shared_idle(void)
 {
 	while (1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched() && !hvlpevent_is_pending()) {
 			local_irq_disable();
 			ppc64_runlatch_off();
@@ -577,7 +578,8 @@ static void iseries_shared_idle(void)
 		}
 
 		ppc64_runlatch_on();
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 
 		if (hvlpevent_is_pending())
 			process_iSeries_events();
@@ -593,7 +595,8 @@ static void iseries_dedicated_idle(void)
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while (1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		if (!need_resched()) {
 			while (!need_resched()) {
 				ppc64_runlatch_off();
@@ -610,7 +613,8 @@ static void iseries_dedicated_idle(void)
 		}
 
 		ppc64_runlatch_on();
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6fa987367ae6..3201ae447990 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -91,10 +91,12 @@ static void default_idle(void)
 void cpu_idle(void)
 {
 	for (;;) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched())
 			default_idle();
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index ad58e7535a7c..406508d4ce74 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -89,7 +89,8 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 
 		while (!need_resched()) {
 			check_pgt_cache();
@@ -111,7 +112,8 @@ void cpu_idle(void)
 			start_critical_timings();
 		}
 
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4a0e7d79cb92..39d8b05201a2 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -95,12 +95,14 @@ void cpu_idle(void)
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while(1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 
 		while (!need_resched() && !cpu_is_offline(cpu))
 			sparc64_yield(cpu);
 
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 
 		preempt_enable_no_resched();
 
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 53ac89595ab1..4c1ac6e5347a 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -85,7 +85,8 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched()) {
 			if (cpu_is_offline(cpu))
 				BUG();  /* no HOTPLUG_CPU */
@@ -105,7 +106,8 @@ void cpu_idle(void)
 			local_irq_enable();
 			current_thread_info()->status |= TS_POLLING;
 		}
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 55d2cf455f63..69f24905abdc 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -246,10 +246,12 @@ void default_idle(void)
 		if (need_resched())
 			schedule();
 
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		nsecs = disable_timer();
 		idle_sleep(nsecs);
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 	}
 }
 
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 095ff5a57928..52edc2b62873 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -55,7 +55,8 @@ void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched()) {
 			local_irq_disable();
 			stop_critical_timings();
@@ -63,7 +64,8 @@ void cpu_idle(void)
 			local_irq_enable();
 			start_critical_timings();
 		}
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f94da3920c36..485204f58cda 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -99,7 +99,8 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched()) {
 
 			check_pgt_cache();
@@ -116,7 +117,8 @@ void cpu_idle(void)
 			pm_idle();
 			start_critical_timings();
 		}
-		tick_nohz_idle_exit_norcu();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 327434a05757..ab8be90b5cc9 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -122,45 +122,8 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
 # ifdef CONFIG_NO_HZ
-extern void __tick_nohz_idle_enter(void);
-static inline void tick_nohz_idle_enter(void)
-{
-	local_irq_disable();
-	__tick_nohz_idle_enter();
-	local_irq_enable();
-}
+extern void tick_nohz_idle_enter(void);
 extern void tick_nohz_idle_exit(void);
-
-/*
- * Call this pair of function if the arch doesn't make any use
- * of RCU in-between. You won't need to call rcu_idle_enter() and
- * rcu_idle_exit().
- * Otherwise you need to call tick_nohz_idle_enter() and tick_nohz_idle_exit()
- * and explicitly tell RCU about the window around the place the CPU enters low
- * power mode where no RCU use is made. This is done by calling rcu_idle_enter()
- * after the last use of RCU before the CPU is put to sleep and by calling
- * rcu_idle_exit() before the first use of RCU after the CPU woke up.
- */
-static inline void tick_nohz_idle_enter_norcu(void)
-{
-	/*
-	 * Also call rcu_idle_enter() in the irq disabled section even
-	 * if it disables irq itself.
-	 * Just an optimization that prevents from an interrupt happening
-	 * between it and __tick_nohz_idle_enter() to lose time to help
-	 * completing a grace period while we could be in extended grace
-	 * period already.
-	 */
-	local_irq_disable();
-	__tick_nohz_idle_enter();
-	rcu_idle_enter();
-	local_irq_enable();
-}
-static inline void tick_nohz_idle_exit_norcu(void)
-{
-	rcu_idle_exit();
-	tick_nohz_idle_exit();
-}
 extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
@@ -168,14 +131,6 @@ extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 # else
 static inline void tick_nohz_idle_enter(void) { }
 static inline void tick_nohz_idle_exit(void) { }
-static inline void tick_nohz_idle_enter_norcu(void)
-{
-	rcu_idle_enter();
-}
-static inline void tick_nohz_idle_exit_norcu(void)
-{
-	rcu_idle_exit();
-}
 
 static inline ktime_t tick_nohz_get_sleep_length(void)
 {
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c76aefe764b0..0ec8b832ab6b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -454,21 +454,20 @@ out:
  * When the next event is more than a tick into the future, stop the idle tick
  * Called when we start the idle loop.
  *
- * If no use of RCU is made in the idle loop between
- * tick_nohz_idle_enter() and tick_nohz_idle_exit() calls, then
- * tick_nohz_idle_enter_norcu() should be called instead and the arch
- * doesn't need to call rcu_idle_enter() and rcu_idle_exit() explicitly.
- *
- * Otherwise the arch is responsible of calling:
+ * The arch is responsible of calling:
  *
  * - rcu_idle_enter() after its last use of RCU before the CPU is put
  *   to sleep.
  * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
  */
-void __tick_nohz_idle_enter(void)
+void tick_nohz_idle_enter(void)
 {
 	struct tick_sched *ts;
 
+	WARN_ON_ONCE(irqs_disabled());
+
+	local_irq_disable();
+
 	ts = &__get_cpu_var(tick_cpu_sched);
 	/*
 	 * set ts->inidle unconditionally. even if the system did not
@@ -477,6 +476,8 @@ void __tick_nohz_idle_enter(void)
 	 */
 	ts->inidle = 1;
 	tick_nohz_stop_sched_tick(ts);
+
+	local_irq_enable();
 }
 
 /**