-rw-r--r--  arch/arm/kernel/process.c                |  4
-rw-r--r--  arch/avr32/kernel/process.c              |  4
-rw-r--r--  arch/blackfin/kernel/process.c           |  4
-rw-r--r--  arch/microblaze/kernel/process.c         |  4
-rw-r--r--  arch/mips/kernel/process.c               |  4
-rw-r--r--  arch/openrisc/kernel/idle.c              |  4
-rw-r--r--  arch/powerpc/kernel/idle.c               |  4
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c   |  8
-rw-r--r--  arch/s390/kernel/process.c               |  4
-rw-r--r--  arch/sh/kernel/idle.c                    |  4
-rw-r--r--  arch/sparc/kernel/process_64.c           |  4
-rw-r--r--  arch/tile/kernel/process.c               |  4
-rw-r--r--  arch/um/kernel/process.c                 |  4
-rw-r--r--  arch/unicore32/kernel/process.c          |  4
-rw-r--r--  arch/x86/kernel/process_32.c             |  4
-rw-r--r--  arch/x86/kernel/process_64.c             |  4
-rw-r--r--  include/linux/tick.h                     | 46
-rw-r--r--  kernel/time/tick-sched.c                 | 25
18 files changed, 90 insertions(+), 49 deletions(-)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3f1f8daf703c..47e34c091276 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -183,7 +183,7 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		leds_event(led_idle_start);
 		while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
@@ -213,7 +213,7 @@ void cpu_idle(void)
 			}
 		}
 		leds_event(led_idle_end);
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 6ee7952248db..34c8c703bb16 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -34,10 +34,10 @@ void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched())
 			cpu_idle_sleep();
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 7b141b5c9e8d..57e07498a0e7 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -88,10 +88,10 @@ void cpu_idle(void)
 #endif
 		if (!idle)
 			idle = default_idle;
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched())
 			idle();
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 5407f09b4be4..13d59f34b94e 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -103,10 +103,10 @@ void cpu_idle(void)
 		if (!idle)
 			idle = default_idle;
 
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched())
 			idle();
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 
 		preempt_enable_no_resched();
 		schedule();
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c11e5ca2a434..17fb3a270160 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -56,7 +56,7 @@ void __noreturn cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched() && cpu_online(cpu)) {
 #ifdef CONFIG_MIPS_MT_SMTC
 			extern void smtc_idle_loop_hook(void);
@@ -77,7 +77,7 @@ void __noreturn cpu_idle(void)
 		     system_state == SYSTEM_BOOTING))
 			play_dead();
 #endif
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
index fb6a9bf40006..2e82cd0fa5e1 100644
--- a/arch/openrisc/kernel/idle.c
+++ b/arch/openrisc/kernel/idle.c
@@ -51,7 +51,7 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 
 		while (!need_resched()) {
 			check_pgt_cache();
@@ -69,7 +69,7 @@ void cpu_idle(void)
 			set_thread_flag(TIF_POLLING_NRFLAG);
 		}
 
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 878572f70ac5..2e782a36d8f2 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -56,7 +56,7 @@ void cpu_idle(void)
 
 	set_thread_flag(TIF_POLLING_NRFLAG);
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched() && !cpu_should_die()) {
 			ppc64_runlatch_off();
 
@@ -93,7 +93,7 @@ void cpu_idle(void)
 
 		HMT_medium();
 		ppc64_runlatch_on();
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		if (cpu_should_die())
 			cpu_die();
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index e83dfaf89f69..d69d3d185e89 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -563,7 +563,7 @@ static void yield_shared_processor(void)
 static void iseries_shared_idle(void)
 {
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched() && !hvlpevent_is_pending()) {
 			local_irq_disable();
 			ppc64_runlatch_off();
@@ -577,7 +577,7 @@ static void iseries_shared_idle(void)
 		}
 
 		ppc64_runlatch_on();
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 
 		if (hvlpevent_is_pending())
 			process_iSeries_events();
@@ -593,7 +593,7 @@ static void iseries_dedicated_idle(void)
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		if (!need_resched()) {
 			while (!need_resched()) {
 				ppc64_runlatch_off();
@@ -610,7 +610,7 @@ static void iseries_dedicated_idle(void)
 		}
 
 		ppc64_runlatch_on();
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6224f9dbbc1f..6fa987367ae6 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -91,10 +91,10 @@ static void default_idle(void)
 void cpu_idle(void)
 {
 	for (;;) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched())
 			default_idle();
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 6015743020a0..ad58e7535a7c 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -89,7 +89,7 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 
 		while (!need_resched()) {
 			check_pgt_cache();
@@ -111,7 +111,7 @@ void cpu_idle(void)
 			start_critical_timings();
 		}
 
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 9c2795ba2cfe..4a0e7d79cb92 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -95,12 +95,12 @@ void cpu_idle(void)
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while(1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 
 		while (!need_resched() && !cpu_is_offline(cpu))
 			sparc64_yield(cpu);
 
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 
 		preempt_enable_no_resched();
 
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 920e674aedb9..53ac89595ab1 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -85,7 +85,7 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched()) {
 			if (cpu_is_offline(cpu))
 				BUG();  /* no HOTPLUG_CPU */
@@ -105,7 +105,7 @@ void cpu_idle(void)
 				local_irq_enable();
 			current_thread_info()->status |= TS_POLLING;
 		}
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index cfb657e92849..55d2cf455f63 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -246,10 +246,10 @@ void default_idle(void)
 		if (need_resched())
 			schedule();
 
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		nsecs = disable_timer();
 		idle_sleep(nsecs);
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 	}
 }
 
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 9999b9a84d46..095ff5a57928 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -55,7 +55,7 @@ void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched()) {
 			local_irq_disable();
 			stop_critical_timings();
@@ -63,7 +63,7 @@ void cpu_idle(void)
 			local_irq_enable();
 			start_critical_timings();
 		}
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 6d9d4d52cac5..f94da3920c36 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -99,7 +99,7 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched()) {
 
 			check_pgt_cache();
@@ -116,7 +116,7 @@ void cpu_idle(void)
 				pm_idle();
 			start_critical_timings();
 		}
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b069e9d7875f..18e8cf3581f6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -122,7 +122,7 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_idle_enter();
+		tick_nohz_idle_enter_norcu();
 		while (!need_resched()) {
 
 			rmb();
@@ -149,7 +149,7 @@ void cpu_idle(void)
 			__exit_idle();
 		}
 
-		tick_nohz_idle_exit();
+		tick_nohz_idle_exit_norcu();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 0df1d50a408a..327434a05757 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -7,6 +7,7 @@
 #define _LINUX_TICK_H
 
 #include <linux/clockchips.h>
+#include <linux/irqflags.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
@@ -121,18 +122,57 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
 # ifdef CONFIG_NO_HZ
-extern void tick_nohz_idle_enter(void);
+extern void __tick_nohz_idle_enter(void);
+static inline void tick_nohz_idle_enter(void)
+{
+	local_irq_disable();
+	__tick_nohz_idle_enter();
+	local_irq_enable();
+}
 extern void tick_nohz_idle_exit(void);
+
+/*
+ * Call this pair of function if the arch doesn't make any use
+ * of RCU in-between. You won't need to call rcu_idle_enter() and
+ * rcu_idle_exit().
+ * Otherwise you need to call tick_nohz_idle_enter() and tick_nohz_idle_exit()
+ * and explicitly tell RCU about the window around the place the CPU enters low
+ * power mode where no RCU use is made. This is done by calling rcu_idle_enter()
+ * after the last use of RCU before the CPU is put to sleep and by calling
+ * rcu_idle_exit() before the first use of RCU after the CPU woke up.
+ */
+static inline void tick_nohz_idle_enter_norcu(void)
+{
+	/*
+	 * Also call rcu_idle_enter() in the irq disabled section even
+	 * if it disables irq itself.
+	 * Just an optimization that prevents from an interrupt happening
+	 * between it and __tick_nohz_idle_enter() to lose time to help
+	 * completing a grace period while we could be in extended grace
+	 * period already.
+	 */
+	local_irq_disable();
+	__tick_nohz_idle_enter();
+	rcu_idle_enter();
+	local_irq_enable();
+}
+static inline void tick_nohz_idle_exit_norcu(void)
+{
+	rcu_idle_exit();
+	tick_nohz_idle_exit();
+}
 extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 # else
-static inline void tick_nohz_idle_enter(void)
+static inline void tick_nohz_idle_enter(void) { }
+static inline void tick_nohz_idle_exit(void) { }
+static inline void tick_nohz_idle_enter_norcu(void)
 {
 	rcu_idle_enter();
 }
-static inline void tick_nohz_idle_exit(void)
+static inline void tick_nohz_idle_exit_norcu(void)
 {
 	rcu_idle_exit();
 }
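
For illustration only, here is a minimal sketch (not part of this patch) of how an architecture whose idle path makes no use of RCU would consume the new helpers; arch_cpu_sleep() is a hypothetical placeholder for the arch's low-power instruction, and the surrounding calls are the ones appearing in the per-arch hunks above.

void cpu_idle(void)
{
	/* Endless idle loop: no RCU usage between enter and exit, so the
	 * _norcu helpers handle rcu_idle_enter()/rcu_idle_exit() internally. */
	while (1) {
		tick_nohz_idle_enter_norcu();	/* stop the tick, enter RCU extended QS */
		while (!need_resched())
			arch_cpu_sleep();	/* hypothetical wfi/hlt wrapper */
		tick_nohz_idle_exit_norcu();	/* leave RCU extended QS, restart the tick */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}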
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 266c242dc354..c76aefe764b0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -453,18 +453,22 @@ out:
  *
  * When the next event is more than a tick into the future, stop the idle tick
  * Called when we start the idle loop.
- * This also enters into RCU extended quiescent state so that this CPU doesn't
- * need anymore to be part of any global grace period completion. This way
- * the tick can be stopped safely as we don't need to report quiescent states.
+ *
+ * If no use of RCU is made in the idle loop between
+ * tick_nohz_idle_enter() and tick_nohz_idle_exit() calls, then
+ * tick_nohz_idle_enter_norcu() should be called instead and the arch
+ * doesn't need to call rcu_idle_enter() and rcu_idle_exit() explicitly.
+ *
+ * Otherwise the arch is responsible of calling:
+ *
+ * - rcu_idle_enter() after its last use of RCU before the CPU is put
+ *   to sleep.
+ * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
  */
-void tick_nohz_idle_enter(void)
+void __tick_nohz_idle_enter(void)
 {
 	struct tick_sched *ts;
 
-	WARN_ON_ONCE(irqs_disabled());
-
-	local_irq_disable();
-
 	ts = &__get_cpu_var(tick_cpu_sched);
 	/*
 	 * set ts->inidle unconditionally. even if the system did not
@@ -473,9 +477,6 @@ void tick_nohz_idle_enter(void)
 	 */
 	ts->inidle = 1;
 	tick_nohz_stop_sched_tick(ts);
-	rcu_idle_enter();
-
-	local_irq_enable();
 }
 
 /**
@@ -551,7 +552,7 @@ void tick_nohz_idle_exit(void)
 	ktime_t now;
 
 	local_irq_disable();
-	rcu_idle_exit();
+
 	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
 		now = ktime_get();
 
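
For comparison, a hypothetical sketch (again not part of this patch) of an arch that does use RCU from its idle loop, here via an assumed arch_idle_notify() hook: it keeps the plain tick_nohz_idle_enter()/tick_nohz_idle_exit() pair and brackets only the actual sleep with rcu_idle_enter()/rcu_idle_exit(), as the comment added to tick-sched.c above describes.

void cpu_idle(void)
{
	while (1) {
		tick_nohz_idle_enter();		/* tick stopped, RCU still usable */
		while (!need_resched()) {
			arch_idle_notify();	/* hypothetical hook; may use RCU read-side sections */
			rcu_idle_enter();	/* last RCU use before sleeping */
			arch_cpu_sleep();	/* hypothetical low-power wait */
			rcu_idle_exit();	/* RCU usable again after wakeup */
		}
		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}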