-rw-r--r--  include/linux/context_tracking.h        10
-rw-r--r--  include/linux/context_tracking_state.h  11
-rw-r--r--  include/linux/tick.h                      8
-rw-r--r--  include/linux/vtime.h                     4
-rw-r--r--  init/Kconfig                              2
-rw-r--r--  kernel/context_tracking.c                 8
-rw-r--r--  kernel/posix-cpu-timers.c                21
-rw-r--r--  kernel/softirq.c                          4
-rw-r--r--  kernel/time/tick-broadcast.c              6
-rw-r--r--  kernel/time/tick-internal.h               4
-rw-r--r--  kernel/time/tick-sched.c                 39
11 files changed, 55 insertions(+), 62 deletions(-)
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 158158704c30..37b81bd51ec0 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -17,13 +17,13 @@ extern void __context_tracking_task_switch(struct task_struct *prev,
 
 static inline void user_enter(void)
 {
-        if (static_key_false(&context_tracking_enabled))
+        if (context_tracking_is_enabled())
                 context_tracking_user_enter();
 
 }
 static inline void user_exit(void)
 {
-        if (static_key_false(&context_tracking_enabled))
+        if (context_tracking_is_enabled())
                 context_tracking_user_exit();
 }
 
@@ -31,7 +31,7 @@ static inline enum ctx_state exception_enter(void)
 {
         enum ctx_state prev_ctx;
 
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return 0;
 
         prev_ctx = this_cpu_read(context_tracking.state);
@@ -42,7 +42,7 @@ static inline enum ctx_state exception_enter(void)
 
 static inline void exception_exit(enum ctx_state prev_ctx)
 {
-        if (static_key_false(&context_tracking_enabled)) {
+        if (context_tracking_is_enabled()) {
                 if (prev_ctx == IN_USER)
                         context_tracking_user_enter();
         }
@@ -51,7 +51,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 static inline void context_tracking_task_switch(struct task_struct *prev,
                                                 struct task_struct *next)
 {
-        if (static_key_false(&context_tracking_enabled))
+        if (context_tracking_is_enabled())
                 __context_tracking_task_switch(prev, next);
 }
 #else
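Note: the conversion above is mechanical at every call site; context_tracking_is_enabled() (defined in context_tracking_state.h below) still expands to the same static_key_false() jump-label test, so the disabled case remains a patched-out branch. A minimal userspace sketch of the wrapper pattern, with illustrative names that are not kernel API:

        #include <stdbool.h>
        #include <stdio.h>

        /* Stand-in for the kernel's static key; in the kernel this branch
         * is live-patched at runtime, here it is an ordinary flag. */
        static bool tracking_enabled;

        /* Named predicate wrapping the raw test, mirroring how
         * context_tracking_is_enabled() wraps static_key_false(). */
        static inline bool tracking_is_enabled(void)
        {
                return tracking_enabled;
        }

        static void tracking_user_enter(void)   /* out-of-line slow path */
        {
                printf("entering user mode with tracking on\n");
        }

        static inline void user_enter(void)
        {
                if (tracking_is_enabled())      /* reads as a question, not a mechanism */
                        tracking_user_enter();
        }

        int main(void)
        {
                tracking_enabled = true;
                user_enter();
                return 0;
        }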
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 0f1979d0674f..97a81225d037 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -22,15 +22,20 @@ struct context_tracking {
 extern struct static_key context_tracking_enabled;
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
 
-static inline bool context_tracking_in_user(void)
+static inline bool context_tracking_is_enabled(void)
 {
-        return __this_cpu_read(context_tracking.state) == IN_USER;
+        return static_key_false(&context_tracking_enabled);
 }
 
-static inline bool context_tracking_active(void)
+static inline bool context_tracking_cpu_is_enabled(void)
 {
         return __this_cpu_read(context_tracking.active);
 }
+
+static inline bool context_tracking_in_user(void)
+{
+        return __this_cpu_read(context_tracking.state) == IN_USER;
+}
 #else
 static inline bool context_tracking_in_user(void) { return false; }
 static inline bool context_tracking_active(void) { return false; }
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 5128d33bbb39..0175d8663b6c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -104,7 +104,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 extern void tick_clock_notify(void);
 extern int tick_check_oneshot_change(int allow_nohz);
 extern struct tick_sched *tick_get_tick_sched(int cpu);
-extern void tick_check_idle(int cpu);
+extern void tick_check_idle(void);
 extern int tick_oneshot_mode_active(void);
 # ifndef arch_needs_cpu
 # define arch_needs_cpu(cpu) (0)
@@ -112,7 +112,7 @@ extern int tick_oneshot_mode_active(void);
 # else
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 # endif
 
@@ -121,7 +121,7 @@ static inline void tick_init(void) { }
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
@@ -165,7 +165,7 @@ extern cpumask_var_t tick_nohz_full_mask;
 
 static inline bool tick_nohz_full_enabled(void)
 {
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return false;
 
         return tick_nohz_full_running;
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index f5b72b364bda..c5165fd256f9 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -19,8 +19,8 @@ static inline bool vtime_accounting_enabled(void) { return true; }
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 static inline bool vtime_accounting_enabled(void)
 {
-        if (static_key_false(&context_tracking_enabled)) {
-                if (context_tracking_active())
+        if (context_tracking_is_enabled()) {
+                if (context_tracking_cpu_is_enabled())
                         return true;
         }
 
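Note: after this change vtime_accounting_enabled() reads as a two-level test: a cheap global gate (is context tracking enabled anywhere?) followed by the per-CPU active flag (is it enabled here?). A compilable analogue, modeling per-CPU state with a thread-local and using illustrative names:

        #include <stdbool.h>

        static bool any_cpu_tracking;            /* analogue of the global static key */
        static __thread bool this_cpu_tracking;  /* analogue of context_tracking.active */

        /* Mirrors the structure of vtime_accounting_enabled(): the per-CPU
         * flag is only consulted when the global gate is open. */
        static inline bool vtime_accounting_enabled_analogue(void)
        {
                if (any_cpu_tracking)             /* context_tracking_is_enabled() */
                        return this_cpu_tracking; /* context_tracking_cpu_is_enabled() */
                return false;
        }

        int main(void)
        {
                any_cpu_tracking = true;
                this_cpu_tracking = true;
                return vtime_accounting_enabled_analogue() ? 0 : 1;
        }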
diff --git a/init/Kconfig b/init/Kconfig
index 79383d3aa5dc..12d61f82e5f7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -532,7 +532,7 @@ config CONTEXT_TRACKING_FORCE
           dynticks subsystem by forcing the context tracking on all
           CPUs in the system.
 
-          Say Y only if you're working on the developpement of an
+          Say Y only if you're working on the development of an
           architecture backend for the context tracking.
 
           Say N otherwise, this option brings an overhead that you
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index e5f3917aa05b..6cb20d2e7ee0 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -53,10 +53,10 @@ void context_tracking_user_enter(void)
         /*
          * Repeat the user_enter() check here because some archs may be calling
          * this from asm and if no CPU needs context tracking, they shouldn't
-         * go further. Repeat the check here until they support the static key
-         * check.
+         * go further. Repeat the check here until they support the inline static
+         * key check.
          */
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return;
 
         /*
@@ -160,7 +160,7 @@ void context_tracking_user_exit(void)
 {
         unsigned long flags;
 
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return;
 
         if (in_interrupt())
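Note: the reworded comment describes a real constraint: architectures that call context_tracking_user_enter() straight from assembly bypass the inline wrapper, so the enablement check must be repeated inside the out-of-line function. A sketch of the resulting double check, with simplified, illustrative bodies:

        #include <stdbool.h>

        static bool tracking_enabled;

        static inline bool tracking_is_enabled(void) { return tracking_enabled; }

        /* Out-of-line entry point: some callers (asm code in the kernel
         * case) reach it directly, so it must re-check on entry. */
        void tracking_user_enter(void)
        {
                if (!tracking_is_enabled())
                        return;
                /* ... perform the actual user-mode transition accounting ... */
        }

        /* Inline wrapper used by C callers: checks first, so the disabled
         * case never pays for the function call. */
        static inline void user_enter(void)
        {
                if (tracking_is_enabled())
                        tracking_user_enter();
        }

        int main(void) { user_enter(); return 0; }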
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c7f31aa272f7..79747b7d9420 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -608,7 +608,8 @@ static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
  */
 static void posix_cpu_timer_kick_nohz(void)
 {
-        schedule_work(&nohz_kick_work);
+        if (context_tracking_is_enabled())
+                schedule_work(&nohz_kick_work);
 }
 
 bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
@@ -1090,7 +1091,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                 put_task_struct(p);
                 timer->it.cpu.task = p = NULL;
                 timer->it.cpu.expires = 0;
-                goto out_unlock;
+                read_unlock(&tasklist_lock);
+                goto out;
         } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                 /*
                  * We've noticed that the thread is dead, but
@@ -1099,7 +1101,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                  */
                 cpu_timer_sample_group(timer->it_clock, p, &now);
                 clear_dead_task(timer, now);
-                goto out_unlock;
+                read_unlock(&tasklist_lock);
+                goto out;
         }
         spin_lock(&p->sighand->siglock);
         cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -1113,10 +1116,11 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
         BUG_ON(!irqs_disabled());
         arm_timer(timer);
         spin_unlock(&p->sighand->siglock);
-
-out_unlock:
         read_unlock(&tasklist_lock);
 
+        /* Kick full dynticks CPUs in case they need to tick on the new timer */
+        posix_cpu_timer_kick_nohz();
+
 out:
         timer->it_overrun_last = timer->it_overrun;
         timer->it_overrun = -1;
@@ -1256,13 +1260,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
                 cpu_timer_fire(timer);
                 spin_unlock(&timer->it_lock);
         }
-
-        /*
-         * In case some timers were rescheduled after the queue got emptied,
-         * wake up full dynticks CPUs.
-         */
-        if (tsk->signal->cputimer.running)
-                posix_cpu_timer_kick_nohz();
 }
 
 /*
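Note: the net effect of the hunks above is that every exit path from posix_cpu_timer_schedule() now drops tasklist_lock itself, which lets the common tail kick full dynticks CPUs after the lock is released, instead of leaving the kick in run_posix_cpu_timers(). A compilable userspace skeleton of the restructured flow, with a pthread rwlock standing in for tasklist_lock and everything else elided:

        #include <pthread.h>
        #include <stdbool.h>

        static pthread_rwlock_t tasklist_lock = PTHREAD_RWLOCK_INITIALIZER;

        static void kick_nohz(void) { /* schedule_work(&nohz_kick_work) analogue */ }

        static void timer_schedule(bool task_gone)
        {
                pthread_rwlock_rdlock(&tasklist_lock);
                if (task_gone) {
                        /* ... clean up the timer ... */
                        pthread_rwlock_unlock(&tasklist_lock); /* was: goto out_unlock */
                        goto out;
                }
                /* ... sample the clock and rearm the timer ... */
                pthread_rwlock_unlock(&tasklist_lock);

                /* The kick runs on the rearm path only, with the lock dropped */
                kick_nohz();
        out:
                /* ... overrun bookkeeping, reached on every path ... */
                return;
        }

        int main(void)
        {
                timer_schedule(false);
                timer_schedule(true);
                return 0;
        }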
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 11025ccc06dd..11348de09400 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -311,8 +311,6 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-        int cpu = smp_processor_id();
-
         rcu_irq_enter();
         if (is_idle_task(current) && !in_interrupt()) {
                 /*
@@ -320,7 +318,7 @@ void irq_enter(void)
                  * here, as softirq will be serviced on return from interrupt.
                  */
                 local_bh_disable();
-                tick_check_idle(cpu);
+                tick_check_idle();
                 _local_bh_enable();
         }
 
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 9532690daaa9..43780ab5e279 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -538,10 +538,10 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
  * Called from irq_enter() when idle was interrupted to reenable the
  * per cpu device.
  */
-void tick_check_oneshot_broadcast(int cpu)
+void tick_check_oneshot_broadcast_this_cpu(void)
 {
-        if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
-                struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+        if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
+                struct tick_device *td = &__get_cpu_var(tick_cpu_device);
 
                 /*
                  * We might be in the middle of switching over from
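Note: the rename also encodes a calling convention: the function no longer takes a cpu argument and instead assumes it runs on the CPU whose device it touches, trading per_cpu(var, cpu) for __get_cpu_var(var). A rough userspace analogue of the two access styles, all names illustrative:

        #define NR_CPUS 8

        static int tick_device[NR_CPUS];   /* one slot of per-CPU data per cpu */
        static __thread int this_cpu;      /* id of the CPU this thread runs on */

        /* per_cpu(var, cpu) style: the caller names the CPU explicitly */
        static int *per_cpu_slot(int cpu) { return &tick_device[cpu]; }

        /* __get_cpu_var(var) style: implicitly the executing CPU's slot */
        static int *this_cpu_slot(void)   { return &tick_device[this_cpu]; }

        int main(void)
        {
                this_cpu = 0;
                return (per_cpu_slot(0) == this_cpu_slot()) ? 0 : 1;
        }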
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 18e71f7fbc2a..e2bced59b6dd 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -51,7 +51,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
-extern void tick_check_oneshot_broadcast(int cpu);
+extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
@@ -62,7 +62,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
-static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
 static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc77f834..2afd43fca93b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -391,11 +391,9 @@ __setup("nohz=", setup_tick_nohz);
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
-        int cpu = smp_processor_id();
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         unsigned long flags;
 
-        ts->idle_waketime = now;
+        __this_cpu_write(tick_cpu_sched.idle_waketime, now);
 
         local_irq_save(flags);
         tick_do_update_jiffies64(now);
@@ -426,17 +424,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
 
 }
 
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 {
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-
-        update_ts_time_stats(cpu, ts, now, NULL);
+        update_ts_time_stats(smp_processor_id(), ts, now, NULL);
         ts->idle_active = 0;
 
         sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 {
         ktime_t now = ktime_get();
 
@@ -752,7 +748,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
         ktime_t now, expires;
         int cpu = smp_processor_id();
 
-        now = tick_nohz_start_idle(cpu, ts);
+        now = tick_nohz_start_idle(ts);
 
         if (can_stop_idle_tick(cpu, ts)) {
                 int was_stopped = ts->tick_stopped;
@@ -914,8 +910,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-        int cpu = smp_processor_id();
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
         ktime_t now;
 
         local_irq_disable();
@@ -928,7 +923,7 @@ void tick_nohz_idle_exit(void)
         now = ktime_get();
 
         if (ts->idle_active)
-                tick_nohz_stop_idle(cpu, now);
+                tick_nohz_stop_idle(ts, now);
 
         if (ts->tick_stopped) {
                 tick_nohz_restart_sched_tick(ts, now);
@@ -1012,12 +1007,10 @@ static void tick_nohz_switch_to_nohz(void)
  * timer and do not touch the other magic bits which need to be done
  * when idle is left.
  */
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 {
 #if 0
         /* Switch back to 2.6.27 behaviour */
-
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         ktime_t delta;
 
         /*
@@ -1032,36 +1025,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
 #endif
 }
 
-static inline void tick_check_nohz(int cpu)
+static inline void tick_check_nohz_this_cpu(void)
 {
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
         ktime_t now;
 
         if (!ts->idle_active && !ts->tick_stopped)
                 return;
         now = ktime_get();
         if (ts->idle_active)
-                tick_nohz_stop_idle(cpu, now);
+                tick_nohz_stop_idle(ts, now);
         if (ts->tick_stopped) {
                 tick_nohz_update_jiffies(now);
-                tick_nohz_kick_tick(cpu, now);
+                tick_nohz_kick_tick(ts, now);
         }
 }
 
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
+static inline void tick_check_nohz_this_cpu(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
  */
-void tick_check_idle(int cpu)
+void tick_check_idle(void)
 {
-        tick_check_oneshot_broadcast(cpu);
-        tick_check_nohz(cpu);
+        tick_check_oneshot_broadcast_this_cpu();
+        tick_check_nohz_this_cpu();
 }
 
 /*