about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2013-12-04 04:09:53 -0500
committerIngo Molnar <mingo@kernel.org>2013-12-04 04:09:53 -0500
commita934a56e1284f1863c9c800ff9c63183c25aec93 (patch)
tree643ffbf749b8b9d68e1b8641c6d179f535b9f9c0 /kernel
parentdea4f48a0a301b23c65af8e4fe8ccf360c272fbf (diff)
parentc925077c33fc9a546e7cf6c3be2adf4a2afe2608 (diff)
Merge branch 'timers/core-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/core
Pull dynticks updates from Frederic Weisbecker:

* Fix a bug where posix cpu timers requeued due to interval got ignored on full dynticks CPUs (not a regression, though, as it only impacts full dynticks and the bug has been there since we merged full dynticks).

* Optimizations and cleanups on the use of per-CPU APIs to improve code readability, performance and debuggability in the nohz subsystem.

* Optimize posix cpu timers by sparing the stub workqueue queueing in the full-dynticks-off case.

* Rename some functions with a *_this_cpu() suffix for clarity.

* Refine the naming of some context tracking subsystem state accessors.

* Trivial spelling fix by Paul Gortmaker.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/context_tracking.c8
-rw-r--r--kernel/posix-cpu-timers.c21
-rw-r--r--kernel/softirq.c4
-rw-r--r--kernel/time/tick-broadcast.c6
-rw-r--r--kernel/time/tick-internal.h4
-rw-r--r--kernel/time/tick-sched.c39
6 files changed, 35 insertions, 47 deletions
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index e5f3917aa05b..6cb20d2e7ee0 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -53,10 +53,10 @@ void context_tracking_user_enter(void)
53 /* 53 /*
54 * Repeat the user_enter() check here because some archs may be calling 54 * Repeat the user_enter() check here because some archs may be calling
55 * this from asm and if no CPU needs context tracking, they shouldn't 55 * this from asm and if no CPU needs context tracking, they shouldn't
56 * go further. Repeat the check here until they support the static key 56 * go further. Repeat the check here until they support the inline static
57 * check. 57 * key check.
58 */ 58 */
59 if (!static_key_false(&context_tracking_enabled)) 59 if (!context_tracking_is_enabled())
60 return; 60 return;
61 61
62 /* 62 /*
@@ -160,7 +160,7 @@ void context_tracking_user_exit(void)
160{ 160{
161 unsigned long flags; 161 unsigned long flags;
162 162
163 if (!static_key_false(&context_tracking_enabled)) 163 if (!context_tracking_is_enabled())
164 return; 164 return;
165 165
166 if (in_interrupt()) 166 if (in_interrupt())
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c7f31aa272f7..79747b7d9420 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -608,7 +608,8 @@ static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
608 */ 608 */
609static void posix_cpu_timer_kick_nohz(void) 609static void posix_cpu_timer_kick_nohz(void)
610{ 610{
611 schedule_work(&nohz_kick_work); 611 if (context_tracking_is_enabled())
612 schedule_work(&nohz_kick_work);
612} 613}
613 614
614bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk) 615bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
@@ -1090,7 +1091,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1090 put_task_struct(p); 1091 put_task_struct(p);
1091 timer->it.cpu.task = p = NULL; 1092 timer->it.cpu.task = p = NULL;
1092 timer->it.cpu.expires = 0; 1093 timer->it.cpu.expires = 0;
1093 goto out_unlock; 1094 read_unlock(&tasklist_lock);
1095 goto out;
1094 } else if (unlikely(p->exit_state) && thread_group_empty(p)) { 1096 } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1095 /* 1097 /*
1096 * We've noticed that the thread is dead, but 1098 * We've noticed that the thread is dead, but
@@ -1099,7 +1101,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1099 */ 1101 */
1100 cpu_timer_sample_group(timer->it_clock, p, &now); 1102 cpu_timer_sample_group(timer->it_clock, p, &now);
1101 clear_dead_task(timer, now); 1103 clear_dead_task(timer, now);
1102 goto out_unlock; 1104 read_unlock(&tasklist_lock);
1105 goto out;
1103 } 1106 }
1104 spin_lock(&p->sighand->siglock); 1107 spin_lock(&p->sighand->siglock);
1105 cpu_timer_sample_group(timer->it_clock, p, &now); 1108 cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -1113,10 +1116,11 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1113 BUG_ON(!irqs_disabled()); 1116 BUG_ON(!irqs_disabled());
1114 arm_timer(timer); 1117 arm_timer(timer);
1115 spin_unlock(&p->sighand->siglock); 1118 spin_unlock(&p->sighand->siglock);
1116
1117out_unlock:
1118 read_unlock(&tasklist_lock); 1119 read_unlock(&tasklist_lock);
1119 1120
1121 /* Kick full dynticks CPUs in case they need to tick on the new timer */
1122 posix_cpu_timer_kick_nohz();
1123
1120out: 1124out:
1121 timer->it_overrun_last = timer->it_overrun; 1125 timer->it_overrun_last = timer->it_overrun;
1122 timer->it_overrun = -1; 1126 timer->it_overrun = -1;
@@ -1256,13 +1260,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
1256 cpu_timer_fire(timer); 1260 cpu_timer_fire(timer);
1257 spin_unlock(&timer->it_lock); 1261 spin_unlock(&timer->it_lock);
1258 } 1262 }
1259
1260 /*
1261 * In case some timers were rescheduled after the queue got emptied,
1262 * wake up full dynticks CPUs.
1263 */
1264 if (tsk->signal->cputimer.running)
1265 posix_cpu_timer_kick_nohz();
1266} 1263}
1267 1264
1268/* 1265/*
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 11025ccc06dd..11348de09400 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -311,8 +311,6 @@ asmlinkage void do_softirq(void)
311 */ 311 */
312void irq_enter(void) 312void irq_enter(void)
313{ 313{
314 int cpu = smp_processor_id();
315
316 rcu_irq_enter(); 314 rcu_irq_enter();
317 if (is_idle_task(current) && !in_interrupt()) { 315 if (is_idle_task(current) && !in_interrupt()) {
318 /* 316 /*
@@ -320,7 +318,7 @@ void irq_enter(void)
320 * here, as softirq will be serviced on return from interrupt. 318 * here, as softirq will be serviced on return from interrupt.
321 */ 319 */
322 local_bh_disable(); 320 local_bh_disable();
323 tick_check_idle(cpu); 321 tick_check_idle();
324 _local_bh_enable(); 322 _local_bh_enable();
325 } 323 }
326 324
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 9532690daaa9..43780ab5e279 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -538,10 +538,10 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
538 * Called from irq_enter() when idle was interrupted to reenable the 538 * Called from irq_enter() when idle was interrupted to reenable the
539 * per cpu device. 539 * per cpu device.
540 */ 540 */
541void tick_check_oneshot_broadcast(int cpu) 541void tick_check_oneshot_broadcast_this_cpu(void)
542{ 542{
543 if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) { 543 if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
544 struct tick_device *td = &per_cpu(tick_cpu_device, cpu); 544 struct tick_device *td = &__get_cpu_var(tick_cpu_device);
545 545
546 /* 546 /*
547 * We might be in the middle of switching over from 547 * We might be in the middle of switching over from
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 18e71f7fbc2a..e2bced59b6dd 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -51,7 +51,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
51extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); 51extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
52extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); 52extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
53extern int tick_broadcast_oneshot_active(void); 53extern int tick_broadcast_oneshot_active(void);
54extern void tick_check_oneshot_broadcast(int cpu); 54extern void tick_check_oneshot_broadcast_this_cpu(void);
55bool tick_broadcast_oneshot_available(void); 55bool tick_broadcast_oneshot_available(void);
56# else /* BROADCAST */ 56# else /* BROADCAST */
57static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 57static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
@@ -62,7 +62,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
62static inline void tick_broadcast_switch_to_oneshot(void) { } 62static inline void tick_broadcast_switch_to_oneshot(void) { }
63static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } 63static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
64static inline int tick_broadcast_oneshot_active(void) { return 0; } 64static inline int tick_broadcast_oneshot_active(void) { return 0; }
65static inline void tick_check_oneshot_broadcast(int cpu) { } 65static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
66static inline bool tick_broadcast_oneshot_available(void) { return true; } 66static inline bool tick_broadcast_oneshot_available(void) { return true; }
67# endif /* !BROADCAST */ 67# endif /* !BROADCAST */
68 68
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc77f834..2afd43fca93b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -391,11 +391,9 @@ __setup("nohz=", setup_tick_nohz);
391 */ 391 */
392static void tick_nohz_update_jiffies(ktime_t now) 392static void tick_nohz_update_jiffies(ktime_t now)
393{ 393{
394 int cpu = smp_processor_id();
395 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
396 unsigned long flags; 394 unsigned long flags;
397 395
398 ts->idle_waketime = now; 396 __this_cpu_write(tick_cpu_sched.idle_waketime, now);
399 397
400 local_irq_save(flags); 398 local_irq_save(flags);
401 tick_do_update_jiffies64(now); 399 tick_do_update_jiffies64(now);
@@ -426,17 +424,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
426 424
427} 425}
428 426
429static void tick_nohz_stop_idle(int cpu, ktime_t now) 427static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
430{ 428{
431 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 429 update_ts_time_stats(smp_processor_id(), ts, now, NULL);
432
433 update_ts_time_stats(cpu, ts, now, NULL);
434 ts->idle_active = 0; 430 ts->idle_active = 0;
435 431
436 sched_clock_idle_wakeup_event(0); 432 sched_clock_idle_wakeup_event(0);
437} 433}
438 434
439static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts) 435static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
440{ 436{
441 ktime_t now = ktime_get(); 437 ktime_t now = ktime_get();
442 438
@@ -752,7 +748,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
752 ktime_t now, expires; 748 ktime_t now, expires;
753 int cpu = smp_processor_id(); 749 int cpu = smp_processor_id();
754 750
755 now = tick_nohz_start_idle(cpu, ts); 751 now = tick_nohz_start_idle(ts);
756 752
757 if (can_stop_idle_tick(cpu, ts)) { 753 if (can_stop_idle_tick(cpu, ts)) {
758 int was_stopped = ts->tick_stopped; 754 int was_stopped = ts->tick_stopped;
@@ -914,8 +910,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
914 */ 910 */
915void tick_nohz_idle_exit(void) 911void tick_nohz_idle_exit(void)
916{ 912{
917 int cpu = smp_processor_id(); 913 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
918 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
919 ktime_t now; 914 ktime_t now;
920 915
921 local_irq_disable(); 916 local_irq_disable();
@@ -928,7 +923,7 @@ void tick_nohz_idle_exit(void)
928 now = ktime_get(); 923 now = ktime_get();
929 924
930 if (ts->idle_active) 925 if (ts->idle_active)
931 tick_nohz_stop_idle(cpu, now); 926 tick_nohz_stop_idle(ts, now);
932 927
933 if (ts->tick_stopped) { 928 if (ts->tick_stopped) {
934 tick_nohz_restart_sched_tick(ts, now); 929 tick_nohz_restart_sched_tick(ts, now);
@@ -1012,12 +1007,10 @@ static void tick_nohz_switch_to_nohz(void)
1012 * timer and do not touch the other magic bits which need to be done 1007 * timer and do not touch the other magic bits which need to be done
1013 * when idle is left. 1008 * when idle is left.
1014 */ 1009 */
1015static void tick_nohz_kick_tick(int cpu, ktime_t now) 1010static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
1016{ 1011{
1017#if 0 1012#if 0
1018 /* Switch back to 2.6.27 behaviour */ 1013 /* Switch back to 2.6.27 behaviour */
1019
1020 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
1021 ktime_t delta; 1014 ktime_t delta;
1022 1015
1023 /* 1016 /*
@@ -1032,36 +1025,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
1032#endif 1025#endif
1033} 1026}
1034 1027
1035static inline void tick_check_nohz(int cpu) 1028static inline void tick_check_nohz_this_cpu(void)
1036{ 1029{
1037 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 1030 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
1038 ktime_t now; 1031 ktime_t now;
1039 1032
1040 if (!ts->idle_active && !ts->tick_stopped) 1033 if (!ts->idle_active && !ts->tick_stopped)
1041 return; 1034 return;
1042 now = ktime_get(); 1035 now = ktime_get();
1043 if (ts->idle_active) 1036 if (ts->idle_active)
1044 tick_nohz_stop_idle(cpu, now); 1037 tick_nohz_stop_idle(ts, now);
1045 if (ts->tick_stopped) { 1038 if (ts->tick_stopped) {
1046 tick_nohz_update_jiffies(now); 1039 tick_nohz_update_jiffies(now);
1047 tick_nohz_kick_tick(cpu, now); 1040 tick_nohz_kick_tick(ts, now);
1048 } 1041 }
1049} 1042}
1050 1043
1051#else 1044#else
1052 1045
1053static inline void tick_nohz_switch_to_nohz(void) { } 1046static inline void tick_nohz_switch_to_nohz(void) { }
1054static inline void tick_check_nohz(int cpu) { } 1047static inline void tick_check_nohz_this_cpu(void) { }
1055 1048
1056#endif /* CONFIG_NO_HZ_COMMON */ 1049#endif /* CONFIG_NO_HZ_COMMON */
1057 1050
1058/* 1051/*
1059 * Called from irq_enter to notify about the possible interruption of idle() 1052 * Called from irq_enter to notify about the possible interruption of idle()
1060 */ 1053 */
1061void tick_check_idle(int cpu) 1054void tick_check_idle(void)
1062{ 1055{
1063 tick_check_oneshot_broadcast(cpu); 1056 tick_check_oneshot_broadcast_this_cpu();
1064 tick_check_nohz(cpu); 1057 tick_check_nohz_this_cpu();
1065} 1058}
1066 1059
1067/* 1060/*