Diffstat (limited to 'kernel')
-rw-r--r--  kernel/relay.c            |  7 ++++++-
-rw-r--r--  kernel/sched.c            | 43 +++++++++++++++++++++++++++++++++++++++++++
-rw-r--r--  kernel/time/clocksource.c |  2 +-
-rw-r--r--  kernel/timer.c            | 10 +++++++++-
4 files changed, 59 insertions(+), 3 deletions(-)
diff --git a/kernel/relay.c b/kernel/relay.c
index 4c035a8a248c..d6204a485818 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -736,7 +736,7 @@ static int relay_file_open(struct inode *inode, struct file *filp)
         kref_get(&buf->kref);
         filp->private_data = buf;
 
-        return 0;
+        return nonseekable_open(inode, filp);
 }
 
 /**
@@ -1056,6 +1056,10 @@ static struct pipe_buf_operations relay_pipe_buf_ops = {
         .get = generic_pipe_buf_get,
 };
 
+static void relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
+{
+}
+
 /*
  * subbuf_splice_actor - splice up to one subbuf's worth of data
  */
@@ -1083,6 +1087,7 @@ static int subbuf_splice_actor(struct file *in,
                 .partial = partial,
                 .flags = flags,
                 .ops = &relay_pipe_buf_ops,
+                .spd_release = relay_page_release,
         };
 
         if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
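The empty relay_page_release() above fills the new spd_release hook of struct splice_pipe_desc: splice_to_pipe() invokes it for every page it could not move into the pipe, so producers that take per-splice page references can drop them there. relay's sub-buffer pages are owned by the channel for its whole lifetime, hence the no-op. As a rough sketch of what a producer with refcounted pages might do instead (the example_* names are hypothetical, not part of this patch):

#include <linux/splice.h>
#include <linux/mm.h>

/* Hypothetical release hook: drop the reference taken when the page
 * was queued, for every page splice did not consume. */
static void example_page_release(struct splice_pipe_desc *spd,
                                 unsigned int i)
{
        put_page(spd->pages[i]);
}

static ssize_t example_splice(struct pipe_inode_info *pipe,
                              struct page **pages,
                              struct partial_page *partial,
                              unsigned int nr_pages,
                              unsigned int flags)
{
        struct splice_pipe_desc spd = {
                .pages          = pages,
                .partial        = partial,
                .nr_pages       = nr_pages,
                .flags          = flags,
                .ops            = &relay_pipe_buf_ops, /* as in the hunk above */
                .spd_release    = example_page_release,
        };

        return splice_to_pipe(pipe, &spd);
}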
diff --git a/kernel/sched.c b/kernel/sched.c
index 28c73f07efb2..8dcdec6fe0fe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1052,6 +1052,49 @@ static void resched_cpu(int cpu)
         resched_task(cpu_curr(cpu));
         spin_unlock_irqrestore(&rq->lock, flags);
 }
+
+#ifdef CONFIG_NO_HZ
+/*
+ * When add_timer_on() enqueues a timer into the timer wheel of an
+ * idle CPU then this timer might expire before the next timer event
+ * which is scheduled to wake up that CPU. In case of a completely
+ * idle system the next event might even be infinite time into the
+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
+ * leaves the inner idle loop so the newly added timer is taken into
+ * account when the CPU goes back to idle and evaluates the timer
+ * wheel for the next timer event.
+ */
+void wake_up_idle_cpu(int cpu)
+{
+        struct rq *rq = cpu_rq(cpu);
+
+        if (cpu == smp_processor_id())
+                return;
+
+        /*
+         * This is safe, as this function is called with the timer
+         * wheel base lock of (cpu) held. When the CPU is on the way
+         * to idle and has not yet set rq->curr to idle then it will
+         * be serialized on the timer wheel base lock and take the new
+         * timer into account automatically.
+         */
+        if (rq->curr != rq->idle)
+                return;
+
+        /*
+         * We can set TIF_RESCHED on the idle task of the other CPU
+         * lockless. The worst case is that the other CPU runs the
+         * idle task through an additional NOOP schedule()
+         */
+        set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
+
+        /* NEED_RESCHED must be visible before we test polling */
+        smp_mb();
+        if (!tsk_is_polling(rq->idle))
+                smp_send_reschedule(cpu);
+}
+#endif
+
 #else
 static void __resched_task(struct task_struct *p, int tif_bit)
 {
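A note on the lockless wakeup at the end of wake_up_idle_cpu(): the smp_mb() orders the TIF_NEED_RESCHED store before the polling test, pairing with a barrier on the idle side, so either the waker sees that the idle task is not polling and sends the IPI, or the polling idle loop sees the flag and breaks out on its own; the flag cannot be missed by both. A simplified, x86-flavoured model of the idle side (cpu_idle_poll() here is hypothetical; the real idle loops live in arch code):

#include <linux/sched.h>
#include <linux/thread_info.h>

static void cpu_idle_poll(void)
{
        /* Advertise that this CPU polls need_resched() while idle. */
        current_thread_info()->status |= TS_POLLING;
        /* Pairs with the smp_mb() in wake_up_idle_cpu(): the polling
         * bit must be visible before we start testing the flag. */
        smp_mb();
        while (!need_resched())
                cpu_relax();    /* picks up TIF_NEED_RESCHED without an IPI */
        current_thread_info()->status &= ~TS_POLLING;
}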
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 278534bbca95..7f60097d443a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -174,7 +174,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
         if (watchdog)
                 del_timer(&watchdog_timer);
         watchdog = cs;
-        init_timer_deferrable(&watchdog_timer);
+        init_timer(&watchdog_timer);
         watchdog_timer.function = clocksource_watchdog;
 
         /* Reset watchdog cycles */
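The clocksource watchdog compares a suspect clocksource (typically the TSC) against a trusted one at a fixed interval, so its timer must actually fire on schedule. A deferrable timer is explicitly allowed to sit unexpired while its CPU is nohz-idle, which on an idle system can delay the check arbitrarily long; reverting to init_timer() makes it an ordinary timer whose expiry is honoured. A minimal sketch of the strictly periodic pattern, with hypothetical names (CHECK_INTERVAL, check_fn):

#include <linux/timer.h>
#include <linux/jiffies.h>

#define CHECK_INTERVAL  (HZ >> 1)       /* hypothetical 0.5s period */

static struct timer_list check_timer;

static void check_fn(unsigned long data)
{
        /* ... compare the watched clocksource here ... */

        /* Re-arm: an ordinary timer keeps its period even when the
         * CPU would otherwise stay in nohz idle. */
        check_timer.expires = jiffies + CHECK_INTERVAL;
        add_timer(&check_timer);
}

static void check_start(void)
{
        init_timer(&check_timer);       /* not init_timer_deferrable() */
        check_timer.function = check_fn;
        check_timer.expires = jiffies + CHECK_INTERVAL;
        add_timer(&check_timer);
}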
diff --git a/kernel/timer.c b/kernel/timer.c
index 99b00a25f88b..b024106daa70 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -451,10 +451,18 @@ void add_timer_on(struct timer_list *timer, int cpu)
         spin_lock_irqsave(&base->lock, flags);
         timer_set_base(timer, base);
         internal_add_timer(base, timer);
+        /*
+         * Check whether the other CPU is idle and needs to be
+         * triggered to reevaluate the timer wheel when nohz is
+         * active. We are protected against the other CPU fiddling
+         * with the timer by holding the timer base lock. This also
+         * makes sure that a CPU on the way to idle can not evaluate
+         * the timer wheel.
+         */
+        wake_up_idle_cpu(cpu);
         spin_unlock_irqrestore(&base->lock, flags);
 }
 
-
 /**
  * mod_timer - modify a timer's timeout
  * @timer: the timer to be modified
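To see the effect of the new call, consider a caller that pins a timer to a specific, possibly idle CPU (the my_* names are hypothetical). Before this change the timer was queued into the idle CPU's wheel, but the CPU could sleep past the expiry because nohz idle had already programmed its next wakeup event; with wake_up_idle_cpu() the target leaves the inner idle loop and re-evaluates its timer wheel:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;

static void my_func(unsigned long data)
{
        /* Runs on the target CPU once the timer expires. */
}

static void my_arm_on_cpu(int cpu)
{
        init_timer(&my_timer);
        my_timer.function = my_func;
        my_timer.expires = jiffies + HZ;        /* roughly one second out */
        add_timer_on(&my_timer, cpu);           /* now wakes cpu if nohz-idle */
}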