author    Steve French <sfrench@us.ibm.com>    2011-12-16 01:39:20 -0500
committer Steve French <sfrench@us.ibm.com>    2011-12-16 01:39:20 -0500
commit    aaf015890754d58dcb71a4aa44ed246bb082bcf6
tree      17b51ff707fd1b3efec3a3ab872f0d7a7416aca5
parent    9c32c63bb70b2fafc3b18bee29959c3bf245ceba
parent    8def5f51b012efb00e77ba2d04696cc0aadd0609
Merge branch 'master' of git+ssh://git.samba.org/data/git/sfrench/cifs-2.6
Diffstat (limited to 'kernel')
 kernel/cgroup_freezer.c            |  11
 kernel/events/core.c               |  91
 kernel/events/internal.h           |   3
 kernel/events/ring_buffer.c        |   3
 kernel/fork.c                      |   5
 kernel/hrtimer.c                   |   6
 kernel/irq/manage.c                |   7
 kernel/irq/spurious.c              |   6
 kernel/jump_label.c                |   3
 kernel/lockdep.c                   |   8
 kernel/power/hibernate.c           |  37
 kernel/power/main.c                |   3
 kernel/power/qos.c                 |   1
 kernel/printk.c                    |   3
 kernel/sched.c                     |  17
 kernel/sched_fair.c                | 159
 kernel/sched_features.h            |   1
 kernel/sched_rt.c                  |   3
 kernel/time/alarmtimer.c           |   2
 kernel/time/clockevents.c          |   1
 kernel/time/clocksource.c          |  62
 kernel/time/tick-broadcast.c       |   2
 kernel/time/timekeeping.c          |  92
 kernel/timer.c                     |   2
 kernel/trace/ftrace.c              |   5
 kernel/trace/trace_events.c        |   1
 kernel/trace/trace_events_filter.c |  13
 27 files changed, 458 insertions(+), 89 deletions(-)
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 5e828a2ca8e6..213c0351dad8 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -153,6 +153,13 @@ static void freezer_destroy(struct cgroup_subsys *ss,
 	kfree(cgroup_freezer(cgroup));
 }
 
+/* task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+	return frozen(task) ||
+		(task_is_stopped_or_traced(task) && freezing(task));
+}
+
 /*
  * The call to cgroup_lock() in the freezer.state write method prevents
  * a write to that file racing against an attach, and hence the
@@ -231,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup,
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
 		ntotal++;
-		if (frozen(task))
+		if (is_task_frozen_enough(task))
 			nfrozen++;
 	}
 
@@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	while ((task = cgroup_iter_next(cgroup, &it))) {
 		if (!freeze_task(task, true))
 			continue;
-		if (frozen(task))
+		if (is_task_frozen_enough(task))
 			continue;
 		if (!freezing(task) && !freezer_should_skip(task))
 			num_cant_freeze_now++;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e8457da6f95..d3b9df5962c2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
+static void ring_buffer_attach(struct perf_event *event,
+			       struct ring_buffer *rb);
+
 void __weak perf_event_print_debug(void) { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2171,9 +2174,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 */
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-	perf_event_sched_in(cpuctx, ctx, task);
+	if (ctx->nr_events)
+		cpuctx->task_ctx = ctx;
 
-	cpuctx->task_ctx = ctx;
+	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
 
 	perf_pmu_enable(ctx->pmu);
 	perf_ctx_unlock(cpuctx, ctx);
@@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 	struct ring_buffer *rb;
 	unsigned int events = POLL_HUP;
 
+	/*
+	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
+	 * grabs the rb reference but perf_event_set_output() overrides it.
+	 * Here is the timeline for two threads T1, T2:
+	 * t0: T1, rb = rcu_dereference(event->rb)
+	 * t1: T2, old_rb = event->rb
+	 * t2: T2, event->rb = new rb
+	 * t3: T2, ring_buffer_detach(old_rb)
+	 * t4: T1, ring_buffer_attach(rb1)
+	 * t5: T1, poll_wait(event->waitq)
+	 *
+	 * To avoid this problem, we grab mmap_mutex in perf_poll()
+	 * thereby ensuring that the assignment of the new ring buffer
+	 * and the detachment of the old buffer appear atomic to perf_poll()
+	 */
+	mutex_lock(&event->mmap_mutex);
+
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
-	if (rb)
+	if (rb) {
+		ring_buffer_attach(event, rb);
 		events = atomic_xchg(&rb->poll, 0);
+	}
 	rcu_read_unlock();
 
+	mutex_unlock(&event->mmap_mutex);
+
 	poll_wait(file, &event->waitq, wait);
 
 	return events;
@@ -3496,6 +3521,49 @@ unlock:
 	return ret;
 }
 
+static void ring_buffer_attach(struct perf_event *event,
+			       struct ring_buffer *rb)
+{
+	unsigned long flags;
+
+	if (!list_empty(&event->rb_entry))
+		return;
+
+	spin_lock_irqsave(&rb->event_lock, flags);
+	if (!list_empty(&event->rb_entry))
+		goto unlock;
+
+	list_add(&event->rb_entry, &rb->event_list);
+unlock:
+	spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+			       struct ring_buffer *rb)
+{
+	unsigned long flags;
+
+	if (list_empty(&event->rb_entry))
+		return;
+
+	spin_lock_irqsave(&rb->event_lock, flags);
+	list_del_init(&event->rb_entry);
+	wake_up_all(&event->waitq);
+	spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+	struct ring_buffer *rb;
+
+	rcu_read_lock();
+	rb = rcu_dereference(event->rb);
+	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+		wake_up_all(&event->waitq);
+	}
+	rcu_read_unlock();
+}
+
 static void rb_free_rcu(struct rcu_head *rcu_head)
 {
 	struct ring_buffer *rb;
@@ -3521,9 +3589,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
+	struct perf_event *event, *n;
+	unsigned long flags;
+
 	if (!atomic_dec_and_test(&rb->refcount))
 		return;
 
+	spin_lock_irqsave(&rb->event_lock, flags);
+	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+		list_del_init(&event->rb_entry);
+		wake_up_all(&event->waitq);
+	}
+	spin_unlock_irqrestore(&rb->event_lock, flags);
+
 	call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
@@ -3546,6 +3624,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
 	vma->vm_mm->pinned_vm -= event->mmap_locked;
 	rcu_assign_pointer(event->rb, NULL);
+	ring_buffer_detach(event, rb);
 	mutex_unlock(&event->mmap_mutex);
 
 	ring_buffer_put(rb);
@@ -3700,7 +3779,7 @@ static const struct file_operations perf_fops = {
 
 void perf_event_wakeup(struct perf_event *event)
 {
-	wake_up_all(&event->waitq);
+	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
 		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5822,6 +5901,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	INIT_LIST_HEAD(&event->group_entry);
 	INIT_LIST_HEAD(&event->event_entry);
 	INIT_LIST_HEAD(&event->sibling_list);
+	INIT_LIST_HEAD(&event->rb_entry);
+
 	init_waitqueue_head(&event->waitq);
 	init_irq_work(&event->pending, perf_pending_event);
 
@@ -6028,6 +6109,8 @@ set:
 
 	old_rb = event->rb;
 	rcu_assign_pointer(event->rb, rb);
+	if (old_rb)
+		ring_buffer_detach(event, old_rb);
 	ret = 0;
 unlock:
 	mutex_unlock(&event->mmap_mutex);
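
[Note: the attach path above relies on a check-lock-recheck idiom: an
unlocked list_empty() fast path, re-tested under rb->event_lock so that two
racing attachers cannot insert event->rb_entry twice. Below is a minimal
userspace sketch of that idiom, with a pthread mutex standing in for the
kernel spinlock; all names here are illustrative, not kernel API.]

    #include <pthread.h>
    #include <stdbool.h>

    struct entry { struct entry *prev, *next; };

    static struct entry event_list = { &event_list, &event_list };
    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;

    /* a detached entry points at itself, mirroring list_del_init() */
    static void entry_init(struct entry *e) { e->prev = e->next = e; }
    static bool on_list(struct entry *e) { return e->next != e; }

    static void attach_once(struct entry *e)
    {
        if (on_list(e))                 /* unlocked fast path */
            return;
        pthread_mutex_lock(&event_lock);
        if (!on_list(e)) {              /* re-check under the lock */
            e->next = event_list.next;
            e->prev = &event_list;
            event_list.next->prev = e;
            event_list.next = e;
        }
        pthread_mutex_unlock(&event_lock);
    }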
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 09097dd8116c..64568a699375 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -22,6 +22,9 @@ struct ring_buffer {
 	local_t				lost;		/* nr records lost   */
 
 	long				watermark;	/* wakeup watermark  */
+	/* poll crap */
+	spinlock_t			event_lock;
+	struct list_head		event_list;
 
 	struct perf_event_mmap_page	*user_page;
 	void				*data_pages[0];
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index a2a29205cc0f..7f3011c6b57f 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 	rb->writable = 1;
 
 	atomic_set(&rb->refcount, 1);
+
+	INIT_LIST_HEAD(&rb->event_list);
+	spin_lock_init(&rb->event_lock);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
diff --git a/kernel/fork.c b/kernel/fork.c
index ba0d17261329..da4a6a10d088 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -162,7 +162,6 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 
 void free_task(struct task_struct *tsk)
 {
-	prop_local_destroy_single(&tsk->dirties);
 	account_kernel_stack(tsk->stack, -1);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
@@ -274,10 +273,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 
 	tsk->stack = ti;
 
-	err = prop_local_init_single(&tsk->dirties);
-	if (err)
-		goto out;
-
 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
 	clear_tsk_need_resched(tsk);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 422e567eecf6..ae34bf51682b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base,
 			     unsigned long newstate, int reprogram)
 {
+	struct timerqueue_node *next_timer;
 	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
 		goto out;
 
-	if (&timer->node == timerqueue_getnext(&base->active)) {
+	next_timer = timerqueue_getnext(&base->active);
+	timerqueue_del(&base->active, &timer->node);
+	if (&timer->node == next_timer) {
 #ifdef CONFIG_HIGH_RES_TIMERS
 		/* Reprogram the clock event device, if enabled */
 		if (reprogram && hrtimer_hres_active()) {
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
 		}
 #endif
 	}
-	timerqueue_del(&base->active, &timer->node);
 	if (!timerqueue_getnext(&base->active))
 		base->cpu_base->active_bases &= ~(1 << base->index);
 out:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 67ce837ae52c..1da999f5e746 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
+	set_current_state(TASK_INTERRUPTIBLE);
+
 	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
 
 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
 				       &action->thread_flags)) {
@@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 			return 0;
 		}
 		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
 	}
+	__set_current_state(TASK_RUNNING);
 	return -1;
 }
 
@@ -1596,7 +1599,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
 		return -ENOMEM;
 
 	action->handler = handler;
-	action->flags = IRQF_PERCPU;
+	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
 	action->name = devname;
 	action->percpu_dev_id = dev_id;
 
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index aa57d5da18c1..dc813a948be2 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 	 */
 	action = desc->action;
 	if (!action || !(action->flags & IRQF_SHARED) ||
-	    (action->flags & __IRQF_TIMER) || !action->next)
+	    (action->flags & __IRQF_TIMER) ||
+	    (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+	    !action->next)
 		goto out;
 
 	/* Already running on another processor */
@@ -115,7 +117,7 @@ static int misrouted_irq(int irq)
 	struct irq_desc *desc;
 	int i, ok = 0;
 
-	if (atomic_inc_return(&irq_poll_active) == 1)
+	if (atomic_inc_return(&irq_poll_active) != 1)
 		goto out;
 
 	irq_poll_cpu = smp_processor_id();
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bbdfe2a462a0..66ff7109f697 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
 		return;
 
 	jump_label_lock();
-	if (atomic_add_return(1, &key->enabled) == 1)
+	if (atomic_read(&key->enabled) == 0)
 		jump_label_update(key, JUMP_LABEL_ENABLE);
+	atomic_inc(&key->enabled);
 	jump_label_unlock();
 }
 
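
[Note: a hedged sketch of why this reordering matters, assuming the
lockless fast path ahead of jump_label_lock() in jump_label_inc() that
returns as soon as key->enabled is already nonzero. With the old
atomic_add_return(), the count became nonzero before
jump_label_update() had patched the jump sites, so a concurrent caller
could return early and run unpatched code:]

    /* old (racy): publish the count first, patch second */
    if (atomic_add_return(1, &key->enabled) == 1)
        jump_label_update(key, JUMP_LABEL_ENABLE);

    /* new: patch first; enabled becomes nonzero only once done */
    if (atomic_read(&key->enabled) == 0)
        jump_label_update(key, JUMP_LABEL_ENABLE);
    atomic_inc(&key->enabled);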
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e69434b070da..b2e08c932d91 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -44,6 +44,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/sections.h>
 
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	memset(lock, 0, sizeof(*lock));
+	int i;
+
+	kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+		lock->class_cache[i] = NULL;
 
 #ifdef CONFIG_LOCK_STAT
 	lock->cpu = raw_smp_processor_id();
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index b4511b6d3ef9..a6b0503574ee 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -55,6 +55,8 @@ enum {
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
+static bool freezer_test_done;
+
 static const struct platform_hibernation_ops *hibernation_ops;
 
 /**
@@ -345,11 +347,24 @@ int hibernation_snapshot(int platform_mode)
 
 	error = freeze_kernel_threads();
 	if (error)
-		goto Close;
+		goto Cleanup;
+
+	if (hibernation_test(TEST_FREEZER) ||
+		hibernation_testmode(HIBERNATION_TESTPROC)) {
+
+		/*
+		 * Indicate to the caller that we are returning due to a
+		 * successful freezer test.
+		 */
+		freezer_test_done = true;
+		goto Cleanup;
+	}
 
 	error = dpm_prepare(PMSG_FREEZE);
-	if (error)
-		goto Complete_devices;
+	if (error) {
+		dpm_complete(msg);
+		goto Cleanup;
+	}
 
 	suspend_console();
 	pm_restrict_gfp_mask();
@@ -378,8 +393,6 @@ int hibernation_snapshot(int platform_mode)
 	pm_restore_gfp_mask();
 
 	resume_console();
-
- Complete_devices:
 	dpm_complete(msg);
 
  Close:
@@ -389,6 +402,10 @@ int hibernation_snapshot(int platform_mode)
  Recover_platform:
 	platform_recover(platform_mode);
 	goto Resume_devices;
+
+ Cleanup:
+	swsusp_free();
+	goto Close;
 }
 
 /**
@@ -641,15 +658,13 @@ int hibernate(void)
 	if (error)
 		goto Finish;
 
-	if (hibernation_test(TEST_FREEZER))
-		goto Thaw;
-
-	if (hibernation_testmode(HIBERNATION_TESTPROC))
-		goto Thaw;
-
 	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
 	if (error)
 		goto Thaw;
+	if (freezer_test_done) {
+		freezer_test_done = false;
+		goto Thaw;
+	}
 
 	if (in_suspend) {
 		unsigned int flags = 0;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 71f49fe4377e..36e0f0903c32 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -290,13 +290,14 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
 		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
 			break;
 	}
-	if (state < PM_SUSPEND_MAX && *s)
+	if (state < PM_SUSPEND_MAX && *s) {
 		error = enter_state(state);
 		if (error) {
 			suspend_stats.fail++;
 			dpm_save_failed_errno(error);
 		} else
 			suspend_stats.success++;
+	}
 #endif
 
  Exit:
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 56db75147186..995e3bd3417b 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -70,6 +70,7 @@ static struct pm_qos_constraints cpu_dma_constraints = {
 };
 static struct pm_qos_object cpu_dma_pm_qos = {
 	.constraints = &cpu_dma_constraints,
+	.name = "cpu_dma_latency",
 };
 
 static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
diff --git a/kernel/printk.c b/kernel/printk.c
index 1455a0d4eedd..7982a0a841ea 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1293,10 +1293,11 @@ again:
 	raw_spin_lock(&logbuf_lock);
 	if (con_start != log_end)
 		retry = 1;
+	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
 	if (retry && console_trylock())
 		goto again;
 
-	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 	if (wake_klogd)
 		wake_up_klogd();
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 0e9344a71be3..d6b149ccf925 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
+#include <linux/init_task.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -4810,6 +4811,9 @@ EXPORT_SYMBOL(wait_for_completion);
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -4824,6 +4828,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
  *
  * This waits for completion of a specific task to be signaled. It is
  * interruptible.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -4841,6 +4847,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
  *
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
@@ -4856,6 +4865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  *
  * This waits to be signaled for completion of a specific task. It can be
  * interrupted by a kill signal.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -4874,6 +4885,9 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  * This waits for either a completion of a specific task to be
  * signaled or for a specified timeout to expire. It can be
  * interrupted by a kill signal. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
@@ -6099,6 +6113,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 */
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
+#if defined(CONFIG_SMP)
+	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
 }
 
 /*
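
[Note: a short usage sketch of the return conventions the added
kernel-doc above spells out; the 'done' completion and the 100 ms
timeout are illustrative:]

    unsigned long left;

    left = wait_for_completion_timeout(&done, msecs_to_jiffies(100));
    if (!left)
        pr_warn("timed out waiting for completion\n");
    else
        pr_info("completed with %lu jiffies to spare\n", left);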
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5c9e67923b7c..a78ed2736ba7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -772,19 +772,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 		list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+	long tg_weight;
+
+	/*
+	 * Use this CPU's actual weight instead of the last load_contribution
+	 * to gain a more accurate current total weight. See
+	 * update_cfs_rq_load_contribution().
+	 */
+	tg_weight = atomic_read(&tg->load_weight);
+	tg_weight -= cfs_rq->load_contribution;
+	tg_weight += cfs_rq->load.weight;
+
+	return tg_weight;
+}
+
 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-	long load_weight, load, shares;
+	long tg_weight, load, shares;
 
+	tg_weight = calc_tg_weight(tg, cfs_rq);
 	load = cfs_rq->load.weight;
 
-	load_weight = atomic_read(&tg->load_weight);
-	load_weight += load;
-	load_weight -= cfs_rq->load_contribution;
-
 	shares = (tg->shares * load);
-	if (load_weight)
-		shares /= load_weight;
+	if (tg_weight)
+		shares /= tg_weight;
 
 	if (shares < MIN_SHARES)
 		shares = MIN_SHARES;
@@ -1743,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
-	if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
 		return;
 
 	__return_cfs_rq_runtime(cfs_rq);
@@ -2036,36 +2049,100 @@ static void task_waking_fair(struct task_struct *p)
  * Adding load to a group doesn't make a group heavier, but can cause movement
  * of group shares between cpus. Assuming the shares were perfectly aligned one
  * can calculate the shift in shares.
+ *
+ * Calculate the effective load difference if @wl is added (subtracted) to @tg
+ * on this @cpu and results in a total addition (subtraction) of @wg to the
+ * total group weight.
+ *
+ * Given a runqueue weight distribution (rw_i) we can compute a shares
+ * distribution (s_i) using:
+ *
+ *   s_i = rw_i / \Sum rw_j						(1)
+ *
+ * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
+ * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
+ * shares distribution (s_i):
+ *
+ *   rw_i = {   2,   4,   1,   0 }
+ *   s_i  = { 2/7, 4/7, 1/7,   0 }
+ *
+ * As per wake_affine() we're interested in the load of two CPUs (the CPU the
+ * task used to run on and the CPU the waker is running on), we need to
+ * compute the effect of waking a task on either CPU and, in case of a sync
+ * wakeup, compute the effect of the current task going to sleep.
+ *
+ * So for a change of @wl to the local @cpu with an overall group weight change
+ * of @wl we can compute the new shares distribution (s'_i) using:
+ *
+ *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
+ *
+ * Suppose we're interested in CPUs 0 and 1, and want to compute the load
+ * differences in waking a task to CPU 0. The additional task changes the
+ * weight and shares distributions like:
+ *
+ *   rw'_i = {   3,   4,   1,   0 }
+ *   s'_i  = { 3/8, 4/8, 1/8,   0 }
+ *
+ * We can then compute the difference in effective weight by using:
+ *
+ *   dw_i = S * (s'_i - s_i)						(3)
+ *
+ * Where 'S' is the group weight as seen by its parent.
+ *
+ * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
+ * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
+ * 4/7) times the weight of the group.
  */
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 {
 	struct sched_entity *se = tg->se[cpu];
 
-	if (!tg->parent)
+	if (!tg->parent)	/* the trivial, non-cgroup case */
 		return wl;
 
 	for_each_sched_entity(se) {
-		long lw, w;
+		long w, W;
 
 		tg = se->my_q->tg;
-		w = se->my_q->load.weight;
 
-		/* use this cpu's instantaneous contribution */
-		lw = atomic_read(&tg->load_weight);
-		lw -= se->my_q->load_contribution;
-		lw += w + wg;
+		/*
+		 * W = @wg + \Sum rw_j
+		 */
+		W = wg + calc_tg_weight(tg, se->my_q);
 
-		wl += w;
+		/*
+		 * w = rw_i + @wl
+		 */
+		w = se->my_q->load.weight + wl;
 
-		if (lw > 0 && wl < lw)
-			wl = (wl * tg->shares) / lw;
+		/*
+		 * wl = S * s'_i; see (2)
+		 */
+		if (W > 0 && w < W)
+			wl = (w * tg->shares) / W;
 		else
 			wl = tg->shares;
 
-		/* zero point is MIN_SHARES */
+		/*
+		 * Per the above, wl is the new se->load.weight value; since
+		 * those are clipped to [MIN_SHARES, ...) do so now. See
+		 * calc_cfs_shares().
+		 */
 		if (wl < MIN_SHARES)
 			wl = MIN_SHARES;
+
+		/*
+		 * wl = dw_i = S * (s'_i - s_i); see (3)
+		 */
 		wl -= se->load.weight;
+
+		/*
+		 * Recursively apply this logic to all parent groups to compute
+		 * the final effective load change on the root group. Since
+		 * only the @tg group gets extra weight, all parent groups can
+		 * only redistribute existing shares. @wl is the shift in shares
+		 * resulting from this level per the above.
+		 */
 		wg = 0;
 	}
 
@@ -2249,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
-	int i;
+	struct sched_group *sg;
+	int i, smt = 0;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2269,25 +2347,38 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	 * Otherwise, iterate the domains and find an eligible idle cpu.
 	 */
 	rcu_read_lock();
+again:
 	for_each_domain(target, sd) {
-		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
-			break;
+		if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+			continue;
 
-		for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-			if (idle_cpu(i)) {
-				target = i;
-				break;
-			}
+		if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
+			if (!smt) {
+				smt = 1;
+				goto again;
+			}
+			break;
 		}
 
-		/*
-		 * Lets stop looking for an idle sibling when we reached
-		 * the domain that spans the current cpu and prev_cpu.
-		 */
-		if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-		    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-			break;
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_cpus(sg),
+						tsk_cpus_allowed(p)))
+				goto next;
+
+			for_each_cpu(i, sched_group_cpus(sg)) {
+				if (!idle_cpu(i))
+					goto next;
+			}
+
+			target = cpumask_first_and(sched_group_cpus(sg),
+					tsk_cpus_allowed(p));
+			goto done;
+next:
+			sg = sg->next;
+		} while (sg != sd->groups);
 	}
+done:
 	rcu_read_unlock();
 
 	return target;
@@ -3511,7 +3602,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
 }
 
 /**
- * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @sd: sched_domain whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
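
[Note: the fractions in the effective_load() comment above can be
checked directly; with S the group weight as seen by its parent:]

    dw_0 = S * (s'_0 - s_0) = S * (3/8 - 2/7) = S * (21 - 16)/56 =  5S/56
    dw_1 = S * (s'_1 - s_1) = S * (4/8 - 4/7) = S * (28 - 32)/56 = -4S/56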
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index efa0a7b75dde..84802245abd2 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -67,3 +67,4 @@ SCHED_FEAT(NONTASK_POWER, 1)
 SCHED_FEAT(TTWU_QUEUE, 1)
 
 SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+SCHED_FEAT(RT_RUNTIME_SHARE, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 056cbd2e2a27..583a1368afe6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 {
 	int more = 0;
 
+	if (!sched_feat(RT_RUNTIME_SHARE))
+		return more;
+
 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		more = do_balance_runtime(rt_rq);
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c436e790b21b..8a46f5d64504 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
 		struct alarm *alarm;
 		ktime_t expired = next->expires;
 
-		if (expired.tv64 >= now.tv64)
+		if (expired.tv64 > now.tv64)
 			break;
 
 		alarm = container_of(next, struct alarm, node);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1ecd6ba36d6c..c4eb71c8b2ea 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -387,6 +387,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
 	 * released list and do a notify add later.
 	 */
 	if (old) {
+		old->event_handler = clockevents_handle_noop;
 		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
 		list_del(&old->list);
 		list_add(&old->list, &clockevents_released);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cf52fda2e096..da2f760e780c 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -492,6 +492,22 @@ void clocksource_touch_watchdog(void)
 }
 
 /**
+ * clocksource_max_adjustment - Returns max adjustment amount
+ * @cs: Pointer to clocksource
+ *
+ */
+static u32 clocksource_max_adjustment(struct clocksource *cs)
+{
+	u64 ret;
+	/*
+	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
+	 */
+	ret = (u64)cs->mult * 11;
+	do_div(ret, 100);
+	return (u32)ret;
+}
+
+/**
  * clocksource_max_deferment - Returns max time the clocksource can be deferred
  * @cs: Pointer to clocksource
  *
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
 	/*
 	 * Calculate the maximum number of cycles that we can pass to the
 	 * cyc2ns function without overflowing a 64-bit signed result. The
-	 * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
-	 * is equivalent to the below.
-	 * max_cycles < (2^63)/cs->mult
-	 * max_cycles < 2^(log2((2^63)/cs->mult))
-	 * max_cycles < 2^(log2(2^63) - log2(cs->mult))
-	 * max_cycles < 2^(63 - log2(cs->mult))
-	 * max_cycles < 1 << (63 - log2(cs->mult))
+	 * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+	 * which is equivalent to the below.
+	 * max_cycles < (2^63)/(cs->mult + cs->maxadj)
+	 * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
+	 * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
+	 * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
+	 * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
 	 * Please note that we add 1 to the result of the log2 to account for
 	 * any rounding errors, ensure the above inequality is satisfied and
 	 * no overflow will occur.
 	 */
-	max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+	max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
 
 	/*
 	 * The actual maximum number of cycles we can defer the clocksource is
 	 * determined by the minimum of max_cycles and cs->mask.
+	 * Note: Here we subtract the maxadj to make sure we don't sleep for
+	 * too long if there's a large negative adjustment.
 	 */
 	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
-	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
+					cs->shift);
 
 	/*
 	 * To ensure that the clocksource does not wrap whilst we are idle,
@@ -529,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
 	 * note a margin of 12.5% is used because this can be computed with
 	 * a shift, versus say 10% which would require division.
 	 */
-	return max_nsecs - (max_nsecs >> 5);
+	return max_nsecs - (max_nsecs >> 3);
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs)
 void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
 	u64 sec;
-
 	/*
 	 * Calc the maximum number of seconds which we can run before
 	 * wrapping around. For clocksources which have a mask > 32bit
@@ -651,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 	 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
 	 * margin as we do in clocksource_max_deferment()
 	 */
-	sec = (cs->mask - (cs->mask >> 5));
+	sec = (cs->mask - (cs->mask >> 3));
 	do_div(sec, freq);
 	do_div(sec, scale);
 	if (!sec)
@@ -661,6 +679,20 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 
 	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
 			       NSEC_PER_SEC / scale, sec * scale);
+
+	/*
+	 * For clocksources that have large mults, to avoid overflow.
+	 * Since mult may be adjusted by ntp, add a safety extra margin.
+	 */
+	cs->maxadj = clocksource_max_adjustment(cs);
+	while ((cs->mult + cs->maxadj < cs->mult)
+		|| (cs->mult - cs->maxadj > cs->mult)) {
+		cs->mult >>= 1;
+		cs->shift--;
+		cs->maxadj = clocksource_max_adjustment(cs);
+	}
+
 	cs->max_idle_ns = clocksource_max_deferment(cs);
 }
 EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
@@ -701,6 +733,12 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
  */
 int clocksource_register(struct clocksource *cs)
 {
+	/* calculate max adjustment for given mult/shift */
+	cs->maxadj = clocksource_max_adjustment(cs);
+	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
+		"Clocksource %s might overflow on 11%% adjustment\n",
+		cs->name);
+
 	/* calculate max idle time permitted for this clocksource */
 	cs->max_idle_ns = clocksource_max_deferment(cs);
 
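
[Note: a worked example of the 11% bound above, for an illustrative
clocksource with mult = 4194304 (2^22):

    maxadj = (4194304 * 11) / 100 = 461373

so NTP may legally swing mult between 3732931 and 4655677, and
clocksource_max_deferment() now sizes max_cycles against the worst
case cs->mult + cs->maxadj rather than cs->mult alone.]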
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f954282d9a82..fd4a7b1625a2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
 	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
 		return 0;
 
-	clockevents_exchange_device(NULL, dev);
+	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
 	tick_broadcast_device.evtdev = dev;
 	if (!cpumask_empty(tick_get_broadcast_mask()))
 		tick_broadcast_start_periodic(dev);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2b021b0e8507..237841378c03 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
 		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
 		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
 		nsecs += timekeeping_get_ns();
+		/* If arch requires, add in gettimeoffset() */
+		nsecs += arch_gettimeoffset();
 
 	} while (read_seqretry(&xtime_lock, seq));
 	/*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
 		*ts = xtime;
 		tomono = wall_to_monotonic;
 		nsecs = timekeeping_get_ns();
+		/* If arch requires, add in gettimeoffset() */
+		nsecs += arch_gettimeoffset();
 
 	} while (read_seqretry(&xtime_lock, seq));
 
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
 	s64 error, interval = timekeeper.cycle_interval;
 	int adj;
 
+	/*
+	 * The point of this is to check if the error is greater than half
+	 * an interval.
+	 *
+	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
+	 *
+	 * Note we subtract one in the shift, so that error is really error*2.
+	 * This "saves" dividing(shifting) interval twice, but keeps the
+	 * (error > interval) comparison as still measuring if error is
+	 * larger than half an interval.
+	 *
+	 * Note: It does not "save" on aggravation when reading the code.
+	 */
 	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
 	if (error > interval) {
+		/*
+		 * We now divide error by 4 (via shift), which checks if
+		 * the error is greater than twice the interval.
+		 * If it is greater, we need a bigadjust; if it's smaller,
+		 * we can adjust by 1.
+		 */
 		error >>= 2;
+		/*
+		 * XXX - In update_wall_time, we round up to the next
+		 * nanosecond, and store the amount rounded up into
+		 * the error. This causes the likely below to be unlikely.
+		 *
+		 * The proper fix is to avoid rounding up by using
+		 * the high precision timekeeper.xtime_nsec instead of
+		 * xtime.tv_nsec everywhere. Fixing this will take some
+		 * time.
+		 */
 		if (likely(error <= interval))
 			adj = 1;
 		else
 			adj = timekeeping_bigadjust(error, &interval, &offset);
 	} else if (error < -interval) {
+		/* See comment above, this is just switched for the negative */
 		error >>= 2;
 		if (likely(error >= -interval)) {
 			adj = -1;
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
 			offset = -offset;
 		} else
 			adj = timekeeping_bigadjust(error, &interval, &offset);
-	} else
+	} else /* No adjustment needed */
 		return;
 
+	WARN_ONCE(timekeeper.clock->maxadj &&
+			(timekeeper.mult + adj > timekeeper.clock->mult +
+						timekeeper.clock->maxadj),
+			"Adjusting %s more than 11%% (%ld vs %ld)\n",
+			timekeeper.clock->name, (long)timekeeper.mult + adj,
+			(long)timekeeper.clock->mult +
+				timekeeper.clock->maxadj);
+	/*
+	 * So the following can be confusing.
+	 *
+	 * To keep things simple, let's assume adj == 1 for now.
+	 *
+	 * When adj != 1, remember that the interval and offset values
+	 * have been appropriately scaled so the math is the same.
+	 *
+	 * The basic idea here is that we're increasing the multiplier
+	 * by one, this causes the xtime_interval to be incremented by
+	 * one cycle_interval. This is because:
+	 *	xtime_interval = cycle_interval * mult
+	 * So if mult is being incremented by one:
+	 *	xtime_interval = cycle_interval * (mult + 1)
+	 * It's the same as:
+	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
+	 * Which can be shortened to:
+	 *	xtime_interval += cycle_interval
+	 *
+	 * So offset stores the non-accumulated cycles. Thus the current
+	 * time (in shifted nanoseconds) is:
+	 *	now = (offset * adj) + xtime_nsec
+	 * Now, even though we're adjusting the clock frequency, we have
+	 * to keep time consistent. In other words, we can't jump back
+	 * in time, and we also want to avoid jumping forward in time.
+	 *
+	 * So given the same offset value, we need the time to be the same
+	 * both before and after the freq adjustment.
+	 *	now = (offset * adj_1) + xtime_nsec_1
+	 *	now = (offset * adj_2) + xtime_nsec_2
+	 * So:
+	 *	(offset * adj_1) + xtime_nsec_1 =
+	 *		(offset * adj_2) + xtime_nsec_2
+	 * And we know:
+	 *	adj_2 = adj_1 + 1
+	 * So:
+	 *	(offset * adj_1) + xtime_nsec_1 =
+	 *		(offset * (adj_1+1)) + xtime_nsec_2
+	 *	(offset * adj_1) + xtime_nsec_1 =
+	 *		(offset * adj_1) + offset + xtime_nsec_2
+	 * Canceling the sides:
+	 *	xtime_nsec_1 = offset + xtime_nsec_2
+	 * Which gives us:
+	 *	xtime_nsec_2 = xtime_nsec_1 - offset
+	 * Which simplifies to:
+	 *	xtime_nsec -= offset
+	 *
+	 * XXX - TODO: Doc ntp_error calculation.
+	 */
 	timekeeper.mult += adj;
 	timekeeper.xtime_interval += interval;
 	timekeeper.xtime_nsec -= offset;
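
[Note: a small numeric instance of the identity derived above, with
illustrative values. Take offset = 10 unaccumulated cycles and step the
multiplier from M to M + 1:

    before:  now = 10 * M       + xtime_nsec_1
    after:   now = 10 * (M + 1) + xtime_nsec_2

equating the two gives xtime_nsec_2 = xtime_nsec_1 - 10, i.e. the
"timekeeper.xtime_nsec -= offset" step is exactly what keeps the clock
from jumping across the frequency change.]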
diff --git a/kernel/timer.c b/kernel/timer.c
index dbaa62422b13..9c3c62b0c4bc 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
 	int pid;
 
 	rcu_read_lock();
-	pid = task_tgid_vnr(current->real_parent);
+	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
 	rcu_read_unlock();
 
 	return pid;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 900b409543db..b1e8943fed1d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
 	ftrace_pid_function = ftrace_stub;
 }
 
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 	if (!src->count) {
 		free_ftrace_hash_rcu(*dst);
 		rcu_assign_pointer(*dst, EMPTY_HASH);
-		return 0;
+		/* still need to update the function records */
+		ret = 0;
+		goto out;
 	}
 
 	/*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 581876f9f387..c212a7f934ec 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
 	/* First see if we did not already create this dir */
 	list_for_each_entry(system, &event_subsystems, list) {
 		if (strcmp(system->name, name) == 0) {
-			__get_system(system);
 			system->nr_events++;
 			return system->entry;
 		}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 816d3d074979..95dc31efd6dd 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system,
 		 */
 		err = replace_preds(call, NULL, ps, filter_string, true);
 		if (err)
-			goto fail;
+			call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+		else
+			call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
 	}
 
 	list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system,
 		if (strcmp(call->class->system, system->name) != 0)
 			continue;
 
+		if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+			continue;
+
 		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
 		if (!filter_item)
 			goto fail_mem;
@@ -1686,7 +1691,7 @@ static int replace_system_preds(struct event_subsystem *system,
 		 * replace the filter for the call.
 		 */
 		filter = call->filter;
-		call->filter = filter_item->filter;
+		rcu_assign_pointer(call->filter, filter_item->filter);
 		filter_item->filter = filter;
 
 		fail = false;
@@ -1741,7 +1746,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 	filter = call->filter;
 	if (!filter)
 		goto out_unlock;
-	call->filter = NULL;
+	RCU_INIT_POINTER(call->filter, NULL);
 	/* Make sure the filter is not being used */
 	synchronize_sched();
 	__free_filter(filter);
@@ -1782,7 +1787,7 @@ out:
 	 * string
 	 */
 	tmp = call->filter;
-	call->filter = filter;
+	rcu_assign_pointer(call->filter, filter);
 	if (tmp) {
 		/* Make sure the call is done with the filter */
 		synchronize_sched();
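
[Note: the filter hunks above converge on the standard RCU
publish-then-reclaim sequence; a minimal sketch of the pattern, not the
literal kernel code:]

    old = call->filter;
    rcu_assign_pointer(call->filter, new);  /* publish the new filter */
    synchronize_sched();                    /* wait out readers of old */
    __free_filter(old);                     /* now safe to free */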