Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                |  2
-rw-r--r--  kernel/bpf/Makefile            |  6
-rw-r--r--  kernel/bpf/core.c              |  9
-rw-r--r--  kernel/bpf/verifier.c          |  3
-rw-r--r--  kernel/context_tracking.c      | 40
-rw-r--r--  kernel/cpu.c                   | 14
-rw-r--r--  kernel/events/core.c           |  8
-rw-r--r--  kernel/freezer.c               |  9
-rw-r--r--  kernel/futex.c                 | 36
-rw-r--r--  kernel/gcov/Kconfig            |  2
-rw-r--r--  kernel/kmod.c                  | 76
-rw-r--r--  kernel/power/hibernate.c       |  8
-rw-r--r--  kernel/power/process.c         | 57
-rw-r--r--  kernel/power/qos.c             | 27
-rw-r--r--  kernel/rcu/tree.c              | 15
-rw-r--r--  kernel/rcu/tree.h              |  1
-rw-r--r--  kernel/rcu/tree_plugin.h       | 33
-rw-r--r--  kernel/sched/core.c            | 47
-rw-r--r--  kernel/sched/deadline.c        | 41
-rw-r--r--  kernel/sched/fair.c            | 21
-rw-r--r--  kernel/sysctl.c                |  3
-rw-r--r--  kernel/time/clockevents.c      |  2
-rw-r--r--  kernel/time/posix-timers.c     |  1
-rw-r--r--  kernel/trace/ftrace.c          | 54
-rw-r--r--  kernel/trace/trace_syscalls.c  |  8
25 files changed, 360 insertions(+), 163 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index dc5c77544fd6..17ea6d4a9a24 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -86,7 +86,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
-obj-$(CONFIG_NET) += bpf/
+obj-$(CONFIG_BPF) += bpf/
 
 obj-$(CONFIG_PERF_EVENTS) += events/
 
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 45427239f375..0daf7f6ae7df 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,5 +1,5 @@
-obj-y := core.o syscall.o verifier.o
-
+obj-y := core.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o
 ifdef CONFIG_TEST_BPF
-obj-y += test_stub.o
+obj-$(CONFIG_BPF_SYSCALL) += test_stub.o
 endif
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f0c30c59b317..d6594e457a25 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -655,3 +655,12 @@ void bpf_prog_free(struct bpf_prog *fp)
 	schedule_work(&aux->work);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);
+
+/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
+ * skb_copy_bits(), so provide a weak definition of it for NET-less config.
+ */
+int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
+			 int len)
+{
+	return -EFAULT;
+}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 801f5f3b9307..9f81818f2941 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1409,7 +1409,8 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
 		if (memcmp(&old->regs[i], &cur->regs[i],
 			   sizeof(old->regs[0])) != 0) {
 			if (old->regs[i].type == NOT_INIT ||
-			    old->regs[i].type == UNKNOWN_VALUE)
+			    (old->regs[i].type == UNKNOWN_VALUE &&
+			     cur->regs[i].type != NOT_INIT))
 				continue;
 			return false;
 		}
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 5664985c46a0..937ecdfdf258 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -107,46 +107,6 @@ void context_tracking_user_enter(void)
 }
 NOKPROBE_SYMBOL(context_tracking_user_enter);
 
-#ifdef CONFIG_PREEMPT
-/**
- * preempt_schedule_context - preempt_schedule called by tracing
- *
- * The tracing infrastructure uses preempt_enable_notrace to prevent
- * recursion and tracing preempt enabling caused by the tracing
- * infrastructure itself. But as tracing can happen in areas coming
- * from userspace or just about to enter userspace, a preempt enable
- * can occur before user_exit() is called. This will cause the scheduler
- * to be called when the system is still in usermode.
- *
- * To prevent this, the preempt_enable_notrace will use this function
- * instead of preempt_schedule() to exit user context if needed before
- * calling the scheduler.
- */
-asmlinkage __visible void __sched notrace preempt_schedule_context(void)
-{
-	enum ctx_state prev_ctx;
-
-	if (likely(!preemptible()))
-		return;
-
-	/*
-	 * Need to disable preemption in case user_exit() is traced
-	 * and the tracer calls preempt_enable_notrace() causing
-	 * an infinite recursion.
-	 */
-	preempt_disable_notrace();
-	prev_ctx = exception_enter();
-	preempt_enable_no_resched_notrace();
-
-	preempt_schedule();
-
-	preempt_disable_notrace();
-	exception_exit(prev_ctx);
-	preempt_enable_notrace();
-}
-EXPORT_SYMBOL_GPL(preempt_schedule_context);
-#endif /* CONFIG_PREEMPT */
-
 /**
  * context_tracking_user_exit - Inform the context tracking that the CPU is
  * exiting userspace mode and entering the kernel.
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 356450f09c1f..90a3d017b90c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -64,6 +64,8 @@ static struct {
 	 * an ongoing cpu hotplug operation.
 	 */
 	int refcount;
+	/* And allows lockless put_online_cpus(). */
+	atomic_t puts_pending;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
@@ -113,7 +115,11 @@ void put_online_cpus(void)
 {
 	if (cpu_hotplug.active_writer == current)
 		return;
-	mutex_lock(&cpu_hotplug.lock);
+	if (!mutex_trylock(&cpu_hotplug.lock)) {
+		atomic_inc(&cpu_hotplug.puts_pending);
+		cpuhp_lock_release();
+		return;
+	}
 
 	if (WARN_ON(!cpu_hotplug.refcount))
 		cpu_hotplug.refcount++; /* try to fix things up */
@@ -155,6 +161,12 @@ void cpu_hotplug_begin(void)
 	cpuhp_lock_acquire();
 	for (;;) {
 		mutex_lock(&cpu_hotplug.lock);
+		if (atomic_read(&cpu_hotplug.puts_pending)) {
+			int delta;
+
+			delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
+			cpu_hotplug.refcount -= delta;
+		}
 		if (likely(!cpu_hotplug.refcount))
 			break;
 		__set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2b02c9fda790..1cd5eef1fcdd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
 
 	if (!task) {
 		/*
-		 * Per cpu events are removed via an smp call and
-		 * the removal is always successful.
+		 * Per cpu events are removed via an smp call. The removal can
+		 * fail if the CPU is currently offline, but in that case we
+		 * already called __perf_remove_from_context from
+		 * perf_event_exit_cpu.
 		 */
 		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
 		return;
@@ -8117,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
-	struct remove_event re = { .detach_group = false };
+	struct remove_event re = { .detach_group = true };
 	struct perf_event_context *ctx = __info;
 
 	perf_pmu_rotate_stop(ctx->pmu);
diff --git a/kernel/freezer.c b/kernel/freezer.c
index aa6a8aadb911..a8900a3bc27a 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
 	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
 		return false;
 
+	if (test_thread_flag(TIF_MEMDIE))
+		return false;
+
 	if (pm_nosig_freezing || cgroup_freezing(p))
 		return true;
 
@@ -147,12 +150,6 @@ void __thaw_task(struct task_struct *p)
 {
 	unsigned long flags;
 
-	/*
-	 * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
-	 * be visible to @p as waking up implies wmb. Waking up inside
-	 * freezer_lock also prevents wakeups from leaking outside
-	 * refrigerator.
-	 */
 	spin_lock_irqsave(&freezer_lock, flags);
 	if (frozen(p))
 		wake_up_process(p);
diff --git a/kernel/futex.c b/kernel/futex.c
index f3a3a071283c..63678b573d61 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -143,9 +143,8 @@
  *
  * Where (A) orders the waiters increment and the futex value read through
  * atomic operations (see hb_waiters_inc) and where (B) orders the write
- * to futex and the waiters read -- this is done by the barriers in
- * get_futex_key_refs(), through either ihold or atomic_inc, depending on the
- * futex type.
+ * to futex and the waiters read -- this is done by the barriers for both
+ * shared and private futexes in get_futex_key_refs().
  *
  * This yields the following case (where X:=waiters, Y:=futex):
  *
@@ -344,13 +343,20 @@ static void get_futex_key_refs(union futex_key *key)
 		futex_get_mm(key); /* implies MB (B) */
 		break;
 	default:
+		/*
+		 * Private futexes do not hold reference on an inode or
+		 * mm, therefore the only purpose of calling get_futex_key_refs
+		 * is because we need the barrier for the lockless waiter check.
+		 */
 		smp_mb(); /* explicit MB (B) */
 	}
 }
 
 /*
  * Drop a reference to the resource addressed by a key.
- * The hash bucket spinlock must not be held.
+ * The hash bucket spinlock must not be held. This is
+ * a no-op for private futexes, see comment in the get
+ * counterpart.
  */
 static void drop_futex_key_refs(union futex_key *key)
 {
@@ -641,8 +647,14 @@ static struct futex_pi_state * alloc_pi_state(void)
 	return pi_state;
 }
 
+/*
+ * Must be called with the hb lock held.
+ */
 static void free_pi_state(struct futex_pi_state *pi_state)
 {
+	if (!pi_state)
+		return;
+
 	if (!atomic_dec_and_test(&pi_state->refcount))
 		return;
 
@@ -1521,15 +1533,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 	}
 
 retry:
-	if (pi_state != NULL) {
-		/*
-		 * We will have to lookup the pi_state again, so free this one
-		 * to keep the accounting correct.
-		 */
-		free_pi_state(pi_state);
-		pi_state = NULL;
-	}
-
 	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out;
@@ -1619,6 +1622,8 @@ retry_private:
 		case 0:
 			break;
 		case -EFAULT:
+			free_pi_state(pi_state);
+			pi_state = NULL;
 			double_unlock_hb(hb1, hb2);
 			hb_waiters_dec(hb2);
 			put_futex_key(&key2);
@@ -1634,6 +1639,8 @@ retry_private:
 			 * exit to complete.
 			 * - The user space value changed.
 			 */
+			free_pi_state(pi_state);
+			pi_state = NULL;
 			double_unlock_hb(hb1, hb2);
 			hb_waiters_dec(hb2);
 			put_futex_key(&key2);
@@ -1710,6 +1717,7 @@ retry_private:
 	}
 
 out_unlock:
+	free_pi_state(pi_state);
 	double_unlock_hb(hb1, hb2);
 	hb_waiters_dec(hb2);
 
@@ -1727,8 +1735,6 @@ out_put_keys:
 out_put_key1:
 	put_futex_key(&key1);
 out:
-	if (pi_state != NULL)
-		free_pi_state(pi_state);
 	return ret ? ret : task_count;
 }
 
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index cf66c5c8458e..3b7408759bdf 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -35,7 +35,7 @@ config GCOV_KERNEL
 config GCOV_PROFILE_ALL
 	bool "Profile entire Kernel"
 	depends on GCOV_KERNEL
-	depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM
+	depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM || ARM64
 	default n
 	---help---
 	This options activates profiling for the entire kernel.
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 8637e041a247..80f7a6d00519 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -196,12 +196,34 @@ int __request_module(bool wait, const char *fmt, ...)
 EXPORT_SYMBOL(__request_module);
 #endif /* CONFIG_MODULES */
 
+static void call_usermodehelper_freeinfo(struct subprocess_info *info)
+{
+	if (info->cleanup)
+		(*info->cleanup)(info);
+	kfree(info);
+}
+
+static void umh_complete(struct subprocess_info *sub_info)
+{
+	struct completion *comp = xchg(&sub_info->complete, NULL);
+	/*
+	 * See call_usermodehelper_exec(). If xchg() returns NULL
+	 * we own sub_info, the UMH_KILLABLE caller has gone away
+	 * or the caller used UMH_NO_WAIT.
+	 */
+	if (comp)
+		complete(comp);
+	else
+		call_usermodehelper_freeinfo(sub_info);
+}
+
 /*
  * This is the task which runs the usermode application
  */
 static int ____call_usermodehelper(void *data)
 {
 	struct subprocess_info *sub_info = data;
+	int wait = sub_info->wait & ~UMH_KILLABLE;
 	struct cred *new;
 	int retval;
 
@@ -221,7 +243,7 @@ static int ____call_usermodehelper(void *data)
 	retval = -ENOMEM;
 	new = prepare_kernel_cred(current);
 	if (!new)
-		goto fail;
+		goto out;
 
 	spin_lock(&umh_sysctl_lock);
 	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
@@ -233,7 +255,7 @@ static int ____call_usermodehelper(void *data)
 		retval = sub_info->init(sub_info, new);
 		if (retval) {
 			abort_creds(new);
-			goto fail;
+			goto out;
 		}
 	}
 
@@ -242,12 +264,13 @@ static int ____call_usermodehelper(void *data)
 	retval = do_execve(getname_kernel(sub_info->path),
 			   (const char __user *const __user *)sub_info->argv,
 			   (const char __user *const __user *)sub_info->envp);
+out:
+	sub_info->retval = retval;
+	/* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
+	if (wait != UMH_WAIT_PROC)
+		umh_complete(sub_info);
 	if (!retval)
 		return 0;
-
-	/* Exec failed? */
-fail:
-	sub_info->retval = retval;
 	do_exit(0);
 }
 
@@ -258,26 +281,6 @@ static int call_helper(void *data)
 	return ____call_usermodehelper(data);
 }
 
-static void call_usermodehelper_freeinfo(struct subprocess_info *info)
-{
-	if (info->cleanup)
-		(*info->cleanup)(info);
-	kfree(info);
-}
-
-static void umh_complete(struct subprocess_info *sub_info)
-{
-	struct completion *comp = xchg(&sub_info->complete, NULL);
-	/*
-	 * See call_usermodehelper_exec(). If xchg() returns NULL
-	 * we own sub_info, the UMH_KILLABLE caller has gone away.
-	 */
-	if (comp)
-		complete(comp);
-	else
-		call_usermodehelper_freeinfo(sub_info);
-}
-
 /* Keventd can't block, but this (a child) can. */
 static int wait_for_helper(void *data)
 {
@@ -336,18 +339,8 @@ static void __call_usermodehelper(struct work_struct *work)
 		kmod_thread_locker = NULL;
 	}
 
-	switch (wait) {
-	case UMH_NO_WAIT:
-		call_usermodehelper_freeinfo(sub_info);
-		break;
-
-	case UMH_WAIT_PROC:
-		if (pid > 0)
-			break;
-		/* FALLTHROUGH */
-	case UMH_WAIT_EXEC:
-		if (pid < 0)
-			sub_info->retval = pid;
+	if (pid < 0) {
+		sub_info->retval = pid;
 		umh_complete(sub_info);
 	}
 }
@@ -588,7 +581,12 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 		goto out;
 	}
 
-	sub_info->complete = &done;
+	/*
+	 * Set the completion pointer only if there is a waiter.
+	 * This makes it possible to use umh_complete to free
+	 * the data structure in case of UMH_NO_WAIT.
+	 */
+	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
 	sub_info->wait = wait;
 
 	queue_work(khelper_wq, &sub_info->work);
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index a9dfa79b6bab..1f35a3478f3c 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -502,8 +502,14 @@ int hibernation_restore(int platform_mode)
 	error = dpm_suspend_start(PMSG_QUIESCE);
 	if (!error) {
 		error = resume_target_kernel(platform_mode);
-		dpm_resume_end(PMSG_RECOVER);
+		/*
+		 * The above should either succeed and jump to the new kernel,
+		 * or return with an error. Otherwise things are just
+		 * undefined, so let's be paranoid.
+		 */
+		BUG_ON(!error);
 	}
+	dpm_resume_end(PMSG_RECOVER);
 	pm_restore_gfp_mask();
 	resume_console();
 	pm_restore_console();
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 7b323221b9ee..5a6ec8678b9a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -46,13 +46,13 @@ static int try_to_freeze_tasks(bool user_only)
 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
-		do_each_thread(g, p) {
+		for_each_process_thread(g, p) {
 			if (p == current || !freeze_task(p))
 				continue;
 
 			if (!freezer_should_skip(p))
 				todo++;
-		} while_each_thread(g, p);
+		}
 		read_unlock(&tasklist_lock);
 
 		if (!user_only) {
@@ -93,11 +93,11 @@ static int try_to_freeze_tasks(bool user_only)
 
 		if (!wakeup) {
 			read_lock(&tasklist_lock);
-			do_each_thread(g, p) {
+			for_each_process_thread(g, p) {
 				if (p != current && !freezer_should_skip(p)
 				    && freezing(p) && !frozen(p))
 					sched_show_task(p);
-			} while_each_thread(g, p);
+			}
 			read_unlock(&tasklist_lock);
 		}
 	} else {
@@ -108,6 +108,30 @@ static int try_to_freeze_tasks(bool user_only)
 	return todo ? -EBUSY : 0;
 }
 
+static bool __check_frozen_processes(void)
+{
+	struct task_struct *g, *p;
+
+	for_each_process_thread(g, p)
+		if (p != current && !freezer_should_skip(p) && !frozen(p))
+			return false;
+
+	return true;
+}
+
+/*
+ * Returns true if all freezable tasks (except for current) are frozen already
+ */
+static bool check_frozen_processes(void)
+{
+	bool ret;
+
+	read_lock(&tasklist_lock);
+	ret = __check_frozen_processes();
+	read_unlock(&tasklist_lock);
+	return ret;
+}
+
 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
  * The current thread will not be frozen. The same process that calls
@@ -118,6 +142,7 @@ static int try_to_freeze_tasks(bool user_only)
 int freeze_processes(void)
 {
 	int error;
+	int oom_kills_saved;
 
 	error = __usermodehelper_disable(UMH_FREEZING);
 	if (error)
@@ -132,11 +157,25 @@ int freeze_processes(void)
 	pm_wakeup_clear();
 	printk("Freezing user space processes ... ");
 	pm_freezing = true;
+	oom_kills_saved = oom_kills_count();
 	error = try_to_freeze_tasks(true);
 	if (!error) {
-		printk("done.");
 		__usermodehelper_set_disable_depth(UMH_DISABLED);
 		oom_killer_disable();
+
+		/*
+		 * There might have been an OOM kill while we were
+		 * freezing tasks and the killed task might be still
+		 * on the way out so we have to double check for race.
+		 */
+		if (oom_kills_count() != oom_kills_saved &&
+		    !check_frozen_processes()) {
+			__usermodehelper_set_disable_depth(UMH_ENABLED);
+			printk("OOM in progress.");
+			error = -EBUSY;
+		} else {
+			printk("done.");
+		}
 	}
 	printk("\n");
 	BUG_ON(in_atomic());
@@ -191,11 +230,11 @@ void thaw_processes(void)
 	thaw_workqueues();
 
 	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
+	for_each_process_thread(g, p) {
 		/* No other threads should have PF_SUSPEND_TASK set */
 		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
 		__thaw_task(p);
-	} while_each_thread(g, p);
+	}
 	read_unlock(&tasklist_lock);
 
 	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
@@ -218,10 +257,10 @@ void thaw_kernel_threads(void)
 	thaw_workqueues();
 
 	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
+	for_each_process_thread(g, p) {
 		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
 			__thaw_task(p);
-	} while_each_thread(g, p);
+	}
 	read_unlock(&tasklist_lock);
 
 	schedule();
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 884b77058864..5f4c006c4b1e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -105,11 +105,27 @@ static struct pm_qos_object network_throughput_pm_qos = {
 };
 
 
+static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
+static struct pm_qos_constraints memory_bw_constraints = {
+	.list = PLIST_HEAD_INIT(memory_bw_constraints.list),
+	.target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
+	.default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
+	.no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
+	.type = PM_QOS_SUM,
+	.notifiers = &memory_bandwidth_notifier,
+};
+static struct pm_qos_object memory_bandwidth_pm_qos = {
+	.constraints = &memory_bw_constraints,
+	.name = "memory_bandwidth",
+};
+
+
 static struct pm_qos_object *pm_qos_array[] = {
 	&null_pm_qos,
 	&cpu_dma_pm_qos,
 	&network_lat_pm_qos,
-	&network_throughput_pm_qos
+	&network_throughput_pm_qos,
+	&memory_bandwidth_pm_qos,
 };
 
 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
@@ -130,6 +146,9 @@ static const struct file_operations pm_qos_power_fops = {
 /* unlocked internal variant */
 static inline int pm_qos_get_value(struct pm_qos_constraints *c)
 {
+	struct plist_node *node;
+	int total_value = 0;
+
 	if (plist_head_empty(&c->list))
 		return c->no_constraint_value;
 
@@ -140,6 +159,12 @@ static inline int pm_qos_get_value(struct pm_qos_constraints *c)
 	case PM_QOS_MAX:
 		return plist_last(&c->list)->prio;
 
+	case PM_QOS_SUM:
+		plist_for_each(node, &c->list)
+			total_value += node->prio;
+
+		return total_value;
+
 	default:
 		/* runtime check for not using enum */
 		BUG();
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 133e47223095..9815447d22e0 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3299,11 +3299,16 @@ static void _rcu_barrier(struct rcu_state *rsp)
 			continue;
 		rdp = per_cpu_ptr(rsp->rda, cpu);
 		if (rcu_is_nocb_cpu(cpu)) {
-			_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
-					   rsp->n_barrier_done);
-			atomic_inc(&rsp->barrier_cpu_count);
-			__call_rcu(&rdp->barrier_head, rcu_barrier_callback,
-				   rsp, cpu, 0);
+			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
+				_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
+						   rsp->n_barrier_done);
+			} else {
+				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
+						   rsp->n_barrier_done);
+				atomic_inc(&rsp->barrier_cpu_count);
+				__call_rcu(&rdp->barrier_head,
+					   rcu_barrier_callback, rsp, cpu, 0);
+			}
 		} else if (ACCESS_ONCE(rdp->qlen)) {
 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
 					   rsp->n_barrier_done);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index d03764652d91..bbdc45d8d74f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -587,6 +587,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
+static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 387dd4599344..c1d7f27bd38f 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2050,6 +2050,33 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 }
 
 /*
+ * Does the specified CPU need an RCU callback for the specified flavor
+ * of rcu_barrier()?
+ */
+static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+	struct rcu_head *rhp;
+
+	/* No-CBs CPUs might have callbacks on any of three lists. */
+	rhp = ACCESS_ONCE(rdp->nocb_head);
+	if (!rhp)
+		rhp = ACCESS_ONCE(rdp->nocb_gp_head);
+	if (!rhp)
+		rhp = ACCESS_ONCE(rdp->nocb_follower_head);
+
+	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
+	if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp) {
+		/* RCU callback enqueued before CPU first came online??? */
+		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
+		       cpu, rhp->func);
+		WARN_ON_ONCE(1);
+	}
+
+	return !!rhp;
+}
+
+/*
  * Enqueue the specified string of rcu_head structures onto the specified
  * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
  * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
@@ -2642,6 +2669,12 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
+static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+{
+	WARN_ON_ONCE(1); /* Should be dead code. */
+	return false;
+}
+
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 44999505e1bf..240157c13ddc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2951,6 +2951,47 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
+
+#ifdef CONFIG_CONTEXT_TRACKING
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
+{
+	enum ctx_state prev_ctx;
+
+	if (likely(!preemptible()))
+		return;
+
+	do {
+		__preempt_count_add(PREEMPT_ACTIVE);
+		/*
+		 * Needs preempt disabled in case user_exit() is traced
+		 * and the tracer calls preempt_enable_notrace() causing
+		 * an infinite recursion.
+		 */
+		prev_ctx = exception_enter();
+		__schedule();
+		exception_exit(prev_ctx);
+
+		__preempt_count_sub(PREEMPT_ACTIVE);
+		barrier();
+	} while (need_resched());
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_CONTEXT_TRACKING */
+
 #endif /* CONFIG_PREEMPT */
 
 /*
@@ -7833,6 +7874,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
 	sched_offline_group(tg);
 }
 
+static void cpu_cgroup_fork(struct task_struct *task)
+{
+	sched_move_task(task);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 				 struct cgroup_taskset *tset)
 {
@@ -8205,6 +8251,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_free	= cpu_cgroup_css_free,
 	.css_online	= cpu_cgroup_css_online,
 	.css_offline	= cpu_cgroup_css_offline,
+	.fork		= cpu_cgroup_fork,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
 	.exit		= cpu_cgroup_exit,
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 256e577faf1b..5285332392d5 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -518,12 +518,20 @@ again:
 	}
 
 	/*
-	 * We need to take care of a possible races here. In fact, the
-	 * task might have changed its scheduling policy to something
-	 * different from SCHED_DEADLINE or changed its reservation
-	 * parameters (through sched_setattr()).
+	 * We need to take care of several possible races here:
+	 *
+	 * - the task might have changed its scheduling policy
+	 *   to something different than SCHED_DEADLINE
+	 * - the task might have changed its reservation parameters
+	 *   (through sched_setattr())
+	 * - the task might have been boosted by someone else and
+	 *   might be in the boosting/deboosting path
+	 *
+	 * In all this cases we bail out, as the task is already
+	 * in the runqueue or is going to be enqueued back anyway.
 	 */
-	if (!dl_task(p) || dl_se->dl_new)
+	if (!dl_task(p) || dl_se->dl_new ||
+	    dl_se->dl_boosted || !dl_se->dl_throttled)
 		goto unlock;
 
 	sched_clock_tick();
@@ -532,7 +540,7 @@ again:
 	dl_se->dl_yielded = 0;
 	if (task_on_rq_queued(p)) {
 		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
-		if (task_has_dl_policy(rq->curr))
+		if (dl_task(rq->curr))
 			check_preempt_curr_dl(rq, p, 0);
 		else
 			resched_curr(rq);
@@ -847,8 +855,19 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 	 * smaller than our one... OTW we keep our runtime and
 	 * deadline.
 	 */
-	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
+	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
 		pi_se = &pi_task->dl;
+	} else if (!dl_prio(p->normal_prio)) {
+		/*
+		 * Special case in which we have a !SCHED_DEADLINE task
+		 * that is going to be deboosted, but exceedes its
+		 * runtime while doing so. No point in replenishing
+		 * it, as it's going to return back to its original
+		 * scheduling class after this.
+		 */
+		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
+		return;
+	}
 
 	/*
 	 * If p is throttled, we do nothing. In fact, if it exhausted
@@ -1607,8 +1626,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 		/* Only reschedule if pushing failed */
 		check_resched = 0;
 #endif /* CONFIG_SMP */
-		if (check_resched && task_has_dl_policy(rq->curr))
-			check_preempt_curr_dl(rq, p, 0);
+		if (check_resched) {
+			if (dl_task(rq->curr))
+				check_preempt_curr_dl(rq, p, 0);
+			else
+				resched_curr(rq);
+		}
 	}
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0b069bf3e708..34baa60f8a7b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -828,11 +828,12 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
+	unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
 	unsigned int scan, floor;
 	unsigned int windows = 1;
 
-	if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
-		windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
+	if (scan_size < MAX_SCAN_WINDOW)
+		windows = MAX_SCAN_WINDOW / scan_size;
 	floor = 1000 / windows;
 
 	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
@@ -1164,9 +1165,19 @@ static void task_numa_compare(struct task_numa_env *env,
 	long moveimp = imp;
 
 	rcu_read_lock();
-	cur = ACCESS_ONCE(dst_rq->curr);
-	if (cur->pid == 0) /* idle */
+
+	raw_spin_lock_irq(&dst_rq->lock);
+	cur = dst_rq->curr;
+	/*
+	 * No need to move the exiting task, and this ensures that ->curr
+	 * wasn't reaped and thus get_task_struct() in task_numa_assign()
+	 * is safe under RCU read lock.
+	 * Note that rcu_read_lock() itself can't protect from the final
+	 * put_task_struct() after the last schedule().
+	 */
+	if ((cur->flags & PF_EXITING) || is_idle_task(cur))
 		cur = NULL;
+	raw_spin_unlock_irq(&dst_rq->lock);
 
 	/*
 	 * "imp" is the fault differential for the source task between the
@@ -1520,7 +1531,7 @@ static void update_task_scan_period(struct task_struct *p,
 		 * scanning faster if shared accesses dominate as it may
 		 * simply bounce migrations uselessly
 		 */
-		ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
+		ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
 		diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
 	}
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4aada6d9fe74..15f2511a1b7c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -387,7 +387,8 @@ static struct ctl_table kern_table[] = {
 		.data		= &sysctl_numa_balancing_scan_size,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
 	},
 	{
 		.procname	= "numa_balancing",
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 9c94c19f1305..55449909f114 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,7 +72,7 @@ static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
 	 * Also omit the add if it would overflow the u64 boundary.
 	 */
 	if ((~0ULL - clc > rnd) &&
-	    (!ismax || evt->mult <= (1U << evt->shift)))
+	    (!ismax || evt->mult <= (1ULL << evt->shift)))
 		clc += rnd;
 
 	do_div(clc, evt->mult);
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 42b463ad90f2..31ea01f42e1f 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -636,6 +636,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
 			goto out;
 		}
 	} else {
+		memset(&event.sigev_value, 0, sizeof(event.sigev_value));
 		event.sigev_notify = SIGEV_SIGNAL;
 		event.sigev_signo = SIGALRM;
 		event.sigev_value.sival_int = new_timer->it_id;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fb186b9ddf51..31c90fec4158 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1925,8 +1925,16 @@ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
 	 * when we are adding another op to the rec or removing the
 	 * current one. Thus, if the op is being added, we can
 	 * ignore it because it hasn't attached itself to the rec
-	 * yet. That means we just need to find the op that has a
-	 * trampoline and is not beeing added.
+	 * yet.
+	 *
+	 * If an ops is being modified (hooking to different functions)
+	 * then we don't care about the new functions that are being
+	 * added, just the old ones (that are probably being removed).
+	 *
+	 * If we are adding an ops to a function that already is using
+	 * a trampoline, it needs to be removed (trampolines are only
+	 * for single ops connected), then an ops that is not being
+	 * modified also needs to be checked.
 	 */
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
 
@@ -1940,17 +1948,23 @@ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
 		if (op->flags & FTRACE_OPS_FL_ADDING)
 			continue;
 
+
 		/*
-		 * If the ops is not being added and has a trampoline,
-		 * then it must be the one that we want!
+		 * If the ops is being modified and is in the old
+		 * hash, then it is probably being removed from this
+		 * function.
 		 */
-		if (hash_contains_ip(ip, op->func_hash))
-			return op;
-
-		/* If the ops is being modified, it may be in the old hash. */
 		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
 		    hash_contains_ip(ip, &op->old_hash))
 			return op;
+		/*
+		 * If the ops is not being added or modified, and it's
+		 * in its normal filter hash, then this must be the one
+		 * we want!
+		 */
+		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
+		    hash_contains_ip(ip, op->func_hash))
+			return op;
 
 	} while_for_each_ftrace_op(op);
 
@@ -2293,10 +2307,13 @@ static void ftrace_run_update_code(int command)
 	FTRACE_WARN_ON(ret);
 }
 
-static void ftrace_run_modify_code(struct ftrace_ops *ops, int command)
+static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
+				   struct ftrace_hash *old_hash)
 {
 	ops->flags |= FTRACE_OPS_FL_MODIFYING;
+	ops->old_hash.filter_hash = old_hash;
 	ftrace_run_update_code(command);
+	ops->old_hash.filter_hash = NULL;
 	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
 }
 
@@ -3340,7 +3357,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 
 static int ftrace_probe_registered;
 
-static void __enable_ftrace_function_probe(void)
+static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
 {
 	int ret;
 	int i;
@@ -3348,7 +3365,8 @@ static void __enable_ftrace_function_probe(void)
 	if (ftrace_probe_registered) {
 		/* still need to update the function call sites */
 		if (ftrace_enabled)
-			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS);
+			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+					       old_hash);
 		return;
 	}
 
@@ -3477,13 +3495,14 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	} while_for_each_ftrace_rec();
 
 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+	__enable_ftrace_function_probe(old_hash);
+
 	if (!ret)
 		free_ftrace_hash_rcu(old_hash);
 	else
 		count = ret;
 
-	__enable_ftrace_function_probe();
-
  out_unlock:
 	mutex_unlock(&ftrace_lock);
  out:
@@ -3764,10 +3783,11 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 	return add_hash_entry(hash, ip);
 }
 
-static void ftrace_ops_update_code(struct ftrace_ops *ops)
+static void ftrace_ops_update_code(struct ftrace_ops *ops,
+				   struct ftrace_hash *old_hash)
 {
 	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
-		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS);
+		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
 }
 
 static int
@@ -3813,7 +3833,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	old_hash = *orig_hash;
 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
 	if (!ret) {
-		ftrace_ops_update_code(ops);
+		ftrace_ops_update_code(ops, old_hash);
 		free_ftrace_hash_rcu(old_hash);
 	}
 	mutex_unlock(&ftrace_lock);
@@ -4058,7 +4078,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 		ret = ftrace_hash_move(iter->ops, filter_hash,
 				       orig_hash, iter->hash);
 		if (!ret) {
-			ftrace_ops_update_code(iter->ops);
+			ftrace_ops_update_code(iter->ops, old_hash);
 			free_ftrace_hash_rcu(old_hash);
 		}
 		mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 4dc8b79c5f75..29228c4d5696 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = trace_get_syscall_nr(current, regs);
-	if (syscall_nr < 0)
+	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
 		return;
 
 	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	int syscall_nr;
 
 	syscall_nr = trace_get_syscall_nr(current, regs);
-	if (syscall_nr < 0)
+	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
 		return;
 
 	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = trace_get_syscall_nr(current, regs);
-	if (syscall_nr < 0)
+	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
 		return;
 	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
@@ -641,7 +641,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = trace_get_syscall_nr(current, regs);
-	if (syscall_nr < 0)
+	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
 		return;
 	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;