Diffstat (limited to 'kernel')
-rw-r--r--  kernel/async.c            27
-rw-r--r--  kernel/events/core.c      20
-rw-r--r--  kernel/pid.c               2
-rw-r--r--  kernel/printk.c            9
-rw-r--r--  kernel/ptrace.c           72
-rw-r--r--  kernel/rcutree_plugin.h   13
-rw-r--r--  kernel/sched/core.c        3
-rw-r--r--  kernel/sched/debug.c       4
-rw-r--r--  kernel/sched/fair.c        2
-rw-r--r--  kernel/sched/rt.c          2
-rw-r--r--  kernel/signal.c           19
-rw-r--r--  kernel/smp.c              13
-rw-r--r--  kernel/trace/ftrace.c      2
13 files changed, 135 insertions, 53 deletions
diff --git a/kernel/async.c b/kernel/async.c
index a1d585c351d6..6f34904a0b53 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -86,18 +86,27 @@ static atomic_t entry_count;
  */
 static async_cookie_t __lowest_in_progress(struct async_domain *running)
 {
+        async_cookie_t first_running = next_cookie;     /* infinity value */
+        async_cookie_t first_pending = next_cookie;     /* ditto */
         struct async_entry *entry;
 
+        /*
+         * Both running and pending lists are sorted but not disjoint.
+         * Take the first cookies from both and return the min.
+         */
         if (!list_empty(&running->domain)) {
                 entry = list_first_entry(&running->domain, typeof(*entry), list);
-                return entry->cookie;
+                first_running = entry->cookie;
         }
 
-        list_for_each_entry(entry, &async_pending, list)
-                if (entry->running == running)
-                        return entry->cookie;
+        list_for_each_entry(entry, &async_pending, list) {
+                if (entry->running == running) {
+                        first_pending = entry->cookie;
+                        break;
+                }
+        }
 
-        return next_cookie;     /* "infinity" value */
+        return min(first_running, first_pending);
 }
 
 static async_cookie_t lowest_in_progress(struct async_domain *running)
@@ -118,13 +127,17 @@ static void async_run_entry_fn(struct work_struct *work)
 {
         struct async_entry *entry =
                 container_of(work, struct async_entry, work);
+        struct async_entry *pos;
         unsigned long flags;
         ktime_t uninitialized_var(calltime), delta, rettime;
         struct async_domain *running = entry->running;
 
-        /* 1) move self to the running queue */
+        /* 1) move self to the running queue, make sure it stays sorted */
         spin_lock_irqsave(&async_lock, flags);
-        list_move_tail(&entry->list, &running->domain);
+        list_for_each_entry_reverse(pos, &running->domain, list)
+                if (entry->cookie < pos->cookie)
+                        break;
+        list_move_tail(&entry->list, &pos->list);
         spin_unlock_irqrestore(&async_lock, flags);
 
         /* 2) run (and print duration) */
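The two async.c hunks belong together: the first hunk only returns the right answer if the running list stays sorted by cookie, which the second hunk enforces by inserting at the sorted position instead of blindly appending. Below is a minimal userspace sketch of the min-of-both-heads idea; the array-based helper, COOKIE_MAX, and the sample values are illustrative stand-ins, not kernel API.

/* Build: cc -o lowest lowest.c */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cookie_t;

#define COOKIE_MAX UINT64_MAX   /* stand-in for the "next_cookie" infinity value */

/*
 * Toy model: two sorted arrays of cookies stand in for the running and
 * pending lists. The lowest cookie still in progress is the smaller of
 * the two list heads; an empty list contributes "infinity".
 */
static cookie_t lowest_in_progress(const cookie_t *running, size_t nr_running,
                                   const cookie_t *pending, size_t nr_pending)
{
    cookie_t first_running = nr_running ? running[0] : COOKIE_MAX;
    cookie_t first_pending = nr_pending ? pending[0] : COOKIE_MAX;

    return first_running < first_pending ? first_running : first_pending;
}

int main(void)
{
    cookie_t running[] = { 5, 7, 9 };   /* already executing, sorted */
    cookie_t pending[] = { 3, 8 };      /* queued but not started, sorted */

    /* Returning running[0] alone would wrongly report 5; the answer is 3. */
    printf("lowest in progress: %llu\n",
           (unsigned long long)lowest_in_progress(running, 3, pending, 2));
    return 0;
}

Run it and it prints 3, the pending cookie that the pre-patch code would have skipped whenever the running list was non-empty.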
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 301079d06f24..7b6646a8c067 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -908,6 +908,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 }
 
 /*
+ * Initialize event state based on the perf_event_attr::disabled.
+ */
+static inline void perf_event__state_init(struct perf_event *event)
+{
+        event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
+                                              PERF_EVENT_STATE_INACTIVE;
+}
+
+/*
  * Called at perf_event creation and when events are attached/detached from a
  * group.
  */
@@ -6179,8 +6188,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
         event->overflow_handler = overflow_handler;
         event->overflow_handler_context = context;
 
-        if (attr->disabled)
-                event->state = PERF_EVENT_STATE_OFF;
+        perf_event__state_init(event);
 
         pmu = NULL;
 
@@ -6609,9 +6617,17 @@ SYSCALL_DEFINE5(perf_event_open,
 
                 mutex_lock(&gctx->mutex);
                 perf_remove_from_context(group_leader);
+
+                /*
+                 * Removing from the context ends up with disabled
+                 * event. What we want here is event in the initial
+                 * startup state, ready to be add into new context.
+                 */
+                perf_event__state_init(group_leader);
                 list_for_each_entry(sibling, &group_leader->sibling_list,
                                     group_entry) {
                         perf_remove_from_context(sibling);
+                        perf_event__state_init(sibling);
                         put_ctx(gctx);
                 }
                 mutex_unlock(&gctx->mutex);
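The three perf hunks centralize one rule, the initial event state derived from perf_event_attr::disabled, into perf_event__state_init(), so that moving a group leader and its siblings into a new context can restore the startup state that perf_remove_from_context() clobbers. A rough userspace sketch of that centralize-and-reuse pattern follows; the enum values and helper names are invented for illustration and are not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the perf event states, not the kernel enums. */
enum ev_state { EV_OFF, EV_INACTIVE, EV_ACTIVE };

struct event {
    bool disabled;          /* mirrors perf_event_attr::disabled */
    enum ev_state state;
};

/* One helper owns the "initial state" rule, so every path agrees. */
static void event_state_init(struct event *e)
{
    e->state = e->disabled ? EV_OFF : EV_INACTIVE;
}

/* Detaching from a context leaves the event disabled as a side effect... */
static void remove_from_context(struct event *e)
{
    e->state = EV_OFF;
}

/* ...so moving it must re-run the init rule before attaching elsewhere. */
static void move_to_new_context(struct event *e)
{
    remove_from_context(e);
    event_state_init(e);
}

int main(void)
{
    struct event e = { .disabled = false };

    event_state_init(&e);
    move_to_new_context(&e);
    printf("state after move: %s\n", e.state == EV_INACTIVE ? "INACTIVE" : "OFF");
    return 0;
}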
diff --git a/kernel/pid.c b/kernel/pid.c
index de9af600006f..f2c6a6825098 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -331,7 +331,7 @@ out:
         return pid;
 
 out_unlock:
-        spin_unlock(&pidmap_lock);
+        spin_unlock_irq(&pidmap_lock);
 out_free:
         while (++i <= ns->level)
                 free_pidmap(pid->numbers + i);
diff --git a/kernel/printk.c b/kernel/printk.c
index 357f714ddd49..267ce780abe8 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -87,12 +87,6 @@ static DEFINE_SEMAPHORE(console_sem);
 struct console *console_drivers;
 EXPORT_SYMBOL_GPL(console_drivers);
 
-#ifdef CONFIG_LOCKDEP
-static struct lockdep_map console_lock_dep_map = {
-        .name = "console_lock"
-};
-#endif
-
 /*
  * This is used for debugging the mess that is the VT code by
  * keeping track if we have the console semaphore held. It's
@@ -1924,7 +1918,6 @@ void console_lock(void)
                 return;
         console_locked = 1;
         console_may_schedule = 1;
-        mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(console_lock);
 
@@ -1946,7 +1939,6 @@ int console_trylock(void)
         }
         console_locked = 1;
         console_may_schedule = 0;
-        mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
         return 1;
 }
 EXPORT_SYMBOL(console_trylock);
@@ -2107,7 +2099,6 @@ skip:
                 local_irq_restore(flags);
         }
         console_locked = 0;
-        mutex_release(&console_lock_dep_map, 1, _RET_IP_);
 
         /* Release the exclusive_console once it is used */
         if (unlikely(exclusive_console))
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 612a56126851..6cbeaae4406d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
          * TASK_KILLABLE sleeps.
          */
         if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
-                signal_wake_up(child, task_is_traced(child));
+                ptrace_signal_wake_up(child, true);
 
         spin_unlock(&child->sighand->siglock);
 }
 
+/* Ensure that nothing can wake it up, even SIGKILL */
+static bool ptrace_freeze_traced(struct task_struct *task)
+{
+        bool ret = false;
+
+        /* Lockless, nobody but us can set this flag */
+        if (task->jobctl & JOBCTL_LISTENING)
+                return ret;
+
+        spin_lock_irq(&task->sighand->siglock);
+        if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+                task->state = __TASK_TRACED;
+                ret = true;
+        }
+        spin_unlock_irq(&task->sighand->siglock);
+
+        return ret;
+}
+
+static void ptrace_unfreeze_traced(struct task_struct *task)
+{
+        if (task->state != __TASK_TRACED)
+                return;
+
+        WARN_ON(!task->ptrace || task->parent != current);
+
+        spin_lock_irq(&task->sighand->siglock);
+        if (__fatal_signal_pending(task))
+                wake_up_state(task, __TASK_TRACED);
+        else
+                task->state = TASK_TRACED;
+        spin_unlock_irq(&task->sighand->siglock);
+}
+
 /**
  * ptrace_check_attach - check whether ptracee is ready for ptrace operation
  * @child: ptracee to check for
@@ -151,24 +185,29 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
          * be changed by us so it's not changing right after this.
          */
         read_lock(&tasklist_lock);
-        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
+        if (child->ptrace && child->parent == current) {
+                WARN_ON(child->state == __TASK_TRACED);
                 /*
                  * child->sighand can't be NULL, release_task()
                  * does ptrace_unlink() before __exit_signal().
                  */
-                spin_lock_irq(&child->sighand->siglock);
-                WARN_ON_ONCE(task_is_stopped(child));
-                if (ignore_state || (task_is_traced(child) &&
-                                     !(child->jobctl & JOBCTL_LISTENING)))
+                if (ignore_state || ptrace_freeze_traced(child))
                         ret = 0;
-                spin_unlock_irq(&child->sighand->siglock);
         }
         read_unlock(&tasklist_lock);
 
-        if (!ret && !ignore_state)
-                ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
+        if (!ret && !ignore_state) {
+                if (!wait_task_inactive(child, __TASK_TRACED)) {
+                        /*
+                         * This can only happen if may_ptrace_stop() fails and
+                         * ptrace_stop() changes ->state back to TASK_RUNNING,
+                         * so we should not worry about leaking __TASK_TRACED.
+                         */
+                        WARN_ON(child->state == __TASK_TRACED);
+                        ret = -ESRCH;
+                }
+        }
 
-        /* All systems go.. */
         return ret;
 }
 
@@ -317,7 +356,7 @@ static int ptrace_attach(struct task_struct *task, long request,
          */
         if (task_is_stopped(task) &&
             task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
-                signal_wake_up(task, 1);
+                signal_wake_up_state(task, __TASK_STOPPED);
 
         spin_unlock(&task->sighand->siglock);
 
@@ -737,7 +776,7 @@ int ptrace_request(struct task_struct *child, long request,
                  * tracee into STOP.
                  */
                 if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
-                        signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+                        ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
 
                 unlock_task_sighand(child, &flags);
                 ret = 0;
@@ -763,7 +802,7 @@ int ptrace_request(struct task_struct *child, long request,
                          * start of this trap and now.  Trigger re-trap.
                          */
                         if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-                                signal_wake_up(child, true);
+                                ptrace_signal_wake_up(child, true);
                         ret = 0;
                 }
                 unlock_task_sighand(child, &flags);
@@ -900,6 +939,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                 goto out_put_task_struct;
 
         ret = arch_ptrace(child, request, addr, data);
+        if (ret || request != PTRACE_DETACH)
+                ptrace_unfreeze_traced(child);
 
  out_put_task_struct:
         put_task_struct(child);
@@ -1039,8 +1080,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 
         ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                   request == PTRACE_INTERRUPT);
-        if (!ret)
+        if (!ret) {
                 ret = compat_arch_ptrace(child, request, addr, data);
+                if (ret || request != PTRACE_DETACH)
+                        ptrace_unfreeze_traced(child);
+        }
 
  out_put_task_struct:
         put_task_struct(child);
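The ptrace changes introduce a freeze/unfreeze pair: ptrace_check_attach() puts the tracee into __TASK_TRACED (not wakeable even by SIGKILL) for the duration of the request, and the syscall paths unfreeze it afterwards, waking it if a fatal signal arrived in the meantime. Here is a simplified pthread sketch of that pattern, assuming made-up state names and a plain mutex in place of siglock; it is a model of the idea, not the kernel implementation.

/* Build: cc -pthread -o freeze freeze.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative states; __TASK_TRACED in the kernel is TRACED minus WAKEKILL. */
enum tstate { RUNNING, TRACED, TRACED_FROZEN };

struct task {
    pthread_mutex_t lock;      /* stands in for sighand->siglock */
    enum tstate state;
    bool fatal_signal_pending;
};

/* Freeze: only succeed if the task is traced and no fatal signal is queued. */
static bool freeze_traced(struct task *t)
{
    bool ret = false;

    pthread_mutex_lock(&t->lock);
    if (t->state == TRACED && !t->fatal_signal_pending) {
        t->state = TRACED_FROZEN;
        ret = true;
    }
    pthread_mutex_unlock(&t->lock);
    return ret;
}

/* Unfreeze: wake the task if a fatal signal arrived while it was frozen. */
static void unfreeze_traced(struct task *t)
{
    if (t->state != TRACED_FROZEN)
        return;

    pthread_mutex_lock(&t->lock);
    if (t->fatal_signal_pending)
        t->state = RUNNING;        /* kernel: wake_up_state(task, __TASK_TRACED) */
    else
        t->state = TRACED;
    pthread_mutex_unlock(&t->lock);
}

int main(void)
{
    struct task t = { PTHREAD_MUTEX_INITIALIZER, TRACED, false };

    if (freeze_traced(&t)) {
        /* ... perform the ptrace request without racing against SIGKILL ... */
        unfreeze_traced(&t);
    }
    printf("final state: %d\n", t.state);
    return 0;
}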
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f6e5ec2932b4..c1cc7e17ff9d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -40,8 +40,7 @@
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
 static bool have_rcu_nocb_mask;     /* Was rcu_nocb_mask allocated? */
-static bool rcu_nocb_poll;          /* Offload kthread are to poll. */
-module_param(rcu_nocb_poll, bool, 0444);
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
 static char __initdata nocb_buf[NR_CPUS * 5];
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
@@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
 
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+        rcu_nocb_poll = 1;
+        return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
 /* Is the specified CPU a no-CPUs CPU? */
 static bool is_nocb_cpu(int cpu)
 {
@@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)
         for (;;) {
                 /* If not polling, wait for next batch of callbacks. */
                 if (!rcu_nocb_poll)
-                        wait_event(rdp->nocb_wq, rdp->nocb_head);
+                        wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
                 list = ACCESS_ONCE(rdp->nocb_head);
                 if (!list) {
                         schedule_timeout_interruptible(1);
+                        flush_signals(current);
                         continue;
                 }
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 257002c13bb0..26058d0bebba 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1523,7 +1523,8 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-        return try_to_wake_up(p, TASK_ALL, 0);
+        WARN_ON(task_is_stopped_or_traced(p));
+        return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2cd3c1b4e582..7ae4c4c5420e 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -222,8 +222,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                         cfs_rq->runnable_load_avg);
         SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
                         cfs_rq->blocked_load_avg);
-        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
-                        atomic64_read(&cfs_rq->tg->load_avg));
+        SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
+                        (unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
         SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
                         cfs_rq->tg_load_contrib);
         SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
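The sched/debug.c hunk is a printf-format fix: atomic64_read() yields a 64-bit value, which %ld misinterprets on 32-bit builds, so the format becomes %lld with an explicit cast. The same rule in plain userspace C; the variable name and value are just an illustration.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    int64_t load_avg = 123456789012345LL;   /* stands in for atomic64_read(...) */

    /* Either cast to a type that matches %lld, or use the PRId64 macro. */
    printf("tg_load_avg: %lld\n", (long long)load_avg);
    printf("tg_load_avg: %" PRId64 "\n", load_avg);
    return 0;
}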
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5eea8707234a..81fa53643409 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2663,7 +2663,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
         hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
         struct cfs_rq *cfs_rq;
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 418feb01344e..4f02b2847357 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -566,7 +566,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
-        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+        struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
         int i, weight, more = 0;
         u64 rt_period;
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 53cd5c4d1172..3d09cf6cde75 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  * No need to set need_resched since signal event passing
  * goes through ->blocked
  */
-void signal_wake_up(struct task_struct *t, int resume)
+void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
-        unsigned int mask;
-
         set_tsk_thread_flag(t, TIF_SIGPENDING);
-
         /*
-         * For SIGKILL, we want to wake it up in the stopped/traced/killable
+         * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
          * case. We don't check t->state here because there is a race with it
          * executing another processor and just now entering stopped state.
          * By using wake_up_state, we ensure the process will wake up and
          * handle its death signal.
          */
-        mask = TASK_INTERRUPTIBLE;
-        if (resume)
-                mask |= TASK_WAKEKILL;
-        if (!wake_up_state(t, mask))
+        if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
                 kick_process(t);
 }
 
@@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t)
         assert_spin_locked(&t->sighand->siglock);
 
         task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
-        signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+        ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 }
 
 /*
@@ -1800,6 +1794,10 @@ static inline int may_ptrace_stop(void)
          * If SIGKILL was already sent before the caller unlocked
          * ->siglock we must see ->core_state != NULL. Otherwise it
          * is safe to enter schedule().
+         *
+         * This is almost outdated, a task with the pending SIGKILL can't
+         * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
+         * after SIGKILL was already dequeued.
          */
         if (unlikely(current->mm->core_state) &&
             unlikely(current->mm == current->parent->mm))
@@ -1925,6 +1923,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
                 if (gstop_done)
                         do_notify_parent_cldstop(current, false, why);
 
+                /* tasklist protects us from ptrace_freeze_traced() */
                 __set_current_state(TASK_RUNNING);
                 if (clear_code)
                         current->exit_code = 0;
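The signal.c rework replaces the boolean resume argument of signal_wake_up() with an explicit extra state mask in signal_wake_up_state(); the old signal_wake_up() and the new ptrace_signal_wake_up() presumably become thin wrappers in a header outside this kernel/-only diffstat. A hedged userspace sketch of that wrapper pattern, with invented flag values:

#include <stdio.h>

/* Illustrative bit values only; the real task-state flags differ. */
#define TASK_INTERRUPTIBLE  0x01
#define TASK_WAKEKILL       0x02
#define __TASK_STOPPED      0x04
#define __TASK_TRACED       0x08

/* Core primitive: the caller states exactly which extra sleep states to wake. */
static void signal_wake_up_state(const char *who, unsigned int state)
{
    printf("%s: wake mask 0x%x\n", who, state | TASK_INTERRUPTIBLE);
}

/*
 * Thin wrappers like these keep old call sites readable while making the
 * extra wake states explicit instead of hiding them behind a bool "resume".
 */
static void signal_wake_up(const char *who, int resume)
{
    signal_wake_up_state(who, resume ? TASK_WAKEKILL : 0);
}

static void ptrace_signal_wake_up(const char *who, int resume)
{
    signal_wake_up_state(who, resume ? __TASK_TRACED : 0);
}

int main(void)
{
    signal_wake_up("normal signal", 1);       /* may wake killable sleeps */
    ptrace_signal_wake_up("ptrace event", 1); /* may wake a frozen tracee */
    return 0;
}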
diff --git a/kernel/smp.c b/kernel/smp.c
index 29dd40a9f2f4..69f38bd98b42 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,6 +33,7 @@ struct call_function_data {
         struct call_single_data        csd;
         atomic_t                        refs;
         cpumask_var_t                   cpumask;
+        cpumask_var_t                   cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -56,6 +57,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                 cpu_to_node(cpu)))
                         return notifier_from_errno(-ENOMEM);
+                if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
+                                cpu_to_node(cpu)))
+                        return notifier_from_errno(-ENOMEM);
                 break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -65,6 +69,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
                 free_cpumask_var(cfd->cpumask);
+                free_cpumask_var(cfd->cpumask_ipi);
                 break;
 #endif
         };
@@ -526,6 +531,12 @@ void smp_call_function_many(const struct cpumask *mask,
                 return;
         }
 
+        /*
+         * After we put an entry into the list, data->cpumask
+         * may be cleared again when another CPU sends another IPI for
+         * a SMP function call, so data->cpumask will be zero.
+         */
+        cpumask_copy(data->cpumask_ipi, data->cpumask);
         raw_spin_lock_irqsave(&call_function.lock, flags);
         /*
          * Place entry at the _HEAD_ of the list, so that any cpu still
@@ -549,7 +560,7 @@ void smp_call_function_many(const struct cpumask *mask,
         smp_mb();
 
         /* Send a message to all CPUs in the map */
-        arch_send_call_function_ipi_mask(data->cpumask);
+        arch_send_call_function_ipi_mask(data->cpumask_ipi);
 
         /* Optionally wait for the CPUs to complete */
         if (wait)
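The smp.c hunks add a private cpumask_ipi snapshot because, once the call entry is published on the shared list, other CPUs may start clearing bits in data->cpumask before the IPIs go out. A single-threaded userspace sketch of the snapshot-before-publish idea follows; the 64-bit toy mask and helper names are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Toy 64-CPU mask; cpumask_var_t in the kernel is an opaque bitmap. */
typedef uint64_t cpumask_t;

struct call_data {
    cpumask_t cpumask;      /* cleared bit-by-bit by target CPUs as they finish */
    cpumask_t cpumask_ipi;  /* private snapshot used only for sending IPIs */
};

static void send_ipis(cpumask_t mask)
{
    printf("IPI mask: 0x%llx\n", (unsigned long long)mask);
}

int main(void)
{
    struct call_data data = { .cpumask = 0x6 };   /* CPUs 1 and 2 */

    /*
     * Snapshot before the entry becomes visible to other CPUs: once it is
     * on the shared list, targets may start clearing data.cpumask, and an
     * IPI sent from the live mask could miss CPUs or target none at all.
     */
    data.cpumask_ipi = data.cpumask;

    /* ...publish the entry on the shared call list here... */
    data.cpumask &= ~0x2;           /* simulate CPU 1 finishing early */

    send_ipis(data.cpumask_ipi);    /* still reaches both CPUs */
    return 0;
}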
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3ffe4c5ad3f3..41473b4ad7a4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3998,7 +3998,7 @@ static int ftrace_module_notify(struct notifier_block *self,
 
 struct notifier_block ftrace_module_nb = {
         .notifier_call = ftrace_module_notify,
-        .priority = 0,
+        .priority = INT_MAX,    /* Run before anything that can use kprobes */
 };
 
 extern unsigned long __start_mcount_loc[];
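The ftrace change relies on notifier chains calling their callbacks in descending priority order, so .priority = INT_MAX makes the module notifier run before lower-priority users such as kprobes. A small userspace model of a priority-ordered callback chain; the list implementation and callback names are invented for illustration, not the kernel's notifier API.

#include <limits.h>
#include <stdio.h>

/* Minimal model of a notifier chain: callbacks kept sorted by priority. */
struct notifier {
    int (*call)(void *data);
    int priority;                 /* higher runs earlier */
    struct notifier *next;
};

static void notifier_register(struct notifier **head, struct notifier *nb)
{
    while (*head && (*head)->priority >= nb->priority)
        head = &(*head)->next;
    nb->next = *head;
    *head = nb;
}

static void notifier_call_chain(struct notifier *head, void *data)
{
    for (; head; head = head->next)
        head->call(data);
}

static int ftrace_like(void *data)  { (void)data; printf("prepare ftrace records\n"); return 0; }
static int kprobes_like(void *data) { (void)data; printf("arm module kprobes\n");     return 0; }

int main(void)
{
    struct notifier *chain = NULL;
    struct notifier kp = { kprobes_like, 0, NULL };
    struct notifier ft = { ftrace_like, INT_MAX, NULL };  /* runs first */

    notifier_register(&chain, &kp);
    notifier_register(&chain, &ft);
    notifier_call_chain(chain, NULL);
    return 0;
}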