Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/cpu.c                  | 14
 -rw-r--r--  kernel/freezer.c              |  9
 -rw-r--r--  kernel/gcov/Kconfig           |  2
 -rw-r--r--  kernel/kmod.c                 | 76
 -rw-r--r--  kernel/power/process.c        | 57
 -rw-r--r--  kernel/power/qos.c            | 27
 -rw-r--r--  kernel/rcu/tree.c             | 15
 -rw-r--r--  kernel/rcu/tree.h             |  1
 -rw-r--r--  kernel/rcu/tree_plugin.h      | 33
 -rw-r--r--  kernel/time/clockevents.c     |  2
 -rw-r--r--  kernel/time/posix-timers.c    |  1
 -rw-r--r--  kernel/trace/ftrace.c         | 54
 -rw-r--r--  kernel/trace/trace_syscalls.c |  8
 13 files changed, 215 insertions(+), 84 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 356450f09c1f..90a3d017b90c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -64,6 +64,8 @@ static struct {
 	 * an ongoing cpu hotplug operation.
 	 */
 	int refcount;
+	/* And allows lockless put_online_cpus(). */
+	atomic_t puts_pending;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
@@ -113,7 +115,11 @@ void put_online_cpus(void)
 {
 	if (cpu_hotplug.active_writer == current)
 		return;
-	mutex_lock(&cpu_hotplug.lock);
+	if (!mutex_trylock(&cpu_hotplug.lock)) {
+		atomic_inc(&cpu_hotplug.puts_pending);
+		cpuhp_lock_release();
+		return;
+	}
 
 	if (WARN_ON(!cpu_hotplug.refcount))
 		cpu_hotplug.refcount++; /* try to fix things up */
@@ -155,6 +161,12 @@ void cpu_hotplug_begin(void)
 	cpuhp_lock_acquire();
 	for (;;) {
 		mutex_lock(&cpu_hotplug.lock);
+		if (atomic_read(&cpu_hotplug.puts_pending)) {
+			int delta;
+
+			delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
+			cpu_hotplug.refcount -= delta;
+		}
 		if (likely(!cpu_hotplug.refcount))
 			break;
 		__set_current_state(TASK_UNINTERRUPTIBLE);
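The cpu.c change trades a possible sleep in put_online_cpus() for a deferred decrement: a reader that cannot take the hotplug mutex records its put in an atomic counter, and cpu_hotplug_begin() folds the pending puts back into the refcount with atomic_xchg() before testing it. A minimal userspace sketch of the same pattern (invented names, C11 atomics, not kernel code):

/*
 * Sketch only: pthread mutex + C11 atomics stand in for the kernel's
 * cpu_hotplug.lock and atomic_t. Names are invented for illustration.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount;                 /* protected by lock */
static atomic_int puts_pending;      /* lockless side channel */

void put_reader(void)
{
        if (pthread_mutex_trylock(&lock) != 0) {
                /* Lock busy: defer the decrement instead of sleeping. */
                atomic_fetch_add(&puts_pending, 1);
                return;
        }
        refcount--;
        pthread_mutex_unlock(&lock);
}

void writer_wait_for_readers(void)
{
        for (;;) {
                pthread_mutex_lock(&lock);
                /* Drain any deferred puts into the refcount. */
                refcount -= atomic_exchange(&puts_pending, 0);
                if (refcount == 0)
                        break;        /* return with the lock held */
                pthread_mutex_unlock(&lock);
                sched_yield();
        }
}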
diff --git a/kernel/freezer.c b/kernel/freezer.c
index aa6a8aadb911..a8900a3bc27a 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
 	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
 		return false;
 
+	if (test_thread_flag(TIF_MEMDIE))
+		return false;
+
 	if (pm_nosig_freezing || cgroup_freezing(p))
 		return true;
 
@@ -147,12 +150,6 @@ void __thaw_task(struct task_struct *p)
 {
 	unsigned long flags;
 
-	/*
-	 * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
-	 * be visible to @p as waking up implies wmb. Waking up inside
-	 * freezer_lock also prevents wakeups from leaking outside
-	 * refrigerator.
-	 */
 	spin_lock_irqsave(&freezer_lock, flags);
 	if (frozen(p))
 		wake_up_process(p);
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index cf66c5c8458e..3b7408759bdf 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -35,7 +35,7 @@ config GCOV_KERNEL
 config GCOV_PROFILE_ALL
 	bool "Profile entire Kernel"
 	depends on GCOV_KERNEL
-	depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM
+	depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM || ARM64
 	default n
 	---help---
 	This options activates profiling for the entire kernel.
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 8637e041a247..80f7a6d00519 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -196,12 +196,34 @@ int __request_module(bool wait, const char *fmt, ...)
 EXPORT_SYMBOL(__request_module);
 #endif /* CONFIG_MODULES */
 
+static void call_usermodehelper_freeinfo(struct subprocess_info *info)
+{
+	if (info->cleanup)
+		(*info->cleanup)(info);
+	kfree(info);
+}
+
+static void umh_complete(struct subprocess_info *sub_info)
+{
+	struct completion *comp = xchg(&sub_info->complete, NULL);
+	/*
+	 * See call_usermodehelper_exec(). If xchg() returns NULL
+	 * we own sub_info, the UMH_KILLABLE caller has gone away
+	 * or the caller used UMH_NO_WAIT.
+	 */
+	if (comp)
+		complete(comp);
+	else
+		call_usermodehelper_freeinfo(sub_info);
+}
+
 /*
  * This is the task which runs the usermode application
  */
 static int ____call_usermodehelper(void *data)
 {
 	struct subprocess_info *sub_info = data;
+	int wait = sub_info->wait & ~UMH_KILLABLE;
 	struct cred *new;
 	int retval;
 
@@ -221,7 +243,7 @@ static int ____call_usermodehelper(void *data)
 	retval = -ENOMEM;
 	new = prepare_kernel_cred(current);
 	if (!new)
-		goto fail;
+		goto out;
 
 	spin_lock(&umh_sysctl_lock);
 	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
@@ -233,7 +255,7 @@ static int ____call_usermodehelper(void *data)
 		retval = sub_info->init(sub_info, new);
 		if (retval) {
 			abort_creds(new);
-			goto fail;
+			goto out;
 		}
 	}
 
@@ -242,12 +264,13 @@ static int ____call_usermodehelper(void *data)
 	retval = do_execve(getname_kernel(sub_info->path),
 			   (const char __user *const __user *)sub_info->argv,
 			   (const char __user *const __user *)sub_info->envp);
+out:
+	sub_info->retval = retval;
+	/* wait_for_helper() will call umh_complete if UMH_WAIT_PROC. */
+	if (wait != UMH_WAIT_PROC)
+		umh_complete(sub_info);
 	if (!retval)
 		return 0;
-
-	/* Exec failed? */
-fail:
-	sub_info->retval = retval;
 	do_exit(0);
 }
 
@@ -258,26 +281,6 @@ static int call_helper(void *data)
 	return ____call_usermodehelper(data);
 }
 
-static void call_usermodehelper_freeinfo(struct subprocess_info *info)
-{
-	if (info->cleanup)
-		(*info->cleanup)(info);
-	kfree(info);
-}
-
-static void umh_complete(struct subprocess_info *sub_info)
-{
-	struct completion *comp = xchg(&sub_info->complete, NULL);
-	/*
-	 * See call_usermodehelper_exec(). If xchg() returns NULL
-	 * we own sub_info, the UMH_KILLABLE caller has gone away.
-	 */
-	if (comp)
-		complete(comp);
-	else
-		call_usermodehelper_freeinfo(sub_info);
-}
-
 /* Keventd can't block, but this (a child) can. */
 static int wait_for_helper(void *data)
 {
@@ -336,18 +339,8 @@ static void __call_usermodehelper(struct work_struct *work)
 		kmod_thread_locker = NULL;
 	}
 
-	switch (wait) {
-	case UMH_NO_WAIT:
-		call_usermodehelper_freeinfo(sub_info);
-		break;
-
-	case UMH_WAIT_PROC:
-		if (pid > 0)
-			break;
-		/* FALLTHROUGH */
-	case UMH_WAIT_EXEC:
-		if (pid < 0)
-			sub_info->retval = pid;
+	if (pid < 0) {
+		sub_info->retval = pid;
 		umh_complete(sub_info);
 	}
 }
@@ -588,7 +581,12 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 		goto out;
 	}
 
-	sub_info->complete = &done;
+	/*
+	 * Set the completion pointer only if there is a waiter.
+	 * This makes it possible to use umh_complete to free
+	 * the data structure in case of UMH_NO_WAIT.
+	 */
+	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
 	sub_info->wait = wait;
 
 	queue_work(khelper_wq, &sub_info->work);
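The reshuffled umh_complete() is an ownership handoff: sub_info->complete is swapped to NULL exactly once with xchg(), so either the helper wakes a waiter that is still there, or, when the waiter has gone away (UMH_KILLABLE) or never existed (UMH_NO_WAIT), whoever loses the race inherits the job of freeing sub_info. A rough userspace analogue, with invented names (a POSIX semaphore stands in for struct completion):

/*
 * Sketch only, not the kernel API: demonstrates the xchg()-based
 * "last one out frees" protocol from umh_complete().
 */
#include <semaphore.h>
#include <stdatomic.h>
#include <stdlib.h>

struct job_info {
        _Atomic(sem_t *) complete;   /* NULL once ownership is settled */
        int retval;
};

/* Helper side: called exactly once when the work finishes. */
static void job_complete(struct job_info *info)
{
        sem_t *done = atomic_exchange(&info->complete, NULL);

        if (done)
                sem_post(done);      /* waiter still present: wake it */
        else
                free(info);          /* waiter gone or never existed: we free */
}

/* Waiter side: give up waiting early (e.g. killed). */
static void job_abandon(struct job_info *info)
{
        if (atomic_exchange(&info->complete, NULL) != NULL)
                return;              /* we detached first: the helper frees */
        free(info);                  /* helper already finished: cleanup is ours */
}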
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 7b323221b9ee..5a6ec8678b9a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -46,13 +46,13 @@ static int try_to_freeze_tasks(bool user_only)
 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
-		do_each_thread(g, p) {
+		for_each_process_thread(g, p) {
 			if (p == current || !freeze_task(p))
 				continue;
 
 			if (!freezer_should_skip(p))
 				todo++;
-		} while_each_thread(g, p);
+		}
 		read_unlock(&tasklist_lock);
 
 		if (!user_only) {
@@ -93,11 +93,11 @@ static int try_to_freeze_tasks(bool user_only)
 
 		if (!wakeup) {
 			read_lock(&tasklist_lock);
-			do_each_thread(g, p) {
+			for_each_process_thread(g, p) {
 				if (p != current && !freezer_should_skip(p)
 				    && freezing(p) && !frozen(p))
 					sched_show_task(p);
-			} while_each_thread(g, p);
+			}
 			read_unlock(&tasklist_lock);
 		}
 	} else {
@@ -108,6 +108,30 @@ static int try_to_freeze_tasks(bool user_only)
 	return todo ? -EBUSY : 0;
 }
 
+static bool __check_frozen_processes(void)
+{
+	struct task_struct *g, *p;
+
+	for_each_process_thread(g, p)
+		if (p != current && !freezer_should_skip(p) && !frozen(p))
+			return false;
+
+	return true;
+}
+
+/*
+ * Returns true if all freezable tasks (except for current) are frozen already
+ */
+static bool check_frozen_processes(void)
+{
+	bool ret;
+
+	read_lock(&tasklist_lock);
+	ret = __check_frozen_processes();
+	read_unlock(&tasklist_lock);
+	return ret;
+}
+
 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
  * The current thread will not be frozen. The same process that calls
@@ -118,6 +142,7 @@ static int try_to_freeze_tasks(bool user_only)
 int freeze_processes(void)
 {
 	int error;
+	int oom_kills_saved;
 
 	error = __usermodehelper_disable(UMH_FREEZING);
 	if (error)
@@ -132,11 +157,25 @@ int freeze_processes(void)
 	pm_wakeup_clear();
 	printk("Freezing user space processes ... ");
 	pm_freezing = true;
+	oom_kills_saved = oom_kills_count();
 	error = try_to_freeze_tasks(true);
 	if (!error) {
-		printk("done.");
 		__usermodehelper_set_disable_depth(UMH_DISABLED);
 		oom_killer_disable();
+
+		/*
+		 * There might have been an OOM kill while we were
+		 * freezing tasks and the killed task might be still
+		 * on the way out so we have to double check for race.
+		 */
+		if (oom_kills_count() != oom_kills_saved &&
+		    !check_frozen_processes()) {
+			__usermodehelper_set_disable_depth(UMH_ENABLED);
+			printk("OOM in progress.");
+			error = -EBUSY;
+		} else {
+			printk("done.");
+		}
 	}
 	printk("\n");
 	BUG_ON(in_atomic());
@@ -191,11 +230,11 @@ void thaw_processes(void)
 	thaw_workqueues();
 
 	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
+	for_each_process_thread(g, p) {
 		/* No other threads should have PF_SUSPEND_TASK set */
 		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
 		__thaw_task(p);
-	} while_each_thread(g, p);
+	}
 	read_unlock(&tasklist_lock);
 
 	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
@@ -218,10 +257,10 @@ void thaw_kernel_threads(void)
 	thaw_workqueues();
 
 	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
+	for_each_process_thread(g, p) {
 		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
 			__thaw_task(p);
-	} while_each_thread(g, p);
+	}
 	read_unlock(&tasklist_lock);
 
 	schedule();
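freeze_processes() closes a race with the OOM killer by snapshotting a kill counter before freezing and revalidating afterwards: only "the counter moved and someone is still not frozen" fails the freeze. The pattern, reduced to a self-contained sketch (oom_kills_count() and check_frozen_processes() are the kernel's helpers; the stubs below are stand-ins):

/*
 * Sketch only: snapshot a global event counter, do the slow operation,
 * and treat "counter moved" as a hint to revalidate before declaring
 * success.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_long oom_kills;   /* bumped by the (simulated) OOM killer */

static bool all_tasks_frozen(void) { return true; }  /* stub revalidation */
static int freeze_user_tasks(void) { return 0; }     /* stub freezer */

static int freeze_with_oom_check(void)
{
        long saved = atomic_load(&oom_kills);
        int error = freeze_user_tasks();

        if (error)
                return error;
        /*
         * An OOM kill that raced with the freeze may have left its
         * victim runnable; re-check instead of trusting the result.
         */
        if (atomic_load(&oom_kills) != saved && !all_tasks_frozen())
                return -EBUSY;
        return 0;
}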
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 884b77058864..5f4c006c4b1e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -105,11 +105,27 @@ static struct pm_qos_object network_throughput_pm_qos = {
 };
 
 
+static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
+static struct pm_qos_constraints memory_bw_constraints = {
+	.list = PLIST_HEAD_INIT(memory_bw_constraints.list),
+	.target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
+	.default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
+	.no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
+	.type = PM_QOS_SUM,
+	.notifiers = &memory_bandwidth_notifier,
+};
+static struct pm_qos_object memory_bandwidth_pm_qos = {
+	.constraints = &memory_bw_constraints,
+	.name = "memory_bandwidth",
+};
+
+
 static struct pm_qos_object *pm_qos_array[] = {
 	&null_pm_qos,
 	&cpu_dma_pm_qos,
 	&network_lat_pm_qos,
-	&network_throughput_pm_qos
+	&network_throughput_pm_qos,
+	&memory_bandwidth_pm_qos,
 };
 
 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
@@ -130,6 +146,9 @@ static const struct file_operations pm_qos_power_fops = {
 /* unlocked internal variant */
 static inline int pm_qos_get_value(struct pm_qos_constraints *c)
 {
+	struct plist_node *node;
+	int total_value = 0;
+
 	if (plist_head_empty(&c->list))
 		return c->no_constraint_value;
 
@@ -140,6 +159,12 @@ static inline int pm_qos_get_value(struct pm_qos_constraints *c)
 	case PM_QOS_MAX:
 		return plist_last(&c->list)->prio;
 
+	case PM_QOS_SUM:
+		plist_for_each(node, &c->list)
+			total_value += node->prio;
+
+		return total_value;
+
 	default:
 		/* runtime check for not using enum */
 		BUG();
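The new PM_QOS_SUM class behaves differently from the existing aggregations: latency-style constraints return the single tightest request (the min or max across the plist), while memory bandwidth is additive, so every requester's value contributes. A plain-array sketch of the three classes (invented names, arrays standing in for the kernel's plist):

/*
 * Sketch only: how MIN, MAX and the new SUM aggregation differ over a
 * set of requests.
 */
#include <limits.h>
#include <stddef.h>

enum qos_type { QOS_MIN, QOS_MAX, QOS_SUM };

static int qos_get_value(enum qos_type type, const int *req, size_t n,
                         int no_constraint_value)
{
        int v;
        size_t i;

        if (n == 0)
                return no_constraint_value;

        switch (type) {
        case QOS_MIN:                   /* e.g. cpu_dma_latency */
                v = INT_MAX;
                for (i = 0; i < n; i++)
                        if (req[i] < v)
                                v = req[i];
                return v;
        case QOS_MAX:                   /* e.g. network_throughput */
                v = INT_MIN;
                for (i = 0; i < n; i++)
                        if (req[i] > v)
                                v = req[i];
                return v;
        case QOS_SUM:                   /* new: memory_bandwidth */
                v = 0;
                for (i = 0; i < n; i++)
                        v += req[i];
                return v;
        }
        return no_constraint_value;
}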
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 133e47223095..9815447d22e0 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3299,11 +3299,16 @@ static void _rcu_barrier(struct rcu_state *rsp)
 			continue;
 		rdp = per_cpu_ptr(rsp->rda, cpu);
 		if (rcu_is_nocb_cpu(cpu)) {
-			_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
-					   rsp->n_barrier_done);
-			atomic_inc(&rsp->barrier_cpu_count);
-			__call_rcu(&rdp->barrier_head, rcu_barrier_callback,
-				   rsp, cpu, 0);
+			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
+				_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
+						   rsp->n_barrier_done);
+			} else {
+				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
+						   rsp->n_barrier_done);
+				atomic_inc(&rsp->barrier_cpu_count);
+				__call_rcu(&rdp->barrier_head,
+					   rcu_barrier_callback, rsp, cpu, 0);
+			}
 		} else if (ACCESS_ONCE(rdp->qlen)) {
 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
 					   rsp->n_barrier_done);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index d03764652d91..bbdc45d8d74f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -587,6 +587,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
+static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 387dd4599344..c1d7f27bd38f 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2050,6 +2050,33 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 }
 
 /*
+ * Does the specified CPU need an RCU callback for the specified flavor
+ * of rcu_barrier()?
+ */
+static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+	struct rcu_head *rhp;
+
+	/* No-CBs CPUs might have callbacks on any of three lists. */
+	rhp = ACCESS_ONCE(rdp->nocb_head);
+	if (!rhp)
+		rhp = ACCESS_ONCE(rdp->nocb_gp_head);
+	if (!rhp)
+		rhp = ACCESS_ONCE(rdp->nocb_follower_head);
+
+	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
+	if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp) {
+		/* RCU callback enqueued before CPU first came online??? */
+		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
+		       cpu, rhp->func);
+		WARN_ON_ONCE(1);
+	}
+
+	return !!rhp;
+}
+
+/*
  * Enqueue the specified string of rcu_head structures onto the specified
  * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
  * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
@@ -2642,6 +2669,12 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
+static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+{
+	WARN_ON_ONCE(1); /* Should be dead code. */
+	return false;
+}
+
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 }
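rcu_nocb_cpu_needs_barrier() has to sample all three stages of the no-CBs hand-off, because a pending callback counts wherever it currently sits. A reduced sketch of that check, with C11 relaxed loads standing in for the kernel's ACCESS_ONCE() (types and names simplified):

/*
 * Sketch only: "is anything queued?" across a multi-stage hand-off must
 * look at every stage, not just the entry queue.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct cb { struct cb *next; };

struct nocb_queues {
        _Atomic(struct cb *) head;           /* newly enqueued */
        _Atomic(struct cb *) gp_head;        /* waiting for a grace period */
        _Atomic(struct cb *) follower_head;  /* ready to be invoked */
};

static bool cpu_has_callbacks(struct nocb_queues *q)
{
        struct cb *rhp;

        rhp = atomic_load_explicit(&q->head, memory_order_relaxed);
        if (!rhp)
                rhp = atomic_load_explicit(&q->gp_head, memory_order_relaxed);
        if (!rhp)
                rhp = atomic_load_explicit(&q->follower_head,
                                           memory_order_relaxed);
        return rhp != NULL;
}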
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 9c94c19f1305..55449909f114 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,7 +72,7 @@ static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
 	 * Also omit the add if it would overflow the u64 boundary.
 	 */
 	if ((~0ULL - clc > rnd) &&
-	    (!ismax || evt->mult <= (1U << evt->shift)))
+	    (!ismax || evt->mult <= (1ULL << evt->shift)))
 		clc += rnd;
 
 	do_div(clc, evt->mult);
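The one-character clockevents fix is about integer width: with the old 32-bit 1U constant, a shift count equal to the operand width is undefined behaviour in C (on x86 the count is typically masked, so 1U << 32 silently evaluates as 1U << 0), which breaks the comparison against evt->mult. Promoting the constant to 1ULL makes the shift 64-bit and well-defined for counts up to 63. A tiny demo:

/* Sketch only: why 1ULL << 32 works where 1U << 32 is undefined. */
#include <stdio.h>

int main(void)
{
        volatile unsigned int shift = 32;

        /* 1U << shift would be UB here (count >= width of the operand);
         * on x86 it usually yields 1 instead of the intended 2^32. */
        printf("64-bit: %llu\n", 1ULL << shift);  /* prints 4294967296 */
        return 0;
}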
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 42b463ad90f2..31ea01f42e1f 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -636,6 +636,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
 			goto out;
 		}
 	} else {
+		memset(&event.sigev_value, 0, sizeof(event.sigev_value));
 		event.sigev_notify = SIGEV_SIGNAL;
 		event.sigev_signo = SIGALRM;
 		event.sigev_value.sival_int = new_timer->it_id;
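The added memset closes a potential information leak: sigev_value is a union, and setting only the 4-byte sival_int member leaves the remaining bytes of the (typically 8-byte) sival_ptr view uninitialized; those stale stack bytes could later reach userspace along with the signal. The safe pattern, as a standalone snippet:

/* Sketch only: zero a union before setting just one of its members. */
#include <signal.h>
#include <string.h>

void init_event(struct sigevent *event, int timer_id)
{
        /* Zero the whole union first so no stale bytes escape... */
        memset(&event->sigev_value, 0, sizeof(event->sigev_value));
        event->sigev_notify = SIGEV_SIGNAL;
        event->sigev_signo = SIGALRM;
        /* ...then set the one member actually used. */
        event->sigev_value.sival_int = timer_id;
}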
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fb186b9ddf51..31c90fec4158 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1925,8 +1925,16 @@ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
 	 * when we are adding another op to the rec or removing the
 	 * current one. Thus, if the op is being added, we can
 	 * ignore it because it hasn't attached itself to the rec
-	 * yet. That means we just need to find the op that has a
-	 * trampoline and is not beeing added.
+	 * yet.
+	 *
+	 * If an ops is being modified (hooking to different functions)
+	 * then we don't care about the new functions that are being
+	 * added, just the old ones (that are probably being removed).
+	 *
+	 * If we are adding an ops to a function that already is using
+	 * a trampoline, it needs to be removed (trampolines are only
+	 * for single ops connected), then an ops that is not being
+	 * modified also needs to be checked.
 	 */
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
 
@@ -1940,17 +1948,23 @@ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
 		if (op->flags & FTRACE_OPS_FL_ADDING)
 			continue;
 
+
 		/*
-		 * If the ops is not being added and has a trampoline,
-		 * then it must be the one that we want!
+		 * If the ops is being modified and is in the old
+		 * hash, then it is probably being removed from this
+		 * function.
 		 */
-		if (hash_contains_ip(ip, op->func_hash))
-			return op;
-
-		/* If the ops is being modified, it may be in the old hash. */
 		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
 		    hash_contains_ip(ip, &op->old_hash))
 			return op;
+		/*
+		 * If the ops is not being added or modified, and it's
+		 * in its normal filter hash, then this must be the one
+		 * we want!
+		 */
+		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
+		    hash_contains_ip(ip, op->func_hash))
+			return op;
 
 	} while_for_each_ftrace_op(op);
 
@@ -2293,10 +2307,13 @@ static void ftrace_run_update_code(int command)
 	FTRACE_WARN_ON(ret);
 }
 
-static void ftrace_run_modify_code(struct ftrace_ops *ops, int command)
+static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
+				   struct ftrace_hash *old_hash)
 {
 	ops->flags |= FTRACE_OPS_FL_MODIFYING;
+	ops->old_hash.filter_hash = old_hash;
 	ftrace_run_update_code(command);
+	ops->old_hash.filter_hash = NULL;
 	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
 }
 
@@ -3340,7 +3357,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 
 static int ftrace_probe_registered;
 
-static void __enable_ftrace_function_probe(void)
+static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
 {
 	int ret;
 	int i;
@@ -3348,7 +3365,8 @@ static void __enable_ftrace_function_probe(void)
 	if (ftrace_probe_registered) {
 		/* still need to update the function call sites */
 		if (ftrace_enabled)
-			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS);
+			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+					       old_hash);
 		return;
 	}
 
@@ -3477,13 +3495,14 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	} while_for_each_ftrace_rec();
 
 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+	__enable_ftrace_function_probe(old_hash);
+
 	if (!ret)
 		free_ftrace_hash_rcu(old_hash);
 	else
 		count = ret;
 
-	__enable_ftrace_function_probe();
-
 out_unlock:
 	mutex_unlock(&ftrace_lock);
 out:
@@ -3764,10 +3783,11 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 		return add_hash_entry(hash, ip);
 }
 
-static void ftrace_ops_update_code(struct ftrace_ops *ops)
+static void ftrace_ops_update_code(struct ftrace_ops *ops,
+				   struct ftrace_hash *old_hash)
 {
 	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
-		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS);
+		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
 }
 
 static int
@@ -3813,7 +3833,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	old_hash = *orig_hash;
 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
 	if (!ret) {
-		ftrace_ops_update_code(ops);
+		ftrace_ops_update_code(ops, old_hash);
 		free_ftrace_hash_rcu(old_hash);
 	}
 	mutex_unlock(&ftrace_lock);
@@ -4058,7 +4078,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 		ret = ftrace_hash_move(iter->ops, filter_hash,
 				       orig_hash, iter->hash);
 		if (!ret) {
-			ftrace_ops_update_code(iter->ops, old_hash);
+			ftrace_ops_update_code(iter->ops, old_hash);
 			free_ftrace_hash_rcu(old_hash);
 		}
 		mutex_unlock(&ftrace_lock);
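The old_hash threading through ftrace_run_modify_code() gives concurrent lookups (such as ftrace_find_tramp_ops_curr above) a window onto the previous filter while the call sites are rewritten, then clears it so a stale pointer cannot linger. The shape of that protocol, with simplified stand-in names:

/*
 * Sketch only: publish the old state for the duration of an update,
 * under a "modifying" flag, then retract it. Field and function names
 * are invented; this is not the ftrace API.
 */
struct hash;

struct ops {
        unsigned int flags;
        struct hash *filter_hash;      /* current filter */
        struct hash *old_filter_hash;  /* valid only while MODIFYING is set */
};

#define OPS_FL_MODIFYING 0x1

static void run_update(void) { /* rewrites call sites in the real code */ }

void run_modify_code(struct ops *ops, struct hash *old_hash)
{
        ops->flags |= OPS_FL_MODIFYING;
        ops->old_filter_hash = old_hash;  /* visible to concurrent lookups */
        run_update();
        ops->old_filter_hash = NULL;      /* the stale hash must not linger */
        ops->flags &= ~OPS_FL_MODIFYING;
}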
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 4dc8b79c5f75..29228c4d5696 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = trace_get_syscall_nr(current, regs);
-	if (syscall_nr < 0)
+	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
 		return;
 
 	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	int syscall_nr;
 
 	syscall_nr = trace_get_syscall_nr(current, regs);
-	if (syscall_nr < 0)
+	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
 		return;
 
 	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = trace_get_syscall_nr(current, regs);
-	if (syscall_nr < 0)
+	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
 		return;
 	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
@@ -641,7 +641,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = trace_get_syscall_nr(current, regs);
-	if (syscall_nr < 0)
+	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
 		return;
 	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
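The four identical changes are the standard two-sided bounds check: syscall_nr is subsequently used to index NR_syscalls-sized bitmaps and metadata tables, so a compat or malformed number must be rejected at both ends before it becomes an array index. The pattern in isolation (the table size and names below are invented for the example):

/* Sketch only: validate both bounds before indexing. */
#include <stdbool.h>

#define NR_SYSCALLS 436                         /* illustrative table size */

static const char *syscall_names[NR_SYSCALLS];  /* stand-in metadata table */

static bool syscall_nr_valid(int nr)
{
        return nr >= 0 && nr < NR_SYSCALLS;
}

const char *syscall_name(int nr)
{
        if (!syscall_nr_valid(nr))
                return NULL;                    /* out of range: never index */
        return syscall_names[nr];
}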