author		Ingo Molnar <mingo@kernel.org>	2014-09-09 00:48:07 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-09-09 00:48:07 -0400
commit		bdea534db894ea19320f470ce2e63b1d9de96a15 (patch)
tree		8ec2a0a93d9bb5e7205253c67aa624b9ec227477 /kernel
parent		39b5a56ec0be5effe9b7d0f18cb27724bf2e5d47 (diff)
parent		2ce7598c9a453e0acd0e07be7be3f5eb39608ebd (diff)
Merge tag 'v3.17-rc4' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
 kernel/compat.c             |  24
 kernel/events/core.c        |  23
 kernel/irq/chip.c           |   1
 kernel/kexec.c              |  11
 kernel/power/power.h        |   1
 kernel/power/suspend.c      |   2
 kernel/power/suspend_test.c |  31
 kernel/rcu/tree.h           |   2
 kernel/rcu/tree_plugin.h    |  22
 kernel/resource.c           |  11
 kernel/time/tick-sched.c    |  14
 kernel/time/timekeeping.c   |   5
 kernel/trace/ftrace.c       | 246
 kernel/trace/ring_buffer.c  |  16
 14 files changed, 281 insertions(+), 128 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 633394f442f8..ebb3c369d03d 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -226,7 +226,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
 	ret = hrtimer_nanosleep_restart(restart);
 	set_fs(oldfs);
 
-	if (ret) {
+	if (ret == -ERESTART_RESTARTBLOCK) {
 		rmtp = restart->nanosleep.compat_rmtp;
 
 		if (rmtp && compat_put_timespec(&rmt, rmtp))
@@ -256,7 +256,26 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
 				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
 	set_fs(oldfs);
 
-	if (ret) {
+	/*
+	 * hrtimer_nanosleep() can only return 0 or
+	 * -ERESTART_RESTARTBLOCK here because:
+	 *
+	 * - we call it with HRTIMER_MODE_REL and therefore exclude the
+	 *   -ERESTARTNOHAND return path.
+	 *
+	 * - we supply the rmtp argument from the task stack (due to
+	 *   the necessary compat conversion). So the update cannot
+	 *   fail, which excludes the -EFAULT return path as well. If
+	 *   it fails nevertheless we have a bigger problem and won't
+	 *   reach this place anymore.
+	 *
+	 * - if the return value is 0, we do not have to update rmtp
+	 *   because there is no remaining time.
+	 *
+	 * We check for -ERESTART_RESTARTBLOCK nevertheless if the
+	 * core implementation decides to return random nonsense.
+	 */
+	if (ret == -ERESTART_RESTARTBLOCK) {
 		struct restart_block *restart
 			= &current_thread_info()->restart_block;
 
@@ -266,7 +285,6 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
 		if (rmtp && compat_put_timespec(&rmt, rmtp))
 			return -EFAULT;
 	}
-
 	return ret;
 }
 
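The stricter -ERESTART_RESTARTBLOCK test above changes only when the kernel writes the remaining time back; the contract user space sees stays the same. A minimal user-space sketch of that contract, assuming nothing beyond POSIX nanosleep() semantics (an interrupted sleep fails with EINTR and leaves the unslept remainder in rmtp):

	/* Resume an interrupted sleep from the remainder the kernel wrote back. */
	#include <errno.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec req = { .tv_sec = 10, .tv_nsec = 0 }, rem;

		while (nanosleep(&req, &rem) == -1 && errno == EINTR) {
			fprintf(stderr, "resuming: %ld.%09ld left\n",
				(long)rem.tv_sec, rem.tv_nsec);
			req = rem;	/* sleep again for what is left */
		}
		return 0;
	}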
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d8cb4d21a346..01bd42ed516c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -41,6 +41,7 @@
 #include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/mman.h>
+#include <linux/compat.h>
 
 #include "internal.h"
 
@@ -3821,6 +3822,26 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	return 0;
 }
 
+#ifdef CONFIG_COMPAT
+static long perf_compat_ioctl(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
+	case _IOC_NR(PERF_EVENT_IOC_ID):
+		/* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */
+		if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+			cmd &= ~IOCSIZE_MASK;
+			cmd |= sizeof(void *) << IOCSIZE_SHIFT;
+		}
+		break;
+	}
+	return perf_ioctl(file, cmd, arg);
+}
+#else
+# define perf_compat_ioctl NULL
+#endif
+
 int perf_event_task_enable(void)
 {
 	struct perf_event *event;
@@ -4326,7 +4347,7 @@ static const struct file_operations perf_fops = {
 	.read			= perf_read,
 	.poll			= perf_poll,
 	.unlocked_ioctl		= perf_ioctl,
-	.compat_ioctl		= perf_ioctl,
+	.compat_ioctl		= perf_compat_ioctl,
 	.mmap			= perf_mmap,
 	.fasync			= perf_fasync,
 };
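The fixup above is needed because _IOC() encodes the size of the ioctl argument into the command number, so a command declared with a pointer argument gets different numeric values on 32-bit and 64-bit ABIs. A standalone sketch of that encoding, assuming the asm-generic _IOC layout; the '$'/6 command is modeled on PERF_EVENT_IOC_SET_FILTER but is illustrative only:

	#include <linux/ioctl.h>
	#include <stdio.h>

	int main(void)
	{
		/* Same shape as PERF_EVENT_IOC_SET_FILTER: _IOW('$', 6, char *). */
		unsigned int cmd = _IOW('$', 6, char *);

		printf("cmd=%#x nr=%u size=%u\n", cmd,
		       (unsigned int)_IOC_NR(cmd), (unsigned int)_IOC_SIZE(cmd));
		/* On a 32-bit build size is 4, on a 64-bit build 8; the compat
		 * handler above rewrites the size field so both match natively. */
		return 0;
	}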
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a2b28a2fd7b1..6223fab9a9d2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -517,6 +517,7 @@ out:
 		chip->irq_eoi(&desc->irq_data);
 	raw_spin_unlock(&desc->lock);
 }
+EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
 
 /**
  *	handle_edge_irq - edge type IRQ handler
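The new export makes the fast-EOI flow handler reachable from modules, e.g. a modular irqchip driver. A hedged sketch of such a use; my_chip and virq are hypothetical, only irq_set_chip_and_handler() and handle_fasteoi_irq are real symbols here:

	#include <linux/irq.h>
	#include <linux/module.h>

	static struct irq_chip my_chip;	/* .irq_mask/.irq_unmask/.irq_eoi set elsewhere */

	static void my_driver_map_irq(unsigned int virq)
	{
		/* Possible from a module only now that handle_fasteoi_irq is exported. */
		irq_set_chip_and_handler(virq, &my_chip, handle_fasteoi_irq);
	}

	MODULE_LICENSE("GPL");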
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 0b49a0a58102..2bee072268d9 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -64,7 +64,9 @@ bool kexec_in_progress = false;
 char __weak kexec_purgatory[0];
 size_t __weak kexec_purgatory_size = 0;
 
+#ifdef CONFIG_KEXEC_FILE
 static int kexec_calculate_store_digests(struct kimage *image);
+#endif
 
 /* Location of the reserved area for the crash kernel */
 struct resource crashk_res = {
@@ -341,6 +343,7 @@ out_free_image:
 	return ret;
 }
 
+#ifdef CONFIG_KEXEC_FILE
 static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len)
 {
 	struct fd f = fdget(fd);
@@ -612,6 +615,9 @@ out_free_image:
 	kfree(image);
 	return ret;
 }
+#else /* CONFIG_KEXEC_FILE */
+static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
+#endif /* CONFIG_KEXEC_FILE */
 
 static int kimage_is_destination_range(struct kimage *image,
 					unsigned long start,
@@ -1375,6 +1381,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
 }
 #endif
 
+#ifdef CONFIG_KEXEC_FILE
 SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
 		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
 		unsigned long, flags)
@@ -1451,6 +1458,8 @@ out:
 	return ret;
 }
 
+#endif /* CONFIG_KEXEC_FILE */
+
 void crash_kexec(struct pt_regs *regs)
 {
 	/* Take the kexec_mutex here to prevent sys_kexec_load
@@ -2006,6 +2015,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 
 subsys_initcall(crash_save_vmcoreinfo_init);
 
+#ifdef CONFIG_KEXEC_FILE
 static int __kexec_add_segment(struct kimage *image, char *buf,
 			       unsigned long bufsz, unsigned long mem,
 			       unsigned long memsz)
@@ -2682,6 +2692,7 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
 
 	return 0;
 }
+#endif /* CONFIG_KEXEC_FILE */
 
 /*
  * Move into place and start executing a preloaded standalone
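The #else branch above is the standard compile-out idiom: when the feature is disabled, a no-op static inline with the same signature keeps every call site free of #ifdefs. The idiom in isolation, with a hypothetical CONFIG symbol and context type:

	#include <stdio.h>

	struct my_ctx { int users; };

	#ifdef CONFIG_MY_FEATURE
	void my_feature_cleanup(struct my_ctx *ctx);	/* real version elsewhere */
	#else
	static inline void my_feature_cleanup(struct my_ctx *ctx) { }	/* compiled out */
	#endif

	int main(void)
	{
		struct my_ctx ctx = { 0 };

		my_feature_cleanup(&ctx);	/* no guard needed at the call site */
		printf("ok\n");
		return 0;
	}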
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 5d49dcac2537..2df883a9d3cb 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -179,6 +179,7 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
 
 #ifdef CONFIG_SUSPEND
 /* kernel/power/suspend.c */
+extern const char *pm_labels[];
 extern const char *pm_states[];
 
 extern int suspend_devices_and_enter(suspend_state_t state);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 6dadb25cb0d8..18c62195660f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -31,7 +31,7 @@
 
 #include "power.h"
 
-static const char *pm_labels[] = { "mem", "standby", "freeze", };
+const char *pm_labels[] = { "mem", "standby", "freeze", NULL };
 const char *pm_states[PM_SUSPEND_MAX];
 
 static const struct platform_suspend_ops *suspend_ops;
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 2f524928b6aa..bd91bc177c93 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -129,20 +129,20 @@ static int __init has_wakealarm(struct device *dev, const void *data)
 * at startup time.  They're normally disabled, for faster boot and because
 * we can't know which states really work on this particular system.
 */
-static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
+static const char *test_state_label __initdata;
 
 static char warn_bad_state[] __initdata =
 	KERN_WARNING "PM: can't test '%s' suspend state\n";
 
 static int __init setup_test_suspend(char *value)
 {
-	suspend_state_t i;
+	int i;
 
 	/* "=mem" ==> "mem" */
 	value++;
-	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
-		if (!strcmp(pm_states[i], value)) {
-			test_state = i;
+	for (i = 0; pm_labels[i]; i++)
+		if (!strcmp(pm_labels[i], value)) {
+			test_state_label = pm_labels[i];
 			return 0;
 		}
 
@@ -158,13 +158,21 @@ static int __init test_suspend(void)
 
 	struct rtc_device *rtc = NULL;
 	struct device *dev;
+	suspend_state_t test_state;
 
 	/* PM is initialized by now; is that state testable? */
-	if (test_state == PM_SUSPEND_ON)
-		goto done;
-	if (!pm_states[test_state]) {
-		printk(warn_bad_state, pm_states[test_state]);
-		goto done;
+	if (!test_state_label)
+		return 0;
+
+	for (test_state = PM_SUSPEND_MIN; test_state < PM_SUSPEND_MAX; test_state++) {
+		const char *state_label = pm_states[test_state];
+
+		if (state_label && !strcmp(test_state_label, state_label))
+			break;
+	}
+	if (test_state == PM_SUSPEND_MAX) {
+		printk(warn_bad_state, test_state_label);
+		return 0;
 	}
 
 	/* RTCs have initialized by now too ... can we use one? */
@@ -173,13 +181,12 @@ static int __init test_suspend(void)
 	rtc = rtc_class_open(dev_name(dev));
 	if (!rtc) {
 		printk(warn_no_rtc);
-		goto done;
+		return 0;
 	}
 
 	/* go for it */
 	test_wakealarm(rtc, test_state);
 	rtc_class_close(rtc);
-done:
 	return 0;
 }
 late_initcall(test_suspend);
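setup_test_suspend() now matches the boot parameter against the NULL-terminated pm_labels[] table instead of indexing pm_states[], which is not filled in until the platform registers its suspend support. The shape of that lookup as a standalone sketch; labels[] stands in for pm_labels[]:

	#include <stdio.h>
	#include <string.h>

	static const char *labels[] = { "mem", "standby", "freeze", NULL };

	static const char *match_label(const char *value)
	{
		int i;

		for (i = 0; labels[i]; i++)
			if (!strcmp(labels[i], value))
				return labels[i];	/* the table's canonical copy */
		return NULL;
	}

	int main(void)
	{
		printf("%s\n", match_label("standby") ? "ok" : "unknown");
		return 0;
	}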
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 71e64c718f75..6a86eb7bac45 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -358,7 +358,7 @@ struct rcu_data {
 	struct rcu_head **nocb_gp_tail;
 	long nocb_gp_count;
 	long nocb_gp_count_lazy;
-	bool nocb_leader_wake;		/* Is the nocb leader thread awake? */
+	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
 	struct rcu_data *nocb_next_follower;
 					/* Next follower in wakeup chain. */
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 00dc411e9676..a7997e272564 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2074,9 +2074,9 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 
 	if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
 		return;
-	if (!ACCESS_ONCE(rdp_leader->nocb_leader_wake) || force) {
+	if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
 		/* Prior xchg orders against prior callback enqueue. */
-		ACCESS_ONCE(rdp_leader->nocb_leader_wake) = true;
+		ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
 		wake_up(&rdp_leader->nocb_wq);
 	}
 }
@@ -2253,7 +2253,7 @@ wait_again:
 	if (!rcu_nocb_poll) {
 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
 		wait_event_interruptible(my_rdp->nocb_wq,
-				ACCESS_ONCE(my_rdp->nocb_leader_wake));
+				!ACCESS_ONCE(my_rdp->nocb_leader_sleep));
 		/* Memory barrier handled by smp_mb() calls below and repoll. */
 	} else if (firsttime) {
 		firsttime = false; /* Don't drown trace log with "Poll"! */
@@ -2292,12 +2292,12 @@ wait_again:
 		schedule_timeout_interruptible(1);
 
 		/* Rescan in case we were a victim of memory ordering. */
-		my_rdp->nocb_leader_wake = false;
-		smp_mb();  /* Ensure _wake false before scan. */
+		my_rdp->nocb_leader_sleep = true;
+		smp_mb();  /* Ensure _sleep true before scan. */
 		for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
 			if (ACCESS_ONCE(rdp->nocb_head)) {
 				/* Found CB, so short-circuit next wait. */
-				my_rdp->nocb_leader_wake = true;
+				my_rdp->nocb_leader_sleep = false;
 				break;
 			}
 		goto wait_again;
@@ -2307,17 +2307,17 @@ wait_again:
 	rcu_nocb_wait_gp(my_rdp);
 
 	/*
-	 * We left ->nocb_leader_wake set to reduce cache thrashing.
-	 * We clear it now, but recheck for new callbacks while
+	 * We left ->nocb_leader_sleep unset to reduce cache thrashing.
+	 * We set it now, but recheck for new callbacks while
 	 * traversing our follower list.
 	 */
-	my_rdp->nocb_leader_wake = false;
-	smp_mb(); /* Ensure _wake false before scan of ->nocb_head. */
+	my_rdp->nocb_leader_sleep = true;
+	smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */
 
 	/* Each pass through the following loop wakes a follower, if needed. */
 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
 		if (ACCESS_ONCE(rdp->nocb_head))
-			my_rdp->nocb_leader_wake = true; /* No need to wait. */
+			my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
 		if (!rdp->nocb_gp_head)
 			continue; /* No CBs, so no need to wake follower. */
 
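Beyond the rename, the invariant these hunks preserve is a lost-wakeup handshake: the leader publishes that it is going to sleep, executes a full barrier, and rescans for callbacks before actually waiting, while the waker enqueues, executes a barrier, and then tests the flag. Reduced to C11 atomics as a standalone sketch (not kernel code); the seq_cst fences play the role of smp_mb():

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool leader_sleep;
	static atomic_int  pending;	/* stand-in for the nocb callback list */

	void leader_prepare_to_sleep(void)
	{
		atomic_store(&leader_sleep, true);
		atomic_thread_fence(memory_order_seq_cst);	/* _sleep true before scan */
		if (atomic_load(&pending))
			atomic_store(&leader_sleep, false);	/* found work: don't sleep */
	}

	void waker_enqueue(void)
	{
		atomic_fetch_add(&pending, 1);
		atomic_thread_fence(memory_order_seq_cst);	/* enqueue before flag test */
		if (atomic_load(&leader_sleep)) {
			atomic_store(&leader_sleep, false);
			/* wake_up(&leader_wq) would go here */
		}
	}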
diff --git a/kernel/resource.c b/kernel/resource.c
index da14b8d09296..60c5a3856ab7 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -351,15 +351,12 @@ static int find_next_iomem_res(struct resource *res, char *name,
 	end = res->end;
 	BUG_ON(start >= end);
 
-	read_lock(&resource_lock);
-
-	if (first_level_children_only) {
-		p = iomem_resource.child;
+	if (first_level_children_only)
 		sibling_only = true;
-	} else
-		p = &iomem_resource;
 
-	while ((p = next_resource(p, sibling_only))) {
+	read_lock(&resource_lock);
+
+	for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
 		if (p->flags != res->flags)
 			continue;
 		if (name && strcmp(p->name, name))
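After this cleanup one helper does all of the stepping, starting from the first child, and the sibling_only flag merely stops it from descending. A standalone sketch mirroring the stepping logic of next_resource() over a simplified node type:

	#include <stddef.h>

	struct node {
		struct node *parent, *sibling, *child;
	};

	/* Pre-order step, optionally restricted to the first level of children. */
	static struct node *next_node(struct node *p, int sibling_only)
	{
		if (!sibling_only && p->child)
			return p->child;		/* descend first */
		while (!p->sibling && p->parent)
			p = p->parent;			/* climb until a sibling exists */
		return p->sibling;
	}

	/* Usage, as in the patched loop:
	 *	for (p = root->child; p; p = next_node(p, sibling_only))
	 *		visit(p);
	 */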
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 99aa6ee3908f..f654a8a298fa 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -225,6 +225,20 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 };
 
 /*
+ * Kick this CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
+ * is NMI safe.
+ */
+void tick_nohz_full_kick(void)
+{
+	if (!tick_nohz_full_cpu(smp_processor_id()))
+		return;
+
+	irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
+/*
  * Kick the CPU if it's full dynticks in order to force it to
  * re-evaluate its dependency on the tick and restart it if necessary.
  */
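irq_work is what makes the new kick NMI-safe: irq_work_queue() only links a per-CPU item, and the callback runs later in ordinary hard-IRQ context. A hedged sketch of the idiom as used above; apart from the irq_work and per-CPU APIs, the names are hypothetical:

	#include <linux/irq_work.h>
	#include <linux/percpu.h>

	static void my_kick_fn(struct irq_work *work)
	{
		/* Runs from the next interrupt, never inside the NMI itself. */
	}

	static DEFINE_PER_CPU(struct irq_work, my_kick) = {
		.func = my_kick_fn,
	};

	void my_nmi_safe_kick(void)
	{
		irq_work_queue(&__get_cpu_var(my_kick));
	}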
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index fb4a9c2cf8d9..ec1791fae965 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -442,11 +442,12 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 		tk->ntp_error = 0;
 		ntp_clear();
 	}
-	update_vsyscall(tk);
-	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
 
 	tk_update_ktime_data(tk);
 
+	update_vsyscall(tk);
+	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
+
 	if (action & TK_MIRROR)
 		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
 		       sizeof(tk_core.timekeeper));
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1654b12c891a..5916a8e59e87 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -65,15 +65,21 @@
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-#define INIT_REGEX_LOCK(opsname)	\
-	.regex_lock	= __MUTEX_INITIALIZER(opsname.regex_lock),
+#define INIT_OPS_HASH(opsname)	\
+	.func_hash		= &opsname.local_hash,			\
+	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+#define ASSIGN_OPS_HASH(opsname, val) \
+	.func_hash		= val, \
+	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
 #else
-#define INIT_REGEX_LOCK(opsname)
+#define INIT_OPS_HASH(opsname)
+#define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
 static struct ftrace_ops ftrace_list_end __read_mostly = {
 	.func		= ftrace_stub,
 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+	INIT_OPS_HASH(ftrace_list_end)
 };
 
 /* ftrace_enabled is a method to turn ftrace on or off */
@@ -140,7 +146,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
-		mutex_init(&ops->regex_lock);
+		mutex_init(&ops->local_hash.regex_lock);
+		ops->func_hash = &ops->local_hash;
 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
 	}
 #endif
@@ -899,7 +906,7 @@ static void unregister_ftrace_profiler(void)
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 	.func		= function_profile_call,
 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(ftrace_profile_ops)
+	INIT_OPS_HASH(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1081,11 +1088,12 @@ static const struct ftrace_hash empty_hash = {
 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
 
 static struct ftrace_ops global_ops = {
-	.func			= ftrace_stub,
-	.notrace_hash		= EMPTY_HASH,
-	.filter_hash		= EMPTY_HASH,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(global_ops)
+	.func				= ftrace_stub,
+	.local_hash.notrace_hash	= EMPTY_HASH,
+	.local_hash.filter_hash		= EMPTY_HASH,
+	INIT_OPS_HASH(global_ops)
+	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
+					  FTRACE_OPS_FL_INITIALIZED,
 };
 
 struct ftrace_page {
@@ -1226,8 +1234,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
 	ftrace_ops_init(ops);
-	free_ftrace_hash(ops->filter_hash);
-	free_ftrace_hash(ops->notrace_hash);
+	free_ftrace_hash(ops->func_hash->filter_hash);
+	free_ftrace_hash(ops->func_hash->notrace_hash);
 }
 
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
@@ -1288,9 +1296,9 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 }
 
 static void
-ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
 static void
-ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 
 static int
 ftrace_hash_move(struct ftrace_ops *ops, int enable,
@@ -1342,13 +1350,13 @@ update:
 	 * Remove the current set, update the hash and add
 	 * them back.
 	 */
-	ftrace_hash_rec_disable(ops, enable);
+	ftrace_hash_rec_disable_modify(ops, enable);
 
 	old_hash = *dst;
 	rcu_assign_pointer(*dst, new_hash);
 	free_ftrace_hash_rcu(old_hash);
 
-	ftrace_hash_rec_enable(ops, enable);
+	ftrace_hash_rec_enable_modify(ops, enable);
 
 	return 0;
 }
@@ -1382,8 +1390,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 	return 0;
 #endif
 
-	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
-	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
+	filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+	notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
 
 	if ((ftrace_hash_empty(filter_hash) ||
 	     ftrace_lookup_ip(filter_hash, ip)) &&
@@ -1503,25 +1511,38 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
 static void ftrace_remove_tramp(struct ftrace_ops *ops,
 				struct dyn_ftrace *rec)
 {
-	struct ftrace_func_entry *entry;
-
-	entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
-	if (!entry)
+	/* If TRAMP is not set, no ops should have a trampoline for this */
+	if (!(rec->flags & FTRACE_FL_TRAMP))
 		return;
 
+	rec->flags &= ~FTRACE_FL_TRAMP;
+
+	if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+	     !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
+	    ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
+		return;
 	/*
 	 * The tramp_hash entry will be removed at time
 	 * of update.
 	 */
 	ops->nr_trampolines--;
-	rec->flags &= ~FTRACE_FL_TRAMP;
 }
 
-static void ftrace_clear_tramps(struct dyn_ftrace *rec)
+static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
 {
 	struct ftrace_ops *op;
 
+	/* If TRAMP is not set, no ops should have a trampoline for this */
+	if (!(rec->flags & FTRACE_FL_TRAMP))
+		return;
+
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		/*
+		 * This function is called to clear other tramps
+		 * not the one that is being updated.
+		 */
+		if (op == ops)
+			continue;
 		if (op->nr_trampolines)
 			ftrace_remove_tramp(op, rec);
 	} while_for_each_ftrace_op(op);
@@ -1554,14 +1575,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 	 * gets inversed.
 	 */
 	if (filter_hash) {
-		hash = ops->filter_hash;
-		other_hash = ops->notrace_hash;
+		hash = ops->func_hash->filter_hash;
+		other_hash = ops->func_hash->notrace_hash;
 		if (ftrace_hash_empty(hash))
 			all = 1;
 	} else {
 		inc = !inc;
-		hash = ops->notrace_hash;
-		other_hash = ops->filter_hash;
+		hash = ops->func_hash->notrace_hash;
+		other_hash = ops->func_hash->filter_hash;
 		/*
 		 * If the notrace hash has no items,
 		 * then there's nothing to do.
@@ -1622,13 +1643,10 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 			/*
 			 * If we are adding another function callback
 			 * to this function, and the previous had a
-			 * trampoline used, then we need to go back to
-			 * the default trampoline.
+			 * custom trampoline in use, then we need to go
+			 * back to the default trampoline.
 			 */
-			rec->flags &= ~FTRACE_FL_TRAMP;
-
-			/* remove trampolines from any ops for this rec */
-			ftrace_clear_tramps(rec);
+			ftrace_clear_tramps(rec, ops);
 		}
 
 		/*
@@ -1682,6 +1700,41 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
 	__ftrace_hash_rec_update(ops, filter_hash, 1);
 }
 
+static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
+					  int filter_hash, int inc)
+{
+	struct ftrace_ops *op;
+
+	__ftrace_hash_rec_update(ops, filter_hash, inc);
+
+	if (ops->func_hash != &global_ops.local_hash)
+		return;
+
+	/*
+	 * If the ops shares the global_ops hash, then we need to update
+	 * all ops that are enabled and use this hash.
+	 */
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		/* Already done */
+		if (op == ops)
+			continue;
+		if (op->func_hash == &global_ops.local_hash)
+			__ftrace_hash_rec_update(op, filter_hash, inc);
+	} while_for_each_ftrace_op(op);
+}
+
+static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
+					   int filter_hash)
+{
+	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
+}
+
+static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
+					  int filter_hash)
+{
+	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
+}
+
 static void print_ip_ins(const char *fmt, unsigned char *p)
 {
 	int i;
@@ -1896,8 +1949,8 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
 	if (rec->flags & FTRACE_FL_TRAMP) {
 		ops = ftrace_find_tramp_ops_new(rec);
 		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
-			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
-				    (void *)rec->ip, (void *)rec->ip);
+			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
+				(void *)rec->ip, (void *)rec->ip, rec->flags);
 			/* Ftrace is shutting down, return anything */
 			return (unsigned long)FTRACE_ADDR;
 		}
@@ -1964,7 +2017,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 		return ftrace_make_call(rec, ftrace_addr);
 
 	case FTRACE_UPDATE_MAKE_NOP:
-		return ftrace_make_nop(NULL, rec, ftrace_addr);
+		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
 
 	case FTRACE_UPDATE_MODIFY_CALL:
 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
@@ -2227,7 +2280,10 @@ static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
 	} while_for_each_ftrace_rec();
 
 	/* The number of recs in the hash must match nr_trampolines */
-	FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines);
+	if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
+		pr_warn("count=%ld trampolines=%d\n",
+			ops->tramp_hash->count,
+			ops->nr_trampolines);
 
 	return 0;
 }
@@ -2436,8 +2492,8 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
 	 * Filter_hash being empty will default to trace module.
 	 * But notrace hash requires a test of individual module functions.
 	 */
-	return ftrace_hash_empty(ops->filter_hash) &&
-		ftrace_hash_empty(ops->notrace_hash);
+	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
+		ftrace_hash_empty(ops->func_hash->notrace_hash);
 }
 
 /*
@@ -2459,12 +2515,12 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 		return 0;
 
 	/* The function must be in the filter */
-	if (!ftrace_hash_empty(ops->filter_hash) &&
-	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
 		return 0;
 
 	/* If in notrace hash, we ignore it too */
-	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
 		return 0;
 
 	return 1;
@@ -2785,10 +2841,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	} else {
 		rec = &iter->pg->records[iter->idx++];
 		if (((iter->flags & FTRACE_ITER_FILTER) &&
-		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
+		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
 
 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
-		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
+		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
 
 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
 		     !(rec->flags & FTRACE_FL_ENABLED))) {
@@ -2837,9 +2893,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	 * functions are enabled.
 	 */
 	if ((iter->flags & FTRACE_ITER_FILTER &&
-	     ftrace_hash_empty(ops->filter_hash)) ||
+	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
 	    (iter->flags & FTRACE_ITER_NOTRACE &&
-	     ftrace_hash_empty(ops->notrace_hash))) {
+	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
 		if (*pos > 0)
 			return t_hash_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
@@ -3001,12 +3057,12 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 	iter->ops = ops;
 	iter->flags = flag;
 
-	mutex_lock(&ops->regex_lock);
+	mutex_lock(&ops->func_hash->regex_lock);
 
 	if (flag & FTRACE_ITER_NOTRACE)
-		hash = ops->notrace_hash;
+		hash = ops->func_hash->notrace_hash;
 	else
-		hash = ops->filter_hash;
+		hash = ops->func_hash->filter_hash;
 
 	if (file->f_mode & FMODE_WRITE) {
 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
@@ -3041,7 +3097,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 	file->private_data = iter;
 
  out_unlock:
-	mutex_unlock(&ops->regex_lock);
+	mutex_unlock(&ops->func_hash->regex_lock);
 
 	return ret;
 }
@@ -3279,7 +3335,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 {
 	.func		= function_trace_probe_call,
 	.flags		= FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(trace_probe_ops)
+	INIT_OPS_HASH(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -3342,7 +3398,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 			      void *data)
 {
 	struct ftrace_func_probe *entry;
-	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
 	struct ftrace_hash *hash;
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
@@ -3359,7 +3415,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	if (WARN_ON(not))
 		return -EINVAL;
 
-	mutex_lock(&trace_probe_ops.regex_lock);
+	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash) {
@@ -3428,7 +3484,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  out_unlock:
 	mutex_unlock(&ftrace_lock);
  out:
-	mutex_unlock(&trace_probe_ops.regex_lock);
+	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
 	free_ftrace_hash(hash);
 
 	return count;
@@ -3446,7 +3502,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	struct ftrace_func_entry *rec_entry;
 	struct ftrace_func_probe *entry;
 	struct ftrace_func_probe *p;
-	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
 	struct list_head free_list;
 	struct ftrace_hash *hash;
 	struct hlist_node *tmp;
@@ -3468,7 +3524,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 		return;
 	}
 
-	mutex_lock(&trace_probe_ops.regex_lock);
+	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash)
@@ -3521,7 +3577,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	mutex_unlock(&ftrace_lock);
 
  out_unlock:
-	mutex_unlock(&trace_probe_ops.regex_lock);
+	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
 	free_ftrace_hash(hash);
 }
 
@@ -3717,12 +3773,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
-	mutex_lock(&ops->regex_lock);
+	mutex_lock(&ops->func_hash->regex_lock);
 
 	if (enable)
-		orig_hash = &ops->filter_hash;
+		orig_hash = &ops->func_hash->filter_hash;
 	else
-		orig_hash = &ops->notrace_hash;
+		orig_hash = &ops->func_hash->notrace_hash;
 
 	if (reset)
 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
@@ -3752,7 +3808,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-	mutex_unlock(&ops->regex_lock);
+	mutex_unlock(&ops->func_hash->regex_lock);
 
 	free_ftrace_hash(hash);
 	return ret;
@@ -3975,15 +4031,15 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 	trace_parser_put(parser);
 
-	mutex_lock(&iter->ops->regex_lock);
+	mutex_lock(&iter->ops->func_hash->regex_lock);
 
 	if (file->f_mode & FMODE_WRITE) {
 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
 		if (filter_hash)
-			orig_hash = &iter->ops->filter_hash;
+			orig_hash = &iter->ops->func_hash->filter_hash;
 		else
-			orig_hash = &iter->ops->notrace_hash;
+			orig_hash = &iter->ops->func_hash->notrace_hash;
 
 		mutex_lock(&ftrace_lock);
 		ret = ftrace_hash_move(iter->ops, filter_hash,
@@ -3994,7 +4050,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 		mutex_unlock(&ftrace_lock);
 	}
 
-	mutex_unlock(&iter->ops->regex_lock);
+	mutex_unlock(&iter->ops->func_hash->regex_lock);
 	free_ftrace_hash(iter->hash);
 	kfree(iter);
 
@@ -4611,7 +4667,6 @@ void __init ftrace_init(void)
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4713,7 +4768,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops control_ops = {
 	.func	= ftrace_ops_control_func,
 	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(control_ops)
+	INIT_OPS_HASH(control_ops)
 };
 
 static inline void
@@ -5145,6 +5200,17 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+static struct ftrace_ops graph_ops = {
+	.func			= ftrace_stub,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
+				   FTRACE_OPS_FL_INITIALIZED |
+				   FTRACE_OPS_FL_STUB,
+#ifdef FTRACE_GRAPH_TRAMP_ADDR
+	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
+#endif
+	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
+};
+
 static int ftrace_graph_active;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -5307,12 +5373,28 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
  */
 static void update_function_graph_func(void)
 {
-	if (ftrace_ops_list == &ftrace_list_end ||
-	    (ftrace_ops_list == &global_ops &&
-	     global_ops.next == &ftrace_list_end))
-		ftrace_graph_entry = __ftrace_graph_entry;
-	else
+	struct ftrace_ops *op;
+	bool do_test = false;
+
+	/*
+	 * The graph and global ops share the same set of functions
+	 * to test. If any other ops is on the list, then
+	 * the graph tracing needs to test if it's the function
+	 * it should call.
+	 */
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op != &global_ops && op != &graph_ops &&
+		    op != &ftrace_list_end) {
+			do_test = true;
+			/* in double loop, break out with goto */
+			goto out;
+		}
+	} while_for_each_ftrace_op(op);
+ out:
+	if (do_test)
 		ftrace_graph_entry = ftrace_graph_entry_test;
+	else
+		ftrace_graph_entry = __ftrace_graph_entry;
 }
 
 static struct notifier_block ftrace_suspend_notifier = {
@@ -5353,16 +5435,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	ftrace_graph_entry = ftrace_graph_entry_test;
 	update_function_graph_func();
 
-	/* Function graph doesn't use the .func field of global_ops */
-	global_ops.flags |= FTRACE_OPS_FL_STUB;
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-	/* Optimize function graph calling (if implemented by arch) */
-	if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-		global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
-#endif
-
-	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_lock);
@@ -5380,12 +5453,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	__ftrace_graph_entry = ftrace_graph_entry_stub;
-	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
-	global_ops.flags &= ~FTRACE_OPS_FL_STUB;
-#ifdef CONFIG_DYNAMIC_FTRACE
-	if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-		global_ops.trampoline = 0;
-#endif
+	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
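The long series of ops->func_hash-> rewrites above follows from one structural move: every ftrace_ops keeps its filter/notrace hashes in local_hash but consults them only through the func_hash pointer, which ASSIGN_OPS_HASH() may aim at another ops' storage, as graph_ops does with global_ops. The indirection reduced to a standalone sketch with simplified stand-in structs:

	#include <stdio.h>

	struct ops_hash { const char *filter; };

	struct my_ops {
		struct ops_hash local_hash;	/* private storage */
		struct ops_hash *func_hash;	/* what lookups actually use */
	};

	static struct my_ops global = {
		.local_hash = { .filter = "shared-filter" },
		.func_hash  = &global.local_hash,	/* INIT_OPS_HASH() analogue */
	};

	static struct my_ops graph = {
		.func_hash = &global.local_hash,	/* ASSIGN_OPS_HASH() analogue */
	};

	int main(void)
	{
		printf("%s\n", graph.func_hash->filter);	/* prints "shared-filter" */
		return 0;
	}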
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index afb04b9b818a..b38fb2b9e237 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 		work = &cpu_buffer->irq_work;
 	}
 
-	work->waiters_pending = true;
 	poll_wait(filp, &work->waiters, poll_table);
+	work->waiters_pending = true;
+	/*
+	 * There's a tight race between setting the waiters_pending and
+	 * checking if the ring buffer is empty.  Once the waiters_pending bit
+	 * is set, the next event will wake the task up, but we can get stuck
+	 * if there's only a single event in.
+	 *
+	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
+	 * but adding a memory barrier to all events will cause too much of a
+	 * performance hit in the fast path.  We only need a memory barrier when
+	 * the buffer goes from empty to having content.  But as this race is
+	 * extremely small, and it's not a problem if another event comes in, we
+	 * will fix it later.
+	 */
+	smp_mb();
 
 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
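The moved store plus the new smp_mb() close a lost-wakeup window: the poller's waiters_pending store has to be globally visible before it tests for data, otherwise a writer that just added the only event can still read the flag as clear and skip the wakeup. The ordering as a standalone C11 sketch (not the ring-buffer code itself); as the FIXME notes, the writer side ideally wants a fence too:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool waiters_pending;
	static atomic_int  entries;	/* stand-in for buffer contents */

	bool poller_should_sleep(void)
	{
		atomic_store(&waiters_pending, true);
		atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
		return atomic_load(&entries) == 0;	/* flag already visible here */
	}

	void writer_add_event(void)
	{
		atomic_fetch_add(&entries, 1);
		/* no fence here: the small race the FIXME describes */
		if (atomic_load(&waiters_pending)) {
			atomic_store(&waiters_pending, false);
			/* wake the poller */
		}
	}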