| field | value | date |
|---|---|---|
| author | Mark Brown <broonie@kernel.org> | 2014-10-20 12:55:07 -0400 |
| committer | Mark Brown <broonie@kernel.org> | 2014-10-20 13:27:32 -0400 |
| commit | b7a40242c82cd73cfcea305f23e67d068dd8401a (patch) | |
| tree | 251b49d19cd7c371847ae1f951e1b537ca0e1c15 /kernel | |
| parent | d26833bfce5e56017bea9f1f50838f20e18e7b7e (diff) | |
| parent | 9c6de47d53a3ce8df1642ae67823688eb98a190a (diff) | |
Merge branch 'fix/dw' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi into spi-dw
Conflicts:
drivers/spi/spi-dw-mid.c
Diffstat (limited to 'kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/cgroup.c | 48 |
| -rw-r--r-- | kernel/compat.c | 24 |
| -rw-r--r-- | kernel/cpuset.c | 9 |
| -rw-r--r-- | kernel/events/core.c | 33 |
| -rw-r--r-- | kernel/futex.c | 1 |
| -rw-r--r-- | kernel/irq/chip.c | 1 |
| -rw-r--r-- | kernel/kcmp.c | 7 |
| -rw-r--r-- | kernel/kexec.c | 11 |
| -rw-r--r-- | kernel/kprobes.c | 13 |
| -rw-r--r-- | kernel/power/power.h | 1 |
| -rw-r--r-- | kernel/power/snapshot.c | 50 |
| -rw-r--r-- | kernel/power/suspend.c | 2 |
| -rw-r--r-- | kernel/power/suspend_test.c | 31 |
| -rw-r--r-- | kernel/printk/printk.c | 6 |
| -rw-r--r-- | kernel/rcu/tree.h | 2 |
| -rw-r--r-- | kernel/rcu/tree_plugin.h | 22 |
| -rw-r--r-- | kernel/resource.c | 11 |
| -rw-r--r-- | kernel/time/alarmtimer.c | 34 |
| -rw-r--r-- | kernel/time/tick-sched.c | 14 |
| -rw-r--r-- | kernel/time/time.c | 56 |
| -rw-r--r-- | kernel/time/timekeeping.c | 5 |
| -rw-r--r-- | kernel/trace/ftrace.c | 246 |
| -rw-r--r-- | kernel/trace/ring_buffer.c | 16 |
23 files changed, 418 insertions, 225 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7dc8788cfd52..3a73f995a81e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
| @@ -1035,6 +1035,11 @@ static void cgroup_get(struct cgroup *cgrp) | |||
| 1035 | css_get(&cgrp->self); | 1035 | css_get(&cgrp->self); |
| 1036 | } | 1036 | } |
| 1037 | 1037 | ||
| 1038 | static bool cgroup_tryget(struct cgroup *cgrp) | ||
| 1039 | { | ||
| 1040 | return css_tryget(&cgrp->self); | ||
| 1041 | } | ||
| 1042 | |||
| 1038 | static void cgroup_put(struct cgroup *cgrp) | 1043 | static void cgroup_put(struct cgroup *cgrp) |
| 1039 | { | 1044 | { |
| 1040 | css_put(&cgrp->self); | 1045 | css_put(&cgrp->self); |
| @@ -1147,7 +1152,8 @@ static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn) | |||
| 1147 | * protection against removal. Ensure @cgrp stays accessible and | 1152 | * protection against removal. Ensure @cgrp stays accessible and |
| 1148 | * break the active_ref protection. | 1153 | * break the active_ref protection. |
| 1149 | */ | 1154 | */ |
| 1150 | cgroup_get(cgrp); | 1155 | if (!cgroup_tryget(cgrp)) |
| 1156 | return NULL; | ||
| 1151 | kernfs_break_active_protection(kn); | 1157 | kernfs_break_active_protection(kn); |
| 1152 | 1158 | ||
| 1153 | mutex_lock(&cgroup_mutex); | 1159 | mutex_lock(&cgroup_mutex); |
| @@ -3271,8 +3277,17 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) | |||
| 3271 | { | 3277 | { |
| 3272 | struct cftype *cft; | 3278 | struct cftype *cft; |
| 3273 | 3279 | ||
| 3274 | for (cft = cfts; cft && cft->name[0] != '\0'; cft++) | 3280 | /* |
| 3275 | cft->flags |= __CFTYPE_NOT_ON_DFL; | 3281 | * If legacy_flies_on_dfl, we want to show the legacy files on the |
| 3282 | * dfl hierarchy but iff the target subsystem hasn't been updated | ||
| 3283 | * for the dfl hierarchy yet. | ||
| 3284 | */ | ||
| 3285 | if (!cgroup_legacy_files_on_dfl || | ||
| 3286 | ss->dfl_cftypes != ss->legacy_cftypes) { | ||
| 3287 | for (cft = cfts; cft && cft->name[0] != '\0'; cft++) | ||
| 3288 | cft->flags |= __CFTYPE_NOT_ON_DFL; | ||
| 3289 | } | ||
| 3290 | |||
| 3276 | return cgroup_add_cftypes(ss, cfts); | 3291 | return cgroup_add_cftypes(ss, cfts); |
| 3277 | } | 3292 | } |
| 3278 | 3293 | ||
| @@ -3970,7 +3985,6 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, | |||
| 3970 | 3985 | ||
| 3971 | l = cgroup_pidlist_find_create(cgrp, type); | 3986 | l = cgroup_pidlist_find_create(cgrp, type); |
| 3972 | if (!l) { | 3987 | if (!l) { |
| 3973 | mutex_unlock(&cgrp->pidlist_mutex); | ||
| 3974 | pidlist_free(array); | 3988 | pidlist_free(array); |
| 3975 | return -ENOMEM; | 3989 | return -ENOMEM; |
| 3976 | } | 3990 | } |
| @@ -4387,6 +4401,15 @@ static void css_release_work_fn(struct work_struct *work) | |||
| 4387 | /* cgroup release path */ | 4401 | /* cgroup release path */ |
| 4388 | cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id); | 4402 | cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id); |
| 4389 | cgrp->id = -1; | 4403 | cgrp->id = -1; |
| 4404 | |||
| 4405 | /* | ||
| 4406 | * There are two control paths which try to determine | ||
| 4407 | * cgroup from dentry without going through kernfs - | ||
| 4408 | * cgroupstats_build() and css_tryget_online_from_dir(). | ||
| 4409 | * Those are supported by RCU protecting clearing of | ||
| 4410 | * cgrp->kn->priv backpointer. | ||
| 4411 | */ | ||
| 4412 | RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL); | ||
| 4390 | } | 4413 | } |
| 4391 | 4414 | ||
| 4392 | mutex_unlock(&cgroup_mutex); | 4415 | mutex_unlock(&cgroup_mutex); |
| @@ -4543,6 +4566,11 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, | |||
| 4543 | struct cftype *base_files; | 4566 | struct cftype *base_files; |
| 4544 | int ssid, ret; | 4567 | int ssid, ret; |
| 4545 | 4568 | ||
| 4569 | /* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable. | ||
| 4570 | */ | ||
| 4571 | if (strchr(name, '\n')) | ||
| 4572 | return -EINVAL; | ||
| 4573 | |||
| 4546 | parent = cgroup_kn_lock_live(parent_kn); | 4574 | parent = cgroup_kn_lock_live(parent_kn); |
| 4547 | if (!parent) | 4575 | if (!parent) |
| 4548 | return -ENODEV; | 4576 | return -ENODEV; |
| @@ -4820,16 +4848,6 @@ static int cgroup_rmdir(struct kernfs_node *kn) | |||
| 4820 | 4848 | ||
| 4821 | cgroup_kn_unlock(kn); | 4849 | cgroup_kn_unlock(kn); |
| 4822 | 4850 | ||
| 4823 | /* | ||
| 4824 | * There are two control paths which try to determine cgroup from | ||
| 4825 | * dentry without going through kernfs - cgroupstats_build() and | ||
| 4826 | * css_tryget_online_from_dir(). Those are supported by RCU | ||
| 4827 | * protecting clearing of cgrp->kn->priv backpointer, which should | ||
| 4828 | * happen after all files under it have been removed. | ||
| 4829 | */ | ||
| 4830 | if (!ret) | ||
| 4831 | RCU_INIT_POINTER(*(void __rcu __force **)&kn->priv, NULL); | ||
| 4832 | |||
| 4833 | cgroup_put(cgrp); | 4851 | cgroup_put(cgrp); |
| 4834 | return ret; | 4852 | return ret; |
| 4835 | } | 4853 | } |
| @@ -5416,7 +5434,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, | |||
| 5416 | /* | 5434 | /* |
| 5417 | * This path doesn't originate from kernfs and @kn could already | 5435 | * This path doesn't originate from kernfs and @kn could already |
| 5418 | * have been or be removed at any point. @kn->priv is RCU | 5436 | * have been or be removed at any point. @kn->priv is RCU |
| 5419 | * protected for this access. See cgroup_rmdir() for details. | 5437 | * protected for this access. See css_release_work_fn() for details. |
| 5420 | */ | 5438 | */ |
| 5421 | cgrp = rcu_dereference(kn->priv); | 5439 | cgrp = rcu_dereference(kn->priv); |
| 5422 | if (cgrp) | 5440 | if (cgrp) |
diff --git a/kernel/compat.c b/kernel/compat.c
index 633394f442f8..ebb3c369d03d 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
| @@ -226,7 +226,7 @@ static long compat_nanosleep_restart(struct restart_block *restart) | |||
| 226 | ret = hrtimer_nanosleep_restart(restart); | 226 | ret = hrtimer_nanosleep_restart(restart); |
| 227 | set_fs(oldfs); | 227 | set_fs(oldfs); |
| 228 | 228 | ||
| 229 | if (ret) { | 229 | if (ret == -ERESTART_RESTARTBLOCK) { |
| 230 | rmtp = restart->nanosleep.compat_rmtp; | 230 | rmtp = restart->nanosleep.compat_rmtp; |
| 231 | 231 | ||
| 232 | if (rmtp && compat_put_timespec(&rmt, rmtp)) | 232 | if (rmtp && compat_put_timespec(&rmt, rmtp)) |
| @@ -256,7 +256,26 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, | |||
| 256 | HRTIMER_MODE_REL, CLOCK_MONOTONIC); | 256 | HRTIMER_MODE_REL, CLOCK_MONOTONIC); |
| 257 | set_fs(oldfs); | 257 | set_fs(oldfs); |
| 258 | 258 | ||
| 259 | if (ret) { | 259 | /* |
| 260 | * hrtimer_nanosleep() can only return 0 or | ||
| 261 | * -ERESTART_RESTARTBLOCK here because: | ||
| 262 | * | ||
| 263 | * - we call it with HRTIMER_MODE_REL and therefor exclude the | ||
| 264 | * -ERESTARTNOHAND return path. | ||
| 265 | * | ||
| 266 | * - we supply the rmtp argument from the task stack (due to | ||
| 267 | * the necessary compat conversion. So the update cannot | ||
| 268 | * fail, which excludes the -EFAULT return path as well. If | ||
| 269 | * it fails nevertheless we have a bigger problem and wont | ||
| 270 | * reach this place anymore. | ||
| 271 | * | ||
| 272 | * - if the return value is 0, we do not have to update rmtp | ||
| 273 | * because there is no remaining time. | ||
| 274 | * | ||
| 275 | * We check for -ERESTART_RESTARTBLOCK nevertheless if the | ||
| 276 | * core implementation decides to return random nonsense. | ||
| 277 | */ | ||
| 278 | if (ret == -ERESTART_RESTARTBLOCK) { | ||
| 260 | struct restart_block *restart | 279 | struct restart_block *restart |
| 261 | = ¤t_thread_info()->restart_block; | 280 | = ¤t_thread_info()->restart_block; |
| 262 | 281 | ||
| @@ -266,7 +285,6 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, | |||
| 266 | if (rmtp && compat_put_timespec(&rmt, rmtp)) | 285 | if (rmtp && compat_put_timespec(&rmt, rmtp)) |
| 267 | return -EFAULT; | 286 | return -EFAULT; |
| 268 | } | 287 | } |
| 269 | |||
| 270 | return ret; | 288 | return ret; |
| 271 | } | 289 | } |
| 272 | 290 | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 22874d7cf2c0..52cb04c993b7 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
| @@ -365,13 +365,14 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs, | |||
| 365 | struct task_struct *tsk) | 365 | struct task_struct *tsk) |
| 366 | { | 366 | { |
| 367 | if (is_spread_page(cs)) | 367 | if (is_spread_page(cs)) |
| 368 | tsk->flags |= PF_SPREAD_PAGE; | 368 | task_set_spread_page(tsk); |
| 369 | else | 369 | else |
| 370 | tsk->flags &= ~PF_SPREAD_PAGE; | 370 | task_clear_spread_page(tsk); |
| 371 | |||
| 371 | if (is_spread_slab(cs)) | 372 | if (is_spread_slab(cs)) |
| 372 | tsk->flags |= PF_SPREAD_SLAB; | 373 | task_set_spread_slab(tsk); |
| 373 | else | 374 | else |
| 374 | tsk->flags &= ~PF_SPREAD_SLAB; | 375 | task_clear_spread_slab(tsk); |
| 375 | } | 376 | } |
| 376 | 377 | ||
| 377 | /* | 378 | /* |
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1cf24b3e42ec..d640a8b4dcbc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
| @@ -41,6 +41,7 @@ | |||
| 41 | #include <linux/cgroup.h> | 41 | #include <linux/cgroup.h> |
| 42 | #include <linux/module.h> | 42 | #include <linux/module.h> |
| 43 | #include <linux/mman.h> | 43 | #include <linux/mman.h> |
| 44 | #include <linux/compat.h> | ||
| 44 | 45 | ||
| 45 | #include "internal.h" | 46 | #include "internal.h" |
| 46 | 47 | ||
| @@ -1523,6 +1524,11 @@ retry: | |||
| 1523 | */ | 1524 | */ |
| 1524 | if (ctx->is_active) { | 1525 | if (ctx->is_active) { |
| 1525 | raw_spin_unlock_irq(&ctx->lock); | 1526 | raw_spin_unlock_irq(&ctx->lock); |
| 1527 | /* | ||
| 1528 | * Reload the task pointer, it might have been changed by | ||
| 1529 | * a concurrent perf_event_context_sched_out(). | ||
| 1530 | */ | ||
| 1531 | task = ctx->task; | ||
| 1526 | goto retry; | 1532 | goto retry; |
| 1527 | } | 1533 | } |
| 1528 | 1534 | ||
| @@ -1966,6 +1972,11 @@ retry: | |||
| 1966 | */ | 1972 | */ |
| 1967 | if (ctx->is_active) { | 1973 | if (ctx->is_active) { |
| 1968 | raw_spin_unlock_irq(&ctx->lock); | 1974 | raw_spin_unlock_irq(&ctx->lock); |
| 1975 | /* | ||
| 1976 | * Reload the task pointer, it might have been changed by | ||
| 1977 | * a concurrent perf_event_context_sched_out(). | ||
| 1978 | */ | ||
| 1979 | task = ctx->task; | ||
| 1969 | goto retry; | 1980 | goto retry; |
| 1970 | } | 1981 | } |
| 1971 | 1982 | ||
| @@ -3717,6 +3728,26 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 3717 | return 0; | 3728 | return 0; |
| 3718 | } | 3729 | } |
| 3719 | 3730 | ||
| 3731 | #ifdef CONFIG_COMPAT | ||
| 3732 | static long perf_compat_ioctl(struct file *file, unsigned int cmd, | ||
| 3733 | unsigned long arg) | ||
| 3734 | { | ||
| 3735 | switch (_IOC_NR(cmd)) { | ||
| 3736 | case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): | ||
| 3737 | case _IOC_NR(PERF_EVENT_IOC_ID): | ||
| 3738 | /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */ | ||
| 3739 | if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { | ||
| 3740 | cmd &= ~IOCSIZE_MASK; | ||
| 3741 | cmd |= sizeof(void *) << IOCSIZE_SHIFT; | ||
| 3742 | } | ||
| 3743 | break; | ||
| 3744 | } | ||
| 3745 | return perf_ioctl(file, cmd, arg); | ||
| 3746 | } | ||
| 3747 | #else | ||
| 3748 | # define perf_compat_ioctl NULL | ||
| 3749 | #endif | ||
| 3750 | |||
| 3720 | int perf_event_task_enable(void) | 3751 | int perf_event_task_enable(void) |
| 3721 | { | 3752 | { |
| 3722 | struct perf_event *event; | 3753 | struct perf_event *event; |
| @@ -4222,7 +4253,7 @@ static const struct file_operations perf_fops = { | |||
| 4222 | .read = perf_read, | 4253 | .read = perf_read, |
| 4223 | .poll = perf_poll, | 4254 | .poll = perf_poll, |
| 4224 | .unlocked_ioctl = perf_ioctl, | 4255 | .unlocked_ioctl = perf_ioctl, |
| 4225 | .compat_ioctl = perf_ioctl, | 4256 | .compat_ioctl = perf_compat_ioctl, |
| 4226 | .mmap = perf_mmap, | 4257 | .mmap = perf_mmap, |
| 4227 | .fasync = perf_fasync, | 4258 | .fasync = perf_fasync, |
| 4228 | }; | 4259 | }; |
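The perf_compat_ioctl() hunk above rewrites the size field of pointer-carrying ioctl commands so a command encoded by a 32-bit task (4-byte pointer argument) matches the native 64-bit definition before being handed to perf_ioctl(). A rough standalone sketch of that bit manipulation using the standard &lt;linux/ioctl.h&gt; encoding macros; the command number defined here is made up for the demonstration and is not the real PERF_EVENT_IOC_SET_FILTER value:

```c
#include <linux/ioctl.h>
#include <stdio.h>

#define DEMO_IOC_SET_FILTER	_IOW('$', 6, char *)	/* hypothetical command */

/* Rewrite a compat command whose size encodes a 4-byte pointer. */
static unsigned int fixup_compat_cmd(unsigned int cmd)
{
	if (_IOC_SIZE(cmd) == 4) {		/* sizeof(compat_uptr_t) */
		cmd &= ~IOCSIZE_MASK;
		cmd |= sizeof(void *) << IOCSIZE_SHIFT;
	}
	return cmd;
}

int main(void)
{
	/* Build the 32-bit variant of the command (pointer size = 4). */
	unsigned int cmd32 = (DEMO_IOC_SET_FILTER & ~IOCSIZE_MASK) | (4u << IOCSIZE_SHIFT);

	printf("32-bit cmd: %#x (arg size %u)\n", cmd32, _IOC_SIZE(cmd32));
	cmd32 = fixup_compat_cmd(cmd32);
	printf("fixed  cmd: %#x (arg size %u)\n", cmd32, _IOC_SIZE(cmd32));
	return 0;
}
```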
diff --git a/kernel/futex.c b/kernel/futex.c
index d3a9d946d0b7..815d7af2ffe8 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
| @@ -2592,6 +2592,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | |||
| 2592 | * shared futexes. We need to compare the keys: | 2592 | * shared futexes. We need to compare the keys: |
| 2593 | */ | 2593 | */ |
| 2594 | if (match_futex(&q.key, &key2)) { | 2594 | if (match_futex(&q.key, &key2)) { |
| 2595 | queue_unlock(hb); | ||
| 2595 | ret = -EINVAL; | 2596 | ret = -EINVAL; |
| 2596 | goto out_put_keys; | 2597 | goto out_put_keys; |
| 2597 | } | 2598 | } |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a2b28a2fd7b1..6223fab9a9d2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
| @@ -517,6 +517,7 @@ out: | |||
| 517 | chip->irq_eoi(&desc->irq_data); | 517 | chip->irq_eoi(&desc->irq_data); |
| 518 | raw_spin_unlock(&desc->lock); | 518 | raw_spin_unlock(&desc->lock); |
| 519 | } | 519 | } |
| 520 | EXPORT_SYMBOL_GPL(handle_fasteoi_irq); | ||
| 520 | 521 | ||
| 521 | /** | 522 | /** |
| 522 | * handle_edge_irq - edge type IRQ handler | 523 | * handle_edge_irq - edge type IRQ handler |
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index e30ac0fe61c3..0aa69ea1d8fd 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
| @@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type) | |||
| 44 | */ | 44 | */ |
| 45 | static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type) | 45 | static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type) |
| 46 | { | 46 | { |
| 47 | long ret; | 47 | long t1, t2; |
| 48 | 48 | ||
| 49 | ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type); | 49 | t1 = kptr_obfuscate((long)v1, type); |
| 50 | t2 = kptr_obfuscate((long)v2, type); | ||
| 50 | 51 | ||
| 51 | return (ret < 0) | ((ret > 0) << 1); | 52 | return (t1 < t2) | ((t1 > t2) << 1); |
| 52 | } | 53 | } |
| 53 | 54 | ||
| 54 | /* The caller must have pinned the task */ | 55 | /* The caller must have pinned the task */ |
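The kcmp_ptr() change above stops deriving the ordering from a subtraction, which can overflow when the two obfuscated values are far apart and then report the wrong order. A minimal sketch of the resulting encoding (0 = equal, 1 = less than, 2 = greater than), not taken from the patch:

```c
#include <limits.h>
#include <stdio.h>

/* Encode an ordering without subtraction: 0 = equal, 1 = t1 < t2, 2 = t1 > t2. */
static int cmp_encode(long t1, long t2)
{
	return (t1 < t2) | ((t1 > t2) << 1);
}

int main(void)
{
	/* Direct comparison stays correct even for values whose difference
	 * would not fit in a long (e.g. LONG_MAX vs. a negative value),
	 * which is exactly where a subtraction-based result goes wrong. */
	printf("%d %d %d %d\n",
	       cmp_encode(1, 1),		/* 0 */
	       cmp_encode(1, 2),		/* 1 */
	       cmp_encode(2, 1),		/* 2 */
	       cmp_encode(LONG_MAX, -1L));	/* 2 */
	return 0;
}
```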
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 0b49a0a58102..2bee072268d9 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
| @@ -64,7 +64,9 @@ bool kexec_in_progress = false; | |||
| 64 | char __weak kexec_purgatory[0]; | 64 | char __weak kexec_purgatory[0]; |
| 65 | size_t __weak kexec_purgatory_size = 0; | 65 | size_t __weak kexec_purgatory_size = 0; |
| 66 | 66 | ||
| 67 | #ifdef CONFIG_KEXEC_FILE | ||
| 67 | static int kexec_calculate_store_digests(struct kimage *image); | 68 | static int kexec_calculate_store_digests(struct kimage *image); |
| 69 | #endif | ||
| 68 | 70 | ||
| 69 | /* Location of the reserved area for the crash kernel */ | 71 | /* Location of the reserved area for the crash kernel */ |
| 70 | struct resource crashk_res = { | 72 | struct resource crashk_res = { |
| @@ -341,6 +343,7 @@ out_free_image: | |||
| 341 | return ret; | 343 | return ret; |
| 342 | } | 344 | } |
| 343 | 345 | ||
| 346 | #ifdef CONFIG_KEXEC_FILE | ||
| 344 | static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len) | 347 | static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len) |
| 345 | { | 348 | { |
| 346 | struct fd f = fdget(fd); | 349 | struct fd f = fdget(fd); |
| @@ -612,6 +615,9 @@ out_free_image: | |||
| 612 | kfree(image); | 615 | kfree(image); |
| 613 | return ret; | 616 | return ret; |
| 614 | } | 617 | } |
| 618 | #else /* CONFIG_KEXEC_FILE */ | ||
| 619 | static inline void kimage_file_post_load_cleanup(struct kimage *image) { } | ||
| 620 | #endif /* CONFIG_KEXEC_FILE */ | ||
| 615 | 621 | ||
| 616 | static int kimage_is_destination_range(struct kimage *image, | 622 | static int kimage_is_destination_range(struct kimage *image, |
| 617 | unsigned long start, | 623 | unsigned long start, |
| @@ -1375,6 +1381,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, | |||
| 1375 | } | 1381 | } |
| 1376 | #endif | 1382 | #endif |
| 1377 | 1383 | ||
| 1384 | #ifdef CONFIG_KEXEC_FILE | ||
| 1378 | SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, | 1385 | SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, |
| 1379 | unsigned long, cmdline_len, const char __user *, cmdline_ptr, | 1386 | unsigned long, cmdline_len, const char __user *, cmdline_ptr, |
| 1380 | unsigned long, flags) | 1387 | unsigned long, flags) |
| @@ -1451,6 +1458,8 @@ out: | |||
| 1451 | return ret; | 1458 | return ret; |
| 1452 | } | 1459 | } |
| 1453 | 1460 | ||
| 1461 | #endif /* CONFIG_KEXEC_FILE */ | ||
| 1462 | |||
| 1454 | void crash_kexec(struct pt_regs *regs) | 1463 | void crash_kexec(struct pt_regs *regs) |
| 1455 | { | 1464 | { |
| 1456 | /* Take the kexec_mutex here to prevent sys_kexec_load | 1465 | /* Take the kexec_mutex here to prevent sys_kexec_load |
| @@ -2006,6 +2015,7 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
| 2006 | 2015 | ||
| 2007 | subsys_initcall(crash_save_vmcoreinfo_init); | 2016 | subsys_initcall(crash_save_vmcoreinfo_init); |
| 2008 | 2017 | ||
| 2018 | #ifdef CONFIG_KEXEC_FILE | ||
| 2009 | static int __kexec_add_segment(struct kimage *image, char *buf, | 2019 | static int __kexec_add_segment(struct kimage *image, char *buf, |
| 2010 | unsigned long bufsz, unsigned long mem, | 2020 | unsigned long bufsz, unsigned long mem, |
| 2011 | unsigned long memsz) | 2021 | unsigned long memsz) |
| @@ -2682,6 +2692,7 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, | |||
| 2682 | 2692 | ||
| 2683 | return 0; | 2693 | return 0; |
| 2684 | } | 2694 | } |
| 2695 | #endif /* CONFIG_KEXEC_FILE */ | ||
| 2685 | 2696 | ||
| 2686 | /* | 2697 | /* |
| 2687 | * Move into place and start executing a preloaded standalone | 2698 | * Move into place and start executing a preloaded standalone |
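The kexec.c hunks above wrap the kexec_file_load() machinery in CONFIG_KEXEC_FILE and provide an empty inline stub for kimage_file_post_load_cleanup() so the shared code keeps building when the option is disabled. A generic sketch of that ifdef/stub pattern; the config symbol and function names here are stand-ins, not the kernel's:

```c
#include <stdio.h>

/* Compile-time switch standing in for a Kconfig option. */
/* #define CONFIG_DEMO_FILE_LOAD 1 */

#ifdef CONFIG_DEMO_FILE_LOAD
static void demo_file_post_load_cleanup(void)
{
	puts("cleaning up file-load state");
}
#else
/* Option disabled: callers still compile, the call becomes a no-op. */
static inline void demo_file_post_load_cleanup(void) { }
#endif

int main(void)
{
	demo_file_post_load_cleanup();	/* builds and runs either way */
	return 0;
}
```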
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 734e9a7d280b..3995f546d0f3 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
| @@ -1778,7 +1778,18 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) | |||
| 1778 | unsigned long hash, flags = 0; | 1778 | unsigned long hash, flags = 0; |
| 1779 | struct kretprobe_instance *ri; | 1779 | struct kretprobe_instance *ri; |
| 1780 | 1780 | ||
| 1781 | /*TODO: consider to only swap the RA after the last pre_handler fired */ | 1781 | /* |
| 1782 | * To avoid deadlocks, prohibit return probing in NMI contexts, | ||
| 1783 | * just skip the probe and increase the (inexact) 'nmissed' | ||
| 1784 | * statistical counter, so that the user is informed that | ||
| 1785 | * something happened: | ||
| 1786 | */ | ||
| 1787 | if (unlikely(in_nmi())) { | ||
| 1788 | rp->nmissed++; | ||
| 1789 | return 0; | ||
| 1790 | } | ||
| 1791 | |||
| 1792 | /* TODO: consider to only swap the RA after the last pre_handler fired */ | ||
| 1782 | hash = hash_ptr(current, KPROBE_HASH_BITS); | 1793 | hash = hash_ptr(current, KPROBE_HASH_BITS); |
| 1783 | raw_spin_lock_irqsave(&rp->lock, flags); | 1794 | raw_spin_lock_irqsave(&rp->lock, flags); |
| 1784 | if (!hlist_empty(&rp->free_instances)) { | 1795 | if (!hlist_empty(&rp->free_instances)) { |
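The pre_handler_kretprobe() hunk bails out early in NMI context instead of taking the instance spinlock, counting the skipped event in 'nmissed'. The shape of that guard, sketched outside the kernel with a stand-in predicate for in_nmi():

```c
#include <stdbool.h>
#include <stdio.h>

static unsigned long nmissed;		/* inexact "work was skipped" counter */

static bool in_nmi_context(void)	/* stand-in for the kernel's in_nmi() */
{
	return false;
}

static int handle_event(void)
{
	/* Taking a spinlock from NMI context could deadlock, so skip the
	 * work and only record that something was missed. */
	if (in_nmi_context()) {
		nmissed++;
		return 0;
	}
	/* ... normal locked processing would go here ... */
	return 0;
}

int main(void)
{
	handle_event();
	printf("missed: %lu\n", nmissed);
	return 0;
}
```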
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 5d49dcac2537..2df883a9d3cb 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
| @@ -179,6 +179,7 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *, | |||
| 179 | 179 | ||
| 180 | #ifdef CONFIG_SUSPEND | 180 | #ifdef CONFIG_SUSPEND |
| 181 | /* kernel/power/suspend.c */ | 181 | /* kernel/power/suspend.c */ |
| 182 | extern const char *pm_labels[]; | ||
| 182 | extern const char *pm_states[]; | 183 | extern const char *pm_states[]; |
| 183 | 184 | ||
| 184 | extern int suspend_devices_and_enter(suspend_state_t state); | 185 | extern int suspend_devices_and_enter(suspend_state_t state); |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index c4b8093c80b3..f1604d8cf489 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
| @@ -725,14 +725,6 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn) | |||
| 725 | clear_bit(bit, addr); | 725 | clear_bit(bit, addr); |
| 726 | } | 726 | } |
| 727 | 727 | ||
| 728 | static void memory_bm_clear_current(struct memory_bitmap *bm) | ||
| 729 | { | ||
| 730 | int bit; | ||
| 731 | |||
| 732 | bit = max(bm->cur.node_bit - 1, 0); | ||
| 733 | clear_bit(bit, bm->cur.node->data); | ||
| 734 | } | ||
| 735 | |||
| 736 | static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) | 728 | static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) |
| 737 | { | 729 | { |
| 738 | void *addr; | 730 | void *addr; |
| @@ -1341,35 +1333,23 @@ static struct memory_bitmap copy_bm; | |||
| 1341 | 1333 | ||
| 1342 | void swsusp_free(void) | 1334 | void swsusp_free(void) |
| 1343 | { | 1335 | { |
| 1344 | unsigned long fb_pfn, fr_pfn; | 1336 | struct zone *zone; |
| 1345 | 1337 | unsigned long pfn, max_zone_pfn; | |
| 1346 | memory_bm_position_reset(forbidden_pages_map); | ||
| 1347 | memory_bm_position_reset(free_pages_map); | ||
| 1348 | |||
| 1349 | loop: | ||
| 1350 | fr_pfn = memory_bm_next_pfn(free_pages_map); | ||
| 1351 | fb_pfn = memory_bm_next_pfn(forbidden_pages_map); | ||
| 1352 | |||
| 1353 | /* | ||
| 1354 | * Find the next bit set in both bitmaps. This is guaranteed to | ||
| 1355 | * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP. | ||
| 1356 | */ | ||
| 1357 | do { | ||
| 1358 | if (fb_pfn < fr_pfn) | ||
| 1359 | fb_pfn = memory_bm_next_pfn(forbidden_pages_map); | ||
| 1360 | if (fr_pfn < fb_pfn) | ||
| 1361 | fr_pfn = memory_bm_next_pfn(free_pages_map); | ||
| 1362 | } while (fb_pfn != fr_pfn); | ||
| 1363 | |||
| 1364 | if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) { | ||
| 1365 | struct page *page = pfn_to_page(fr_pfn); | ||
| 1366 | 1338 | ||
| 1367 | memory_bm_clear_current(forbidden_pages_map); | 1339 | for_each_populated_zone(zone) { |
| 1368 | memory_bm_clear_current(free_pages_map); | 1340 | max_zone_pfn = zone_end_pfn(zone); |
| 1369 | __free_page(page); | 1341 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
| 1370 | goto loop; | 1342 | if (pfn_valid(pfn)) { |
| 1343 | struct page *page = pfn_to_page(pfn); | ||
| 1344 | |||
| 1345 | if (swsusp_page_is_forbidden(page) && | ||
| 1346 | swsusp_page_is_free(page)) { | ||
| 1347 | swsusp_unset_page_forbidden(page); | ||
| 1348 | swsusp_unset_page_free(page); | ||
| 1349 | __free_page(page); | ||
| 1350 | } | ||
| 1351 | } | ||
| 1371 | } | 1352 | } |
| 1372 | |||
| 1373 | nr_copy_pages = 0; | 1353 | nr_copy_pages = 0; |
| 1374 | nr_meta_pages = 0; | 1354 | nr_meta_pages = 0; |
| 1375 | restore_pblist = NULL; | 1355 | restore_pblist = NULL; |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 6dadb25cb0d8..18c62195660f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
| @@ -31,7 +31,7 @@ | |||
| 31 | 31 | ||
| 32 | #include "power.h" | 32 | #include "power.h" |
| 33 | 33 | ||
| 34 | static const char *pm_labels[] = { "mem", "standby", "freeze", }; | 34 | const char *pm_labels[] = { "mem", "standby", "freeze", NULL }; |
| 35 | const char *pm_states[PM_SUSPEND_MAX]; | 35 | const char *pm_states[PM_SUSPEND_MAX]; |
| 36 | 36 | ||
| 37 | static const struct platform_suspend_ops *suspend_ops; | 37 | static const struct platform_suspend_ops *suspend_ops; |
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 2f524928b6aa..bd91bc177c93 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
| @@ -129,20 +129,20 @@ static int __init has_wakealarm(struct device *dev, const void *data) | |||
| 129 | * at startup time. They're normally disabled, for faster boot and because | 129 | * at startup time. They're normally disabled, for faster boot and because |
| 130 | * we can't know which states really work on this particular system. | 130 | * we can't know which states really work on this particular system. |
| 131 | */ | 131 | */ |
| 132 | static suspend_state_t test_state __initdata = PM_SUSPEND_ON; | 132 | static const char *test_state_label __initdata; |
| 133 | 133 | ||
| 134 | static char warn_bad_state[] __initdata = | 134 | static char warn_bad_state[] __initdata = |
| 135 | KERN_WARNING "PM: can't test '%s' suspend state\n"; | 135 | KERN_WARNING "PM: can't test '%s' suspend state\n"; |
| 136 | 136 | ||
| 137 | static int __init setup_test_suspend(char *value) | 137 | static int __init setup_test_suspend(char *value) |
| 138 | { | 138 | { |
| 139 | suspend_state_t i; | 139 | int i; |
| 140 | 140 | ||
| 141 | /* "=mem" ==> "mem" */ | 141 | /* "=mem" ==> "mem" */ |
| 142 | value++; | 142 | value++; |
| 143 | for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) | 143 | for (i = 0; pm_labels[i]; i++) |
| 144 | if (!strcmp(pm_states[i], value)) { | 144 | if (!strcmp(pm_labels[i], value)) { |
| 145 | test_state = i; | 145 | test_state_label = pm_labels[i]; |
| 146 | return 0; | 146 | return 0; |
| 147 | } | 147 | } |
| 148 | 148 | ||
| @@ -158,13 +158,21 @@ static int __init test_suspend(void) | |||
| 158 | 158 | ||
| 159 | struct rtc_device *rtc = NULL; | 159 | struct rtc_device *rtc = NULL; |
| 160 | struct device *dev; | 160 | struct device *dev; |
| 161 | suspend_state_t test_state; | ||
| 161 | 162 | ||
| 162 | /* PM is initialized by now; is that state testable? */ | 163 | /* PM is initialized by now; is that state testable? */ |
| 163 | if (test_state == PM_SUSPEND_ON) | 164 | if (!test_state_label) |
| 164 | goto done; | 165 | return 0; |
| 165 | if (!pm_states[test_state]) { | 166 | |
| 166 | printk(warn_bad_state, pm_states[test_state]); | 167 | for (test_state = PM_SUSPEND_MIN; test_state < PM_SUSPEND_MAX; test_state++) { |
| 167 | goto done; | 168 | const char *state_label = pm_states[test_state]; |
| 169 | |||
| 170 | if (state_label && !strcmp(test_state_label, state_label)) | ||
| 171 | break; | ||
| 172 | } | ||
| 173 | if (test_state == PM_SUSPEND_MAX) { | ||
| 174 | printk(warn_bad_state, test_state_label); | ||
| 175 | return 0; | ||
| 168 | } | 176 | } |
| 169 | 177 | ||
| 170 | /* RTCs have initialized by now too ... can we use one? */ | 178 | /* RTCs have initialized by now too ... can we use one? */ |
| @@ -173,13 +181,12 @@ static int __init test_suspend(void) | |||
| 173 | rtc = rtc_class_open(dev_name(dev)); | 181 | rtc = rtc_class_open(dev_name(dev)); |
| 174 | if (!rtc) { | 182 | if (!rtc) { |
| 175 | printk(warn_no_rtc); | 183 | printk(warn_no_rtc); |
| 176 | goto done; | 184 | return 0; |
| 177 | } | 185 | } |
| 178 | 186 | ||
| 179 | /* go for it */ | 187 | /* go for it */ |
| 180 | test_wakealarm(rtc, test_state); | 188 | test_wakealarm(rtc, test_state); |
| 181 | rtc_class_close(rtc); | 189 | rtc_class_close(rtc); |
| 182 | done: | ||
| 183 | return 0; | 190 | return 0; |
| 184 | } | 191 | } |
| 185 | late_initcall(test_suspend); | 192 | late_initcall(test_suspend); |
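The suspend_test change above defers mapping the 'test_suspend=' boot argument to a suspend_state_t until init time: the early parser only matches the string against the always-available pm_labels[] array, and the label is resolved against pm_states[] later, once PM init has filled it in. A userspace sketch of that two-step lookup; the arrays and strings below are stand-ins for pm_labels[]/pm_states[]:

```c
#include <stdio.h>
#include <string.h>

static const char *labels[] = { "mem", "standby", "freeze", NULL };
/* States that happen to be valid on this particular system. */
static const char *states[] = { "mem", NULL, "freeze" };

int main(void)
{
	const char *arg = "standby";
	const char *label = NULL;
	int i;

	/* Step 1 (boot-time parse): remember the label if it is known at all. */
	for (i = 0; labels[i]; i++)
		if (!strcmp(labels[i], arg))
			label = labels[i];

	if (!label)
		return 0;	/* nothing requested */

	/* Step 2 (late init): see whether that label maps to a usable state. */
	for (i = 0; i < 3; i++)
		if (states[i] && !strcmp(states[i], label)) {
			printf("testing state %d (%s)\n", i, label);
			return 0;
		}

	printf("can't test '%s' suspend state\n", label);
	return 0;
}
```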
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index e04c455a0e38..1ce770687ea8 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
| @@ -1665,15 +1665,15 @@ asmlinkage int vprintk_emit(int facility, int level, | |||
| 1665 | raw_spin_lock(&logbuf_lock); | 1665 | raw_spin_lock(&logbuf_lock); |
| 1666 | logbuf_cpu = this_cpu; | 1666 | logbuf_cpu = this_cpu; |
| 1667 | 1667 | ||
| 1668 | if (recursion_bug) { | 1668 | if (unlikely(recursion_bug)) { |
| 1669 | static const char recursion_msg[] = | 1669 | static const char recursion_msg[] = |
| 1670 | "BUG: recent printk recursion!"; | 1670 | "BUG: recent printk recursion!"; |
| 1671 | 1671 | ||
| 1672 | recursion_bug = 0; | 1672 | recursion_bug = 0; |
| 1673 | text_len = strlen(recursion_msg); | ||
| 1674 | /* emit KERN_CRIT message */ | 1673 | /* emit KERN_CRIT message */ |
| 1675 | printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0, | 1674 | printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0, |
| 1676 | NULL, 0, recursion_msg, text_len); | 1675 | NULL, 0, recursion_msg, |
| 1676 | strlen(recursion_msg)); | ||
| 1677 | } | 1677 | } |
| 1678 | 1678 | ||
| 1679 | /* | 1679 | /* |
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 71e64c718f75..6a86eb7bac45 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
| @@ -358,7 +358,7 @@ struct rcu_data { | |||
| 358 | struct rcu_head **nocb_gp_tail; | 358 | struct rcu_head **nocb_gp_tail; |
| 359 | long nocb_gp_count; | 359 | long nocb_gp_count; |
| 360 | long nocb_gp_count_lazy; | 360 | long nocb_gp_count_lazy; |
| 361 | bool nocb_leader_wake; /* Is the nocb leader thread awake? */ | 361 | bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */ |
| 362 | struct rcu_data *nocb_next_follower; | 362 | struct rcu_data *nocb_next_follower; |
| 363 | /* Next follower in wakeup chain. */ | 363 | /* Next follower in wakeup chain. */ |
| 364 | 364 | ||
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 00dc411e9676..a7997e272564 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
| @@ -2074,9 +2074,9 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force) | |||
| 2074 | 2074 | ||
| 2075 | if (!ACCESS_ONCE(rdp_leader->nocb_kthread)) | 2075 | if (!ACCESS_ONCE(rdp_leader->nocb_kthread)) |
| 2076 | return; | 2076 | return; |
| 2077 | if (!ACCESS_ONCE(rdp_leader->nocb_leader_wake) || force) { | 2077 | if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) { |
| 2078 | /* Prior xchg orders against prior callback enqueue. */ | 2078 | /* Prior xchg orders against prior callback enqueue. */ |
| 2079 | ACCESS_ONCE(rdp_leader->nocb_leader_wake) = true; | 2079 | ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false; |
| 2080 | wake_up(&rdp_leader->nocb_wq); | 2080 | wake_up(&rdp_leader->nocb_wq); |
| 2081 | } | 2081 | } |
| 2082 | } | 2082 | } |
| @@ -2253,7 +2253,7 @@ wait_again: | |||
| 2253 | if (!rcu_nocb_poll) { | 2253 | if (!rcu_nocb_poll) { |
| 2254 | trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); | 2254 | trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); |
| 2255 | wait_event_interruptible(my_rdp->nocb_wq, | 2255 | wait_event_interruptible(my_rdp->nocb_wq, |
| 2256 | ACCESS_ONCE(my_rdp->nocb_leader_wake)); | 2256 | !ACCESS_ONCE(my_rdp->nocb_leader_sleep)); |
| 2257 | /* Memory barrier handled by smp_mb() calls below and repoll. */ | 2257 | /* Memory barrier handled by smp_mb() calls below and repoll. */ |
| 2258 | } else if (firsttime) { | 2258 | } else if (firsttime) { |
| 2259 | firsttime = false; /* Don't drown trace log with "Poll"! */ | 2259 | firsttime = false; /* Don't drown trace log with "Poll"! */ |
| @@ -2292,12 +2292,12 @@ wait_again: | |||
| 2292 | schedule_timeout_interruptible(1); | 2292 | schedule_timeout_interruptible(1); |
| 2293 | 2293 | ||
| 2294 | /* Rescan in case we were a victim of memory ordering. */ | 2294 | /* Rescan in case we were a victim of memory ordering. */ |
| 2295 | my_rdp->nocb_leader_wake = false; | 2295 | my_rdp->nocb_leader_sleep = true; |
| 2296 | smp_mb(); /* Ensure _wake false before scan. */ | 2296 | smp_mb(); /* Ensure _sleep true before scan. */ |
| 2297 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) | 2297 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) |
| 2298 | if (ACCESS_ONCE(rdp->nocb_head)) { | 2298 | if (ACCESS_ONCE(rdp->nocb_head)) { |
| 2299 | /* Found CB, so short-circuit next wait. */ | 2299 | /* Found CB, so short-circuit next wait. */ |
| 2300 | my_rdp->nocb_leader_wake = true; | 2300 | my_rdp->nocb_leader_sleep = false; |
| 2301 | break; | 2301 | break; |
| 2302 | } | 2302 | } |
| 2303 | goto wait_again; | 2303 | goto wait_again; |
| @@ -2307,17 +2307,17 @@ wait_again: | |||
| 2307 | rcu_nocb_wait_gp(my_rdp); | 2307 | rcu_nocb_wait_gp(my_rdp); |
| 2308 | 2308 | ||
| 2309 | /* | 2309 | /* |
| 2310 | * We left ->nocb_leader_wake set to reduce cache thrashing. | 2310 | * We left ->nocb_leader_sleep unset to reduce cache thrashing. |
| 2311 | * We clear it now, but recheck for new callbacks while | 2311 | * We set it now, but recheck for new callbacks while |
| 2312 | * traversing our follower list. | 2312 | * traversing our follower list. |
| 2313 | */ | 2313 | */ |
| 2314 | my_rdp->nocb_leader_wake = false; | 2314 | my_rdp->nocb_leader_sleep = true; |
| 2315 | smp_mb(); /* Ensure _wake false before scan of ->nocb_head. */ | 2315 | smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */ |
| 2316 | 2316 | ||
| 2317 | /* Each pass through the following loop wakes a follower, if needed. */ | 2317 | /* Each pass through the following loop wakes a follower, if needed. */ |
| 2318 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { | 2318 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { |
| 2319 | if (ACCESS_ONCE(rdp->nocb_head)) | 2319 | if (ACCESS_ONCE(rdp->nocb_head)) |
| 2320 | my_rdp->nocb_leader_wake = true; /* No need to wait. */ | 2320 | my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/ |
| 2321 | if (!rdp->nocb_gp_head) | 2321 | if (!rdp->nocb_gp_head) |
| 2322 | continue; /* No CBs, so no need to wake follower. */ | 2322 | continue; /* No CBs, so no need to wake follower. */ |
| 2323 | 2323 | ||
diff --git a/kernel/resource.c b/kernel/resource.c
index da14b8d09296..60c5a3856ab7 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
| @@ -351,15 +351,12 @@ static int find_next_iomem_res(struct resource *res, char *name, | |||
| 351 | end = res->end; | 351 | end = res->end; |
| 352 | BUG_ON(start >= end); | 352 | BUG_ON(start >= end); |
| 353 | 353 | ||
| 354 | read_lock(&resource_lock); | 354 | if (first_level_children_only) |
| 355 | |||
| 356 | if (first_level_children_only) { | ||
| 357 | p = iomem_resource.child; | ||
| 358 | sibling_only = true; | 355 | sibling_only = true; |
| 359 | } else | ||
| 360 | p = &iomem_resource; | ||
| 361 | 356 | ||
| 362 | while ((p = next_resource(p, sibling_only))) { | 357 | read_lock(&resource_lock); |
| 358 | |||
| 359 | for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) { | ||
| 363 | if (p->flags != res->flags) | 360 | if (p->flags != res->flags) |
| 364 | continue; | 361 | continue; |
| 365 | if (name && strcmp(p->name, name)) | 362 | if (name && strcmp(p->name, name)) |
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 4aec4a457431..a7077d3ae52f 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
| @@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid) | |||
| 464 | static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, | 464 | static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, |
| 465 | ktime_t now) | 465 | ktime_t now) |
| 466 | { | 466 | { |
| 467 | unsigned long flags; | ||
| 467 | struct k_itimer *ptr = container_of(alarm, struct k_itimer, | 468 | struct k_itimer *ptr = container_of(alarm, struct k_itimer, |
| 468 | it.alarm.alarmtimer); | 469 | it.alarm.alarmtimer); |
| 469 | if (posix_timer_event(ptr, 0) != 0) | 470 | enum alarmtimer_restart result = ALARMTIMER_NORESTART; |
| 470 | ptr->it_overrun++; | 471 | |
| 472 | spin_lock_irqsave(&ptr->it_lock, flags); | ||
| 473 | if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) { | ||
| 474 | if (posix_timer_event(ptr, 0) != 0) | ||
| 475 | ptr->it_overrun++; | ||
| 476 | } | ||
| 471 | 477 | ||
| 472 | /* Re-add periodic timers */ | 478 | /* Re-add periodic timers */ |
| 473 | if (ptr->it.alarm.interval.tv64) { | 479 | if (ptr->it.alarm.interval.tv64) { |
| 474 | ptr->it_overrun += alarm_forward(alarm, now, | 480 | ptr->it_overrun += alarm_forward(alarm, now, |
| 475 | ptr->it.alarm.interval); | 481 | ptr->it.alarm.interval); |
| 476 | return ALARMTIMER_RESTART; | 482 | result = ALARMTIMER_RESTART; |
| 477 | } | 483 | } |
| 478 | return ALARMTIMER_NORESTART; | 484 | spin_unlock_irqrestore(&ptr->it_lock, flags); |
| 485 | |||
| 486 | return result; | ||
| 479 | } | 487 | } |
| 480 | 488 | ||
| 481 | /** | 489 | /** |
| @@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer) | |||
| 541 | * @new_timer: k_itimer pointer | 549 | * @new_timer: k_itimer pointer |
| 542 | * @cur_setting: itimerspec data to fill | 550 | * @cur_setting: itimerspec data to fill |
| 543 | * | 551 | * |
| 544 | * Copies the itimerspec data out from the k_itimer | 552 | * Copies out the current itimerspec data |
| 545 | */ | 553 | */ |
| 546 | static void alarm_timer_get(struct k_itimer *timr, | 554 | static void alarm_timer_get(struct k_itimer *timr, |
| 547 | struct itimerspec *cur_setting) | 555 | struct itimerspec *cur_setting) |
| 548 | { | 556 | { |
| 549 | memset(cur_setting, 0, sizeof(struct itimerspec)); | 557 | ktime_t relative_expiry_time = |
| 558 | alarm_expires_remaining(&(timr->it.alarm.alarmtimer)); | ||
| 559 | |||
| 560 | if (ktime_to_ns(relative_expiry_time) > 0) { | ||
| 561 | cur_setting->it_value = ktime_to_timespec(relative_expiry_time); | ||
| 562 | } else { | ||
| 563 | cur_setting->it_value.tv_sec = 0; | ||
| 564 | cur_setting->it_value.tv_nsec = 0; | ||
| 565 | } | ||
| 550 | 566 | ||
| 551 | cur_setting->it_interval = | 567 | cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval); |
| 552 | ktime_to_timespec(timr->it.alarm.interval); | ||
| 553 | cur_setting->it_value = | ||
| 554 | ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires); | ||
| 555 | return; | ||
| 556 | } | 568 | } |
| 557 | 569 | ||
| 558 | /** | 570 | /** |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 99aa6ee3908f..f654a8a298fa 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
| @@ -225,6 +225,20 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { | |||
| 225 | }; | 225 | }; |
| 226 | 226 | ||
| 227 | /* | 227 | /* |
| 228 | * Kick this CPU if it's full dynticks in order to force it to | ||
| 229 | * re-evaluate its dependency on the tick and restart it if necessary. | ||
| 230 | * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(), | ||
| 231 | * is NMI safe. | ||
| 232 | */ | ||
| 233 | void tick_nohz_full_kick(void) | ||
| 234 | { | ||
| 235 | if (!tick_nohz_full_cpu(smp_processor_id())) | ||
| 236 | return; | ||
| 237 | |||
| 238 | irq_work_queue(&__get_cpu_var(nohz_full_kick_work)); | ||
| 239 | } | ||
| 240 | |||
| 241 | /* | ||
| 228 | * Kick the CPU if it's full dynticks in order to force it to | 242 | * Kick the CPU if it's full dynticks in order to force it to |
| 229 | * re-evaluate its dependency on the tick and restart it if necessary. | 243 | * re-evaluate its dependency on the tick and restart it if necessary. |
| 230 | */ | 244 | */ |
diff --git a/kernel/time/time.c b/kernel/time/time.c
index f0294ba14634..a9ae20fb0b11 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
| @@ -559,17 +559,20 @@ EXPORT_SYMBOL(usecs_to_jiffies); | |||
| 559 | * that a remainder subtract here would not do the right thing as the | 559 | * that a remainder subtract here would not do the right thing as the |
| 560 | * resolution values don't fall on second boundries. I.e. the line: | 560 | * resolution values don't fall on second boundries. I.e. the line: |
| 561 | * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. | 561 | * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. |
| 562 | * Note that due to the small error in the multiplier here, this | ||
| 563 | * rounding is incorrect for sufficiently large values of tv_nsec, but | ||
| 564 | * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're | ||
| 565 | * OK. | ||
| 562 | * | 566 | * |
| 563 | * Rather, we just shift the bits off the right. | 567 | * Rather, we just shift the bits off the right. |
| 564 | * | 568 | * |
| 565 | * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec | 569 | * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec |
| 566 | * value to a scaled second value. | 570 | * value to a scaled second value. |
| 567 | */ | 571 | */ |
| 568 | unsigned long | 572 | static unsigned long |
| 569 | timespec_to_jiffies(const struct timespec *value) | 573 | __timespec_to_jiffies(unsigned long sec, long nsec) |
| 570 | { | 574 | { |
| 571 | unsigned long sec = value->tv_sec; | 575 | nsec = nsec + TICK_NSEC - 1; |
| 572 | long nsec = value->tv_nsec + TICK_NSEC - 1; | ||
| 573 | 576 | ||
| 574 | if (sec >= MAX_SEC_IN_JIFFIES){ | 577 | if (sec >= MAX_SEC_IN_JIFFIES){ |
| 575 | sec = MAX_SEC_IN_JIFFIES; | 578 | sec = MAX_SEC_IN_JIFFIES; |
| @@ -580,6 +583,13 @@ timespec_to_jiffies(const struct timespec *value) | |||
| 580 | (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; | 583 | (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; |
| 581 | 584 | ||
| 582 | } | 585 | } |
| 586 | |||
| 587 | unsigned long | ||
| 588 | timespec_to_jiffies(const struct timespec *value) | ||
| 589 | { | ||
| 590 | return __timespec_to_jiffies(value->tv_sec, value->tv_nsec); | ||
| 591 | } | ||
| 592 | |||
| 583 | EXPORT_SYMBOL(timespec_to_jiffies); | 593 | EXPORT_SYMBOL(timespec_to_jiffies); |
| 584 | 594 | ||
| 585 | void | 595 | void |
| @@ -596,31 +606,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) | |||
| 596 | } | 606 | } |
| 597 | EXPORT_SYMBOL(jiffies_to_timespec); | 607 | EXPORT_SYMBOL(jiffies_to_timespec); |
| 598 | 608 | ||
| 599 | /* Same for "timeval" | 609 | /* |
| 600 | * | 610 | * We could use a similar algorithm to timespec_to_jiffies (with a |
| 601 | * Well, almost. The problem here is that the real system resolution is | 611 | * different multiplier for usec instead of nsec). But this has a |
| 602 | * in nanoseconds and the value being converted is in micro seconds. | 612 | * problem with rounding: we can't exactly add TICK_NSEC - 1 to the |
| 603 | * Also for some machines (those that use HZ = 1024, in-particular), | 613 | * usec value, since it's not necessarily integral. |
| 604 | * there is a LARGE error in the tick size in microseconds. | 614 | * |
| 605 | 615 | * We could instead round in the intermediate scaled representation | |
| 606 | * The solution we use is to do the rounding AFTER we convert the | 616 | * (i.e. in units of 1/2^(large scale) jiffies) but that's also |
| 607 | * microsecond part. Thus the USEC_ROUND, the bits to be shifted off. | 617 | * perilous: the scaling introduces a small positive error, which |
| 608 | * Instruction wise, this should cost only an additional add with carry | 618 | * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1 |
| 609 | * instruction above the way it was done above. | 619 | * units to the intermediate before shifting) leads to accidental |
| 620 | * overflow and overestimates. | ||
| 621 | * | ||
| 622 | * At the cost of one additional multiplication by a constant, just | ||
| 623 | * use the timespec implementation. | ||
| 610 | */ | 624 | */ |
| 611 | unsigned long | 625 | unsigned long |
| 612 | timeval_to_jiffies(const struct timeval *value) | 626 | timeval_to_jiffies(const struct timeval *value) |
| 613 | { | 627 | { |
| 614 | unsigned long sec = value->tv_sec; | 628 | return __timespec_to_jiffies(value->tv_sec, |
| 615 | long usec = value->tv_usec; | 629 | value->tv_usec * NSEC_PER_USEC); |
| 616 | |||
| 617 | if (sec >= MAX_SEC_IN_JIFFIES){ | ||
| 618 | sec = MAX_SEC_IN_JIFFIES; | ||
| 619 | usec = 0; | ||
| 620 | } | ||
| 621 | return (((u64)sec * SEC_CONVERSION) + | ||
| 622 | (((u64)usec * USEC_CONVERSION + USEC_ROUND) >> | ||
| 623 | (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; | ||
| 624 | } | 630 | } |
| 625 | EXPORT_SYMBOL(timeval_to_jiffies); | 631 | EXPORT_SYMBOL(timeval_to_jiffies); |
| 626 | 632 | ||
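The time.c change above funnels timeval_to_jiffies() through the timespec path by converting microseconds to nanoseconds first, instead of rounding in the scaled microsecond representation where the round-up constant can push the intermediate value over. A deliberately simplified illustration of the wrapper idea, using straight HZ arithmetic rather than the kernel's scaled-multiply constants:

```c
#include <stdio.h>

#define DEMO_HZ		1000
#define NSEC_PER_SEC	1000000000L
#define NSEC_PER_USEC	1000L
#define TICK_NSEC	(NSEC_PER_SEC / DEMO_HZ)

/* Round nanoseconds up to the next tick and convert to jiffies. */
static unsigned long demo_timespec_to_jiffies(unsigned long sec, long nsec)
{
	nsec = nsec + TICK_NSEC - 1;
	return sec * DEMO_HZ + nsec / TICK_NSEC;
}

/* One conversion path for both units: scale usec to nsec, then reuse it. */
static unsigned long demo_timeval_to_jiffies(unsigned long sec, long usec)
{
	return demo_timespec_to_jiffies(sec, usec * NSEC_PER_USEC);
}

int main(void)
{
	printf("%lu\n", demo_timespec_to_jiffies(1, 1));	/* 1001: even 1 ns costs a tick */
	printf("%lu\n", demo_timeval_to_jiffies(1, 1));		/* 1001: same rounding behaviour */
	return 0;
}
```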
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index fb4a9c2cf8d9..ec1791fae965 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
| @@ -442,11 +442,12 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) | |||
| 442 | tk->ntp_error = 0; | 442 | tk->ntp_error = 0; |
| 443 | ntp_clear(); | 443 | ntp_clear(); |
| 444 | } | 444 | } |
| 445 | update_vsyscall(tk); | ||
| 446 | update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); | ||
| 447 | 445 | ||
| 448 | tk_update_ktime_data(tk); | 446 | tk_update_ktime_data(tk); |
| 449 | 447 | ||
| 448 | update_vsyscall(tk); | ||
| 449 | update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); | ||
| 450 | |||
| 450 | if (action & TK_MIRROR) | 451 | if (action & TK_MIRROR) |
| 451 | memcpy(&shadow_timekeeper, &tk_core.timekeeper, | 452 | memcpy(&shadow_timekeeper, &tk_core.timekeeper, |
| 452 | sizeof(tk_core.timekeeper)); | 453 | sizeof(tk_core.timekeeper)); |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1654b12c891a..5916a8e59e87 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
| @@ -65,15 +65,21 @@ | |||
| 65 | #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL) | 65 | #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL) |
| 66 | 66 | ||
| 67 | #ifdef CONFIG_DYNAMIC_FTRACE | 67 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 68 | #define INIT_REGEX_LOCK(opsname) \ | 68 | #define INIT_OPS_HASH(opsname) \ |
| 69 | .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock), | 69 | .func_hash = &opsname.local_hash, \ |
| 70 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), | ||
| 71 | #define ASSIGN_OPS_HASH(opsname, val) \ | ||
| 72 | .func_hash = val, \ | ||
| 73 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), | ||
| 70 | #else | 74 | #else |
| 71 | #define INIT_REGEX_LOCK(opsname) | 75 | #define INIT_OPS_HASH(opsname) |
| 76 | #define ASSIGN_OPS_HASH(opsname, val) | ||
| 72 | #endif | 77 | #endif |
| 73 | 78 | ||
| 74 | static struct ftrace_ops ftrace_list_end __read_mostly = { | 79 | static struct ftrace_ops ftrace_list_end __read_mostly = { |
| 75 | .func = ftrace_stub, | 80 | .func = ftrace_stub, |
| 76 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, | 81 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, |
| 82 | INIT_OPS_HASH(ftrace_list_end) | ||
| 77 | }; | 83 | }; |
| 78 | 84 | ||
| 79 | /* ftrace_enabled is a method to turn ftrace on or off */ | 85 | /* ftrace_enabled is a method to turn ftrace on or off */ |
| @@ -140,7 +146,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops) | |||
| 140 | { | 146 | { |
| 141 | #ifdef CONFIG_DYNAMIC_FTRACE | 147 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 142 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { | 148 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { |
| 143 | mutex_init(&ops->regex_lock); | 149 | mutex_init(&ops->local_hash.regex_lock); |
| 150 | ops->func_hash = &ops->local_hash; | ||
| 144 | ops->flags |= FTRACE_OPS_FL_INITIALIZED; | 151 | ops->flags |= FTRACE_OPS_FL_INITIALIZED; |
| 145 | } | 152 | } |
| 146 | #endif | 153 | #endif |
| @@ -899,7 +906,7 @@ static void unregister_ftrace_profiler(void) | |||
| 899 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { | 906 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { |
| 900 | .func = function_profile_call, | 907 | .func = function_profile_call, |
| 901 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 908 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, |
| 902 | INIT_REGEX_LOCK(ftrace_profile_ops) | 909 | INIT_OPS_HASH(ftrace_profile_ops) |
| 903 | }; | 910 | }; |
| 904 | 911 | ||
| 905 | static int register_ftrace_profiler(void) | 912 | static int register_ftrace_profiler(void) |
| @@ -1081,11 +1088,12 @@ static const struct ftrace_hash empty_hash = { | |||
| 1081 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) | 1088 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) |
| 1082 | 1089 | ||
| 1083 | static struct ftrace_ops global_ops = { | 1090 | static struct ftrace_ops global_ops = { |
| 1084 | .func = ftrace_stub, | 1091 | .func = ftrace_stub, |
| 1085 | .notrace_hash = EMPTY_HASH, | 1092 | .local_hash.notrace_hash = EMPTY_HASH, |
| 1086 | .filter_hash = EMPTY_HASH, | 1093 | .local_hash.filter_hash = EMPTY_HASH, |
| 1087 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 1094 | INIT_OPS_HASH(global_ops) |
| 1088 | INIT_REGEX_LOCK(global_ops) | 1095 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
| 1096 | FTRACE_OPS_FL_INITIALIZED, | ||
| 1089 | }; | 1097 | }; |
| 1090 | 1098 | ||
| 1091 | struct ftrace_page { | 1099 | struct ftrace_page { |
| @@ -1226,8 +1234,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash) | |||
| 1226 | void ftrace_free_filter(struct ftrace_ops *ops) | 1234 | void ftrace_free_filter(struct ftrace_ops *ops) |
| 1227 | { | 1235 | { |
| 1228 | ftrace_ops_init(ops); | 1236 | ftrace_ops_init(ops); |
| 1229 | free_ftrace_hash(ops->filter_hash); | 1237 | free_ftrace_hash(ops->func_hash->filter_hash); |
| 1230 | free_ftrace_hash(ops->notrace_hash); | 1238 | free_ftrace_hash(ops->func_hash->notrace_hash); |
| 1231 | } | 1239 | } |
| 1232 | 1240 | ||
| 1233 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) | 1241 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) |
| @@ -1288,9 +1296,9 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) | |||
| 1288 | } | 1296 | } |
| 1289 | 1297 | ||
| 1290 | static void | 1298 | static void |
| 1291 | ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash); | 1299 | ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash); |
| 1292 | static void | 1300 | static void |
| 1293 | ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash); | 1301 | ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); |
| 1294 | 1302 | ||
| 1295 | static int | 1303 | static int |
| 1296 | ftrace_hash_move(struct ftrace_ops *ops, int enable, | 1304 | ftrace_hash_move(struct ftrace_ops *ops, int enable, |
| @@ -1342,13 +1350,13 @@ update: | |||
| 1342 | * Remove the current set, update the hash and add | 1350 | * Remove the current set, update the hash and add |
| 1343 | * them back. | 1351 | * them back. |
| 1344 | */ | 1352 | */ |
| 1345 | ftrace_hash_rec_disable(ops, enable); | 1353 | ftrace_hash_rec_disable_modify(ops, enable); |
| 1346 | 1354 | ||
| 1347 | old_hash = *dst; | 1355 | old_hash = *dst; |
| 1348 | rcu_assign_pointer(*dst, new_hash); | 1356 | rcu_assign_pointer(*dst, new_hash); |
| 1349 | free_ftrace_hash_rcu(old_hash); | 1357 | free_ftrace_hash_rcu(old_hash); |
| 1350 | 1358 | ||
| 1351 | ftrace_hash_rec_enable(ops, enable); | 1359 | ftrace_hash_rec_enable_modify(ops, enable); |
| 1352 | 1360 | ||
| 1353 | return 0; | 1361 | return 0; |
| 1354 | } | 1362 | } |
| @@ -1382,8 +1390,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | |||
| 1382 | return 0; | 1390 | return 0; |
| 1383 | #endif | 1391 | #endif |
| 1384 | 1392 | ||
| 1385 | filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); | 1393 | filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash); |
| 1386 | notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); | 1394 | notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash); |
| 1387 | 1395 | ||
| 1388 | if ((ftrace_hash_empty(filter_hash) || | 1396 | if ((ftrace_hash_empty(filter_hash) || |
| 1389 | ftrace_lookup_ip(filter_hash, ip)) && | 1397 | ftrace_lookup_ip(filter_hash, ip)) && |
| @@ -1503,25 +1511,38 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec) | |||
| 1503 | static void ftrace_remove_tramp(struct ftrace_ops *ops, | 1511 | static void ftrace_remove_tramp(struct ftrace_ops *ops, |
| 1504 | struct dyn_ftrace *rec) | 1512 | struct dyn_ftrace *rec) |
| 1505 | { | 1513 | { |
| 1506 | struct ftrace_func_entry *entry; | 1514 | /* If TRAMP is not set, no ops should have a trampoline for this */ |
| 1507 | 1515 | if (!(rec->flags & FTRACE_FL_TRAMP)) | |
| 1508 | entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip); | ||
| 1509 | if (!entry) | ||
| 1510 | return; | 1516 | return; |
| 1511 | 1517 | ||
| 1518 | rec->flags &= ~FTRACE_FL_TRAMP; | ||
| 1519 | |||
| 1520 | if ((!ftrace_hash_empty(ops->func_hash->filter_hash) && | ||
| 1521 | !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) || | ||
| 1522 | ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) | ||
| 1523 | return; | ||
| 1512 | /* | 1524 | /* |
| 1513 | * The tramp_hash entry will be removed at time | 1525 | * The tramp_hash entry will be removed at time |
| 1514 | * of update. | 1526 | * of update. |
| 1515 | */ | 1527 | */ |
| 1516 | ops->nr_trampolines--; | 1528 | ops->nr_trampolines--; |
| 1517 | rec->flags &= ~FTRACE_FL_TRAMP; | ||
| 1518 | } | 1529 | } |
| 1519 | 1530 | ||
| 1520 | static void ftrace_clear_tramps(struct dyn_ftrace *rec) | 1531 | static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops) |
| 1521 | { | 1532 | { |
| 1522 | struct ftrace_ops *op; | 1533 | struct ftrace_ops *op; |
| 1523 | 1534 | ||
| 1535 | /* If TRAMP is not set, no ops should have a trampoline for this */ | ||
| 1536 | if (!(rec->flags & FTRACE_FL_TRAMP)) | ||
| 1537 | return; | ||
| 1538 | |||
| 1524 | do_for_each_ftrace_op(op, ftrace_ops_list) { | 1539 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
| 1540 | /* | ||
| 1541 | * This function is called to clear other tramps | ||
| 1542 | * not the one that is being updated. | ||
| 1543 | */ | ||
| 1544 | if (op == ops) | ||
| 1545 | continue; | ||
| 1525 | if (op->nr_trampolines) | 1546 | if (op->nr_trampolines) |
| 1526 | ftrace_remove_tramp(op, rec); | 1547 | ftrace_remove_tramp(op, rec); |
| 1527 | } while_for_each_ftrace_op(op); | 1548 | } while_for_each_ftrace_op(op); |
| @@ -1554,14 +1575,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | |||
| 1554 | * gets inversed. | 1575 | * gets inversed. |
| 1555 | */ | 1576 | */ |
| 1556 | if (filter_hash) { | 1577 | if (filter_hash) { |
| 1557 | hash = ops->filter_hash; | 1578 | hash = ops->func_hash->filter_hash; |
| 1558 | other_hash = ops->notrace_hash; | 1579 | other_hash = ops->func_hash->notrace_hash; |
| 1559 | if (ftrace_hash_empty(hash)) | 1580 | if (ftrace_hash_empty(hash)) |
| 1560 | all = 1; | 1581 | all = 1; |
| 1561 | } else { | 1582 | } else { |
| 1562 | inc = !inc; | 1583 | inc = !inc; |
| 1563 | hash = ops->notrace_hash; | 1584 | hash = ops->func_hash->notrace_hash; |
| 1564 | other_hash = ops->filter_hash; | 1585 | other_hash = ops->func_hash->filter_hash; |
| 1565 | /* | 1586 | /* |
| 1566 | * If the notrace hash has no items, | 1587 | * If the notrace hash has no items, |
| 1567 | * then there's nothing to do. | 1588 | * then there's nothing to do. |
| @@ -1622,13 +1643,10 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | |||
| 1622 | /* | 1643 | /* |
| 1623 | * If we are adding another function callback | 1644 | * If we are adding another function callback |
| 1624 | * to this function, and the previous had a | 1645 | * to this function, and the previous had a |
| 1625 | * trampoline used, then we need to go back to | 1646 | * custom trampoline in use, then we need to go |
| 1626 | * the default trampoline. | 1647 | * back to the default trampoline. |
| 1627 | */ | 1648 | */ |
| 1628 | rec->flags &= ~FTRACE_FL_TRAMP; | 1649 | ftrace_clear_tramps(rec, ops); |
| 1629 | |||
| 1630 | /* remove trampolines from any ops for this rec */ | ||
| 1631 | ftrace_clear_tramps(rec); | ||
| 1632 | } | 1650 | } |
| 1633 | 1651 | ||
| 1634 | /* | 1652 | /* |
| @@ -1682,6 +1700,41 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops, | |||
| 1682 | __ftrace_hash_rec_update(ops, filter_hash, 1); | 1700 | __ftrace_hash_rec_update(ops, filter_hash, 1); |
| 1683 | } | 1701 | } |
| 1684 | 1702 | ||
| 1703 | static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, | ||
| 1704 | int filter_hash, int inc) | ||
| 1705 | { | ||
| 1706 | struct ftrace_ops *op; | ||
| 1707 | |||
| 1708 | __ftrace_hash_rec_update(ops, filter_hash, inc); | ||
| 1709 | |||
| 1710 | if (ops->func_hash != &global_ops.local_hash) | ||
| 1711 | return; | ||
| 1712 | |||
| 1713 | /* | ||
| 1714 | * If the ops shares the global_ops hash, then we need to update | ||
| 1715 | * all ops that are enabled and use this hash. | ||
| 1716 | */ | ||
| 1717 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
| 1718 | /* Already done */ | ||
| 1719 | if (op == ops) | ||
| 1720 | continue; | ||
| 1721 | if (op->func_hash == &global_ops.local_hash) | ||
| 1722 | __ftrace_hash_rec_update(op, filter_hash, inc); | ||
| 1723 | } while_for_each_ftrace_op(op); | ||
| 1724 | } | ||
| 1725 | |||
| 1726 | static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, | ||
| 1727 | int filter_hash) | ||
| 1728 | { | ||
| 1729 | ftrace_hash_rec_update_modify(ops, filter_hash, 0); | ||
| 1730 | } | ||
| 1731 | |||
| 1732 | static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, | ||
| 1733 | int filter_hash) | ||
| 1734 | { | ||
| 1735 | ftrace_hash_rec_update_modify(ops, filter_hash, 1); | ||
| 1736 | } | ||
| 1737 | |||
| 1685 | static void print_ip_ins(const char *fmt, unsigned char *p) | 1738 | static void print_ip_ins(const char *fmt, unsigned char *p) |
| 1686 | { | 1739 | { |
| 1687 | int i; | 1740 | int i; |
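The new ftrace_hash_rec_update_modify() helpers above cover the case where an ops shares global_ops' hash: updating one such ops has to fan the update out to every other registered ops that points at the same hash. A rough stand-alone sketch of that fan-out, with invented types and names (`sketch_ops`, `update_one()`) standing in for the real ones:

```c
/*
 * Sketch only: after updating the modified ops itself, walk the list and
 * repeat the update for every other ops that points at the same shared hash.
 */
struct sketch_ops {
	struct sketch_ops *next;
	void *func_hash;	/* pointer identity is what matters here */
};

static void update_one(struct sketch_ops *op, int filter_hash, int inc)
{
	/* placeholder for the real per-ops record update */
	(void)op; (void)filter_hash; (void)inc;
}

static void update_modify(struct sketch_ops *list, struct sketch_ops *ops,
			  void *shared_hash, int filter_hash, int inc)
{
	update_one(ops, filter_hash, inc);

	if (ops->func_hash != shared_hash)
		return;		/* private hash: nothing to fan out */

	for (struct sketch_ops *op = list; op; op = op->next) {
		if (op == ops)
			continue;	/* already done */
		if (op->func_hash == shared_hash)
			update_one(op, filter_hash, inc);
	}
}
```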
| @@ -1896,8 +1949,8 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) | |||
| 1896 | if (rec->flags & FTRACE_FL_TRAMP) { | 1949 | if (rec->flags & FTRACE_FL_TRAMP) { |
| 1897 | ops = ftrace_find_tramp_ops_new(rec); | 1950 | ops = ftrace_find_tramp_ops_new(rec); |
| 1898 | if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { | 1951 | if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { |
| 1899 | pr_warning("Bad trampoline accounting at: %p (%pS)\n", | 1952 | pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", |
| 1900 | (void *)rec->ip, (void *)rec->ip); | 1953 | (void *)rec->ip, (void *)rec->ip, rec->flags); |
| 1901 | /* Ftrace is shutting down, return anything */ | 1954 | /* Ftrace is shutting down, return anything */ |
| 1902 | return (unsigned long)FTRACE_ADDR; | 1955 | return (unsigned long)FTRACE_ADDR; |
| 1903 | } | 1956 | } |
| @@ -1964,7 +2017,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
| 1964 | return ftrace_make_call(rec, ftrace_addr); | 2017 | return ftrace_make_call(rec, ftrace_addr); |
| 1965 | 2018 | ||
| 1966 | case FTRACE_UPDATE_MAKE_NOP: | 2019 | case FTRACE_UPDATE_MAKE_NOP: |
| 1967 | return ftrace_make_nop(NULL, rec, ftrace_addr); | 2020 | return ftrace_make_nop(NULL, rec, ftrace_old_addr); |
| 1968 | 2021 | ||
| 1969 | case FTRACE_UPDATE_MODIFY_CALL: | 2022 | case FTRACE_UPDATE_MODIFY_CALL: |
| 1970 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); | 2023 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); |
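The one-line MAKE_NOP change above passes ftrace_old_addr rather than ftrace_addr: when a call site is turned back into a nop, the patching code verifies that the site currently calls the address it is given before rewriting it, so the expected value must be the old target. A toy illustration of that verify-then-patch contract follows; every name here is hypothetical, not the arch ftrace API.

```c
#include <stdbool.h>
#include <stdint.h>

/* Toy model: a call site remembers the target it currently calls. */
struct call_site {
	uintptr_t target;	/* 0 means the site is already a nop */
};

/*
 * Refuse to nop the site unless it really calls the address the caller
 * claims it does; passing the new address instead of the old one would
 * make this sanity check compare against the wrong expectation.
 */
static bool make_nop(struct call_site *site, uintptr_t expected_old_target)
{
	if (site->target != expected_old_target)
		return false;	/* accounting bug: wrong target recorded */
	site->target = 0;
	return true;
}
```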
| @@ -2227,7 +2280,10 @@ static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops) | |||
| 2227 | } while_for_each_ftrace_rec(); | 2280 | } while_for_each_ftrace_rec(); |
| 2228 | 2281 | ||
| 2229 | /* The number of recs in the hash must match nr_trampolines */ | 2282 | /* The number of recs in the hash must match nr_trampolines */ |
| 2230 | FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines); | 2283 | if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines)) |
| 2284 | pr_warn("count=%ld trampolines=%d\n", | ||
| 2285 | ops->tramp_hash->count, | ||
| 2286 | ops->nr_trampolines); | ||
| 2231 | 2287 | ||
| 2232 | return 0; | 2288 | return 0; |
| 2233 | } | 2289 | } |
| @@ -2436,8 +2492,8 @@ static inline int ops_traces_mod(struct ftrace_ops *ops) | |||
| 2436 | * Filter_hash being empty will default to trace module. | 2492 | * Filter_hash being empty will default to trace module. |
| 2437 | * But notrace hash requires a test of individual module functions. | 2493 | * But notrace hash requires a test of individual module functions. |
| 2438 | */ | 2494 | */ |
| 2439 | return ftrace_hash_empty(ops->filter_hash) && | 2495 | return ftrace_hash_empty(ops->func_hash->filter_hash) && |
| 2440 | ftrace_hash_empty(ops->notrace_hash); | 2496 | ftrace_hash_empty(ops->func_hash->notrace_hash); |
| 2441 | } | 2497 | } |
| 2442 | 2498 | ||
| 2443 | /* | 2499 | /* |
| @@ -2459,12 +2515,12 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) | |||
| 2459 | return 0; | 2515 | return 0; |
| 2460 | 2516 | ||
| 2461 | /* The function must be in the filter */ | 2517 | /* The function must be in the filter */ |
| 2462 | if (!ftrace_hash_empty(ops->filter_hash) && | 2518 | if (!ftrace_hash_empty(ops->func_hash->filter_hash) && |
| 2463 | !ftrace_lookup_ip(ops->filter_hash, rec->ip)) | 2519 | !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) |
| 2464 | return 0; | 2520 | return 0; |
| 2465 | 2521 | ||
| 2466 | /* If in notrace hash, we ignore it too */ | 2522 | /* If in notrace hash, we ignore it too */ |
| 2467 | if (ftrace_lookup_ip(ops->notrace_hash, rec->ip)) | 2523 | if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) |
| 2468 | return 0; | 2524 | return 0; |
| 2469 | 2525 | ||
| 2470 | return 1; | 2526 | return 1; |
| @@ -2785,10 +2841,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 2785 | } else { | 2841 | } else { |
| 2786 | rec = &iter->pg->records[iter->idx++]; | 2842 | rec = &iter->pg->records[iter->idx++]; |
| 2787 | if (((iter->flags & FTRACE_ITER_FILTER) && | 2843 | if (((iter->flags & FTRACE_ITER_FILTER) && |
| 2788 | !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || | 2844 | !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) || |
| 2789 | 2845 | ||
| 2790 | ((iter->flags & FTRACE_ITER_NOTRACE) && | 2846 | ((iter->flags & FTRACE_ITER_NOTRACE) && |
| 2791 | !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || | 2847 | !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) || |
| 2792 | 2848 | ||
| 2793 | ((iter->flags & FTRACE_ITER_ENABLED) && | 2849 | ((iter->flags & FTRACE_ITER_ENABLED) && |
| 2794 | !(rec->flags & FTRACE_FL_ENABLED))) { | 2850 | !(rec->flags & FTRACE_FL_ENABLED))) { |
| @@ -2837,9 +2893,9 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 2837 | * functions are enabled. | 2893 | * functions are enabled. |
| 2838 | */ | 2894 | */ |
| 2839 | if ((iter->flags & FTRACE_ITER_FILTER && | 2895 | if ((iter->flags & FTRACE_ITER_FILTER && |
| 2840 | ftrace_hash_empty(ops->filter_hash)) || | 2896 | ftrace_hash_empty(ops->func_hash->filter_hash)) || |
| 2841 | (iter->flags & FTRACE_ITER_NOTRACE && | 2897 | (iter->flags & FTRACE_ITER_NOTRACE && |
| 2842 | ftrace_hash_empty(ops->notrace_hash))) { | 2898 | ftrace_hash_empty(ops->func_hash->notrace_hash))) { |
| 2843 | if (*pos > 0) | 2899 | if (*pos > 0) |
| 2844 | return t_hash_start(m, pos); | 2900 | return t_hash_start(m, pos); |
| 2845 | iter->flags |= FTRACE_ITER_PRINTALL; | 2901 | iter->flags |= FTRACE_ITER_PRINTALL; |
| @@ -3001,12 +3057,12 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
| 3001 | iter->ops = ops; | 3057 | iter->ops = ops; |
| 3002 | iter->flags = flag; | 3058 | iter->flags = flag; |
| 3003 | 3059 | ||
| 3004 | mutex_lock(&ops->regex_lock); | 3060 | mutex_lock(&ops->func_hash->regex_lock); |
| 3005 | 3061 | ||
| 3006 | if (flag & FTRACE_ITER_NOTRACE) | 3062 | if (flag & FTRACE_ITER_NOTRACE) |
| 3007 | hash = ops->notrace_hash; | 3063 | hash = ops->func_hash->notrace_hash; |
| 3008 | else | 3064 | else |
| 3009 | hash = ops->filter_hash; | 3065 | hash = ops->func_hash->filter_hash; |
| 3010 | 3066 | ||
| 3011 | if (file->f_mode & FMODE_WRITE) { | 3067 | if (file->f_mode & FMODE_WRITE) { |
| 3012 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; | 3068 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
| @@ -3041,7 +3097,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
| 3041 | file->private_data = iter; | 3097 | file->private_data = iter; |
| 3042 | 3098 | ||
| 3043 | out_unlock: | 3099 | out_unlock: |
| 3044 | mutex_unlock(&ops->regex_lock); | 3100 | mutex_unlock(&ops->func_hash->regex_lock); |
| 3045 | 3101 | ||
| 3046 | return ret; | 3102 | return ret; |
| 3047 | } | 3103 | } |
| @@ -3279,7 +3335,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly = | |||
| 3279 | { | 3335 | { |
| 3280 | .func = function_trace_probe_call, | 3336 | .func = function_trace_probe_call, |
| 3281 | .flags = FTRACE_OPS_FL_INITIALIZED, | 3337 | .flags = FTRACE_OPS_FL_INITIALIZED, |
| 3282 | INIT_REGEX_LOCK(trace_probe_ops) | 3338 | INIT_OPS_HASH(trace_probe_ops) |
| 3283 | }; | 3339 | }; |
| 3284 | 3340 | ||
| 3285 | static int ftrace_probe_registered; | 3341 | static int ftrace_probe_registered; |
| @@ -3342,7 +3398,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3342 | void *data) | 3398 | void *data) |
| 3343 | { | 3399 | { |
| 3344 | struct ftrace_func_probe *entry; | 3400 | struct ftrace_func_probe *entry; |
| 3345 | struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash; | 3401 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; |
| 3346 | struct ftrace_hash *hash; | 3402 | struct ftrace_hash *hash; |
| 3347 | struct ftrace_page *pg; | 3403 | struct ftrace_page *pg; |
| 3348 | struct dyn_ftrace *rec; | 3404 | struct dyn_ftrace *rec; |
| @@ -3359,7 +3415,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3359 | if (WARN_ON(not)) | 3415 | if (WARN_ON(not)) |
| 3360 | return -EINVAL; | 3416 | return -EINVAL; |
| 3361 | 3417 | ||
| 3362 | mutex_lock(&trace_probe_ops.regex_lock); | 3418 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); |
| 3363 | 3419 | ||
| 3364 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | 3420 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); |
| 3365 | if (!hash) { | 3421 | if (!hash) { |
| @@ -3428,7 +3484,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3428 | out_unlock: | 3484 | out_unlock: |
| 3429 | mutex_unlock(&ftrace_lock); | 3485 | mutex_unlock(&ftrace_lock); |
| 3430 | out: | 3486 | out: |
| 3431 | mutex_unlock(&trace_probe_ops.regex_lock); | 3487 | mutex_unlock(&trace_probe_ops.func_hash->regex_lock); |
| 3432 | free_ftrace_hash(hash); | 3488 | free_ftrace_hash(hash); |
| 3433 | 3489 | ||
| 3434 | return count; | 3490 | return count; |
| @@ -3446,7 +3502,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3446 | struct ftrace_func_entry *rec_entry; | 3502 | struct ftrace_func_entry *rec_entry; |
| 3447 | struct ftrace_func_probe *entry; | 3503 | struct ftrace_func_probe *entry; |
| 3448 | struct ftrace_func_probe *p; | 3504 | struct ftrace_func_probe *p; |
| 3449 | struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash; | 3505 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; |
| 3450 | struct list_head free_list; | 3506 | struct list_head free_list; |
| 3451 | struct ftrace_hash *hash; | 3507 | struct ftrace_hash *hash; |
| 3452 | struct hlist_node *tmp; | 3508 | struct hlist_node *tmp; |
| @@ -3468,7 +3524,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3468 | return; | 3524 | return; |
| 3469 | } | 3525 | } |
| 3470 | 3526 | ||
| 3471 | mutex_lock(&trace_probe_ops.regex_lock); | 3527 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); |
| 3472 | 3528 | ||
| 3473 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | 3529 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); |
| 3474 | if (!hash) | 3530 | if (!hash) |
| @@ -3521,7 +3577,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3521 | mutex_unlock(&ftrace_lock); | 3577 | mutex_unlock(&ftrace_lock); |
| 3522 | 3578 | ||
| 3523 | out_unlock: | 3579 | out_unlock: |
| 3524 | mutex_unlock(&trace_probe_ops.regex_lock); | 3580 | mutex_unlock(&trace_probe_ops.func_hash->regex_lock); |
| 3525 | free_ftrace_hash(hash); | 3581 | free_ftrace_hash(hash); |
| 3526 | } | 3582 | } |
| 3527 | 3583 | ||
| @@ -3717,12 +3773,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
| 3717 | if (unlikely(ftrace_disabled)) | 3773 | if (unlikely(ftrace_disabled)) |
| 3718 | return -ENODEV; | 3774 | return -ENODEV; |
| 3719 | 3775 | ||
| 3720 | mutex_lock(&ops->regex_lock); | 3776 | mutex_lock(&ops->func_hash->regex_lock); |
| 3721 | 3777 | ||
| 3722 | if (enable) | 3778 | if (enable) |
| 3723 | orig_hash = &ops->filter_hash; | 3779 | orig_hash = &ops->func_hash->filter_hash; |
| 3724 | else | 3780 | else |
| 3725 | orig_hash = &ops->notrace_hash; | 3781 | orig_hash = &ops->func_hash->notrace_hash; |
| 3726 | 3782 | ||
| 3727 | if (reset) | 3783 | if (reset) |
| 3728 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); | 3784 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
| @@ -3752,7 +3808,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
| 3752 | mutex_unlock(&ftrace_lock); | 3808 | mutex_unlock(&ftrace_lock); |
| 3753 | 3809 | ||
| 3754 | out_regex_unlock: | 3810 | out_regex_unlock: |
| 3755 | mutex_unlock(&ops->regex_lock); | 3811 | mutex_unlock(&ops->func_hash->regex_lock); |
| 3756 | 3812 | ||
| 3757 | free_ftrace_hash(hash); | 3813 | free_ftrace_hash(hash); |
| 3758 | return ret; | 3814 | return ret; |
| @@ -3975,15 +4031,15 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
| 3975 | 4031 | ||
| 3976 | trace_parser_put(parser); | 4032 | trace_parser_put(parser); |
| 3977 | 4033 | ||
| 3978 | mutex_lock(&iter->ops->regex_lock); | 4034 | mutex_lock(&iter->ops->func_hash->regex_lock); |
| 3979 | 4035 | ||
| 3980 | if (file->f_mode & FMODE_WRITE) { | 4036 | if (file->f_mode & FMODE_WRITE) { |
| 3981 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); | 4037 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); |
| 3982 | 4038 | ||
| 3983 | if (filter_hash) | 4039 | if (filter_hash) |
| 3984 | orig_hash = &iter->ops->filter_hash; | 4040 | orig_hash = &iter->ops->func_hash->filter_hash; |
| 3985 | else | 4041 | else |
| 3986 | orig_hash = &iter->ops->notrace_hash; | 4042 | orig_hash = &iter->ops->func_hash->notrace_hash; |
| 3987 | 4043 | ||
| 3988 | mutex_lock(&ftrace_lock); | 4044 | mutex_lock(&ftrace_lock); |
| 3989 | ret = ftrace_hash_move(iter->ops, filter_hash, | 4045 | ret = ftrace_hash_move(iter->ops, filter_hash, |
| @@ -3994,7 +4050,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
| 3994 | mutex_unlock(&ftrace_lock); | 4050 | mutex_unlock(&ftrace_lock); |
| 3995 | } | 4051 | } |
| 3996 | 4052 | ||
| 3997 | mutex_unlock(&iter->ops->regex_lock); | 4053 | mutex_unlock(&iter->ops->func_hash->regex_lock); |
| 3998 | free_ftrace_hash(iter->hash); | 4054 | free_ftrace_hash(iter->hash); |
| 3999 | kfree(iter); | 4055 | kfree(iter); |
| 4000 | 4056 | ||
| @@ -4611,7 +4667,6 @@ void __init ftrace_init(void) | |||
| 4611 | static struct ftrace_ops global_ops = { | 4667 | static struct ftrace_ops global_ops = { |
| 4612 | .func = ftrace_stub, | 4668 | .func = ftrace_stub, |
| 4613 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 4669 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, |
| 4614 | INIT_REGEX_LOCK(global_ops) | ||
| 4615 | }; | 4670 | }; |
| 4616 | 4671 | ||
| 4617 | static int __init ftrace_nodyn_init(void) | 4672 | static int __init ftrace_nodyn_init(void) |
| @@ -4713,7 +4768,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, | |||
| 4713 | static struct ftrace_ops control_ops = { | 4768 | static struct ftrace_ops control_ops = { |
| 4714 | .func = ftrace_ops_control_func, | 4769 | .func = ftrace_ops_control_func, |
| 4715 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 4770 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, |
| 4716 | INIT_REGEX_LOCK(control_ops) | 4771 | INIT_OPS_HASH(control_ops) |
| 4717 | }; | 4772 | }; |
| 4718 | 4773 | ||
| 4719 | static inline void | 4774 | static inline void |
| @@ -5145,6 +5200,17 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
| 5145 | 5200 | ||
| 5146 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 5201 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 5147 | 5202 | ||
| 5203 | static struct ftrace_ops graph_ops = { | ||
| 5204 | .func = ftrace_stub, | ||
| 5205 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | | ||
| 5206 | FTRACE_OPS_FL_INITIALIZED | | ||
| 5207 | FTRACE_OPS_FL_STUB, | ||
| 5208 | #ifdef FTRACE_GRAPH_TRAMP_ADDR | ||
| 5209 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, | ||
| 5210 | #endif | ||
| 5211 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | ||
| 5212 | }; | ||
| 5213 | |||
| 5148 | static int ftrace_graph_active; | 5214 | static int ftrace_graph_active; |
| 5149 | 5215 | ||
| 5150 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | 5216 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
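The new graph_ops above gets its own flags and trampoline but, via ASSIGN_OPS_HASH(), points at global_ops' local hash rather than owning one. Loosely sketched below with made-up demo types (nothing here is the kernel's ftrace_ops_hash), the layout is simply two ops referencing one shared filter/lock bundle:

```c
/* Hypothetical layout only; not the kernel's ftrace_ops_hash. */
struct demo_hash_bundle {
	int regex_lock;		/* placeholder for the mutex */
	/* filter_hash / notrace_hash would live here */
};

struct demo_ops {
	const char *name;
	struct demo_hash_bundle *func_hash;	/* may point at a shared bundle */
};

static struct demo_hash_bundle shared_hash;

/*
 * Both ops reference the same bundle, so a filter added through one is
 * immediately visible to the other -- the effect ASSIGN_OPS_HASH() gives
 * graph_ops by pointing it at global_ops' local hash.
 */
static struct demo_ops demo_global_ops = { .name = "global", .func_hash = &shared_hash };
static struct demo_ops demo_graph_ops  = { .name = "graph",  .func_hash = &shared_hash };
```

That shared hash is why the later hunks can register graph_ops directly instead of temporarily flagging global_ops as a stub and patching its trampoline in and out.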
| @@ -5307,12 +5373,28 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) | |||
| 5307 | */ | 5373 | */ |
| 5308 | static void update_function_graph_func(void) | 5374 | static void update_function_graph_func(void) |
| 5309 | { | 5375 | { |
| 5310 | if (ftrace_ops_list == &ftrace_list_end || | 5376 | struct ftrace_ops *op; |
| 5311 | (ftrace_ops_list == &global_ops && | 5377 | bool do_test = false; |
| 5312 | global_ops.next == &ftrace_list_end)) | 5378 | |
| 5313 | ftrace_graph_entry = __ftrace_graph_entry; | 5379 | /* |
| 5314 | else | 5380 | * The graph and global ops share the same set of functions |
| 5381 | * to test. If any other ops is on the list, then | ||
| 5382 | * the graph tracing needs to test if its the function | ||
| 5383 | * it should call. | ||
| 5384 | */ | ||
| 5385 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
| 5386 | if (op != &global_ops && op != &graph_ops && | ||
| 5387 | op != &ftrace_list_end) { | ||
| 5388 | do_test = true; | ||
| 5389 | /* in double loop, break out with goto */ | ||
| 5390 | goto out; | ||
| 5391 | } | ||
| 5392 | } while_for_each_ftrace_op(op); | ||
| 5393 | out: | ||
| 5394 | if (do_test) | ||
| 5315 | ftrace_graph_entry = ftrace_graph_entry_test; | 5395 | ftrace_graph_entry = ftrace_graph_entry_test; |
| 5396 | else | ||
| 5397 | ftrace_graph_entry = __ftrace_graph_entry; | ||
| 5316 | } | 5398 | } |
| 5317 | 5399 | ||
| 5318 | static struct notifier_block ftrace_suspend_notifier = { | 5400 | static struct notifier_block ftrace_suspend_notifier = { |
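update_function_graph_func() now walks the ops list instead of pattern-matching on its head: the graph entry callback only needs the per-function test when some ops other than the global/graph pair is registered. A simplified stand-alone version of that scan, with invented list handling and names for illustration:

```c
#include <stdbool.h>

struct demo_list_ops {
	struct demo_list_ops *next;
};

/*
 * Mirrors the intent of the loop above: any registered ops that is neither
 * the global nor the graph ops forces per-function testing.
 */
static bool needs_per_function_test(struct demo_list_ops *list_head,
				    struct demo_list_ops *global,
				    struct demo_list_ops *graph,
				    struct demo_list_ops *list_end)
{
	for (struct demo_list_ops *op = list_head; op != list_end; op = op->next)
		if (op != global && op != graph)
			return true;
	return false;
}
```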
| @@ -5353,16 +5435,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
| 5353 | ftrace_graph_entry = ftrace_graph_entry_test; | 5435 | ftrace_graph_entry = ftrace_graph_entry_test; |
| 5354 | update_function_graph_func(); | 5436 | update_function_graph_func(); |
| 5355 | 5437 | ||
| 5356 | /* Function graph doesn't use the .func field of global_ops */ | 5438 | ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); |
| 5357 | global_ops.flags |= FTRACE_OPS_FL_STUB; | ||
| 5358 | |||
| 5359 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 5360 | /* Optimize function graph calling (if implemented by arch) */ | ||
| 5361 | if (FTRACE_GRAPH_TRAMP_ADDR != 0) | ||
| 5362 | global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR; | ||
| 5363 | #endif | ||
| 5364 | |||
| 5365 | ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); | ||
| 5366 | 5439 | ||
| 5367 | out: | 5440 | out: |
| 5368 | mutex_unlock(&ftrace_lock); | 5441 | mutex_unlock(&ftrace_lock); |
| @@ -5380,12 +5453,7 @@ void unregister_ftrace_graph(void) | |||
| 5380 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 5453 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
| 5381 | ftrace_graph_entry = ftrace_graph_entry_stub; | 5454 | ftrace_graph_entry = ftrace_graph_entry_stub; |
| 5382 | __ftrace_graph_entry = ftrace_graph_entry_stub; | 5455 | __ftrace_graph_entry = ftrace_graph_entry_stub; |
| 5383 | ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); | 5456 | ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET); |
| 5384 | global_ops.flags &= ~FTRACE_OPS_FL_STUB; | ||
| 5385 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 5386 | if (FTRACE_GRAPH_TRAMP_ADDR != 0) | ||
| 5387 | global_ops.trampoline = 0; | ||
| 5388 | #endif | ||
| 5389 | unregister_pm_notifier(&ftrace_suspend_notifier); | 5457 | unregister_pm_notifier(&ftrace_suspend_notifier); |
| 5390 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | 5458 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
| 5391 | 5459 | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index afb04b9b818a..b38fb2b9e237 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, | |||
| 626 | work = &cpu_buffer->irq_work; | 626 | work = &cpu_buffer->irq_work; |
| 627 | } | 627 | } |
| 628 | 628 | ||
| 629 | work->waiters_pending = true; | ||
| 630 | poll_wait(filp, &work->waiters, poll_table); | 629 | poll_wait(filp, &work->waiters, poll_table); |
| 630 | work->waiters_pending = true; | ||
| 631 | /* | ||
| 632 | * There's a tight race between setting the waiters_pending and | ||
| 633 | * checking if the ring buffer is empty. Once the waiters_pending bit | ||
| 634 | * is set, the next event will wake the task up, but we can get stuck | ||
| 635 | * if there's only a single event in. | ||
| 636 | * | ||
| 637 | * FIXME: Ideally, we need a memory barrier on the writer side as well, | ||
| 638 | * but adding a memory barrier to all events will cause too much of a | ||
| 639 | * performance hit in the fast path. We only need a memory barrier when | ||
| 640 | * the buffer goes from empty to having content. But as this race is | ||
| 641 | * extremely small, and it's not a problem if another event comes in, we | ||
| 642 | * will fix it later. | ||
| 643 | */ | ||
| 644 | smp_mb(); | ||
| 631 | 645 | ||
| 632 | if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || | 646 | if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || |
| 633 | (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) | 647 | (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) |
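The ring-buffer hunk reorders poll_wait() ahead of setting waiters_pending and inserts a full barrier before the emptiness check, closing the window where a waiter could miss the only queued event. The ordering requirement is easier to see outside the kernel; below is a minimal C11-atomics analogy (names such as waiters_pending and nr_events are stand-ins, and this is not the kernel's implementation):

```c
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool waiters_pending;
static atomic_int  nr_events;

/* Waiter side: publish the flag, then re-check for data. The fence keeps
 * the store from being reordered after the check (the smp_mb() analogue). */
static bool buffer_has_data_after_arming(void)
{
	atomic_store_explicit(&waiters_pending, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&nr_events, memory_order_relaxed) > 0;
}

/* Writer side: after inserting an event, decide whether to wake waiters.
 * The matching writer-side barrier is deliberately omitted here, as the
 * patch's FIXME notes it is in the kernel, to keep the fast path cheap. */
static bool writer_should_wake(void)
{
	atomic_fetch_add_explicit(&nr_events, 1, memory_order_relaxed);
	return atomic_load_explicit(&waiters_pending, memory_order_relaxed);
}
```

As the comment in the patch explains, leaving out the writer-side barrier is tolerable because the race only matters on the empty-to-non-empty transition and another incoming event resolves it.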
