author | Ingo Molnar <mingo@kernel.org> | 2013-07-22 04:26:10 -0400
committer | Ingo Molnar <mingo@kernel.org> | 2013-07-22 04:26:10 -0400
commit | b24d6f49122d9da8202d751ac7e66fe8136bb434 (patch)
tree | b54cdf17c2a6d7a718733aa2e72fe26eb14927f7 /kernel
parent | cedce3e730833d26a37826a96e1905b6ef387df9 (diff)
parent | 3b2f64d00c46e1e4e9bd0bb9bb12619adac27a4b (diff)
Merge tag 'v3.11-rc2' into sched/core
Merge in Linux 3.11-rc2, to provide a post-merge-window development base.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cgroup.c | 11
-rw-r--r-- | kernel/cpu.c | 6
-rw-r--r-- | kernel/events/core.c | 34
-rw-r--r-- | kernel/fork.c | 2
-rw-r--r-- | kernel/hrtimer.c | 6
-rw-r--r-- | kernel/irq/generic-chip.c | 2
-rw-r--r-- | kernel/irq/irqdomain.c | 12
-rw-r--r-- | kernel/mutex.c | 1
-rw-r--r-- | kernel/power/autosleep.c | 3
-rw-r--r-- | kernel/printk.c | 2
-rw-r--r-- | kernel/profile.c | 2
-rw-r--r-- | kernel/rcutorture.c | 6
-rw-r--r-- | kernel/rcutree.c | 6
-rw-r--r-- | kernel/rcutree.h | 4
-rw-r--r-- | kernel/rcutree_plugin.h | 6
-rw-r--r-- | kernel/relay.c | 2
-rw-r--r-- | kernel/sched/core.c | 12
-rw-r--r-- | kernel/sched/fair.c | 2
-rw-r--r-- | kernel/smp.c | 2
-rw-r--r-- | kernel/smpboot.c | 2
-rw-r--r-- | kernel/softirq.c | 8
-rw-r--r-- | kernel/sysctl.c | 4
-rw-r--r-- | kernel/time/tick-broadcast.c | 5
-rw-r--r-- | kernel/time/tick-sched.c | 17
-rw-r--r-- | kernel/timer.c | 10
-rw-r--r-- | kernel/watchdog.c | 113
-rw-r--r-- | kernel/workqueue.c | 4
27 files changed, 149 insertions(+), 135 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e5583d10a325..0e0b20b8c5db 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -802,7 +802,6 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
  */
 
 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
-static struct dentry *cgroup_lookup(struct inode *, struct dentry *, unsigned int);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
 			       unsigned long subsys_mask);
@@ -2642,7 +2641,7 @@ static const struct inode_operations cgroup_file_inode_operations = {
 };
 
 static const struct inode_operations cgroup_dir_inode_operations = {
-	.lookup = cgroup_lookup,
+	.lookup = simple_lookup,
 	.mkdir = cgroup_mkdir,
 	.rmdir = cgroup_rmdir,
 	.rename = cgroup_rename,
@@ -2652,14 +2651,6 @@ static const struct inode_operations cgroup_dir_inode_operations = {
 	.removexattr = cgroup_removexattr,
 };
 
-static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
-{
-	if (dentry->d_name.len > NAME_MAX)
-		return ERR_PTR(-ENAMETOOLONG);
-	d_add(dentry, NULL);
-	return NULL;
-}
-
 /*
  * Check if a file is a control file
  */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 198a38883e64..b2b227b82123 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -366,7 +366,7 @@ EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /* Requires cpu_add_remove_lock to be held */
-static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
+static int _cpu_up(unsigned int cpu, int tasks_frozen)
 {
 	int ret, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
@@ -419,7 +419,7 @@ out:
 	return ret;
 }
 
-int __cpuinit cpu_up(unsigned int cpu)
+int cpu_up(unsigned int cpu)
 {
 	int err = 0;
 
@@ -618,7 +618,7 @@ core_initcall(cpu_hotplug_pm_sync_init);
  * It must be called by the arch code on the new cpu, before the new cpu
  * enables interrupts and before the "boot" cpu returns from __cpu_up().
  */
-void __cpuinit notify_cpu_starting(unsigned int cpu)
+void notify_cpu_starting(unsigned int cpu)
 {
 	unsigned long val = CPU_STARTING;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1833bc5a84a7..f86599e8c123 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -947,8 +947,18 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
 {
 	struct perf_event_context *ctx;
 
-	rcu_read_lock();
 retry:
+	/*
+	 * One of the few rules of preemptible RCU is that one cannot do
+	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
+	 * part of the read side critical section was preemptible -- see
+	 * rcu_read_unlock_special().
+	 *
+	 * Since ctx->lock nests under rq->lock we must ensure the entire read
+	 * side critical section is non-preemptible.
+	 */
+	preempt_disable();
+	rcu_read_lock();
 	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
 	if (ctx) {
 		/*
@@ -964,6 +974,8 @@ retry:
 		raw_spin_lock_irqsave(&ctx->lock, *flags);
 		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+			rcu_read_unlock();
+			preempt_enable();
 			goto retry;
 		}
 
@@ -973,6 +985,7 @@ retry:
 		}
 	}
 	rcu_read_unlock();
+	preempt_enable();
 	return ctx;
 }
 
@@ -1950,7 +1963,16 @@ static int __perf_event_enable(void *info)
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 	int err;
 
-	if (WARN_ON_ONCE(!ctx->is_active))
+	/*
+	 * There's a time window between 'ctx->is_active' check
+	 * in perf_event_enable function and this place having:
+	 *  - IRQs on
+	 *  - ctx->lock unlocked
+	 *
+	 * where the task could be killed and 'ctx' deactivated
+	 * by perf_event_exit_task.
+	 */
+	if (!ctx->is_active)
 		return -EINVAL;
 
 	raw_spin_lock(&ctx->lock);
@@ -6212,8 +6234,6 @@ perf_event_mux_interval_ms_store(struct device *dev,
 	return count;
 }
 
-#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
-
 static struct device_attribute pmu_dev_attrs[] = {
 	__ATTR_RO(type),
 	__ATTR_RW(perf_event_mux_interval_ms),
@@ -7465,7 +7485,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 	 * child.
 	 */
 
-	child_ctx = alloc_perf_context(event->pmu, child);
+	child_ctx = alloc_perf_context(parent_ctx->pmu, child);
 	if (!child_ctx)
 		return -ENOMEM;
 
@@ -7608,7 +7628,7 @@ static void __init perf_event_init_all_cpus(void)
 	}
 }
 
-static void __cpuinit perf_event_init_cpu(int cpu)
+static void perf_event_init_cpu(int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
@@ -7697,7 +7717,7 @@ static struct notifier_block perf_reboot_notifier = {
 	.priority = INT_MIN,
 };
 
-static int __cpuinit
+static int
 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
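Note: the perf_lock_task_context() hunk above pairs preempt_disable() with rcu_read_lock() and re-checks the RCU-protected pointer after taking ctx->lock. A stripped-down sketch of that lookup/lock/revalidate pattern follows; the struct, the global pointer and the function name are illustrative, only the RCU, preempt and raw-spinlock primitives are real kernel API.

```c
/*
 * Sketch only: lookup an RCU-protected object, lock it, and revalidate
 * that it is still the live one. The whole read side stays
 * non-preemptible so rcu_read_unlock() never runs after a preempted
 * read-side section while a lock nesting under rq->lock is held.
 */
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct obj {					/* illustrative */
	raw_spinlock_t lock;
};

static struct obj __rcu *current_obj;		/* illustrative */

static struct obj *obj_lock_live(unsigned long *flags)
{
	struct obj *o;

retry:
	preempt_disable();
	rcu_read_lock();
	o = rcu_dereference(current_obj);
	if (o) {
		raw_spin_lock_irqsave(&o->lock, *flags);
		if (o != rcu_dereference(current_obj)) {
			/* Object was replaced under us: drop everything and retry. */
			raw_spin_unlock_irqrestore(&o->lock, *flags);
			rcu_read_unlock();
			preempt_enable();
			goto retry;
		}
	}
	/* On success the held lock pins the object; RCU is no longer needed. */
	rcu_read_unlock();
	preempt_enable();
	return o;
}
```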
diff --git a/kernel/fork.c b/kernel/fork.c
index 66635c80a813..403d2bb8a968 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1546,7 +1546,7 @@ static inline void init_idle_pids(struct pid_link *links)
 	}
 }
 
-struct task_struct * __cpuinit fork_idle(int cpu)
+struct task_struct *fork_idle(int cpu)
 {
 	struct task_struct *task;
 	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f0f4fe29cd21..383319bae3f7 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1659,7 +1659,7 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
 /*
  * Functions related to boot-time initialization:
  */
-static void __cpuinit init_hrtimers_cpu(int cpu)
+static void init_hrtimers_cpu(int cpu)
 {
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
@@ -1740,7 +1740,7 @@ static void migrate_hrtimers(int scpu)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
+static int hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
 	int scpu = (long)hcpu;
@@ -1773,7 +1773,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata hrtimers_nb = {
+static struct notifier_block hrtimers_nb = {
 	.notifier_call = hrtimer_cpu_notify,
 };
 
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 10e663ab1f4a..452d6f2ba21d 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -275,7 +275,7 @@ int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
 	if (d->gc)
 		return -EBUSY;
 
-	numchips = d->revmap_size / irqs_per_chip;
+	numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
 	if (!numchips)
 		return -EINVAL;
 
@@ -281
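Note: the change above switches the chip count from truncating division to DIV_ROUND_UP so a partial last chip is still allocated when revmap_size is not a multiple of irqs_per_chip. A tiny stand-alone illustration with made-up numbers; the macro body matches the kernel's definition.

```c
#include <stdio.h>

/* Same definition as the kernel's DIV_ROUND_UP */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int revmap_size = 20, irqs_per_chip = 16;	/* illustrative values */

	/* Truncating division loses the 4 leftover interrupts: prints 1. */
	printf("truncating: %u chips\n", revmap_size / irqs_per_chip);
	/* Rounding up allocates a chip for the remainder: prints 2. */
	printf("rounded up: %u chips\n", DIV_ROUND_UP(revmap_size, irqs_per_chip));
	return 0;
}
```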
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 2d7cd3428365..706724e9835d 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -475,18 +475,6 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 
 	domain = controller ? irq_find_host(controller) : irq_default_domain;
 	if (!domain) {
-#ifdef CONFIG_MIPS
-		/*
-		 * Workaround to avoid breaking interrupt controller drivers
-		 * that don't yet register an irq_domain. This is temporary
-		 * code. ~~~gcl, Feb 24, 2012
-		 *
-		 * Scheduled for removal in Linux v3.6. That should be enough
-		 * time.
-		 */
-		if (intsize > 0)
-			return intspec[0];
-#endif
 		pr_warn("no irq domain found for %s !\n",
 			of_node_full_name(controller));
 		return 0;
diff --git a/kernel/mutex.c b/kernel/mutex.c
index e581ada5faf4..ff05f4bd86eb 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -18,6 +18,7 @@
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
+#include <linux/ww_mutex.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
 #include <linux/export.h>
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
index c6422ffeda9a..9012ecf7b814 100644
--- a/kernel/power/autosleep.c
+++ b/kernel/power/autosleep.c
@@ -32,7 +32,8 @@ static void try_to_suspend(struct work_struct *work)
 
 	mutex_lock(&autosleep_lock);
 
-	if (!pm_save_wakeup_count(initial_count)) {
+	if (!pm_save_wakeup_count(initial_count) ||
+	    system_state != SYSTEM_RUNNING) {
 		mutex_unlock(&autosleep_lock);
 		goto out;
 	}
diff --git a/kernel/printk.c b/kernel/printk.c
index d37d45c90ae6..69b0890ed7e5 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1921,7 +1921,7 @@ void resume_console(void)
  * called when a new CPU comes online (or fails to come up), and ensures
  * that any such output gets printed.
  */
-static int __cpuinit console_cpu_notify(struct notifier_block *self,
+static int console_cpu_notify(struct notifier_block *self,
 	unsigned long action, void *hcpu)
 {
 	switch (action) {
diff --git a/kernel/profile.c b/kernel/profile.c
index 0bf400737660..6631e1ef55ab 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -331,7 +331,7 @@ out:
 	put_cpu();
 }
 
-static int __cpuinit profile_cpu_callback(struct notifier_block *info,
+static int profile_cpu_callback(struct notifier_block *info,
 					unsigned long action, void *__cpu)
 {
 	int node, cpu = (unsigned long)__cpu;
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index b1fa5510388d..f4871e52c546 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -1476,7 +1476,7 @@ rcu_torture_shutdown(void *arg)
  * Execute random CPU-hotplug operations at the interval specified
  * by the onoff_interval.
  */
-static int __cpuinit
+static int
 rcu_torture_onoff(void *arg)
 {
 	int cpu;
@@ -1558,7 +1558,7 @@ rcu_torture_onoff(void *arg)
 	return 0;
 }
 
-static int __cpuinit
+static int
 rcu_torture_onoff_init(void)
 {
 	int ret;
@@ -1601,7 +1601,7 @@ static void rcu_torture_onoff_cleanup(void)
  * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
  * induces a CPU stall for the time specified by stall_cpu.
  */
-static int __cpuinit rcu_torture_stall(void *args)
+static int rcu_torture_stall(void *args)
 {
 	unsigned long stop_at;
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e08abb9461ac..068de3a93606 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2910,7 +2910,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  * can accept some slop in the rsp->completed access due to the fact
  * that this CPU cannot possibly have any RCU callbacks in flight yet.
  */
-static void __cpuinit
+static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 {
 	unsigned long flags;
@@ -2962,7 +2962,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	mutex_unlock(&rsp->onoff_mutex);
 }
 
-static void __cpuinit rcu_prepare_cpu(int cpu)
+static void rcu_prepare_cpu(int cpu)
 {
 	struct rcu_state *rsp;
 
@@ -2974,7 +2974,7 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
 /*
  * Handle CPU online/offline notification events.
  */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 4a39d364493c..b3832581043c 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -521,10 +521,10 @@ static void invoke_rcu_callbacks_kthread(void);
 static bool rcu_is_callbacks_kthread(void);
 #ifdef CONFIG_RCU_BOOST
 static void rcu_preempt_do_callbacks(void);
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
-static void __cpuinit rcu_prepare_kthreads(int cpu);
+static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
 static void rcu_prepare_for_idle(int cpu);
 static void rcu_idle_count_callbacks_posted(void);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 63098a59216e..769e12e3151b 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1352,7 +1352,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * already exist. We only create this kthread for preemptible RCU.
  * Returns zero if all is well, a negated errno otherwise.
  */
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp)
 {
 	int rnp_index = rnp - &rsp->node[0];
@@ -1507,7 +1507,7 @@ static int __init rcu_spawn_kthreads(void)
 }
 early_initcall(rcu_spawn_kthreads);
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;
@@ -1549,7 +1549,7 @@ static int __init rcu_scheduler_really_started(void)
 }
 early_initcall(rcu_scheduler_really_started);
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
 {
 }
 
diff --git a/kernel/relay.c b/kernel/relay.c
index b91488ba2e5a..5001c9887db1 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -516,7 +516,7 @@ static void setup_callbacks(struct rchan *chan,
  *
  * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
  */
-static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
+static int relay_hotcpu_callback(struct notifier_block *nb,
 				unsigned long action,
 				void *hcpu)
 {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f73787159188..b7415cfdd7de 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4133,7 +4133,7 @@ void show_state_filter(unsigned long state_filter)
 		debug_show_all_locks();
 }
 
-void __cpuinit init_idle_bootup_task(struct task_struct *idle)
+void init_idle_bootup_task(struct task_struct *idle)
 {
 	idle->sched_class = &idle_sched_class;
 }
@@ -4146,7 +4146,7 @@ void __cpuinit init_idle_bootup_task(struct task_struct *idle)
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
 */
-void __cpuinit init_idle(struct task_struct *idle, int cpu)
+void init_idle(struct task_struct *idle, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
@@ -4630,7 +4630,7 @@ static void set_rq_offline(struct rq *rq)
  * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
-static int __cpuinit
+static int
 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	int cpu = (long)hcpu;
@@ -4684,12 +4684,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 * happens before everything else. This has to be lower priority than
 * the notifier in the perf_event subsystem, though.
 */
-static struct notifier_block __cpuinitdata migration_notifier = {
+static struct notifier_block migration_notifier = {
 	.notifier_call = migration_call,
 	.priority = CPU_PRI_MIGRATION,
 };
 
-static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+static int sched_cpu_active(struct notifier_block *nfb,
 				      unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
@@ -4702,7 +4702,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
 	}
 }
 
-static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+static int sched_cpu_inactive(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f77f9c527449..bb456f44b7b1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5506,7 +5506,7 @@ void nohz_balance_enter_idle(int cpu)
 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
 
-static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
+static int sched_ilb_notifier(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/kernel/smp.c b/kernel/smp.c
index 4dba0f7b72ad..fe9f773d7114 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
+static struct notifier_block hotplug_cfd_notifier = {
 	.notifier_call = hotplug_cfd,
 };
 
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 02fc5c933673..eb89e1807408 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -24,7 +24,7 @@
  */
 static DEFINE_PER_CPU(struct task_struct *, idle_threads);
 
-struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
+struct task_struct *idle_thread_get(unsigned int cpu)
 {
 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ca25e6e704a2..be3d3514c325 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -699,7 +699,7 @@ void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
 }
 EXPORT_SYMBOL(send_remote_softirq);
 
-static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+static int remote_softirq_cpu_notify(struct notifier_block *self,
 					       unsigned long action, void *hcpu)
 {
 	/*
@@ -728,7 +728,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+static struct notifier_block remote_softirq_cpu_notifier = {
 	.notifier_call = remote_softirq_cpu_notify,
 };
 
@@ -830,7 +830,7 @@ static void takeover_tasklets(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __cpuinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action,
 				  void *hcpu)
 {
@@ -845,7 +845,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cpu_nfb = {
+static struct notifier_block cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e5b31aff67aa..ac09d98490aa 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -807,7 +807,7 @@ static struct ctl_table kern_table[] = {
 #if defined(CONFIG_LOCKUP_DETECTOR)
 	{
 		.procname = "watchdog",
-		.data = &watchdog_enabled,
+		.data = &watchdog_user_enabled,
 		.maxlen = sizeof (int),
 		.mode = 0644,
 		.proc_handler = proc_dowatchdog,
@@ -834,7 +834,7 @@ static struct ctl_table kern_table[] = {
 	},
 	{
 		.procname = "nmi_watchdog",
-		.data = &watchdog_enabled,
+		.data = &watchdog_user_enabled,
 		.maxlen = sizeof (int),
 		.mode = 0644,
 		.proc_handler = proc_dowatchdog,
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 6d3f91631de6..218bcb565fed 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -157,7 +157,10 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 		dev->event_handler = tick_handle_periodic;
 		tick_device_setup_broadcast_func(dev);
 		cpumask_set_cpu(cpu, tick_broadcast_mask);
-		tick_broadcast_start_periodic(bc);
+		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+			tick_broadcast_start_periodic(bc);
+		else
+			tick_broadcast_setup_oneshot(bc);
 		ret = 1;
 	} else {
 		/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 0cf1c1453181..e80183f4a6c4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -178,6 +178,11 @@ static bool can_stop_full_tick(void)
 	 */
 	if (!sched_clock_stable) {
 		trace_tick_stop(0, "unstable sched clock\n");
+		/*
+		 * Don't allow the user to think they can get
+		 * full NO_HZ with this machine.
+		 */
+		WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
 		return false;
 	}
 #endif
@@ -293,7 +298,7 @@ static int __init tick_nohz_full_setup(char *str)
 }
 __setup("nohz_full=", tick_nohz_full_setup);
 
-static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
+static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
 						 unsigned long action,
 						 void *hcpu)
 {
@@ -346,16 +351,6 @@ void __init tick_nohz_init(void)
 	}
 
 	cpu_notifier(tick_nohz_cpu_down_callback, 0);
-
-	/* Make sure full dynticks CPU are also RCU nocbs */
-	for_each_cpu(cpu, nohz_full_mask) {
-		if (!rcu_is_nocb_cpu(cpu)) {
-			pr_warning("NO_HZ: CPU %d is not RCU nocb: "
-				   "cleared from nohz_full range", cpu);
-			cpumask_clear_cpu(cpu, nohz_full_mask);
-		}
-	}
-
 	cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
 	pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
 }
diff --git a/kernel/timer.c b/kernel/timer.c
index 15bc1b41021d..4296d13db3d1 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1505,11 +1505,11 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 }
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
-static int __cpuinit init_timers_cpu(int cpu)
+static int init_timers_cpu(int cpu)
 {
 	int j;
 	struct tvec_base *base;
-	static char __cpuinitdata tvec_base_done[NR_CPUS];
+	static char tvec_base_done[NR_CPUS];
 
 	if (!tvec_base_done[cpu]) {
 		static char boot_done;
@@ -1577,7 +1577,7 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea
 	}
 }
 
-static void __cpuinit migrate_timers(int cpu)
+static void migrate_timers(int cpu)
 {
 	struct tvec_base *old_base;
 	struct tvec_base *new_base;
@@ -1610,7 +1610,7 @@ static void __cpuinit migrate_timers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __cpuinit timer_cpu_notify(struct notifier_block *self,
+static int timer_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata timers_nb = {
+static struct notifier_block timers_nb = {
 	.notifier_call = timer_cpu_notify,
 };
 
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 05039e348f07..1241d8c91d5e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -29,9 +29,9 @@
 #include <linux/kvm_para.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled = 1;
+int watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
-static int __read_mostly watchdog_disabled;
+static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -63,7 +63,7 @@ static int __init hardlockup_panic_setup(char *str)
 	else if (!strncmp(str, "nopanic", 7))
 		hardlockup_panic = 0;
 	else if (!strncmp(str, "0", 1))
-		watchdog_enabled = 0;
+		watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -82,7 +82,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-	watchdog_enabled = 0;
+	watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -90,7 +90,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-	watchdog_enabled = 0;
+	watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -158,7 +158,7 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-	if (watchdog_enabled) {
+	if (watchdog_user_enabled) {
 		unsigned cpu;
 
 		for_each_present_cpu(cpu) {
@@ -347,11 +347,6 @@ static void watchdog_enable(unsigned int cpu)
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
 
-	if (!watchdog_enabled) {
-		kthread_park(current);
-		return;
-	}
-
 	/* Enable the perf event */
 	watchdog_nmi_enable(cpu);
 
@@ -374,6 +369,11 @@ static void watchdog_disable(unsigned int cpu)
 	watchdog_nmi_disable(cpu);
 }
 
+static void watchdog_cleanup(unsigned int cpu, bool online)
+{
+	watchdog_disable(cpu);
+}
+
 static int watchdog_should_run(unsigned int cpu)
 {
 	return __this_cpu_read(hrtimer_interrupts) !=
@@ -475,28 +475,40 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
-/* prepare/enable/disable routines */
-/* sysctl functions */
-#ifdef CONFIG_SYSCTL
-static void watchdog_enable_all_cpus(void)
+static struct smp_hotplug_thread watchdog_threads = {
+	.store = &softlockup_watchdog,
+	.thread_should_run = watchdog_should_run,
+	.thread_fn = watchdog,
+	.thread_comm = "watchdog/%u",
+	.setup = watchdog_enable,
+	.cleanup = watchdog_cleanup,
+	.park = watchdog_disable,
+	.unpark = watchdog_enable,
+};
+
+static int watchdog_enable_all_cpus(void)
 {
-	unsigned int cpu;
+	int err = 0;
 
-	if (watchdog_disabled) {
-		watchdog_disabled = 0;
-		for_each_online_cpu(cpu)
-			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+	if (!watchdog_running) {
+		err = smpboot_register_percpu_thread(&watchdog_threads);
+		if (err)
+			pr_err("Failed to create watchdog threads, disabled\n");
+		else
+			watchdog_running = 1;
 	}
+
+	return err;
 }
 
+/* prepare/enable/disable routines */
+/* sysctl functions */
+#ifdef CONFIG_SYSCTL
 static void watchdog_disable_all_cpus(void)
 {
-	unsigned int cpu;
-
-	if (!watchdog_disabled) {
-		watchdog_disabled = 1;
-		for_each_online_cpu(cpu)
-			kthread_park(per_cpu(softlockup_watchdog, cpu));
+	if (watchdog_running) {
+		watchdog_running = 0;
+		smpboot_unregister_percpu_thread(&watchdog_threads);
 	}
 }
 
@@ -507,45 +519,48 @@ static void watchdog_disable_all_cpus(void)
 int proc_dowatchdog(struct ctl_table *table, int write,
 		    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int ret;
+	int err, old_thresh, old_enabled;
 
-	if (watchdog_disabled < 0)
-		return -ENODEV;
+	old_thresh = ACCESS_ONCE(watchdog_thresh);
+	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret || !write)
-		return ret;
+	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (err || !write)
+		return err;
 
 	set_sample_period();
 	/*
 	 * Watchdog threads shouldn't be enabled if they are
-	 * disabled. The 'watchdog_disabled' variable check in
+	 * disabled. The 'watchdog_running' variable check in
 	 * watchdog_*_all_cpus() function takes care of this.
 	 */
-	if (watchdog_enabled && watchdog_thresh)
-		watchdog_enable_all_cpus();
+	if (watchdog_user_enabled && watchdog_thresh)
+		err = watchdog_enable_all_cpus();
 	else
 		watchdog_disable_all_cpus();
 
-	return ret;
+	/* Restore old values on failure */
+	if (err) {
+		watchdog_thresh = old_thresh;
+		watchdog_user_enabled = old_enabled;
+	}
+
+	return err;
 }
 #endif /* CONFIG_SYSCTL */
 
-static struct smp_hotplug_thread watchdog_threads = {
-	.store = &softlockup_watchdog,
-	.thread_should_run = watchdog_should_run,
-	.thread_fn = watchdog,
-	.thread_comm = "watchdog/%u",
-	.setup = watchdog_enable,
-	.park = watchdog_disable,
-	.unpark = watchdog_enable,
-};
-
 void __init lockup_detector_init(void)
 {
 	set_sample_period();
-	if (smpboot_register_percpu_thread(&watchdog_threads)) {
-		pr_err("Failed to create watchdog threads, disabled\n");
-		watchdog_disabled = -ENODEV;
+
+#ifdef CONFIG_NO_HZ_FULL
+	if (watchdog_user_enabled) {
+		watchdog_user_enabled = 0;
+		pr_warning("Disabled lockup detectors by default for full dynticks\n");
+		pr_warning("You can reactivate it with 'sysctl -w kernel.watchdog=1'\n");
 	}
+#endif
+
+	if (watchdog_user_enabled)
+		watchdog_enable_all_cpus();
 }
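Note: the watchdog conversion above replaces open-coded per-CPU kthread creation and park/unpark with a registered struct smp_hotplug_thread. A minimal sketch of what a client of that interface looks like follows; the my_* names are illustrative, while the struct fields and smpboot_register_percpu_thread() are the real <linux/smpboot.h> interface used in the hunks above.

```c
/*
 * Sketch only: register one kthread per CPU; the smpboot core handles
 * creation, parking on hotplug, and unparking, and calls back into the
 * functions supplied here.
 */
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, my_thread);	/* illustrative */

/* Called repeatedly; thread_fn only runs when this returns nonzero. */
static int my_should_run(unsigned int cpu)
{
	return 0;	/* nothing to do in this sketch */
}

static void my_fn(unsigned int cpu)
{
	/* per-CPU work would go here */
}

static struct smp_hotplug_thread my_threads = {
	.store			= &my_thread,
	.thread_should_run	= my_should_run,
	.thread_fn		= my_fn,
	.thread_comm		= "mythread/%u",
};

static int __init my_init(void)
{
	/* Creates the thread on every online CPU and on CPUs hotplugged later. */
	return smpboot_register_percpu_thread(&my_threads);
}
```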
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f02c4a4a0c3c..0b72e816b8d0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4644,7 +4644,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 * Workqueues should be brought up before normal priority CPU notifiers.
 * This will be registered high priority CPU notifier.
 */
-static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 					       unsigned long action,
 					       void *hcpu)
 {
@@ -4697,7 +4697,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 * Workqueues should be brought down after normal priority CPU notifiers.
 * This will be registered as low priority CPU notifier.
 */
-static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+static int workqueue_cpu_down_callback(struct notifier_block *nfb,
 					       unsigned long action,
 					       void *hcpu)
 {