diff options
author | Peter Zijlstra <peterz@infradead.org> | 2017-09-20 13:00:21 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2017-09-25 16:11:44 -0400 |
commit | 1db49484f21ed0fcdadd0635a3669f5f386546fa (patch) | |
tree | 533d9d2c340550a1b60bafcf77eff90fce1fe409 /kernel/cpu.c | |
parent | 5ebe7742fff8be5f1359bc50f5d43fb6ff7bd060 (diff) |
smp/hotplug: Hotplug state fail injection
Add a sysfs file to one-time fail a specific state. This can be used
to test the state rollback code paths.
Something like this (hotplug-up.sh):
#!/bin/bash
echo 0 > /debug/sched_debug
echo 1 > /debug/tracing/events/cpuhp/enable
ALL_STATES=`cat /sys/devices/system/cpu/hotplug/states | cut -d':' -f1`
STATES=${1:-$ALL_STATES}
for state in $STATES
do
echo 0 > /sys/devices/system/cpu/cpu1/online
echo 0 > /debug/tracing/trace
echo Fail state: $state
echo $state > /sys/devices/system/cpu/cpu1/hotplug/fail
cat /sys/devices/system/cpu/cpu1/hotplug/fail
echo 1 > /sys/devices/system/cpu/cpu1/online
cat /debug/tracing/trace > hotfail-${state}.trace
sleep 1
done
Can be used to test for all possible rollback (barring multi-instance)
scenarios on CPU-up, CPU-down is a trivial modification of the above.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: bigeasy@linutronix.de
Cc: efault@gmx.de
Cc: rostedt@goodmis.org
Cc: max.byungchul.park@gmail.com
Link: https://lkml.kernel.org/r/20170920170546.972581715@infradead.org
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r-- | kernel/cpu.c | 60 |
1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c index 6bbe261b851f..8de11a29e495 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -52,6 +52,7 @@ | |||
52 | struct cpuhp_cpu_state { | 52 | struct cpuhp_cpu_state { |
53 | enum cpuhp_state state; | 53 | enum cpuhp_state state; |
54 | enum cpuhp_state target; | 54 | enum cpuhp_state target; |
55 | enum cpuhp_state fail; | ||
55 | #ifdef CONFIG_SMP | 56 | #ifdef CONFIG_SMP |
56 | struct task_struct *thread; | 57 | struct task_struct *thread; |
57 | bool should_run; | 58 | bool should_run; |
@@ -67,7 +68,9 @@ struct cpuhp_cpu_state { | |||
67 | #endif | 68 | #endif |
68 | }; | 69 | }; |
69 | 70 | ||
70 | static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state); | 71 | static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { |
72 | .fail = CPUHP_INVALID, | ||
73 | }; | ||
71 | 74 | ||
72 | #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) | 75 | #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) |
73 | static struct lockdep_map cpuhp_state_up_map = | 76 | static struct lockdep_map cpuhp_state_up_map = |
@@ -160,6 +163,15 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, | |||
160 | int (*cb)(unsigned int cpu); | 163 | int (*cb)(unsigned int cpu); |
161 | int ret, cnt; | 164 | int ret, cnt; |
162 | 165 | ||
166 | if (st->fail == state) { | ||
167 | st->fail = CPUHP_INVALID; | ||
168 | |||
169 | if (!(bringup ? step->startup.single : step->teardown.single)) | ||
170 | return 0; | ||
171 | |||
172 | return -EAGAIN; | ||
173 | } | ||
174 | |||
163 | if (!step->multi_instance) { | 175 | if (!step->multi_instance) { |
164 | WARN_ON_ONCE(lastp && *lastp); | 176 | WARN_ON_ONCE(lastp && *lastp); |
165 | cb = bringup ? step->startup.single : step->teardown.single; | 177 | cb = bringup ? step->startup.single : step->teardown.single; |
@@ -1805,9 +1817,55 @@ static ssize_t show_cpuhp_target(struct device *dev, | |||
1805 | } | 1817 | } |
1806 | static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target); | 1818 | static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target); |
1807 | 1819 | ||
1820 | |||
1821 | static ssize_t write_cpuhp_fail(struct device *dev, | ||
1822 | struct device_attribute *attr, | ||
1823 | const char *buf, size_t count) | ||
1824 | { | ||
1825 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); | ||
1826 | struct cpuhp_step *sp; | ||
1827 | int fail, ret; | ||
1828 | |||
1829 | ret = kstrtoint(buf, 10, &fail); | ||
1830 | if (ret) | ||
1831 | return ret; | ||
1832 | |||
1833 | /* | ||
1834 | * Cannot fail STARTING/DYING callbacks. | ||
1835 | */ | ||
1836 | if (cpuhp_is_atomic_state(fail)) | ||
1837 | return -EINVAL; | ||
1838 | |||
1839 | /* | ||
1840 | * Cannot fail anything that doesn't have callbacks. | ||
1841 | */ | ||
1842 | mutex_lock(&cpuhp_state_mutex); | ||
1843 | sp = cpuhp_get_step(fail); | ||
1844 | if (!sp->startup.single && !sp->teardown.single) | ||
1845 | ret = -EINVAL; | ||
1846 | mutex_unlock(&cpuhp_state_mutex); | ||
1847 | if (ret) | ||
1848 | return ret; | ||
1849 | |||
1850 | st->fail = fail; | ||
1851 | |||
1852 | return count; | ||
1853 | } | ||
1854 | |||
/*
 * sysfs show handler for /sys/devices/system/cpu/cpuN/hotplug/fail.
 * Reports the currently armed one-shot fail state; CPUHP_INVALID when
 * no injection is armed (the default, and the value restored after an
 * injection fires).
 */
static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

/* 0644: readable by all, writable by root only. */
static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
1864 | |||
/*
 * Per-CPU hotplug sysfs attributes, exposed under
 * /sys/devices/system/cpu/cpuN/hotplug/{state,target,fail}.
 */
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL	/* sentinel required by the attribute-group API */
};
1813 | 1871 | ||