author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-01-28 18:29:21 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-02-23 12:01:05 -0500
commit		3808dc9fab05913060626d7f0edd0f195cb9dcab (patch)
tree		5db578f420dc657197fd34b3a9a2c20bf79e0de3 /kernel/torture.c
parent		f67a33561e6e5463b548219df98130da95f2e4a7 (diff)
rcutorture: Abstract torture_shuffle()
The torture_shuffle() function forces each CPU in turn to go idle
periodically in order to check for problems interacting with per-CPU
variables and with dyntick-idle mode. Because this sort of debugging
is not specific to RCU, this commit abstracts that functionality.
This in turn requires abstracting some additional infrastructure.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
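To make the abstracted interface concrete, here is a minimal sketch of how a torture-test module might drive it. Everything named my_* is hypothetical, as is the three-second interval; only torture_shuffle_init(), torture_shuffle_task_register(), and torture_shuffle_cleanup() come from this commit, and the header location is an assumption:

#include <linux/kthread.h>
#include <linux/torture.h>	/* assumed home of the new declarations */

static struct task_struct *my_reader_task;

static int my_reader_fn(void *arg);	/* hypothetical torture kthread */

static int my_torture_init(void)
{
	int ret;

	/* Rotate the forced-idle CPU every three seconds' worth of jiffies. */
	ret = torture_shuffle_init(3 * HZ);
	if (ret)
		return ret;

	/* Registered kthreads get herded off whichever CPU is forced idle. */
	my_reader_task = kthread_run(my_reader_fn, NULL, "my_reader");
	if (!IS_ERR(my_reader_task))
		torture_shuffle_task_register(my_reader_task);
	return 0;
}

static void my_torture_cleanup(void)
{
	/* Stops the shuffler kthread and unregisters all shuffled tasks. */
	torture_shuffle_cleanup();
}

Note that torture_shuffle_init() below registers the shuffler kthread with itself, so the shuffler, too, is kept off the forced-idle CPU.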
Diffstat (limited to 'kernel/torture.c')
-rw-r--r--	kernel/torture.c	151
1 file changed, 151 insertions, 0 deletions
diff --git a/kernel/torture.c b/kernel/torture.c
index f05042036ae8..26058f20ee83 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -76,6 +76,157 @@ torture_random(struct torture_random_state *trsp)
 EXPORT_SYMBOL_GPL(torture_random);
 
 /*
+ * Variables for shuffling.  The idea is to ensure that each CPU stays
+ * idle for an extended period to test interactions with dyntick idle,
+ * as well as interactions with any per-CPU variables.
+ */
+struct shuffle_task {
+	struct list_head st_l;
+	struct task_struct *st_t;
+};
+
+static long shuffle_interval;	/* In jiffies. */
+static struct task_struct *shuffler_task;
+static cpumask_var_t shuffle_tmp_mask;
+static int shuffle_idle_cpu;	/* Force all torture tasks off this CPU */
+static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
+static DEFINE_MUTEX(shuffle_task_mutex);
+
+/*
+ * Register a task to be shuffled.  If there is no memory, just splat
+ * and don't bother registering.
+ */
+void torture_shuffle_task_register(struct task_struct *tp)
+{
+	struct shuffle_task *stp;
+
+	if (WARN_ON_ONCE(tp == NULL))
+		return;
+	stp = kmalloc(sizeof(*stp), GFP_KERNEL);
+	if (WARN_ON_ONCE(stp == NULL))
+		return;
+	stp->st_t = tp;
+	mutex_lock(&shuffle_task_mutex);
+	list_add(&stp->st_l, &shuffle_task_list);
+	mutex_unlock(&shuffle_task_mutex);
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_task_register);
+
+/*
+ * Unregister all tasks, for example, at the end of the torture run.
+ */
+static void torture_shuffle_task_unregister_all(void)
+{
+	struct shuffle_task *stp;
+	struct shuffle_task *p;
+
+	mutex_lock(&shuffle_task_mutex);
+	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
+		list_del(&stp->st_l);
+		kfree(stp);
+	}
+	mutex_unlock(&shuffle_task_mutex);
+}
+
+/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
+ * A special case is when shuffle_idle_cpu = -1, in which case we allow
+ * the tasks to run on all CPUs.
+ */
+static void torture_shuffle_tasks(void)
+{
+	struct shuffle_task *stp;
+
+	cpumask_setall(shuffle_tmp_mask);
+	get_online_cpus();
+
+	/* No point in shuffling if there is only one online CPU (ex: UP) */
+	if (num_online_cpus() == 1) {
+		put_online_cpus();
+		return;
+	}
+
+	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
+	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
+	if (shuffle_idle_cpu >= nr_cpu_ids)
+		shuffle_idle_cpu = -1;
+	if (shuffle_idle_cpu != -1) {
+		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);
+		if (cpumask_empty(shuffle_tmp_mask)) {
+			put_online_cpus();
+			return;
+		}
+	}
+
+	mutex_lock(&shuffle_task_mutex);
+	list_for_each_entry(stp, &shuffle_task_list, st_l)
+		set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
+	mutex_unlock(&shuffle_task_mutex);
+
+	put_online_cpus();
+}
+
+/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
+ * system to become idle in turn and cut off its timer ticks.  This is meant
+ * to test RCU's support for such tickless idle CPUs.
+ */
+static int torture_shuffle(void *arg)
+{
+	VERBOSE_TOROUT_STRING("torture_shuffle task started");
+	do {
+		schedule_timeout_interruptible(shuffle_interval);
+		torture_shuffle_tasks();
+		torture_shutdown_absorb("torture_shuffle");
+	} while (!torture_must_stop());
+	VERBOSE_TOROUT_STRING("torture_shuffle task stopping");
+	return 0;
+}
+
+/*
+ * Start the shuffler, with shuffint in jiffies.
+ */
+int torture_shuffle_init(long shuffint)
+{
+	int ret;
+
+	shuffle_interval = shuffint;
+
+	shuffle_idle_cpu = -1;
+
+	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+		VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
+		return -ENOMEM;
+	}
+
+	/* Create the shuffler thread */
+	shuffler_task = kthread_run(torture_shuffle, NULL, "torture_shuffle");
+	if (IS_ERR(shuffler_task)) {
+		ret = PTR_ERR(shuffler_task);
+		free_cpumask_var(shuffle_tmp_mask);
+		VERBOSE_TOROUT_ERRSTRING("Failed to create shuffler");
+		shuffler_task = NULL;
+		return ret;
+	}
+	torture_shuffle_task_register(shuffler_task);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_init);
+
+/*
+ * Stop the shuffling.
+ */
+void torture_shuffle_cleanup(void)
+{
+	torture_shuffle_task_unregister_all();
+	if (shuffler_task) {
+		VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
+		kthread_stop(shuffler_task);
+		free_cpumask_var(shuffle_tmp_mask);
+	}
+	shuffler_task = NULL;
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_cleanup);
+
+/*
  * Absorb kthreads into a kernel function that won't return, so that
  * they won't ever access module text or data again.
  */
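The rotation policy in torture_shuffle_tasks() is easy to trace by hand. Below is a standalone userspace sketch of the same arithmetic, with plain ints standing in for cpumask_next() on a fully set mask; the four-CPU count is an arbitrary assumption:

#include <stdio.h>

int main(void)
{
	int nr_cpu_ids = 4;		/* arbitrary example CPU count */
	int shuffle_idle_cpu = -1;	/* -1 means no CPU is forced idle */
	int i;

	for (i = 0; i < 10; i++) {
		/* cpumask_next(n, full_mask) is n + 1 when all bits are set. */
		shuffle_idle_cpu++;
		if (shuffle_idle_cpu >= nr_cpu_ids)
			shuffle_idle_cpu = -1;	/* overflow: idle no CPU this time */
		printf("interval %d: forced-idle CPU = %d\n", i, shuffle_idle_cpu);
	}
	return 0;
}

This prints the cycle 0, 1, 2, 3, -1, 0, ...: each CPU gets one shuffle_interval of enforced idleness in turn, with one interval per lap in which no CPU is restricted, matching the "upon overflow, don't idle any CPUs" comment in the code above.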