Diffstat (limited to 'kernel/kthread.c')
-rw-r--r--  kernel/kthread.c | 164 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 164 insertions(+), 0 deletions(-)

diff --git a/kernel/kthread.c b/kernel/kthread.c
index 83911c780175..2dc3786349d1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -14,6 +14,8 @@
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/freezer.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
@@ -35,6 +37,7 @@ struct kthread_create_info
 
 struct kthread {
 	int should_stop;
+	void *data;
 	struct completion exited;
 };
 
@@ -54,6 +57,19 @@ int kthread_should_stop(void)
 }
 EXPORT_SYMBOL(kthread_should_stop);
 
+/**
+ * kthread_data - return data value specified on kthread creation
+ * @task: kthread task in question
+ *
+ * Return the data value specified when kthread @task was created.
+ * The caller is responsible for ensuring the validity of @task when
+ * calling this function.
+ */
+void *kthread_data(struct task_struct *task)
+{
+	return to_kthread(task)->data;
+}
+
 static int kthread(void *_create)
 {
 	/* Copy data: it's on kthread's stack */
@@ -64,6 +80,7 @@ static int kthread(void *_create)
 	int ret;
 
 	self.should_stop = 0;
+	self.data = data;
 	init_completion(&self.exited);
 	current->vfork_done = &self.exited;
 
@@ -247,3 +264,150 @@ int kthreadd(void *unused)
 
 	return 0;
 }
+
+/**
+ * kthread_worker_fn - kthread function to process kthread_worker
+ * @worker_ptr: pointer to initialized kthread_worker
+ *
+ * This function can be used as @threadfn to kthread_create() or
+ * kthread_run() with @worker_ptr argument pointing to an initialized
+ * kthread_worker.  The started kthread will process work_list until
+ * it is stopped with kthread_stop().  A kthread can also call this
+ * function directly after extra initialization.
+ *
+ * Different kthreads can be used for the same kthread_worker as long
+ * as there's only one kthread attached to it at any given time.  A
+ * kthread_worker without an attached kthread simply collects queued
+ * kthread_works.
+ */
+int kthread_worker_fn(void *worker_ptr)
+{
+	struct kthread_worker *worker = worker_ptr;
+	struct kthread_work *work;
+
+	WARN_ON(worker->task);
+	worker->task = current;
+repeat:
+	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
+
+	if (kthread_should_stop()) {
+		__set_current_state(TASK_RUNNING);
+		spin_lock_irq(&worker->lock);
+		worker->task = NULL;
+		spin_unlock_irq(&worker->lock);
+		return 0;
+	}
+
+	work = NULL;
+	spin_lock_irq(&worker->lock);
+	if (!list_empty(&worker->work_list)) {
+		work = list_first_entry(&worker->work_list,
+					struct kthread_work, node);
+		list_del_init(&work->node);
+	}
+	spin_unlock_irq(&worker->lock);
+
+	if (work) {
+		__set_current_state(TASK_RUNNING);
+		work->func(work);
+		smp_wmb();	/* wmb worker-b0 paired with flush-b1 */
+		work->done_seq = work->queue_seq;
+		smp_mb();	/* mb worker-b1 paired with flush-b0 */
+		if (atomic_read(&work->flushing))
+			wake_up_all(&work->done);
+	} else if (!freezing(current))
+		schedule();
+
+	try_to_freeze();
+	goto repeat;
+}
+EXPORT_SYMBOL_GPL(kthread_worker_fn);
+
+/**
+ * queue_kthread_work - queue a kthread_work
+ * @worker: target kthread_worker
+ * @work: kthread_work to queue
+ *
+ * Queue @work to work processor @worker for async execution.  @worker
+ * must have been initialized with init_kthread_worker() or
+ * DEFINE_KTHREAD_WORKER().  Returns %true if @work was successfully
+ * queued, %false if it was already pending.
+ */
+bool queue_kthread_work(struct kthread_worker *worker,
+			struct kthread_work *work)
+{
+	bool ret = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&worker->lock, flags);
+	if (list_empty(&work->node)) {
+		list_add_tail(&work->node, &worker->work_list);
+		work->queue_seq++;
+		if (likely(worker->task))
+			wake_up_process(worker->task);
+		ret = true;
+	}
+	spin_unlock_irqrestore(&worker->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(queue_kthread_work);
+
+/**
+ * flush_kthread_work - flush a kthread_work
+ * @work: work to flush
+ *
+ * If @work is queued or executing, wait for it to finish execution.
+ */
+void flush_kthread_work(struct kthread_work *work)
+{
+	int seq = work->queue_seq;
+
+	atomic_inc(&work->flushing);
+
+	/*
+	 * mb flush-b0 paired with worker-b1, to make sure either
+	 * worker sees the above increment or we see done_seq update.
+	 */
+	smp_mb__after_atomic_inc();
+
+	/* A - B <= 0 tests whether B is in front of A regardless of overflow */
+	wait_event(work->done, seq - work->done_seq <= 0);
+	atomic_dec(&work->flushing);
+
+	/*
+	 * rmb flush-b1 paired with worker-b0, to make sure our caller
+	 * sees every change made by work->func().
+	 */
+	smp_mb__after_atomic_dec();
+}
+EXPORT_SYMBOL_GPL(flush_kthread_work);
+
+struct kthread_flush_work {
+	struct kthread_work	work;
+	struct completion	done;
+};
+
+static void kthread_flush_work_fn(struct kthread_work *work)
+{
+	struct kthread_flush_work *fwork =
+		container_of(work, struct kthread_flush_work, work);
+	complete(&fwork->done);
+}
+
+/**
+ * flush_kthread_worker - flush all current works on a kthread_worker
+ * @worker: worker to flush
+ *
+ * Wait until all currently executing or pending works on @worker are
+ * finished.
+ */
+void flush_kthread_worker(struct kthread_worker *worker)
+{
+	struct kthread_flush_work fwork = {
+		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
+		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
+	};
+
+	queue_kthread_work(worker, &fwork.work);
+	wait_for_completion(&fwork.done);
+}
+EXPORT_SYMBOL_GPL(flush_kthread_worker);
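
For orientation, a minimal usage sketch of the API this patch adds, assuming the companion include/linux/kthread.h changes from the same patch (struct kthread_worker, init_kthread_worker(), DEFINE_KTHREAD_WORK()). All example_* identifiers are hypothetical, not part of the patch:

#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker example_worker;
static struct task_struct *example_thread;

static void example_work_fn(struct kthread_work *work)
{
	/* Runs in example_thread's context, one work item at a time. */
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static int example_start(void)
{
	init_kthread_worker(&example_worker);
	example_thread = kthread_run(kthread_worker_fn, &example_worker,
				     "example");
	if (IS_ERR(example_thread))
		return PTR_ERR(example_thread);

	queue_kthread_work(&example_worker, &example_work);

	/* Wait for this specific work, then for everything queued. */
	flush_kthread_work(&example_work);
	flush_kthread_worker(&example_worker);

	kthread_stop(example_thread);
	return 0;
}

Unlike a workqueue, the caller owns the thread's lifetime here: it is created with kthread_run() and torn down with kthread_stop(), which kthread_worker_fn() detects via kthread_should_stop().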
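
kthread_data() is intended for code that already knows @task is one of its own kthreads and wants the creation-time payload back without plumbing it separately. A sketch of the round trip, again with hypothetical example_* names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct example_ctx {
	int id;
};

static int example_threadfn(void *data)
{
	struct example_ctx *ctx = data;	/* same pointer kthread_data() sees */

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	}
	return ctx->id;
}

static void example(void)
{
	static struct example_ctx ctx = { .id = 1 };
	struct task_struct *task;

	task = kthread_run(example_threadfn, &ctx, "example/%d", ctx.id);
	if (!IS_ERR(task)) {
		/* Valid as long as we know the task is alive, per the
		 * kernel-doc caveat above. */
		WARN_ON(kthread_data(task) != &ctx);
		kthread_stop(task);
	}
}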
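
The wait condition in flush_kthread_work() relies on the wrap-safe comparison called out in its comment: the kernel is built with -fno-strict-overflow, so signed subtraction wraps rather than being undefined. A standalone illustration of why "A - B <= 0" works where a plain comparison fails (userspace C; compile with -fwrapv to mirror the kernel's guarantee):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int seq = INT_MAX;	/* A: queue_seq snapshot taken by the flusher */
	int done_seq = INT_MIN;	/* B: done_seq after the counter wrapped      */

	/* seq - done_seq wraps to -1, so the test correctly reports
	 * that B is ahead of A and the wait can finish ... */
	printf("A - B <= 0 : %d\n", seq - done_seq <= 0);	/* prints 1 */

	/* ... while a naive comparison gets the order backwards and
	 * would leave the flusher waiting forever. */
	printf("A <= B     : %d\n", seq <= done_seq);		/* prints 0 */
	return 0;
}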