Diffstat (limited to 'include')
 include/drm/drm_crtc.h           |   3
 include/linux/cpu.h              |   2
 include/linux/fscache-cache.h    |  47
 include/linux/kthread.h          |  65
 include/linux/libata.h           |   1
 include/linux/slow-work.h        | 163
 include/linux/workqueue.h        | 154
 include/trace/events/workqueue.h |  92
 8 files changed, 215 insertions(+), 312 deletions(-)
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 93a1a31b9c2d..c707270bff5a 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -31,7 +31,6 @@
 #include <linux/idr.h>

 #include <linux/fb.h>
-#include <linux/slow-work.h>

 struct drm_device;
 struct drm_mode_set;
@@ -595,7 +594,7 @@ struct drm_mode_config {

 	/* output poll support */
 	bool poll_enabled;
-	struct delayed_slow_work output_poll_slow_work;
+	struct delayed_work output_poll_work;

 	/* pointers to standard properties */
 	struct list_head property_blob_list;
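
With the slow-work field gone, drm_mode_config carries an ordinary delayed_work, so output polling goes through the standard workqueue API. A minimal sketch of the new idiom, with a hypothetical handler name and poll period (the real DRM poll helper may differ in detail):

	/* hypothetical handler; re-arms itself after each connector probe */
	static void output_poll_execute(struct work_struct *work)
	{
		struct delayed_work *delayed_work = to_delayed_work(work);

		/* ... probe connectors for hotplug changes ... */
		schedule_delayed_work(delayed_work, 10 * HZ);	/* re-arm */
	}

	/* setup, e.g. from the KMS poll-init path */
	INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
	schedule_delayed_work(&dev->mode_config.output_poll_work, 10 * HZ);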
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index de6b1722cdca..4823af64e9db 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -71,6 +71,8 @@ enum {
 	/* migration should happen before other stuff but after perf */
 	CPU_PRI_PERF		= 20,
 	CPU_PRI_MIGRATION	= 10,
+	/* prepare workqueues for other notifiers */
+	CPU_PRI_WORKQUEUE	= 5,
 };

 #ifdef CONFIG_SMP
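
CPU_PRI_WORKQUEUE slots the workqueue hotplug callback between migration (priority 10) and default-priority (0) notifiers, so per-cpu workers exist before ordinary notifiers can queue work on a newly onlined CPU. A hedged sketch of claiming such a priority (the callback name is hypothetical):

	static int my_cpu_callback(struct notifier_block *nb,
				   unsigned long action, void *hcpu)
	{
		/* prepare per-cpu state for the cpu identified by hcpu */
		return NOTIFY_OK;
	}

	static int __init my_init(void)
	{
		/* runs after migration (10) but before default (0) notifiers */
		hotcpu_notifier(my_cpu_callback, CPU_PRI_WORKQUEUE);
		return 0;
	}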
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index c57db27ac861..b8581c09d19f 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -20,7 +20,7 @@

 #include <linux/fscache.h>
 #include <linux/sched.h>
-#include <linux/slow-work.h>
+#include <linux/workqueue.h>

 #define NR_MAXCACHES BITS_PER_LONG

@@ -76,18 +76,14 @@ typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
 typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);

 struct fscache_operation {
-	union {
-		struct work_struct fast_work;	/* record for fast ops */
-		struct slow_work slow_work;	/* record for (very) slow ops */
-	};
+	struct work_struct	work;		/* record for async ops */
 	struct list_head	pend_link;	/* link in object->pending_ops */
 	struct fscache_object	*object;	/* object to be operated upon */

 	unsigned long		flags;
 #define FSCACHE_OP_TYPE		0x000f	/* operation type */
-#define FSCACHE_OP_FAST		0x0001	/* - fast op, processor may not sleep for disk */
-#define FSCACHE_OP_SLOW		0x0002	/* - (very) slow op, processor may sleep for disk */
-#define FSCACHE_OP_MYTHREAD	0x0003	/* - processing is done be issuing thread, not pool */
+#define FSCACHE_OP_ASYNC	0x0001	/* - async op, processor may sleep for disk */
+#define FSCACHE_OP_MYTHREAD	0x0002	/* - processing is done be issuing thread, not pool */
 #define FSCACHE_OP_WAITING	4	/* cleared when op is woken */
 #define FSCACHE_OP_EXCLUSIVE	5	/* exclusive op, other ops must wait */
 #define FSCACHE_OP_DEAD		6	/* op is now dead */
@@ -105,7 +101,8 @@ struct fscache_operation {
 	/* operation releaser */
 	fscache_operation_release_t release;

-#ifdef CONFIG_SLOW_WORK_DEBUG
+#ifdef CONFIG_WORKQUEUE_DEBUGFS
+	struct work_struct	put_work;	/* work to delay operation put */
 	const char *name;		/* operation name */
 	const char *state;		/* operation state */
 #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
@@ -117,7 +114,7 @@ struct fscache_operation {
 };

 extern atomic_t fscache_op_debug_id;
-extern const struct slow_work_ops fscache_op_slow_work_ops;
+extern void fscache_op_work_func(struct work_struct *work);

 extern void fscache_enqueue_operation(struct fscache_operation *);
 extern void fscache_put_operation(struct fscache_operation *);
@@ -128,33 +125,21 @@ extern void fscache_put_operation(struct fscache_operation *);
  * @release: The release function to assign
  *
  * Do basic initialisation of an operation.  The caller must still set flags,
- * object, either fast_work or slow_work if necessary, and processor if needed.
+ * object and processor if needed.
  */
 static inline void fscache_operation_init(struct fscache_operation *op,
-					  fscache_operation_release_t release)
+					fscache_operation_processor_t processor,
+					fscache_operation_release_t release)
 {
+	INIT_WORK(&op->work, fscache_op_work_func);
 	atomic_set(&op->usage, 1);
 	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+	op->processor = processor;
 	op->release = release;
 	INIT_LIST_HEAD(&op->pend_link);
 	fscache_set_op_state(op, "Init");
 }

-/**
- * fscache_operation_init_slow - Do additional initialisation of a slow op
- * @op: The operation to initialise
- * @processor: The processor function to assign
- *
- * Do additional initialisation of an operation as required for slow work.
- */
-static inline
-void fscache_operation_init_slow(struct fscache_operation *op,
-				 fscache_operation_processor_t processor)
-{
-	op->processor = processor;
-	slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
-}
-
 /*
  * data read operation
  */
@@ -389,7 +374,7 @@ struct fscache_object {
 	struct fscache_cache	*cache;		/* cache that supplied this object */
 	struct fscache_cookie	*cookie;	/* netfs's file/index object */
 	struct fscache_object	*parent;	/* parent object */
-	struct slow_work	work;		/* attention scheduling record */
+	struct work_struct	work;		/* attention scheduling record */
 	struct list_head	dependents;	/* FIFO of dependent objects */
 	struct list_head	dep_link;	/* link in parent's dependents list */
 	struct list_head	pending_ops;	/* unstarted operations on this object */
@@ -411,7 +396,7 @@ extern const char *fscache_object_states[];
 	(test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) &&	\
 	 (obj)->state >= FSCACHE_OBJECT_DYING)

-extern const struct slow_work_ops fscache_object_slow_work_ops;
+extern void fscache_object_work_func(struct work_struct *work);

 /**
  * fscache_object_init - Initialise a cache object description
@@ -433,7 +418,7 @@ void fscache_object_init(struct fscache_object *object,
 	spin_lock_init(&object->lock);
 	INIT_LIST_HEAD(&object->cache_link);
 	INIT_HLIST_NODE(&object->cookie_link);
-	vslow_work_init(&object->work, &fscache_object_slow_work_ops);
+	INIT_WORK(&object->work, fscache_object_work_func);
 	INIT_LIST_HEAD(&object->dependents);
 	INIT_LIST_HEAD(&object->dep_link);
 	INIT_LIST_HEAD(&object->pending_ops);
@@ -534,6 +519,8 @@ extern void fscache_io_error(struct fscache_cache *cache);
 extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
				       struct pagevec *pagevec);

+extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
+
 extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
					       const void *data,
					       uint16_t datalen);
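
Under the reworked header a cache operation names its processor at init time and is queued as an ordinary work item; the separate "slow" initialiser is gone. A minimal sketch of the resulting call sequence, built only from the declarations above, with my_op_processor and my_op_release as hypothetical callbacks:

	static void my_op_processor(struct fscache_operation *op)
	{
		/* the potentially sleeping cache I/O, run from the worker pool */
	}

	static void my_op_release(struct fscache_operation *op)
	{
		/* drop resources held by the operation */
	}

	static void start_op(struct fscache_operation *op)
	{
		fscache_operation_init(op, my_op_processor, my_op_release);
		op->flags = FSCACHE_OP_ASYNC;	/* run from the pool, may sleep */
		fscache_enqueue_operation(op);
	}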
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index aabc8a13ba71..685ea65eb803 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -30,8 +30,73 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
 int kthread_should_stop(void);
+void *kthread_data(struct task_struct *k);

 int kthreadd(void *unused);
 extern struct task_struct *kthreadd_task;

+/*
+ * Simple work processor based on kthread.
+ *
+ * This provides easier way to make use of kthreads.  A kthread_work
+ * can be queued and flushed using queue/flush_kthread_work()
+ * respectively.  Queued kthread_works are processed by a kthread
+ * running kthread_worker_fn().
+ *
+ * A kthread_work can't be freed while it is executing.
+ */
+struct kthread_work;
+typedef void (*kthread_work_func_t)(struct kthread_work *work);
+
+struct kthread_worker {
+	spinlock_t		lock;
+	struct list_head	work_list;
+	struct task_struct	*task;
+};
+
+struct kthread_work {
+	struct list_head	node;
+	kthread_work_func_t	func;
+	wait_queue_head_t	done;
+	atomic_t		flushing;
+	int			queue_seq;
+	int			done_seq;
+};
+
+#define KTHREAD_WORKER_INIT(worker)	{				\
+	.lock = SPIN_LOCK_UNLOCKED,					\
+	.work_list = LIST_HEAD_INIT((worker).work_list),		\
+	}
+
+#define KTHREAD_WORK_INIT(work, fn)	{				\
+	.node = LIST_HEAD_INIT((work).node),				\
+	.func = (fn),							\
+	.done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done),		\
+	.flushing = ATOMIC_INIT(0),					\
+	}
+
+#define DEFINE_KTHREAD_WORKER(worker)					\
+	struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
+
+#define DEFINE_KTHREAD_WORK(work, fn)					\
+	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
+
+static inline void init_kthread_worker(struct kthread_worker *worker)
+{
+	*worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker);
+}
+
+static inline void init_kthread_work(struct kthread_work *work,
+				     kthread_work_func_t fn)
+{
+	*work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn);
+}
+
+int kthread_worker_fn(void *worker_ptr);
+
+bool queue_kthread_work(struct kthread_worker *worker,
+			struct kthread_work *work);
+void flush_kthread_work(struct kthread_work *work);
+void flush_kthread_worker(struct kthread_worker *worker);
+
 #endif /* _LINUX_KTHREAD_H */
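
kthread_worker gives code that needs a fixed thread identity (for priority tweaks, cpu binding and the like) a work-queueing interface on top of a kthread it owns: spawn the thread on kthread_worker_fn(), then queue kthread_works to it. A minimal usage sketch under assumed names (my_worker, my_work_fn):

	static struct kthread_worker my_worker;
	static struct kthread_work my_work;

	static void my_work_fn(struct kthread_work *work)
	{
		/* always executes in the same kthread's context */
	}

	static int __init my_init(void)
	{
		struct task_struct *task;

		init_kthread_worker(&my_worker);
		task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
		if (IS_ERR(task))
			return PTR_ERR(task);

		init_kthread_work(&my_work, my_work_fn);
		queue_kthread_work(&my_worker, &my_work);
		flush_kthread_work(&my_work);	/* wait for completion */
		return 0;
	}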
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b85f3ff34d7d..f010f18a0f86 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -751,6 +751,7 @@ struct ata_port {
 	struct ata_host		*host;
 	struct device		*dev;

+	struct mutex		scsi_scan_mutex;
 	struct delayed_work	hotplug_task;
 	struct work_struct	scsi_rescan_task;

diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
deleted file mode 100644
index 13337bf6c3f5..000000000000
--- a/include/linux/slow-work.h
+++ /dev/null
@@ -1,163 +0,0 @@
-/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- *
- * See Documentation/slow-work.txt
- */
-
-#ifndef _LINUX_SLOW_WORK_H
-#define _LINUX_SLOW_WORK_H
-
-#ifdef CONFIG_SLOW_WORK
-
-#include <linux/sysctl.h>
-#include <linux/timer.h>
-
-struct slow_work;
-#ifdef CONFIG_SLOW_WORK_DEBUG
-struct seq_file;
-#endif
-
-/*
- * The operations used to support slow work items
- */
-struct slow_work_ops {
-	/* owner */
-	struct module *owner;
-
-	/* get a ref on a work item
-	 * - return 0 if successful, -ve if not
-	 */
-	int (*get_ref)(struct slow_work *work);
-
-	/* discard a ref to a work item */
-	void (*put_ref)(struct slow_work *work);
-
-	/* execute a work item */
-	void (*execute)(struct slow_work *work);
-
-#ifdef CONFIG_SLOW_WORK_DEBUG
-	/* describe a work item for debugfs */
-	void (*desc)(struct slow_work *work, struct seq_file *m);
-#endif
-};
-
-/*
- * A slow work item
- * - A reference is held on the parent object by the thread pool when it is
- *   queued
- */
-struct slow_work {
-	struct module *owner;	/* the owning module */
-	unsigned long flags;
-#define SLOW_WORK_PENDING	0	/* item pending (further) execution */
-#define SLOW_WORK_EXECUTING	1	/* item currently executing */
-#define SLOW_WORK_ENQ_DEFERRED	2	/* item enqueue deferred */
-#define SLOW_WORK_VERY_SLOW	3	/* item is very slow */
-#define SLOW_WORK_CANCELLING	4	/* item is being cancelled, don't enqueue */
-#define SLOW_WORK_DELAYED	5	/* item is struct delayed_slow_work with active timer */
-	const struct slow_work_ops *ops; /* operations table for this item */
-	struct list_head	link;	/* link in queue */
-#ifdef CONFIG_SLOW_WORK_DEBUG
-	struct timespec		mark;	/* jiffies at which queued or exec begun */
-#endif
-};
-
-struct delayed_slow_work {
-	struct slow_work work;
-	struct timer_list timer;
-};
-
-/**
- * slow_work_init - Initialise a slow work item
- * @work: The work item to initialise
- * @ops: The operations to use to handle the slow work item
- *
- * Initialise a slow work item.
- */
-static inline void slow_work_init(struct slow_work *work,
-				  const struct slow_work_ops *ops)
-{
-	work->flags = 0;
-	work->ops = ops;
-	INIT_LIST_HEAD(&work->link);
-}
-
-/**
- * slow_work_init - Initialise a delayed slow work item
- * @work: The work item to initialise
- * @ops: The operations to use to handle the slow work item
- *
- * Initialise a delayed slow work item.
- */
-static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
-					  const struct slow_work_ops *ops)
-{
-	init_timer(&dwork->timer);
-	slow_work_init(&dwork->work, ops);
-}
-
-/**
- * vslow_work_init - Initialise a very slow work item
- * @work: The work item to initialise
- * @ops: The operations to use to handle the slow work item
- *
- * Initialise a very slow work item.  This item will be restricted such that
- * only a certain number of the pool threads will be able to execute items of
- * this type.
- */
-static inline void vslow_work_init(struct slow_work *work,
-				   const struct slow_work_ops *ops)
-{
-	work->flags = 1 << SLOW_WORK_VERY_SLOW;
-	work->ops = ops;
-	INIT_LIST_HEAD(&work->link);
-}
-
-/**
- * slow_work_is_queued - Determine if a slow work item is on the work queue
- * work: The work item to test
- *
- * Determine if the specified slow-work item is on the work queue.  This
- * returns true if it is actually on the queue.
- *
- * If the item is executing and has been marked for requeue when execution
- * finishes, then false will be returned.
- *
- * Anyone wishing to wait for completion of execution can wait on the
- * SLOW_WORK_EXECUTING bit.
- */
-static inline bool slow_work_is_queued(struct slow_work *work)
-{
-	unsigned long flags = work->flags;
-	return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING);
-}
-
-extern int slow_work_enqueue(struct slow_work *work);
-extern void slow_work_cancel(struct slow_work *work);
-extern int slow_work_register_user(struct module *owner);
-extern void slow_work_unregister_user(struct module *owner);
-
-extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
-				     unsigned long delay);
-
-static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
-{
-	slow_work_cancel(&dwork->work);
-}
-
-extern bool slow_work_sleep_till_thread_needed(struct slow_work *work,
-					       signed long *_timeout);
-
-#ifdef CONFIG_SYSCTL
-extern ctl_table slow_work_sysctls[];
-#endif
-
-#endif /* CONFIG_SLOW_WORK */
-#endif /* _LINUX_SLOW_WORK_H */
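
Each slow-work idiom has a direct workqueue equivalent, which is what allows this header to be deleted outright. Roughly, as a hedged migration map rather than an exact recipe (my_slow_work_ops, my_work_func and my_wq are placeholder names):

	/* before: ops table with explicit get_ref/put_ref, global thread pool */
	slow_work_init(&obj->work, &my_slow_work_ops);
	slow_work_enqueue(&obj->work);
	slow_work_cancel(&obj->work);

	/* after: a work_struct, a plain function, and a workqueue of your choice */
	INIT_WORK(&obj->work, my_work_func);
	queue_work(my_wq, &obj->work);		/* or schedule_work(&obj->work) */
	cancel_work_sync(&obj->work);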
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index d0f7c8178498..4f9d277bcd9a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -9,6 +9,7 @@
 #include <linux/linkage.h>
 #include <linux/bitops.h>
 #include <linux/lockdep.h>
+#include <linux/threads.h>
 #include <asm/atomic.h>

 struct workqueue_struct;
@@ -22,12 +23,59 @@ typedef void (*work_func_t)(struct work_struct *work);
  */
 #define work_data_bits(work) ((unsigned long *)(&(work)->data))

+enum {
+	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
+	WORK_STRUCT_CWQ_BIT	= 1,	/* data points to cwq */
+	WORK_STRUCT_LINKED_BIT	= 2,	/* next work is linked to this one */
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+	WORK_STRUCT_STATIC_BIT	= 3,	/* static initializer (debugobjects) */
+	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
+#else
+	WORK_STRUCT_COLOR_SHIFT	= 3,	/* color for workqueue flushing */
+#endif
+
+	WORK_STRUCT_COLOR_BITS	= 4,
+
+	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
+	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
+	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
+#else
+	WORK_STRUCT_STATIC	= 0,
+#endif
+
+	/*
+	 * The last color is no color used for works which don't
+	 * participate in workqueue flushing.
+	 */
+	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
+	WORK_NO_COLOR		= WORK_NR_COLORS,
+
+	/* special cpu IDs */
+	WORK_CPU_UNBOUND	= NR_CPUS,
+	WORK_CPU_NONE		= NR_CPUS + 1,
+	WORK_CPU_LAST		= WORK_CPU_NONE,
+
+	/*
+	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
+	 * off.  This makes cwqs aligned to 128 bytes which isn't too
+	 * excessive while allowing 15 workqueue flush colors.
+	 */
+	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
+				  WORK_STRUCT_COLOR_BITS,
+
+	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
+	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
+	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
+
+	/* bit mask for work_busy() return values */
+	WORK_BUSY_PENDING	= 1 << 0,
+	WORK_BUSY_RUNNING	= 1 << 1,
+};
+
 struct work_struct {
 	atomic_long_t data;
-#define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
-#define WORK_STRUCT_STATIC 1		/* static initializer (debugobjects) */
-#define WORK_STRUCT_FLAG_MASK (3UL)
-#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
 	struct list_head entry;
 	work_func_t func;
 #ifdef CONFIG_LOCKDEP
@@ -35,8 +83,9 @@ struct work_struct {
 #endif
 };

-#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
-#define WORK_DATA_STATIC_INIT()	ATOMIC_LONG_INIT(2)
+#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
+#define WORK_DATA_STATIC_INIT()	\
+	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

 struct delayed_work {
 	struct work_struct work;
@@ -96,9 +145,14 @@ struct execute_work {
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 extern void __init_work(struct work_struct *work, int onstack);
 extern void destroy_work_on_stack(struct work_struct *work);
+static inline unsigned int work_static(struct work_struct *work)
+{
+	return *work_data_bits(work) & WORK_STRUCT_STATIC;
+}
 #else
 static inline void __init_work(struct work_struct *work, int onstack) { }
 static inline void destroy_work_on_stack(struct work_struct *work) { }
+static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #endif

 /*
@@ -162,7 +216,7 @@ static inline void destroy_work_on_stack(struct work_struct *work) { }
  * @work: The work item in question
  */
 #define work_pending(work) \
-	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))
+	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

 /**
  * delayed_work_pending - Find out whether a delayable work item is currently
@@ -177,16 +231,56 @@ static inline void destroy_work_on_stack(struct work_struct *work) { }
  * @work: The work item in question
  */
 #define work_clear_pending(work) \
-	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
+	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
+
+enum {
+	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
+	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
+	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
+	WQ_RESCUER		= 1 << 3, /* has an rescue worker */
+	WQ_HIGHPRI		= 1 << 4, /* high priority */
+	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
+
+	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
+	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
+	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
+};
+
+/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
+#define WQ_UNBOUND_MAX_ACTIVE	\
+	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

+/*
+ * System-wide workqueues which are always present.
+ *
+ * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * Multi-CPU multi-threaded.  There are users which expect relatively
+ * short queue flush time.  Don't queue works which can run for too
+ * long.
+ *
+ * system_long_wq is similar to system_wq but may host long running
+ * works.  Queue flushing might take relatively long.
+ *
+ * system_nrt_wq is non-reentrant and guarantees that any given work
+ * item is never executed in parallel by multiple CPUs.  Queue
+ * flushing might take relatively long.
+ *
+ * system_unbound_wq is unbound workqueue.  Workers are not bound to
+ * any specific CPU, not concurrency managed, and all queued works are
+ * executed immediately as long as max_active limit is not reached and
+ * resources are available.
+ */
+extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_long_wq;
+extern struct workqueue_struct *system_nrt_wq;
+extern struct workqueue_struct *system_unbound_wq;

 extern struct workqueue_struct *
-__create_workqueue_key(const char *name, int singlethread,
-		       int freezeable, int rt, struct lock_class_key *key,
-		       const char *lock_name);
+__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
+		      struct lock_class_key *key, const char *lock_name);

 #ifdef CONFIG_LOCKDEP
-#define __create_workqueue(name, singlethread, freezeable, rt)	\
+#define alloc_workqueue(name, flags, max_active)		\
 ({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
@@ -196,20 +290,20 @@ __create_workqueue_key(const char *name, int singlethread,
	else							\
		__lock_name = #name;				\
								\
-	__create_workqueue_key((name), (singlethread),		\
-			       (freezeable), (rt), &__key,	\
-			       __lock_name);			\
+	__alloc_workqueue_key((name), (flags), (max_active),	\
+			      &__key, __lock_name);		\
 })
 #else
-#define __create_workqueue(name, singlethread, freezeable, rt)	\
-	__create_workqueue_key((name), (singlethread), (freezeable), (rt), \
-			       NULL, NULL)
+#define alloc_workqueue(name, flags, max_active)		\
+	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
 #endif

-#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
-#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
-#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
+#define create_workqueue(name)					\
+	alloc_workqueue((name), WQ_RESCUER, 1)
+#define create_freezeable_workqueue(name)			\
+	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
+#define create_singlethread_workqueue(name)			\
+	alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)

 extern void destroy_workqueue(struct workqueue_struct *wq);

@@ -231,16 +325,19 @@ extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay)
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
-extern int current_is_keventd(void);
 extern int keventd_up(void);

-extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);

 extern int flush_work(struct work_struct *work);
-
 extern int cancel_work_sync(struct work_struct *work);

+extern void workqueue_set_max_active(struct workqueue_struct *wq,
+				     int max_active);
+extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
+extern unsigned int work_cpu(struct work_struct *work);
+extern unsigned int work_busy(struct work_struct *work);
+
 /*
  * Kill off a pending schedule_delayed_work().  Note that the work callback
  * function may still be running on return from cancel_delayed_work(), unless
@@ -298,7 +395,14 @@ static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
 #endif /* CONFIG_SMP */

+#ifdef CONFIG_FREEZER
+extern void freeze_workqueues_begin(void);
+extern bool freeze_workqueues_busy(void);
+extern void thaw_workqueues(void);
+#endif /* CONFIG_FREEZER */
+
 #ifdef CONFIG_LOCKDEP
 int in_workqueue_context(struct workqueue_struct *wq);
 #endif
+
 #endif
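
The boolean parameters of __create_workqueue() are replaced by WQ_* flags plus an explicit max_active concurrency limit, and the old create_*() helpers become thin wrappers over alloc_workqueue(). A hedged example of the new entry point (queue and function names hypothetical):

	static struct workqueue_struct *my_wq;
	static struct work_struct my_work;

	static void my_work_fn(struct work_struct *work)
	{
		/* may sleep; up to 16 items of this queue run per cpu */
	}

	static int __init my_init(void)
	{
		my_wq = alloc_workqueue("my_wq", WQ_FREEZEABLE, 16);
		if (!my_wq)
			return -ENOMEM;

		INIT_WORK(&my_work, my_work_fn);
		queue_work(my_wq, &my_work);
		return 0;
	}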
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
deleted file mode 100644
index d6c974474e70..000000000000
--- a/include/trace/events/workqueue.h
+++ /dev/null
@@ -1,92 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM workqueue
-
-#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_WORKQUEUE_H
-
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/tracepoint.h>
-
-DECLARE_EVENT_CLASS(workqueue,
-
-	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
-
-	TP_ARGS(wq_thread, work),
-
-	TP_STRUCT__entry(
-		__array(char,		thread_comm,	TASK_COMM_LEN)
-		__field(pid_t,		thread_pid)
-		__field(work_func_t,	func)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
-		__entry->thread_pid	= wq_thread->pid;
-		__entry->func		= work->func;
-	),
-
-	TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
-		  __entry->thread_pid, __entry->func)
-);
-
-DEFINE_EVENT(workqueue, workqueue_insertion,
-
-	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
-
-	TP_ARGS(wq_thread, work)
-);
-
-DEFINE_EVENT(workqueue, workqueue_execution,
-
-	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
-
-	TP_ARGS(wq_thread, work)
-);
-
-/* Trace the creation of one workqueue thread on a cpu */
-TRACE_EVENT(workqueue_creation,
-
-	TP_PROTO(struct task_struct *wq_thread, int cpu),
-
-	TP_ARGS(wq_thread, cpu),
-
-	TP_STRUCT__entry(
-		__array(char,	thread_comm,	TASK_COMM_LEN)
-		__field(pid_t,	thread_pid)
-		__field(int,	cpu)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
-		__entry->thread_pid	= wq_thread->pid;
-		__entry->cpu		= cpu;
-	),
-
-	TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
-		  __entry->thread_pid, __entry->cpu)
-);
-
-TRACE_EVENT(workqueue_destruction,
-
-	TP_PROTO(struct task_struct *wq_thread),
-
-	TP_ARGS(wq_thread),
-
-	TP_STRUCT__entry(
-		__array(char,	thread_comm,	TASK_COMM_LEN)
-		__field(pid_t,	thread_pid)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
-		__entry->thread_pid	= wq_thread->pid;
-	),
-
-	TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
-);
-
-#endif /* _TRACE_WORKQUEUE_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
