diff options
Diffstat (limited to 'include/linux/workqueue.h')
| -rw-r--r-- | include/linux/workqueue.h | 145 |
1 files changed, 118 insertions, 27 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 9bca3539a1e5..4a3ea83c6d16 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -11,12 +11,23 @@ | |||
| 11 | 11 | ||
| 12 | struct workqueue_struct; | 12 | struct workqueue_struct; |
| 13 | 13 | ||
| 14 | struct work_struct; | ||
| 15 | typedef void (*work_func_t)(struct work_struct *work); | ||
| 16 | |||
| 14 | struct work_struct { | 17 | struct work_struct { |
| 15 | unsigned long pending; | 18 | /* the first word is the work queue pointer and the flags rolled into |
| 19 | * one */ | ||
| 20 | unsigned long management; | ||
| 21 | #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ | ||
| 22 | #define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */ | ||
| 23 | #define WORK_STRUCT_FLAG_MASK (3UL) | ||
| 24 | #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) | ||
| 16 | struct list_head entry; | 25 | struct list_head entry; |
| 17 | void (*func)(void *); | 26 | work_func_t func; |
| 18 | void *data; | 27 | }; |
| 19 | void *wq_data; | 28 | |
| 29 | struct delayed_work { | ||
| 30 | struct work_struct work; | ||
| 20 | struct timer_list timer; | 31 | struct timer_list timer; |
| 21 | }; | 32 | }; |
| 22 | 33 | ||
| @@ -24,36 +35,117 @@ struct execute_work { | |||
| 24 | struct work_struct work; | 35 | struct work_struct work; |
| 25 | }; | 36 | }; |
| 26 | 37 | ||
| 27 | #define __WORK_INITIALIZER(n, f, d) { \ | 38 | #define __WORK_INITIALIZER(n, f) { \ |
| 39 | .management = 0, \ | ||
| 40 | .entry = { &(n).entry, &(n).entry }, \ | ||
| 41 | .func = (f), \ | ||
| 42 | } | ||
| 43 | |||
| 44 | #define __WORK_INITIALIZER_NAR(n, f) { \ | ||
| 45 | .management = (1 << WORK_STRUCT_NOAUTOREL), \ | ||
| 28 | .entry = { &(n).entry, &(n).entry }, \ | 46 | .entry = { &(n).entry, &(n).entry }, \ |
| 29 | .func = (f), \ | 47 | .func = (f), \ |
| 30 | .data = (d), \ | 48 | } |
| 49 | |||
| 50 | #define __DELAYED_WORK_INITIALIZER(n, f) { \ | ||
| 51 | .work = __WORK_INITIALIZER((n).work, (f)), \ | ||
| 52 | .timer = TIMER_INITIALIZER(NULL, 0, 0), \ | ||
| 53 | } | ||
| 54 | |||
| 55 | #define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \ | ||
| 56 | .work = __WORK_INITIALIZER_NAR((n).work, (f)), \ | ||
| 31 | .timer = TIMER_INITIALIZER(NULL, 0, 0), \ | 57 | .timer = TIMER_INITIALIZER(NULL, 0, 0), \ |
| 32 | } | 58 | } |
| 33 | 59 | ||
| 34 | #define DECLARE_WORK(n, f, d) \ | 60 | #define DECLARE_WORK(n, f) \ |
| 35 | struct work_struct n = __WORK_INITIALIZER(n, f, d) | 61 | struct work_struct n = __WORK_INITIALIZER(n, f) |
| 62 | |||
| 63 | #define DECLARE_WORK_NAR(n, f) \ | ||
| 64 | struct work_struct n = __WORK_INITIALIZER_NAR(n, f) | ||
| 65 | |||
| 66 | #define DECLARE_DELAYED_WORK(n, f) \ | ||
| 67 | struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) | ||
| 68 | |||
| 69 | #define DECLARE_DELAYED_WORK_NAR(n, f) \ | ||
| 69 | 70 | struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f) | ||
| 36 | 71 | ||
| 37 | /* | 72 | /* |
| 38 | * initialize a work-struct's func and data pointers: | 73 | * initialize a work item's function pointer |
| 39 | */ | 74 | */ |
| 40 | #define PREPARE_WORK(_work, _func, _data) \ | 75 | #define PREPARE_WORK(_work, _func) \ |
| 41 | do { \ | 76 | do { \ |
| 42 | (_work)->func = _func; \ | 77 | (_work)->func = (_func); \ |
| 43 | (_work)->data = _data; \ | ||
| 44 | } while (0) | 78 | } while (0) |
| 45 | 79 | ||
| 80 | #define PREPARE_DELAYED_WORK(_work, _func) \ | ||
| 81 | PREPARE_WORK(&(_work)->work, (_func)) | ||
| 82 | |||
| 46 | /* | 83 | /* |
| 47 | * initialize all of a work-struct: | 84 | * initialize all of a work item in one go |
| 48 | */ | 85 | */ |
| 49 | #define INIT_WORK(_work, _func, _data) \ | 86 | #define INIT_WORK(_work, _func) \ |
| 50 | do { \ | 87 | do { \ |
| 88 | (_work)->management = 0; \ | ||
| 51 | INIT_LIST_HEAD(&(_work)->entry); \ | 89 | INIT_LIST_HEAD(&(_work)->entry); \ |
| 52 | (_work)->pending = 0; \ | 90 | PREPARE_WORK((_work), (_func)); \ |
| 53 | PREPARE_WORK((_work), (_func), (_data)); \ | 91 | } while (0) |
| 92 | |||
| 93 | #define INIT_WORK_NAR(_work, _func) \ | ||
| 94 | do { \ | ||
| 95 | (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \ | ||
| 96 | INIT_LIST_HEAD(&(_work)->entry); \ | ||
| 97 | PREPARE_WORK((_work), (_func)); \ | ||
| 98 | } while (0) | ||
| 99 | |||
| 100 | #define INIT_DELAYED_WORK(_work, _func) \ | ||
| 101 | do { \ | ||
| 102 | INIT_WORK(&(_work)->work, (_func)); \ | ||
| 103 | init_timer(&(_work)->timer); \ | ||
| 104 | } while (0) | ||
| 105 | |||
| 106 | #define INIT_DELAYED_WORK_NAR(_work, _func) \ | ||
| 107 | do { \ | ||
| 108 | INIT_WORK_NAR(&(_work)->work, (_func)); \ | ||
| 54 | init_timer(&(_work)->timer); \ | 109 | init_timer(&(_work)->timer); \ |
| 55 | } while (0) | 110 | } while (0) |
| 56 | 111 | ||
| 112 | /** | ||
| 113 | * work_pending - Find out whether a work item is currently pending | ||
| 114 | * @work: The work item in question | ||
| 115 | */ | ||
| 116 | #define work_pending(work) \ | ||
| 117 | test_bit(WORK_STRUCT_PENDING, &(work)->management) | ||
| 118 | |||
| 119 | /** | ||
| 120 | * delayed_work_pending - Find out whether a delayable work item is currently | ||
| 121 | * pending | ||
| 122 | * @work: The work item in question | ||
| 123 | */ | ||
| 124 | #define delayed_work_pending(work) \ | ||
| 125 | test_bit(WORK_STRUCT_PENDING, &(work)->work.management) | ||
| 126 | |||
| 127 | /** | ||
| 128 | * work_release - Release a work item under execution | ||
| 129 | * @work: The work item to release | ||
| 130 | * | ||
| 131 | * This is used to release a work item that has been initialised with automatic | ||
| 132 | * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work | ||
| 133 | * function the opportunity to grab auxiliary data from the container of the | ||
| 134 | * work_struct before clearing the pending bit as the work_struct may be | ||
| 135 | * subject to deallocation the moment the pending bit is cleared. | ||
| 136 | * | ||
| 137 | * In such a case, this should be called in the work function after it has | ||
| 138 | fetched any data it may require from the container of the work_struct. | ||
| 139 | * After this function has been called, the work_struct may be scheduled for | ||
| 140 | * further execution or it may be deallocated unless other precautions are | ||
| 141 | * taken. | ||
| 142 | * | ||
| 143 | * This should also be used to release a delayed work item. | ||
| 144 | */ | ||
| 145 | #define work_release(work) \ | ||
| 146 | clear_bit(WORK_STRUCT_PENDING, &(work)->management) | ||
| 147 | |||
| 148 | |||
| 57 | extern struct workqueue_struct *__create_workqueue(const char *name, | 149 | extern struct workqueue_struct *__create_workqueue(const char *name, |
| 58 | int singlethread); | 150 | int singlethread); |
| 59 | #define create_workqueue(name) __create_workqueue((name), 0) | 151 | #define create_workqueue(name) __create_workqueue((name), 0) |
| @@ -62,39 +154,38 @@ extern struct workqueue_struct *__create_workqueue(const char *name, | |||
| 62 | extern void destroy_workqueue(struct workqueue_struct *wq); | 154 | extern void destroy_workqueue(struct workqueue_struct *wq); |
| 63 | 155 | ||
| 64 | extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); | 156 | extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); |
| 65 | extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay)); | 157 | extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay)); |
| 66 | extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 158 | extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
| 67 | struct work_struct *work, unsigned long delay); | 159 | struct delayed_work *work, unsigned long delay); |
| 68 | extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); | 160 | extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); |
| 69 | 161 | ||
| 70 | extern int FASTCALL(schedule_work(struct work_struct *work)); | 162 | extern int FASTCALL(schedule_work(struct work_struct *work)); |
| 71 | extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay)); | 163 | extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); |
| 72 | 164 | ||
| 73 | extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay); | 165 | extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); |
| 74 | extern int schedule_on_each_cpu(void (*func)(void *info), void *info); | 166 | extern int schedule_on_each_cpu(work_func_t func); |
| 75 | extern void flush_scheduled_work(void); | 167 | extern void flush_scheduled_work(void); |
| 76 | extern int current_is_keventd(void); | 168 | extern int current_is_keventd(void); |
| 77 | extern int keventd_up(void); | 169 | extern int keventd_up(void); |
| 78 | 170 | ||
| 79 | extern void init_workqueues(void); | 171 | extern void init_workqueues(void); |
| 80 | void cancel_rearming_delayed_work(struct work_struct *work); | 172 | void cancel_rearming_delayed_work(struct delayed_work *work); |
| 81 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *, | 173 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *, |
| 82 | struct work_struct *); | 174 | struct delayed_work *); |
| 83 | int execute_in_process_context(void (*fn)(void *), void *, | 175 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
| 84 | struct execute_work *); | ||
| 85 | 176 | ||
| 86 | /* | 177 | /* |
| 87 | * Kill off a pending schedule_delayed_work(). Note that the work callback | 178 | * Kill off a pending schedule_delayed_work(). Note that the work callback |
| 88 | * function may still be running on return from cancel_delayed_work(). Run | 179 | * function may still be running on return from cancel_delayed_work(). Run |
| 89 | * flush_scheduled_work() to wait on it. | 180 | * flush_scheduled_work() to wait on it. |
| 90 | */ | 181 | */ |
| 91 | static inline int cancel_delayed_work(struct work_struct *work) | 182 | static inline int cancel_delayed_work(struct delayed_work *work) |
| 92 | { | 183 | { |
| 93 | int ret; | 184 | int ret; |
| 94 | 185 | ||
| 95 | ret = del_timer_sync(&work->timer); | 186 | ret = del_timer_sync(&work->timer); |
| 96 | if (ret) | 187 | if (ret) |
| 97 | clear_bit(0, &work->pending); | 188 | clear_bit(WORK_STRUCT_PENDING, &work->work.management); |
| 98 | return ret; | 189 | return ret; |
| 99 | } | 190 | } |
| 100 | 191 | ||
