Diffstat (limited to 'kernel/async.c')
-rw-r--r--   kernel/async.c   76
1 file changed, 57 insertions, 19 deletions
diff --git a/kernel/async.c b/kernel/async.c
index bd0c168a3bbe..9d3118384858 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -62,8 +62,10 @@ static async_cookie_t next_cookie = 1;
 #define MAX_WORK	32768
 
 static LIST_HEAD(async_pending);
-static LIST_HEAD(async_running);
+static ASYNC_DOMAIN(async_running);
+static LIST_HEAD(async_domains);
 static DEFINE_SPINLOCK(async_lock);
+static DEFINE_MUTEX(async_register_mutex);
 
 struct async_entry {
 	struct list_head list;
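The struct async_domain type and its initializers live in include/linux/async.h, which is not part of this diff. Judging only from how the ->node, ->domain, ->count and ->registered fields are used in the hunks below, the header presumably looks roughly like this sketch (field layout and comments are guesses, not the actual header):

	struct async_domain {
		struct list_head node;		/* entry on the global async_domains list */
		struct list_head domain;	/* this domain's running list */
		int count;			/* entries pending or running in the domain */
		unsigned registered:1;		/* flushed by async_synchronize_full()? */
	};

	/* domain participates in async_synchronize_full() */
	#define ASYNC_DOMAIN(_name) \
		struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node),	\
					      .domain = LIST_HEAD_INIT(_name.domain),	\
					      .count = 0, .registered = 1 }

	/* private domain: never placed on async_domains, so it may safely go
	 * out of scope once its own work has been synchronized */
	#define ASYNC_DOMAIN_EXCLUSIVE(_name) \
		struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node),	\
					      .domain = LIST_HEAD_INIT(_name.domain),	\
					      .count = 0, .registered = 0 }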
@@ -71,7 +73,7 @@ struct async_entry {
 	async_cookie_t cookie;
 	async_func_ptr *func;
 	void *data;
-	struct list_head *running;
+	struct async_domain *running;
 };
 
 static DECLARE_WAIT_QUEUE_HEAD(async_done);
@@ -82,13 +84,12 @@ static atomic_t entry_count;
 /*
  * MUST be called with the lock held!
  */
-static async_cookie_t __lowest_in_progress(struct list_head *running)
+static async_cookie_t __lowest_in_progress(struct async_domain *running)
 {
 	struct async_entry *entry;
 
-	if (!list_empty(running)) {
-		entry = list_first_entry(running,
-			struct async_entry, list);
+	if (!list_empty(&running->domain)) {
+		entry = list_first_entry(&running->domain, typeof(*entry), list);
 		return entry->cookie;
 	}
 
@@ -99,7 +100,7 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
 	return next_cookie;	/* "infinity" value */
 }
 
-static async_cookie_t lowest_in_progress(struct list_head *running)
+static async_cookie_t lowest_in_progress(struct async_domain *running)
 {
 	unsigned long flags;
 	async_cookie_t ret;
@@ -119,10 +120,11 @@ static void async_run_entry_fn(struct work_struct *work)
 		container_of(work, struct async_entry, work);
 	unsigned long flags;
 	ktime_t uninitialized_var(calltime), delta, rettime;
+	struct async_domain *running = entry->running;
 
 	/* 1) move self to the running queue */
 	spin_lock_irqsave(&async_lock, flags);
-	list_move_tail(&entry->list, entry->running);
+	list_move_tail(&entry->list, &running->domain);
 	spin_unlock_irqrestore(&async_lock, flags);
 
 	/* 2) run (and print duration) */
@@ -145,6 +147,8 @@ static void async_run_entry_fn(struct work_struct *work)
 	/* 3) remove self from the running queue */
 	spin_lock_irqsave(&async_lock, flags);
 	list_del(&entry->list);
+	if (running->registered && --running->count == 0)
+		list_del_init(&running->node);
 
 	/* 4) free the entry */
 	kfree(entry);
@@ -156,7 +160,7 @@ static void async_run_entry_fn(struct work_struct *work)
 	wake_up(&async_done);
 }
 
-static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
+static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
 {
 	struct async_entry *entry;
 	unsigned long flags;
@@ -187,6 +191,8 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
 	spin_lock_irqsave(&async_lock, flags);
 	newcookie = entry->cookie = next_cookie++;
 	list_add_tail(&entry->list, &async_pending);
+	if (running->registered && running->count++ == 0)
+		list_add_tail(&running->node, &async_domains);
 	atomic_inc(&entry_count);
 	spin_unlock_irqrestore(&async_lock, flags);
 
@@ -223,7 +229,7 @@ EXPORT_SYMBOL_GPL(async_schedule);
  * Note: This function may be called from atomic or non-atomic contexts.
  */
 async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
-				     struct list_head *running)
+				     struct async_domain *running)
 {
 	return __async_schedule(ptr, data, running);
 }
@@ -236,22 +242,52 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
  */
 void async_synchronize_full(void)
 {
+	mutex_lock(&async_register_mutex);
 	do {
-		async_synchronize_cookie(next_cookie);
-	} while (!list_empty(&async_running) || !list_empty(&async_pending));
+		struct async_domain *domain = NULL;
+
+		spin_lock_irq(&async_lock);
+		if (!list_empty(&async_domains))
+			domain = list_first_entry(&async_domains, typeof(*domain), node);
+		spin_unlock_irq(&async_lock);
+
+		async_synchronize_cookie_domain(next_cookie, domain);
+	} while (!list_empty(&async_domains));
+	mutex_unlock(&async_register_mutex);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
 /**
+ * async_unregister_domain - ensure no more anonymous waiters on this domain
+ * @domain: idle domain to flush out of any async_synchronize_full instances
+ *
+ * async_synchronize_{cookie|full}_domain() are not flushed since callers
+ * of these routines should know the lifetime of @domain
+ *
+ * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
+ */
+void async_unregister_domain(struct async_domain *domain)
+{
+	mutex_lock(&async_register_mutex);
+	spin_lock_irq(&async_lock);
+	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
+		!list_empty(&domain->domain));
+	domain->registered = 0;
+	spin_unlock_irq(&async_lock);
+	mutex_unlock(&async_register_mutex);
+}
+EXPORT_SYMBOL_GPL(async_unregister_domain);
+
+/**
  * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
- * @list: running list to synchronize on
+ * @domain: running list to synchronize on
  *
  * This function waits until all asynchronous function calls for the
- * synchronization domain specified by the running list @list have been done.
+ * synchronization domain specified by the running list @domain have been done.
  */
-void async_synchronize_full_domain(struct list_head *list)
+void async_synchronize_full_domain(struct async_domain *domain)
 {
-	async_synchronize_cookie_domain(next_cookie, list);
+	async_synchronize_cookie_domain(next_cookie, domain);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
 
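As a rough illustration of the lifetime rule the new kerneldoc describes: a domain declared with ASYNC_DOMAIN() is "registered", so async_synchronize_full() in unrelated code may walk it, and it must be unregistered before its storage disappears (for example at module unload). All foo_* names below are invented for the sketch; only the async_* calls come from this file:

	#include <linux/async.h>
	#include <linux/module.h>

	static ASYNC_DOMAIN(foo_domain);	/* registered = 1 */

	static void foo_init_one(void *data, async_cookie_t cookie)
	{
		/* slow, parallelizable initialization */
	}

	static int __init foo_init(void)
	{
		async_schedule_domain(foo_init_one, NULL, &foo_domain);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		/* wait for our own work, then drop out of async_synchronize_full()'s view */
		async_synchronize_full_domain(&foo_domain);
		async_unregister_domain(&foo_domain);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");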
@@ -261,14 +297,16 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
  * @running: running list to synchronize on
  *
  * This function waits until all asynchronous function calls for the
- * synchronization domain specified by the running list @list submitted
+ * synchronization domain specified by running list @running submitted
  * prior to @cookie have been done.
  */
-void async_synchronize_cookie_domain(async_cookie_t cookie,
-				     struct list_head *running)
+void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
 {
 	ktime_t uninitialized_var(starttime), delta, endtime;
 
+	if (!running)
+		return;
+
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
 		starttime = ktime_get();
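The NULL check added above lets async_synchronize_full() hand a NULL domain to async_synchronize_cookie_domain() when the async_domains list is already empty. For code that owns its synchronization domain outright, the kerneldoc for async_unregister_domain() recommends ASYNC_DOMAIN_EXCLUSIVE() instead of flushing; a minimal hypothetical sketch (bar_* names invented here):

	#include <linux/async.h>
	#include <linux/kernel.h>

	static void bar_scan_one(void *data, async_cookie_t cookie)
	{
		int id = *(int *)data;

		pr_debug("scanning target %d\n", id);
	}

	static void bar_scan_all(int *ids, int nr)
	{
		ASYNC_DOMAIN_EXCLUSIVE(scan_domain);	/* registered = 0: never on async_domains */
		int i;

		for (i = 0; i < nr; i++)
			async_schedule_domain(bar_scan_one, &ids[i], &scan_domain);

		/*
		 * Waits only for the cookies queued above.  Because the domain
		 * is never registered, a concurrent async_synchronize_full()
		 * cannot pick it up, so it is safe to let the on-stack domain
		 * go out of scope here.
		 */
		async_synchronize_full_domain(&scan_domain);
	}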
