Diffstat (limited to 'kernel/async.c')
-rw-r--r--  kernel/async.c | 63
1 file changed, 29 insertions(+), 34 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index 7c9f50f436d6..6958000eeb44 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -64,13 +64,13 @@ static async_cookie_t next_cookie = 1;
 #define MAX_WORK		32768
 #define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */
 
+static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
 static ASYNC_DOMAIN(async_dfl_domain);
-static LIST_HEAD(async_domains);
 static DEFINE_SPINLOCK(async_lock);
-static DEFINE_MUTEX(async_register_mutex);
 
 struct async_entry {
-	struct list_head	list;
+	struct list_head	domain_list;
+	struct list_head	global_list;
 	struct work_struct	work;
 	async_cookie_t		cookie;
 	async_func_ptr		*func;
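For orientation, after this hunk each async entry carries two list links: one for its domain's pending list and one for the new global pending list that replaces the old async_domains bookkeeping. The sketch below pieces the struct back together from the hunk; the trailing fields are assumed from the surrounding context, not quoted from the resulting file:

struct async_entry {
	struct list_head	domain_list;	/* linked on domain->pending */
	struct list_head	global_list;	/* linked on async_global_pending
						 * (registered domains only) */
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;		/* assumed field */
	struct async_domain	*domain;	/* assumed field; cf. entry->domain below */
};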
@@ -84,15 +84,25 @@ static atomic_t entry_count;
 
 static async_cookie_t lowest_in_progress(struct async_domain *domain)
 {
+	struct async_entry *first = NULL;
 	async_cookie_t ret = ASYNC_COOKIE_MAX;
 	unsigned long flags;
 
 	spin_lock_irqsave(&async_lock, flags);
-	if (!list_empty(&domain->pending)) {
-		struct async_entry *first = list_first_entry(&domain->pending,
-			struct async_entry, list);
-		ret = first->cookie;
+
+	if (domain) {
+		if (!list_empty(&domain->pending))
+			first = list_first_entry(&domain->pending,
+					struct async_entry, domain_list);
+	} else {
+		if (!list_empty(&async_global_pending))
+			first = list_first_entry(&async_global_pending,
+					struct async_entry, global_list);
 	}
+
+	if (first)
+		ret = first->cookie;
+
 	spin_unlock_irqrestore(&async_lock, flags);
 	return ret;
 }
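Read straight through, the function after this hunk looks roughly as follows (reconstructed from the added and context lines above, not quoted from the resulting file). A NULL domain now means "look at the global pending list", i.e. everything queued on registered domains:

static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		/* oldest pending entry in this specific domain */
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		/* NULL: oldest pending entry across all registered domains */
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}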
@@ -106,7 +116,6 @@ static void async_run_entry_fn(struct work_struct *work)
 		container_of(work, struct async_entry, work);
 	unsigned long flags;
 	ktime_t uninitialized_var(calltime), delta, rettime;
-	struct async_domain *domain = entry->domain;
 
 	/* 1) run (and print duration) */
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
@@ -127,9 +136,8 @@ static void async_run_entry_fn(struct work_struct *work)
 
 	/* 2) remove self from the pending queues */
 	spin_lock_irqsave(&async_lock, flags);
-	list_del(&entry->list);
-	if (domain->registered && list_empty(&domain->pending))
-		list_del_init(&domain->node);
+	list_del_init(&entry->domain_list);
+	list_del_init(&entry->global_list);
 
 	/* 3) free the entry */
 	kfree(entry);
@@ -170,10 +178,14 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
 	entry->domain = domain;
 
 	spin_lock_irqsave(&async_lock, flags);
+
+	/* allocate cookie and queue */
 	newcookie = entry->cookie = next_cookie++;
-	if (domain->registered && list_empty(&domain->pending))
-		list_add_tail(&domain->node, &async_domains);
-	list_add_tail(&entry->list, &domain->pending);
+
+	list_add_tail(&entry->domain_list, &domain->pending);
+	if (domain->registered)
+		list_add_tail(&entry->global_list, &async_global_pending);
+
 	atomic_inc(&entry_count);
 	spin_unlock_irqrestore(&async_lock, flags);
 
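Taken together with the removal hunk in async_run_entry_fn above, queueing and dequeueing are now symmetric: an entry is linked onto its domain's pending list (and, for registered domains, onto async_global_pending) under async_lock, and unlinked from both with list_del_init() once its callback has run. A condensed sketch of the two sides, assembled from the hunks rather than quoted verbatim:

/* __async_schedule(): queue, under async_lock */
newcookie = entry->cookie = next_cookie++;
list_add_tail(&entry->domain_list, &domain->pending);
if (domain->registered)
	list_add_tail(&entry->global_list, &async_global_pending);

/* async_run_entry_fn(): dequeue, under async_lock, after the callback ran */
list_del_init(&entry->domain_list);
list_del_init(&entry->global_list);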
@@ -226,18 +238,7 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
  */
 void async_synchronize_full(void)
 {
-	mutex_lock(&async_register_mutex);
-	do {
-		struct async_domain *domain = NULL;
-
-		spin_lock_irq(&async_lock);
-		if (!list_empty(&async_domains))
-			domain = list_first_entry(&async_domains, typeof(*domain), node);
-		spin_unlock_irq(&async_lock);
-
-		async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
-	} while (!list_empty(&async_domains));
-	mutex_unlock(&async_register_mutex);
+	async_synchronize_full_domain(NULL);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
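With the per-domain bookkeeping gone, full synchronization is just the NULL-domain case of domain synchronization. A minimal usage sketch, assuming async_synchronize_full_domain() still forwards to async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain) as elsewhere in this file, and using a made-up callback name:

/* hypothetical async callback */
static void my_async_work(void *data, async_cookie_t cookie)
{
	/* ... long-running initialization ... */
}

void example(void)
{
	/* queued on async_dfl_domain->pending and on async_global_pending */
	async_schedule(my_async_work, NULL);

	/*
	 * async_synchronize_full()
	 *   -> async_synchronize_full_domain(NULL)
	 *   -> async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, NULL)
	 * which waits until lowest_in_progress(NULL), i.e. the global
	 * pending list, has passed the checkpoint, instead of iterating
	 * the old async_domains list under async_register_mutex.
	 */
	async_synchronize_full();
}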
@@ -252,13 +253,10 @@ EXPORT_SYMBOL_GPL(async_synchronize_full);
  */
 void async_unregister_domain(struct async_domain *domain)
 {
-	mutex_lock(&async_register_mutex);
 	spin_lock_irq(&async_lock);
-	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
-		!list_empty(&domain->pending));
+	WARN_ON(!domain->registered || !list_empty(&domain->pending));
 	domain->registered = 0;
 	spin_unlock_irq(&async_lock);
-	mutex_unlock(&async_register_mutex);
 }
 EXPORT_SYMBOL_GPL(async_unregister_domain);
 
@@ -278,7 +276,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
 /**
  * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
  * @cookie: async_cookie_t to use as checkpoint
- * @domain: the domain to synchronize
+ * @domain: the domain to synchronize (%NULL for all registered domains)
  *
  * This function waits until all asynchronous function calls for the
  * synchronization domain specified by @domain submitted prior to @cookie
@@ -288,9 +286,6 @@ void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain
 {
 	ktime_t uninitialized_var(starttime), delta, endtime;
 
-	if (!domain)
-		return;
-
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
 		starttime = ktime_get();