author		Dan Williams <dan.j.williams@intel.com>	2012-07-09 22:33:30 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2012-07-20 04:07:37 -0400
commit		a4683487f90bfe3049686fc5c566bdc1ad03ace6 (patch)
tree		b5aeadd3155256378804c04571febeee9f77c5fb /kernel/async.c
parent		2955b47d2c1983998a8c5915cb96884e67f7cb53 (diff)
[SCSI] async: make async_synchronize_full() flush all work regardless of domain
In response to an async related regression James noted:

  "My theory is that this is an init problem: The assumption in a lot of
   our code is that async_synchronize_full() waits for everything ... even
   the domain specific async schedules, which isn't true."

...so make this assumption true.

Each domain, including the default one, registers itself on a global domain
list when work is scheduled.  Once all entries complete it exits that list.
Waiting for the list to be empty syncs all in-flight work across all
domains.

Domains can opt-out of global syncing if they are declared as exclusive
ASYNC_DOMAIN_EXCLUSIVE().  All stack-based domains have been declared
exclusive since the domain may go out of scope as soon as the last work
item completes.

Statically declared domains are mostly ok, but async_unregister_domain()
is there to close any theoretical races with pending
async_synchronize_full waiters at module removal time.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Reported-by: Meelis Roos <mroos@linux.ee>
Reported-by: Eldad Zack <eldadzack@gmail.com>
Tested-by: Eldad Zack <eldad@fogrefinery.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
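As a usage sketch of the two declaration styles the message contrasts (not
part of this patch; the foo_* names are hypothetical):

	#include <linux/async.h>
	#include <linux/init.h>

	/* A registered (default) domain: once work is scheduled here it
	 * joins the global domain list, so async_synchronize_full() will
	 * now wait for it too. */
	static ASYNC_DOMAIN(foo_domain);

	static void foo_probe_one(void *data, async_cookie_t cookie)
	{
		/* slow, parallelizable per-device probe work */
	}

	static int __init foo_init(void)
	{
		async_schedule_domain(foo_probe_one, NULL, &foo_domain);
		return 0;
	}

	/* A stack-based domain must be exclusive: it opts out of the
	 * global list because the structure goes out of scope as soon as
	 * the last work item completes, so only an explicit domain-aware
	 * flush may wait on it. */
	static void foo_run_batch(void)
	{
		ASYNC_DOMAIN_EXCLUSIVE(batch);

		async_schedule_domain(foo_probe_one, NULL, &batch);
		async_synchronize_full_domain(&batch);
	}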
Diffstat (limited to 'kernel/async.c')
-rw-r--r--	kernel/async.c	43
1 file changed, 41 insertions(+), 2 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index ba5491dfa991..9d3118384858 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -63,7 +63,9 @@ static async_cookie_t next_cookie = 1;
 
 static LIST_HEAD(async_pending);
 static ASYNC_DOMAIN(async_running);
+static LIST_HEAD(async_domains);
 static DEFINE_SPINLOCK(async_lock);
+static DEFINE_MUTEX(async_register_mutex);
 
 struct async_entry {
 	struct list_head list;
@@ -145,6 +147,8 @@ static void async_run_entry_fn(struct work_struct *work)
 	/* 3) remove self from the running queue */
 	spin_lock_irqsave(&async_lock, flags);
 	list_del(&entry->list);
+	if (running->registered && --running->count == 0)
+		list_del_init(&running->node);
 
 	/* 4) free the entry */
 	kfree(entry);
@@ -187,6 +191,8 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
 	spin_lock_irqsave(&async_lock, flags);
 	newcookie = entry->cookie = next_cookie++;
 	list_add_tail(&entry->list, &async_pending);
+	if (running->registered && running->count++ == 0)
+		list_add_tail(&running->node, &async_domains);
 	atomic_inc(&entry_count);
 	spin_unlock_irqrestore(&async_lock, flags);
 
@@ -236,13 +242,43 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
  */
 void async_synchronize_full(void)
 {
+	mutex_lock(&async_register_mutex);
 	do {
-		async_synchronize_cookie(next_cookie);
-	} while (!list_empty(&async_running.domain) || !list_empty(&async_pending));
+		struct async_domain *domain = NULL;
+
+		spin_lock_irq(&async_lock);
+		if (!list_empty(&async_domains))
+			domain = list_first_entry(&async_domains, typeof(*domain), node);
+		spin_unlock_irq(&async_lock);
+
+		async_synchronize_cookie_domain(next_cookie, domain);
+	} while (!list_empty(&async_domains));
+	mutex_unlock(&async_register_mutex);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
 /**
+ * async_unregister_domain - ensure no more anonymous waiters on this domain
+ * @domain: idle domain to flush out of any async_synchronize_full instances
+ *
+ * async_synchronize_{cookie|full}_domain() are not flushed since callers
+ * of these routines should know the lifetime of @domain
+ *
+ * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
+ */
+void async_unregister_domain(struct async_domain *domain)
+{
+	mutex_lock(&async_register_mutex);
+	spin_lock_irq(&async_lock);
+	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
+		!list_empty(&domain->domain));
+	domain->registered = 0;
+	spin_unlock_irq(&async_lock);
+	mutex_unlock(&async_register_mutex);
+}
+EXPORT_SYMBOL_GPL(async_unregister_domain);
+
+/**
  * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
  * @domain: running list to synchronize on
  *
@@ -268,6 +304,9 @@ void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain
 {
 	ktime_t uninitialized_var(starttime), delta, endtime;
 
+	if (!running)
+		return;
+
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
 		starttime = ktime_get();
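At module removal, the new async_unregister_domain() closes the race the
commit message describes: a pending async_synchronize_full() waiter could
otherwise still walk a statically declared domain while its module unloads.
A hedged sketch, continuing the hypothetical foo_domain from the earlier
example:

	#include <linux/async.h>
	#include <linux/module.h>

	static ASYNC_DOMAIN(foo_domain);	/* as in the sketch above */

	static void __exit foo_exit(void)
	{
		/* Drain our own work first; async_unregister_domain()
		 * expects an idle domain (note the WARN_ON above). */
		async_synchronize_full_domain(&foo_domain);

		/* Drop off the global domain list so no pending
		 * async_synchronize_full() waiter can reference
		 * &foo_domain after the module goes away. */
		async_unregister_domain(&foo_domain);
	}
	module_exit(foo_exit);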