author	Dan Williams <dan.j.williams@intel.com>	2012-07-09 22:33:25 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2012-07-20 04:05:54 -0400
commit	2955b47d2c1983998a8c5915cb96884e67f7cb53 (patch)
tree	e21ace685c01c698f20b7cb81b0097519c2fa18c	/kernel/async.c
parent	529f9a765509c2c141ecfee0c54e17bf9a6b8bc1 (diff)
[SCSI] async: introduce 'async_domain' type
This is in preparation for teaching async_synchronize_full() to sync all
pending async work, and not just the async_running domain. This conversion
is functionally equivalent, just embedding the existing list in a new
async_domain type.

The .registered attribute is used in a later patch to distinguish between
domains that want to be flushed by async_synchronize_full() versus those
that only expect async_synchronize_{full|cookie}_domain to be used for
flushing.

[jejb: add async.h to scsi_priv.h for struct async_domain]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Tested-by: Eldad Zack <eldad@fogrefinery.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
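For orientation, a minimal sketch of what the new type and its static initializer plausibly look like, reconstructed only from the usage visible in this patch (the ->domain list and the .registered bit mentioned above); the authoritative definition lives in include/linux/async.h, which is outside this diffstat and may carry additional bookkeeping fields:

#include <linux/list.h>		/* struct list_head, LIST_HEAD_INIT */

/* Illustrative sketch, not copied from the header. */
struct async_domain {
	struct list_head domain;	/* the running list this type now wraps */
	unsigned registered:1;		/* flushed by async_synchronize_full()? (used in a later patch) */
};

/* Declare a statically initialized domain, as async_running is below. */
#define ASYNC_DOMAIN(_name)						\
	struct async_domain _name = {					\
		.domain = LIST_HEAD_INIT(_name.domain),			\
		.registered = 1						\
	}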
Diffstat (limited to 'kernel/async.c')
-rw-r--r--	kernel/async.c	35
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index bd0c168a3bbe..ba5491dfa991 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -62,7 +62,7 @@ static async_cookie_t next_cookie = 1;
 #define MAX_WORK	32768
 
 static LIST_HEAD(async_pending);
-static LIST_HEAD(async_running);
+static ASYNC_DOMAIN(async_running);
 static DEFINE_SPINLOCK(async_lock);
 
 struct async_entry {
@@ -71,7 +71,7 @@ struct async_entry {
 	async_cookie_t		cookie;
 	async_func_ptr		*func;
 	void			*data;
-	struct list_head	*running;
+	struct async_domain	*running;
 };
 
 static DECLARE_WAIT_QUEUE_HEAD(async_done);
@@ -82,13 +82,12 @@ static atomic_t entry_count;
 /*
  * MUST be called with the lock held!
  */
-static async_cookie_t __lowest_in_progress(struct list_head *running)
+static async_cookie_t __lowest_in_progress(struct async_domain *running)
 {
 	struct async_entry *entry;
 
-	if (!list_empty(running)) {
-		entry = list_first_entry(running,
-			struct async_entry, list);
+	if (!list_empty(&running->domain)) {
+		entry = list_first_entry(&running->domain, typeof(*entry), list);
 		return entry->cookie;
 	}
 
@@ -99,7 +98,7 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
 	return next_cookie;	/* "infinity" value */
 }
 
-static async_cookie_t lowest_in_progress(struct list_head *running)
+static async_cookie_t lowest_in_progress(struct async_domain *running)
 {
 	unsigned long flags;
 	async_cookie_t ret;
@@ -119,10 +118,11 @@ static void async_run_entry_fn(struct work_struct *work)
 		container_of(work, struct async_entry, work);
 	unsigned long flags;
 	ktime_t uninitialized_var(calltime), delta, rettime;
+	struct async_domain *running = entry->running;
 
 	/* 1) move self to the running queue */
 	spin_lock_irqsave(&async_lock, flags);
-	list_move_tail(&entry->list, entry->running);
+	list_move_tail(&entry->list, &running->domain);
 	spin_unlock_irqrestore(&async_lock, flags);
 
 	/* 2) run (and print duration) */
@@ -156,7 +156,7 @@ static void async_run_entry_fn(struct work_struct *work)
 	wake_up(&async_done);
 }
 
-static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
+static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
 {
 	struct async_entry *entry;
 	unsigned long flags;
@@ -223,7 +223,7 @@ EXPORT_SYMBOL_GPL(async_schedule);
  * Note: This function may be called from atomic or non-atomic contexts.
  */
 async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
-				     struct list_head *running)
+				     struct async_domain *running)
 {
 	return __async_schedule(ptr, data, running);
 }
@@ -238,20 +238,20 @@ void async_synchronize_full(void)
 {
 	do {
 		async_synchronize_cookie(next_cookie);
-	} while (!list_empty(&async_running) || !list_empty(&async_pending));
+	} while (!list_empty(&async_running.domain) || !list_empty(&async_pending));
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
 /**
  * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
- * @list: running list to synchronize on
+ * @domain: running list to synchronize on
 *
  * This function waits until all asynchronous function calls for the
- * synchronization domain specified by the running list @list have been done.
+ * synchronization domain specified by the running list @domain have been done.
  */
-void async_synchronize_full_domain(struct list_head *list)
+void async_synchronize_full_domain(struct async_domain *domain)
 {
-	async_synchronize_cookie_domain(next_cookie, list);
+	async_synchronize_cookie_domain(next_cookie, domain);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
 
@@ -261,11 +261,10 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
  * @running: running list to synchronize on
  *
  * This function waits until all asynchronous function calls for the
- * synchronization domain specified by the running list @list submitted
+ * synchronization domain specified by running list @running submitted
  * prior to @cookie have been done.
  */
-void async_synchronize_cookie_domain(async_cookie_t cookie,
-				     struct list_head *running)
+void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
 {
 	ktime_t uninitialized_var(starttime), delta, endtime;
 
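After this conversion, a caller that previously declared its private running list with LIST_HEAD() declares an ASYNC_DOMAIN() instead and passes it to the *_domain helpers. A hypothetical usage sketch follows; the my_probe_* names and the caller structure are illustrative only and not taken from this patch:

#include <linux/async.h>

/* Hypothetical private domain: was "static LIST_HEAD(my_probe_domain);". */
static ASYNC_DOMAIN(my_probe_domain);

static void my_probe_one(void *data, async_cookie_t cookie)
{
	/* slow, independent initialization work for one device */
}

static void my_probe_all(void **devs, int n)
{
	int i;

	/* queue each probe on the private domain */
	for (i = 0; i < n; i++)
		async_schedule_domain(my_probe_one, devs[i], &my_probe_domain);

	/* wait only for this domain, not for unrelated async work */
	async_synchronize_full_domain(&my_probe_domain);
}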