author     Tejun Heo <tj@kernel.org>    2013-01-23 12:32:30 -0500
committer  Tejun Heo <tj@kernel.org>    2013-01-23 12:32:30 -0500
commit     8723d5037cafea09c7242303c6c8e5d7058cec61
tree       198cd51a199501557657dac0f2c46faa6be0a4e4   /kernel/async.c
parent     c14afb82ffff5903a701a9fb737ac20f36d1f755
async: bring sanity to the use of words domain and running
In the beginning, running lists were literal struct list_heads. Later
on, struct async_domain was added. For some reason, while the
conversion substituted list_heads with async_domains, the variable
names weren't fully converted. In most places, "running" remained the
variable name for struct async_domain, while other places adopted the new
"domain" name.
The situation is made much worse by having async_domain's running list
named "domain" and async_entry's field pointing to async_domain named
"running".
So, we end up with a mix of "running" and "domain" for variable names
for async_domain, with the field names of async_domain and async_entry
swapped between "running" and "domain".
It feels almost intentionally made to be as confusing as possible.
Bring some sanity by
* Renaming all async_domain variables "domain".
* s/async_running/async_dfl_domain/
* s/async_domain->domain/async_domain->running/
* s/async_entry->running/async_entry->domain/
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Dan Williams <djbw@fb.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
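To make the renames above concrete, here is an abridged sketch of the post-patch naming, with the old names noted in comments. It is illustrative only: fields not touched by the rename are elided, and struct async_domain itself is defined in include/linux/async.h rather than in this file.

/* Abridged sketch of the naming after this patch (not the full definitions). */
struct async_domain {
	struct list_head node;		/* unchanged: link on the global async_domains list */
	struct list_head running;	/* was "domain": the domain's running list */
	/* ... */
};

struct async_entry {
	/* ... */
	struct async_domain *domain;	/* was "running": the domain this entry belongs to */
};

static ASYNC_DOMAIN(async_dfl_domain);	/* was "async_running": the default domain */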
Diffstat (limited to 'kernel/async.c')
-rw-r--r--   kernel/async.c | 68
1 file changed, 34 insertions(+), 34 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index 6c68fc3fae7b..29d51d483bee 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -64,7 +64,7 @@ static async_cookie_t next_cookie = 1;
 #define MAX_WORK	32768
 
 static LIST_HEAD(async_pending);
-static ASYNC_DOMAIN(async_running);
+static ASYNC_DOMAIN(async_dfl_domain);
 static LIST_HEAD(async_domains);
 static DEFINE_SPINLOCK(async_lock);
 static DEFINE_MUTEX(async_register_mutex);
@@ -75,7 +75,7 @@ struct async_entry {
 	async_cookie_t cookie;
 	async_func_ptr *func;
 	void *data;
-	struct async_domain *running;
+	struct async_domain *domain;
 };
 
 static DECLARE_WAIT_QUEUE_HEAD(async_done);
@@ -86,7 +86,7 @@ static atomic_t entry_count;
 /*
  * MUST be called with the lock held!
  */
-static async_cookie_t __lowest_in_progress(struct async_domain *running)
+static async_cookie_t __lowest_in_progress(struct async_domain *domain)
 {
 	async_cookie_t first_running = next_cookie; /* infinity value */
 	async_cookie_t first_pending = next_cookie; /* ditto */
@@ -96,13 +96,13 @@ static async_cookie_t __lowest_in_progress(struct async_domain *running)
 	 * Both running and pending lists are sorted but not disjoint.
 	 * Take the first cookies from both and return the min.
 	 */
-	if (!list_empty(&running->domain)) {
-		entry = list_first_entry(&running->domain, typeof(*entry), list);
+	if (!list_empty(&domain->running)) {
+		entry = list_first_entry(&domain->running, typeof(*entry), list);
 		first_running = entry->cookie;
 	}
 
 	list_for_each_entry(entry, &async_pending, list) {
-		if (entry->running == running) {
+		if (entry->domain == domain) {
 			first_pending = entry->cookie;
 			break;
 		}
@@ -111,13 +111,13 @@ static async_cookie_t __lowest_in_progress(struct async_domain *running)
 	return min(first_running, first_pending);
 }
 
-static async_cookie_t lowest_in_progress(struct async_domain *running)
+static async_cookie_t lowest_in_progress(struct async_domain *domain)
 {
 	unsigned long flags;
 	async_cookie_t ret;
 
 	spin_lock_irqsave(&async_lock, flags);
-	ret = __lowest_in_progress(running);
+	ret = __lowest_in_progress(domain);
 	spin_unlock_irqrestore(&async_lock, flags);
 	return ret;
 }
@@ -132,11 +132,11 @@ static void async_run_entry_fn(struct work_struct *work)
 	struct async_entry *pos;
 	unsigned long flags;
 	ktime_t uninitialized_var(calltime), delta, rettime;
-	struct async_domain *running = entry->running;
+	struct async_domain *domain = entry->domain;
 
 	/* 1) move self to the running queue, make sure it stays sorted */
 	spin_lock_irqsave(&async_lock, flags);
-	list_for_each_entry_reverse(pos, &running->domain, list)
+	list_for_each_entry_reverse(pos, &domain->running, list)
 		if (entry->cookie < pos->cookie)
 			break;
 	list_move_tail(&entry->list, &pos->list);
@@ -162,8 +162,8 @@ static void async_run_entry_fn(struct work_struct *work)
 	/* 3) remove self from the running queue */
 	spin_lock_irqsave(&async_lock, flags);
 	list_del(&entry->list);
-	if (running->registered && --running->count == 0)
-		list_del_init(&running->node);
+	if (domain->registered && --domain->count == 0)
+		list_del_init(&domain->node);
 
 	/* 4) free the entry */
 	kfree(entry);
@@ -175,7 +175,7 @@ static void async_run_entry_fn(struct work_struct *work)
 	wake_up(&async_done);
 }
 
-static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
+static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *domain)
 {
 	struct async_entry *entry;
 	unsigned long flags;
@@ -201,13 +201,13 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
 	INIT_WORK(&entry->work, async_run_entry_fn);
 	entry->func = ptr;
 	entry->data = data;
-	entry->running = running;
+	entry->domain = domain;
 
 	spin_lock_irqsave(&async_lock, flags);
 	newcookie = entry->cookie = next_cookie++;
 	list_add_tail(&entry->list, &async_pending);
-	if (running->registered && running->count++ == 0)
-		list_add_tail(&running->node, &async_domains);
+	if (domain->registered && domain->count++ == 0)
+		list_add_tail(&domain->node, &async_domains);
 	atomic_inc(&entry_count);
 	spin_unlock_irqrestore(&async_lock, flags);
 
@@ -230,7 +230,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
  */
 async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
 {
-	return __async_schedule(ptr, data, &async_running);
+	return __async_schedule(ptr, data, &async_dfl_domain);
 }
 EXPORT_SYMBOL_GPL(async_schedule);
 
@@ -238,18 +238,18 @@ EXPORT_SYMBOL_GPL(async_schedule);
  * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
  * @ptr: function to execute asynchronously
  * @data: data pointer to pass to the function
- * @running: running list for the domain
+ * @domain: the domain
  *
  * Returns an async_cookie_t that may be used for checkpointing later.
- * @running may be used in the async_synchronize_*_domain() functions
- * to wait within a certain synchronization domain rather than globally.
- * A synchronization domain is specified via the running queue @running to use.
- * Note: This function may be called from atomic or non-atomic contexts.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally. A
+ * synchronization domain is specified via @domain. Note: This function
+ * may be called from atomic or non-atomic contexts.
  */
 async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
-				     struct async_domain *running)
+				     struct async_domain *domain)
 {
-	return __async_schedule(ptr, data, running);
+	return __async_schedule(ptr, data, domain);
 }
 EXPORT_SYMBOL_GPL(async_schedule_domain);
 
@@ -289,7 +289,7 @@ void async_unregister_domain(struct async_domain *domain)
 	mutex_lock(&async_register_mutex);
 	spin_lock_irq(&async_lock);
 	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
-		!list_empty(&domain->domain));
+		!list_empty(&domain->running));
 	domain->registered = 0;
 	spin_unlock_irq(&async_lock);
 	mutex_unlock(&async_register_mutex);
@@ -298,10 +298,10 @@ EXPORT_SYMBOL_GPL(async_unregister_domain);
 
 /**
  * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
- * @domain: running list to synchronize on
+ * @domain: the domain to synchronize
  *
  * This function waits until all asynchronous function calls for the
- * synchronization domain specified by the running list @domain have been done.
+ * synchronization domain specified by @domain have been done.
  */
 void async_synchronize_full_domain(struct async_domain *domain)
 {
@@ -312,17 +312,17 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
 /**
  * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
  * @cookie: async_cookie_t to use as checkpoint
- * @running: running list to synchronize on
+ * @domain: the domain to synchronize
  *
  * This function waits until all asynchronous function calls for the
- * synchronization domain specified by running list @running submitted
- * prior to @cookie have been done.
+ * synchronization domain specified by @domain submitted prior to @cookie
+ * have been done.
  */
-void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
+void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
 {
 	ktime_t uninitialized_var(starttime), delta, endtime;
 
-	if (!running)
+	if (!domain)
 		return;
 
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
@@ -330,7 +330,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain
 		starttime = ktime_get();
 	}
 
-	wait_event(async_done, lowest_in_progress(running) >= cookie);
+	wait_event(async_done, lowest_in_progress(domain) >= cookie);
 
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		endtime = ktime_get();
@@ -352,7 +352,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
  */
 void async_synchronize_cookie(async_cookie_t cookie)
 {
-	async_synchronize_cookie_domain(cookie, &async_running);
+	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie);
 
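For context on the API whose kerneldoc is updated above, the following is a rough usage sketch under the new naming. The scheduling and synchronization calls follow the signatures visible in the diff (async_schedule_domain(), async_synchronize_full_domain(), ASYNC_DOMAIN()); the callback prototype is assumed to match async_func_ptr from include/linux/async.h of this era, and the domain, function, and variable names here are hypothetical, invented purely for illustration.

#include <linux/async.h>

/* A private synchronization domain (hypothetical name); the diff above
 * uses the same ASYNC_DOMAIN() macro for the default async_dfl_domain. */
static ASYNC_DOMAIN(my_probe_domain);

/* Hypothetical async callback: runs from a worker and receives the data
 * pointer plus the cookie it was scheduled with. */
static void probe_one(void *data, async_cookie_t cookie)
{
	/* ... slow, independent per-device work ... */
}

/* Hypothetical caller: schedule work into the private domain, then wait
 * only for that domain rather than for all async work globally. */
static void probe_all(void **devs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		async_schedule_domain(probe_one, devs[i], &my_probe_domain);

	async_synchronize_full_domain(&my_probe_domain);
}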