author     Tejun Heo <tj@kernel.org>  2013-01-23 12:31:01 -0500
committer  Tejun Heo <tj@kernel.org>  2013-01-23 12:31:01 -0500
commit     c14afb82ffff5903a701a9fb737ac20f36d1f755 (patch)
tree       304dcc7b1d7b9a5f564f7e978228e61ef41fbef2 /kernel/async.c
parent     0fdff3ec6d87856cdcc99e69cf42143fdd6c56b4 (diff)
parent     1d8549085377674224bf30a368284c391a3ce40e (diff)
Merge branch 'master' into for-3.9-async
To receive f56c3196f251012de9b3ebaff55732a9074fdaae ("async: fix __lowest_in_progress()").

Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/async.c')
-rw-r--r--  kernel/async.c  30
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index d9bf2a9b5cee..6c68fc3fae7b 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -88,18 +88,27 @@ static atomic_t entry_count;
  */
 static async_cookie_t __lowest_in_progress(struct async_domain *running)
 {
+        async_cookie_t first_running = next_cookie;     /* infinity value */
+        async_cookie_t first_pending = next_cookie;     /* ditto */
         struct async_entry *entry;
 
+        /*
+         * Both running and pending lists are sorted but not disjoint.
+         * Take the first cookies from both and return the min.
+         */
         if (!list_empty(&running->domain)) {
                 entry = list_first_entry(&running->domain, typeof(*entry), list);
-                return entry->cookie;
+                first_running = entry->cookie;
         }
 
-        list_for_each_entry(entry, &async_pending, list)
-                if (entry->running == running)
-                        return entry->cookie;
+        list_for_each_entry(entry, &async_pending, list) {
+                if (entry->running == running) {
+                        first_pending = entry->cookie;
+                        break;
+                }
+        }
 
-        return next_cookie;     /* "infinity" value */
+        return min(first_running, first_pending);
 }
 
 static async_cookie_t lowest_in_progress(struct async_domain *running)
@@ -120,13 +129,17 @@ static void async_run_entry_fn(struct work_struct *work)
 {
         struct async_entry *entry =
                 container_of(work, struct async_entry, work);
+        struct async_entry *pos;
         unsigned long flags;
         ktime_t uninitialized_var(calltime), delta, rettime;
         struct async_domain *running = entry->running;
 
-        /* 1) move self to the running queue */
+        /* 1) move self to the running queue, make sure it stays sorted */
         spin_lock_irqsave(&async_lock, flags);
-        list_move_tail(&entry->list, &running->domain);
+        list_for_each_entry_reverse(pos, &running->domain, list)
+                if (entry->cookie < pos->cookie)
+                        break;
+        list_move_tail(&entry->list, &pos->list);
         spin_unlock_irqrestore(&async_lock, flags);
 
         /* 2) run (and print duration) */
@@ -198,6 +211,9 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
         atomic_inc(&entry_count);
         spin_unlock_irqrestore(&async_lock, flags);
 
+        /* mark that this task has queued an async job, used by module init */
+        current->flags |= PF_USED_ASYNC;
+
         /* schedule for execution */
         queue_work(system_unbound_wq, &entry->work);
 
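For readers following the change without the full tree, the idea behind the first two hunks can be modeled in plain userspace C: keep both the pending and running sets sorted by cookie, report the lowest cookie still in progress as the minimum of the two heads, and insert a newly started entry at its sorted position rather than blindly appending it. This is only a sketch of the logic, with the kernel's struct async_entry lists replaced by sorted arrays and next_cookie replaced by a fixed sentinel; it is not the kernel implementation.

/*
 * Userspace model of the fix: both sets are kept sorted by cookie,
 * so the lowest cookie still in progress is min(head of running,
 * head of pending), and a newly started entry must be inserted into
 * the running set at its sorted position.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t async_cookie_t;

#define COOKIE_INFINITY UINT64_MAX      /* stand-in for next_cookie */

/* models the fixed __lowest_in_progress(): min of the two sorted heads */
static async_cookie_t lowest_in_progress(const async_cookie_t *running, int nr_running,
                                         const async_cookie_t *pending, int nr_pending)
{
        async_cookie_t first_running = COOKIE_INFINITY;  /* infinity value */
        async_cookie_t first_pending = COOKIE_INFINITY;  /* ditto */

        if (nr_running)
                first_running = running[0];
        if (nr_pending)
                first_pending = pending[0];

        return first_running < first_pending ? first_running : first_pending;
}

/* models the second hunk: insert a cookie so the running set stays sorted */
static void insert_sorted(async_cookie_t *arr, int *nr, async_cookie_t cookie)
{
        int i = *nr;

        while (i > 0 && arr[i - 1] > cookie) {
                arr[i] = arr[i - 1];
                i--;
        }
        arr[i] = cookie;
        (*nr)++;
}

int main(void)
{
        async_cookie_t running[8] = { 4 };      /* cookies already executing */
        async_cookie_t pending[8] = { 2, 9 };   /* cookies still queued */
        int nr_running = 1, nr_pending = 2;

        /*
         * Before the fix the kernel returned the head of the running list
         * (4) and ignored the lower pending cookie (2); with the fix the
         * answer is min(4, 2) == 2.
         */
        printf("lowest in progress: %llu\n",
               (unsigned long long)lowest_in_progress(running, nr_running,
                                                      pending, nr_pending));

        /* cookie 2 starts executing: insert it at its sorted position */
        insert_sorted(running, &nr_running, 2); /* running = { 2, 4 } */
        return 0;
}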