aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/async.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2013-01-22 19:15:15 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-01-22 19:21:24 -0500
commitf56c3196f251012de9b3ebaff55732a9074fdaae (patch)
tree65ce7316a1262f402a14c4299d881907e7d03b05 /kernel/async.c
parented06ef318a7ddde3823966f808f39b515eae1862 (diff)
async: fix __lowest_in_progress()
Commit 083b804c4d3e ("async: use workqueue for worker pool") made it possible that async jobs are moved from pending to running out-of-order. While pending async jobs will be queued and dispatched for execution in the same order, nothing guarantees they'll enter "1) move self to the running queue" of async_run_entry_fn() in the same order. Before the conversion, async implemented its own worker pool. An async worker, upon being woken up, fetches the first item from the pending list, which kept the executing lists sorted. The conversion to workqueue was done by adding work_struct to each async_entry and async just schedules the work item. The queueing and dispatching of such work items are still in order but now each worker thread is associated with a specific async_entry and moves that specific async_entry to the executing list. So, depending on which worker reaches that point earlier, which is non-deterministic, we may end up moving an async_entry with larger cookie before one with smaller one. This broke __lowest_in_progress(). running->domain may not be properly sorted and is not guaranteed to contain lower cookies than pending list when not empty. Fix it by ensuring sort-inserting to the running list and always looking at both pending and running when trying to determine the lowest cookie. Over time, the async synchronization implementation became quite messy. We better restructure it such that each async_entry is linked to two lists - one global and one per domain - and not move it when execution starts. There's no reason to distinguish pending and running. They behave the same for synchronization purposes. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Arjan van de Ven <arjan@linux.intel.com> Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/async.c')
-rw-r--r--kernel/async.c27
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index a1d585c351d6..6f34904a0b53 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -86,18 +86,27 @@ static atomic_t entry_count;
  */
 static async_cookie_t __lowest_in_progress(struct async_domain *running)
 {
+	async_cookie_t first_running = next_cookie;	/* infinity value */
+	async_cookie_t first_pending = next_cookie;	/* ditto */
 	struct async_entry *entry;
 
+	/*
+	 * Both running and pending lists are sorted but not disjoint.
+	 * Take the first cookies from both and return the min.
+	 */
 	if (!list_empty(&running->domain)) {
 		entry = list_first_entry(&running->domain, typeof(*entry), list);
-		return entry->cookie;
+		first_running = entry->cookie;
 	}
 
-	list_for_each_entry(entry, &async_pending, list)
-		if (entry->running == running)
-			return entry->cookie;
+	list_for_each_entry(entry, &async_pending, list) {
+		if (entry->running == running) {
+			first_pending = entry->cookie;
+			break;
+		}
+	}
 
-	return next_cookie;	/* "infinity" value */
+	return min(first_running, first_pending);
 }
 
 static async_cookie_t lowest_in_progress(struct async_domain *running)
@@ -118,13 +127,17 @@ static void async_run_entry_fn(struct work_struct *work)
 {
 	struct async_entry *entry =
 		container_of(work, struct async_entry, work);
+	struct async_entry *pos;
 	unsigned long flags;
 	ktime_t uninitialized_var(calltime), delta, rettime;
 	struct async_domain *running = entry->running;
 
-	/* 1) move self to the running queue */
+	/* 1) move self to the running queue, make sure it stays sorted */
 	spin_lock_irqsave(&async_lock, flags);
-	list_move_tail(&entry->list, &running->domain);
+	list_for_each_entry_reverse(pos, &running->domain, list)
+		if (entry->cookie < pos->cookie)
+			break;
+	list_move_tail(&entry->list, &pos->list);
 	spin_unlock_irqrestore(&async_lock, flags);
 
 	/* 2) run (and print duration) */