author		Tejun Heo <tj@kernel.org>	2013-01-23 12:32:30 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-23 12:32:30 -0500
commit		52722794d6a48162fd8906d54618ae60a4abdb21 (patch)
tree		9395231ed86edd40880a3438f77d10af2f6d953a /kernel
parent		c68eee14ec2da345e86f2778c8570759309a4a2e (diff)
async: keep pending tasks on async_domain and remove async_pending
Async kept a single global pending list and per-domain running lists. When an async item is queued, it is put on the global pending list; the item is moved to the per-domain running list when its execution starts.

At this point, this design complicates execution and synchronization without bringing any benefit. The lists only matter for synchronization, which doesn't care whether a given async item is pending or executing. Also, global synchronization is done by iterating through all active registered async_domains, so the global async_pending list doesn't help there either.

Rename async_domain->running to async_domain->pending, put async items directly there, and remove them when execution completes. This simplifies lowest_in_progress() a lot: the first item on the pending list is the one with the lowest cookie, and async_run_entry_fn() no longer has to move the item from pending to running.

After the change, whether a domain is empty can be determined trivially by looking at async_domain->pending. Remove async_domain->count and use list_empty() on pending instead.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Dan Williams <djbw@fb.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
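The change leans on one invariant: __async_schedule() assigns the cookie and appends the entry to domain->pending under the same async_lock, so the per-domain list is always sorted by cookie and its head is the lowest cookie still in progress. A minimal userspace sketch of that invariant (illustrative names, not kernel code; a plain singly-linked list stands in for list_head):

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-ins for struct async_entry / struct async_domain. */
	struct entry {
		unsigned long long cookie;
		struct entry *next;
	};

	struct domain {
		struct entry *head;
		struct entry **tailp;	/* points at the tail's next slot */
	};

	static unsigned long long next_cookie = 1;

	/* Cookie assignment and tail append happen together (the kernel
	 * does both under async_lock), so the list stays cookie-sorted. */
	static void schedule_entry(struct domain *d)
	{
		struct entry *e = calloc(1, sizeof(*e));

		e->cookie = next_cookie++;
		*d->tailp = e;
		d->tailp = &e->next;
	}

	/* Head of a sorted pending list is the lowest in-progress cookie;
	 * ~0ULL stands in for ASYNC_COOKIE_MAX when the domain is idle. */
	static unsigned long long lowest_in_progress(struct domain *d)
	{
		return d->head ? d->head->cookie : ~0ULL;
	}

	int main(void)
	{
		struct domain d = { .head = NULL, .tailp = &d.head };

		schedule_entry(&d);	/* cookie 1 */
		schedule_entry(&d);	/* cookie 2 */
		printf("lowest: %llu\n", lowest_in_progress(&d));	/* 1 */
		return 0;
	}

Entries may finish out of order, but unlinking from the middle of a sorted list keeps it sorted, so the head check above is all lowest_in_progress() needs.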
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/async.c	63
1 file changed, 14 insertions(+), 49 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index a4c1a9e63b2e..7c9f50f436d6 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -64,7 +64,6 @@ static async_cookie_t next_cookie = 1;
 #define MAX_WORK		32768
 #define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */
 
-static LIST_HEAD(async_pending);
 static ASYNC_DOMAIN(async_dfl_domain);
 static LIST_HEAD(async_domains);
 static DEFINE_SPINLOCK(async_lock);
@@ -83,42 +82,17 @@ static DECLARE_WAIT_QUEUE_HEAD(async_done);
 
 static atomic_t entry_count;
 
-
-/*
- * MUST be called with the lock held!
- */
-static async_cookie_t __lowest_in_progress(struct async_domain *domain)
-{
-	async_cookie_t first_running = ASYNC_COOKIE_MAX;
-	async_cookie_t first_pending = ASYNC_COOKIE_MAX;
-	struct async_entry *entry;
-
-	/*
-	 * Both running and pending lists are sorted but not disjoint.
-	 * Take the first cookies from both and return the min.
-	 */
-	if (!list_empty(&domain->running)) {
-		entry = list_first_entry(&domain->running, typeof(*entry), list);
-		first_running = entry->cookie;
-	}
-
-	list_for_each_entry(entry, &async_pending, list) {
-		if (entry->domain == domain) {
-			first_pending = entry->cookie;
-			break;
-		}
-	}
-
-	return min(first_running, first_pending);
-}
-
 static async_cookie_t lowest_in_progress(struct async_domain *domain)
 {
+	async_cookie_t ret = ASYNC_COOKIE_MAX;
 	unsigned long flags;
-	async_cookie_t ret;
 
 	spin_lock_irqsave(&async_lock, flags);
-	ret = __lowest_in_progress(domain);
+	if (!list_empty(&domain->pending)) {
+		struct async_entry *first = list_first_entry(&domain->pending,
+					struct async_entry, list);
+		ret = first->cookie;
+	}
 	spin_unlock_irqrestore(&async_lock, flags);
 	return ret;
 }
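For context, the consumer of this value is not in the diff: async_synchronize_cookie_domain() in the same file sleeps on async_done until every entry older than its cookie has left the pending list, which is what the wake_up(&async_done) at the end of async_run_entry_fn() below pairs with. Abridged from the 3.8-era kernel/async.c (debug timing omitted):

	void async_synchronize_cookie_domain(async_cookie_t cookie,
					     struct async_domain *domain)
	{
		/* sleep until the oldest pending cookie reaches @cookie */
		wait_event(async_done, lowest_in_progress(domain) >= cookie);
	}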
@@ -130,20 +104,11 @@ static void async_run_entry_fn(struct work_struct *work)
 {
 	struct async_entry *entry =
 		container_of(work, struct async_entry, work);
-	struct async_entry *pos;
 	unsigned long flags;
 	ktime_t uninitialized_var(calltime), delta, rettime;
 	struct async_domain *domain = entry->domain;
 
-	/* 1) move self to the running queue, make sure it stays sorted */
-	spin_lock_irqsave(&async_lock, flags);
-	list_for_each_entry_reverse(pos, &domain->running, list)
-		if (entry->cookie < pos->cookie)
-			break;
-	list_move_tail(&entry->list, &pos->list);
-	spin_unlock_irqrestore(&async_lock, flags);
-
-	/* 2) run (and print duration) */
+	/* 1) run (and print duration) */
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
 			(long long)entry->cookie,
@@ -160,19 +125,19 @@ static void async_run_entry_fn(struct work_struct *work)
 			(long long)ktime_to_ns(delta) >> 10);
 	}
 
-	/* 3) remove self from the running queue */
+	/* 2) remove self from the pending queues */
 	spin_lock_irqsave(&async_lock, flags);
 	list_del(&entry->list);
-	if (domain->registered && --domain->count == 0)
+	if (domain->registered && list_empty(&domain->pending))
 		list_del_init(&domain->node);
 
-	/* 4) free the entry */
+	/* 3) free the entry */
 	kfree(entry);
 	atomic_dec(&entry_count);
 
 	spin_unlock_irqrestore(&async_lock, flags);
 
-	/* 5) wake up any waiters */
+	/* 4) wake up any waiters */
 	wake_up(&async_done);
 }
 
@@ -206,9 +171,9 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
 
 	spin_lock_irqsave(&async_lock, flags);
 	newcookie = entry->cookie = next_cookie++;
-	list_add_tail(&entry->list, &async_pending);
-	if (domain->registered && domain->count++ == 0)
+	if (domain->registered && list_empty(&domain->pending))
 		list_add_tail(&domain->node, &async_domains);
+	list_add_tail(&entry->list, &domain->pending);
 	atomic_inc(&entry_count);
 	spin_unlock_irqrestore(&async_lock, flags);
 
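Note the ordering in this hunk: list_empty() is the replacement for the old domain->count++ == 0 test, so it must run before the entry is queued; once list_add_tail() puts the entry on domain->pending, the list can no longer indicate "this is the domain's first entry". The added lines, annotated:

	/* pending is still empty here: register the domain first */
	if (domain->registered && list_empty(&domain->pending))
		list_add_tail(&domain->node, &async_domains);
	/* only now make the pending list non-empty */
	list_add_tail(&entry->list, &domain->pending);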
@@ -290,7 +255,7 @@ void async_unregister_domain(struct async_domain *domain)
 	mutex_lock(&async_register_mutex);
 	spin_lock_irq(&async_lock);
 	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
-		!list_empty(&domain->running));
+		!list_empty(&domain->pending));
 	domain->registered = 0;
 	spin_unlock_irq(&async_lock);
 	mutex_unlock(&async_register_mutex);