author		Chris Mason <chris.mason@oracle.com>	2009-08-04 16:56:34 -0400
committer	Chris Mason <chris.mason@oracle.com>	2009-09-11 13:30:56 -0400
commit		9042846bc7ae69cc3288d85af6bad16208d93a95
tree		d46077d217d31ce5600ef870c92fcdf330922b41 /fs/btrfs/async-thread.c
parent		ceab36edd3d3ad3ffd01d41d6d1e05ac1ff8357e
Btrfs: Allow worker threads to exit when idle
The Btrfs worker threads don't currently die off after they have been idle for a while, leading to a lot of threads sitting around doing nothing for each mount. Also, they are unable to start atomically (from end_io handlers).

This commit reworks the worker threads so they can be started from end_io handlers (just setting a flag that asks for a thread to be added at a later date) and so they can exit if they have been idle for a long time.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
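The trick that makes atomic starts possible is small enough to show in isolation: end_io handlers run in atomic context, where kthread_run() cannot be called because it may sleep, so the request is reduced to setting a flag behind a memory barrier, and a worker already running in process context notices the flag on its next pass and spawns the thread. Below is a minimal, self-contained sketch of that pattern; every my_* identifier is hypothetical scaffolding for illustration, not part of the btrfs code in this patch.

/* Sketch only: my_pool, my_request_worker, my_worker_fn and
 * my_check_pending_creates are hypothetical, not btrfs symbols.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_pool {
	spinlock_t lock;
	int start_pending;	/* request flag, set from atomic context */
	int num_workers;
	int max_workers;
};

static void my_check_pending_creates(struct my_pool *pool);

/* Callable from an end_io handler: no allocation, no sleeping,
 * just raise the flag and make the store visible.
 */
static void my_request_worker(struct my_pool *pool)
{
	pool->start_pending = 1;
	smp_wmb();	/* pairs with smp_rmb() in the checker */
}

static int my_worker_fn(void *arg)
{
	struct my_pool *pool = arg;

	while (!kthread_should_stop()) {
		/* real work items would be consumed here */
		my_check_pending_creates(pool);
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

/* Runs in process context, where kthread_run() is allowed to
 * sleep; recheck under the lock so two workers that both saw
 * the flag don't start two threads for one request.
 */
static void my_check_pending_creates(struct my_pool *pool)
{
	struct task_struct *task;
	unsigned long flags;

	smp_rmb();
	if (!pool->start_pending)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool->start_pending ||
	    pool->num_workers >= pool->max_workers) {
		pool->start_pending = 0;
		spin_unlock_irqrestore(&pool->lock, flags);
		return;
	}
	pool->start_pending = 0;
	pool->num_workers++;	/* claim the slot before unlocking */
	spin_unlock_irqrestore(&pool->lock, flags);

	task = kthread_run(my_worker_fn, pool, "my-worker");
	if (IS_ERR(task)) {
		spin_lock_irqsave(&pool->lock, flags);
		pool->num_workers--;	/* undo the claimed slot */
		spin_unlock_irqrestore(&pool->lock, flags);
	}
}

The double check in the patch's check_pending_worker_creates() serves the same purpose: the cheap barrier-protected read filters the common case, and the locked recheck makes the start decision race-free.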
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--	fs/btrfs/async-thread.c	133
1 file changed, 111 insertions(+), 22 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 019e8af449ab..f10c895224ae 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -48,6 +48,9 @@ struct btrfs_worker_thread {
 	/* number of things on the pending list */
 	atomic_t num_pending;
 
+	/* reference counter for this struct */
+	atomic_t refs;
+
 	unsigned long sequence;
 
 	/* protects the pending list. */
@@ -93,6 +96,31 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 	}
 }
 
+static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
+{
+	struct btrfs_workers *workers = worker->workers;
+	unsigned long flags;
+
+	rmb();
+	if (!workers->atomic_start_pending)
+		return;
+
+	spin_lock_irqsave(&workers->lock, flags);
+	if (!workers->atomic_start_pending)
+		goto out;
+
+	workers->atomic_start_pending = 0;
+	if (workers->num_workers >= workers->max_workers)
+		goto out;
+
+	spin_unlock_irqrestore(&workers->lock, flags);
+	btrfs_start_workers(workers, 1);
+	return;
+
+out:
+	spin_unlock_irqrestore(&workers->lock, flags);
+}
+
 static noinline int run_ordered_completions(struct btrfs_workers *workers,
 					    struct btrfs_work *work)
 {
@@ -140,6 +168,36 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
 	return 0;
 }
 
+static void put_worker(struct btrfs_worker_thread *worker)
+{
+	if (atomic_dec_and_test(&worker->refs))
+		kfree(worker);
+}
+
+static int try_worker_shutdown(struct btrfs_worker_thread *worker)
+{
+	int freeit = 0;
+
+	spin_lock_irq(&worker->lock);
+	spin_lock_irq(&worker->workers->lock);
+	if (worker->workers->num_workers > 1 &&
+	    worker->idle &&
+	    !worker->working &&
+	    !list_empty(&worker->worker_list) &&
+	    list_empty(&worker->prio_pending) &&
+	    list_empty(&worker->pending)) {
+		freeit = 1;
+		list_del_init(&worker->worker_list);
+		worker->workers->num_workers--;
+	}
+	spin_unlock_irq(&worker->workers->lock);
+	spin_unlock_irq(&worker->lock);
+
+	if (freeit)
+		put_worker(worker);
+	return freeit;
+}
+
 /*
  * main loop for servicing work items
  */
@@ -175,6 +233,8 @@ again_locked:
 			 */
 			run_ordered_completions(worker->workers, work);
 
+			check_pending_worker_creates(worker);
+
 			spin_lock_irq(&worker->lock);
 			check_idle_worker(worker);
 		}
@@ -226,8 +286,13 @@ again_locked:
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
 
-			if (!kthread_should_stop())
-				schedule();
+			if (!kthread_should_stop()) {
+				schedule_timeout(HZ * 120);
+				if (!worker->working &&
+				    try_worker_shutdown(worker)) {
+					return 0;
+				}
+			}
 		}
 		__set_current_state(TASK_RUNNING);
 	}
@@ -242,16 +307,30 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
 {
 	struct list_head *cur;
 	struct btrfs_worker_thread *worker;
+	int can_stop;
 
+	spin_lock_irq(&workers->lock);
 	list_splice_init(&workers->idle_list, &workers->worker_list);
 	while (!list_empty(&workers->worker_list)) {
 		cur = workers->worker_list.next;
 		worker = list_entry(cur, struct btrfs_worker_thread,
 				    worker_list);
-		kthread_stop(worker->task);
-		list_del(&worker->worker_list);
-		kfree(worker);
+
+		atomic_inc(&worker->refs);
+		workers->num_workers -= 1;
+		if (!list_empty(&worker->worker_list)) {
+			list_del_init(&worker->worker_list);
+			put_worker(worker);
+			can_stop = 1;
+		} else
+			can_stop = 0;
+		spin_unlock_irq(&workers->lock);
+		if (can_stop)
+			kthread_stop(worker->task);
+		spin_lock_irq(&workers->lock);
+		put_worker(worker);
 	}
+	spin_unlock_irq(&workers->lock);
 	return 0;
 }
 
@@ -270,6 +349,8 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
 	workers->idle_thresh = 32;
 	workers->name = name;
 	workers->ordered = 0;
+	workers->atomic_start_pending = 0;
+	workers->atomic_worker_start = 0;
 }
 
 /*
@@ -294,6 +375,7 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 		INIT_LIST_HEAD(&worker->worker_list);
 		spin_lock_init(&worker->lock);
 		atomic_set(&worker->num_pending, 0);
+		atomic_set(&worker->refs, 1);
 		worker->workers = workers;
 		worker->task = kthread_run(worker_loop, worker,
 					   "btrfs-%s-%d", workers->name,
@@ -303,7 +385,6 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 			kfree(worker);
 			goto fail;
 		}
-
 		spin_lock_irq(&workers->lock);
 		list_add_tail(&worker->worker_list, &workers->idle_list);
 		worker->idle = 1;
@@ -367,6 +448,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
+	struct list_head *fallback;
 
 again:
 	spin_lock_irqsave(&workers->lock, flags);
@@ -376,19 +458,10 @@ again:
 	if (!worker) {
 		spin_lock_irqsave(&workers->lock, flags);
 		if (workers->num_workers >= workers->max_workers) {
-			struct list_head *fallback = NULL;
-			/*
-			 * we have failed to find any workers, just
-			 * return the force one
-			 */
-			if (!list_empty(&workers->worker_list))
-				fallback = workers->worker_list.next;
-			if (!list_empty(&workers->idle_list))
-				fallback = workers->idle_list.next;
-			BUG_ON(!fallback);
-			worker = list_entry(fallback,
-				  struct btrfs_worker_thread, worker_list);
-			spin_unlock_irqrestore(&workers->lock, flags);
+			goto fallback;
+		} else if (workers->atomic_worker_start) {
+			workers->atomic_start_pending = 1;
+			goto fallback;
 		} else {
 			spin_unlock_irqrestore(&workers->lock, flags);
 			/* we're below the limit, start another worker */
@@ -397,6 +470,22 @@ again:
 		}
 	}
 	return worker;
+
+fallback:
+	fallback = NULL;
+	/*
+	 * we have failed to find any workers, just
+	 * return the first one we can find.
+	 */
+	if (!list_empty(&workers->worker_list))
+		fallback = workers->worker_list.next;
+	if (!list_empty(&workers->idle_list))
+		fallback = workers->idle_list.next;
+	BUG_ON(!fallback);
+	worker = list_entry(fallback,
+		  struct btrfs_worker_thread, worker_list);
+	spin_unlock_irqrestore(&workers->lock, flags);
+	return worker;
 }
 
 /*
@@ -435,9 +524,9 @@ int btrfs_requeue_work(struct btrfs_work *work)
 		worker->working = 1;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
 	if (wake)
 		wake_up_process(worker->task);
+	spin_unlock_irqrestore(&worker->lock, flags);
 out:
 
 	return 0;
@@ -492,10 +581,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 		wake = 1;
 	worker->working = 1;
 
-	spin_unlock_irqrestore(&worker->lock, flags);
-
 	if (wake)
 		wake_up_process(worker->task);
+	spin_unlock_irqrestore(&worker->lock, flags);
+
 out:
 	return 0;
 }
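
A note on the teardown half of the change: once a worker may free itself, an idle worker deciding to exit can race with btrfs_stop_workers() walking the list, so neither side may kfree() unconditionally; that is what the new refs counter is for. The sketch below isolates the idiom under the same hedged assumptions as above (the my_* names are hypothetical, not btrfs symbols): the pool's list holds one reference, the teardown path pins the worker with an extra reference before dropping the lock, and whichever side performs the final atomic_dec_and_test() does the kfree().

#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_worker {
	atomic_t refs;		/* list membership holds one reference */
	struct list_head list;	/* non-empty while owned by the pool */
	struct task_struct *task;
};

static void my_put_worker(struct my_worker *w)
{
	/* whoever drops the last reference frees the struct */
	if (atomic_dec_and_test(&w->refs))
		kfree(w);
}

/* Worker side: after idling out, try to leave the pool.  Unlinking
 * w->list under the pool lock is the decision point; only the side
 * that actually unlinks drops the list's reference.
 */
static int my_try_exit(struct my_worker *w, spinlock_t *pool_lock)
{
	int unlinked = 0;

	spin_lock_irq(pool_lock);
	if (!list_empty(&w->list)) {
		list_del_init(&w->list);
		unlinked = 1;
	}
	spin_unlock_irq(pool_lock);

	if (unlinked)
		my_put_worker(w);	/* the list's reference */
	return unlinked;		/* caller returns from its loop */
}

/* Teardown side, mirroring btrfs_stop_workers(): caller holds
 * pool_lock on entry and again on return.
 */
static void my_stop_one(struct my_worker *w, spinlock_t *pool_lock)
{
	int we_unlinked = 0;

	atomic_inc(&w->refs);		/* pin w across the unlock */
	if (!list_empty(&w->list)) {
		list_del_init(&w->list);
		my_put_worker(w);	/* the list's reference */
		we_unlinked = 1;
	}
	spin_unlock_irq(pool_lock);
	if (we_unlinked)
		kthread_stop(w->task);	/* thread can no longer self-exit */
	spin_lock_irq(pool_lock);
	my_put_worker(w);		/* drop our pin */
}

This is why btrfs_stop_workers() in the patch takes atomic_inc(&worker->refs) before dropping workers->lock around kthread_stop(): the pin keeps the struct alive even if the worker wins the race and unlinks itself first.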