author	Chris Mason <chris.mason@oracle.com>	2009-08-04 16:56:34 -0400
committer	Chris Mason <chris.mason@oracle.com>	2009-09-11 13:30:56 -0400
commit	9042846bc7ae69cc3288d85af6bad16208d93a95
tree	d46077d217d31ce5600ef870c92fcdf330922b41 /fs/btrfs
parent	ceab36edd3d3ad3ffd01d41d6d1e05ac1ff8357e
Btrfs: Allow worker threads to exit when idle
The Btrfs worker threads don't currently die off after they have been idle for a while, leading to a lot of threads sitting around doing nothing for each mount. Also, they are unable to start atomically (from end_io handlers).

This commit reworks the worker threads so they can be started from end_io handlers (just setting a flag that asks for a thread to be added at a later date) and so they can exit if they have been idle for a long time.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
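As a rough illustration of the "set a flag, ask for a thread later" idea the message describes, here is a minimal userspace sketch using pthreads and C11 atomics (compile with -pthread). It is not the kernel code; every name in it (pool, request_worker_atomic, check_pending_creates) is invented for the example.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <unistd.h>

	struct pool {
		pthread_mutex_t lock;
		int num_workers;
		int max_workers;
		atomic_int start_pending;	/* set where sleeping is forbidden */
	};

	static void *worker_main(void *arg);

	static void start_worker(struct pool *p)
	{
		pthread_t t;

		pthread_mutex_lock(&p->lock);
		p->num_workers++;
		pthread_mutex_unlock(&p->lock);
		pthread_create(&t, NULL, worker_main, p);
		pthread_detach(t);
	}

	/* safe to call from a completion callback: only flips a flag */
	static void request_worker_atomic(struct pool *p)
	{
		atomic_store(&p->start_pending, 1);
	}

	/* run by existing workers in a context where blocking is fine */
	static void check_pending_creates(struct pool *p)
	{
		int want = 0;

		if (!atomic_load(&p->start_pending))
			return;
		pthread_mutex_lock(&p->lock);
		if (atomic_exchange(&p->start_pending, 0) &&
		    p->num_workers < p->max_workers)
			want = 1;
		pthread_mutex_unlock(&p->lock);
		if (want)
			start_worker(p);
	}

	static void *worker_main(void *arg)
	{
		/* a real worker would also drain its pending list here */
		check_pending_creates(arg);
		return NULL;
	}

	int main(void)
	{
		struct pool p = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.max_workers = 4,
		};

		request_worker_atomic(&p);	/* e.g. from an I/O completion */
		start_worker(&p);		/* an existing worker picks up the request */
		sleep(1);			/* crude: let the detached thread run */
		return 0;
	}

The patch below follows the same shape: check_pending_worker_creates() plays the role of check_pending_creates(), and find_worker() sets atomic_start_pending instead of calling btrfs_start_workers() directly when atomic_worker_start is set.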
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/async-thread.c	133
-rw-r--r--	fs/btrfs/async-thread.h	  9
-rw-r--r--	fs/btrfs/disk-io.c	 22
3 files changed, 132 insertions(+), 32 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 019e8af449ab..f10c895224ae 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -48,6 +48,9 @@ struct btrfs_worker_thread {
 	/* number of things on the pending list */
 	atomic_t num_pending;
 
+	/* reference counter for this struct */
+	atomic_t refs;
+
 	unsigned long sequence;
 
 	/* protects the pending list. */
@@ -93,6 +96,31 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 	}
 }
 
+static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
+{
+	struct btrfs_workers *workers = worker->workers;
+	unsigned long flags;
+
+	rmb();
+	if (!workers->atomic_start_pending)
+		return;
+
+	spin_lock_irqsave(&workers->lock, flags);
+	if (!workers->atomic_start_pending)
+		goto out;
+
+	workers->atomic_start_pending = 0;
+	if (workers->num_workers >= workers->max_workers)
+		goto out;
+
+	spin_unlock_irqrestore(&workers->lock, flags);
+	btrfs_start_workers(workers, 1);
+	return;
+
+out:
+	spin_unlock_irqrestore(&workers->lock, flags);
+}
+
 static noinline int run_ordered_completions(struct btrfs_workers *workers,
 					    struct btrfs_work *work)
 {
@@ -140,6 +168,36 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
 	return 0;
 }
 
+static void put_worker(struct btrfs_worker_thread *worker)
+{
+	if (atomic_dec_and_test(&worker->refs))
+		kfree(worker);
+}
+
+static int try_worker_shutdown(struct btrfs_worker_thread *worker)
+{
+	int freeit = 0;
+
+	spin_lock_irq(&worker->lock);
+	spin_lock_irq(&worker->workers->lock);
+	if (worker->workers->num_workers > 1 &&
+	    worker->idle &&
+	    !worker->working &&
+	    !list_empty(&worker->worker_list) &&
+	    list_empty(&worker->prio_pending) &&
+	    list_empty(&worker->pending)) {
+		freeit = 1;
+		list_del_init(&worker->worker_list);
+		worker->workers->num_workers--;
+	}
+	spin_unlock_irq(&worker->workers->lock);
+	spin_unlock_irq(&worker->lock);
+
+	if (freeit)
+		put_worker(worker);
+	return freeit;
+}
+
 /*
  * main loop for servicing work items
  */
@@ -175,6 +233,8 @@ again_locked:
 			 */
 			run_ordered_completions(worker->workers, work);
 
+			check_pending_worker_creates(worker);
+
 			spin_lock_irq(&worker->lock);
 			check_idle_worker(worker);
 		}
@@ -226,8 +286,13 @@ again_locked:
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
 
-			if (!kthread_should_stop())
-				schedule();
+			if (!kthread_should_stop()) {
+				schedule_timeout(HZ * 120);
+				if (!worker->working &&
+				    try_worker_shutdown(worker)) {
+					return 0;
+				}
+			}
 		}
 		__set_current_state(TASK_RUNNING);
 	}
@@ -242,16 +307,30 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
 {
 	struct list_head *cur;
 	struct btrfs_worker_thread *worker;
+	int can_stop;
 
+	spin_lock_irq(&workers->lock);
 	list_splice_init(&workers->idle_list, &workers->worker_list);
 	while (!list_empty(&workers->worker_list)) {
 		cur = workers->worker_list.next;
 		worker = list_entry(cur, struct btrfs_worker_thread,
 				    worker_list);
-		kthread_stop(worker->task);
-		list_del(&worker->worker_list);
-		kfree(worker);
+
+		atomic_inc(&worker->refs);
+		workers->num_workers -= 1;
+		if (!list_empty(&worker->worker_list)) {
+			list_del_init(&worker->worker_list);
+			put_worker(worker);
+			can_stop = 1;
+		} else
+			can_stop = 0;
+		spin_unlock_irq(&workers->lock);
+		if (can_stop)
+			kthread_stop(worker->task);
+		spin_lock_irq(&workers->lock);
+		put_worker(worker);
 	}
+	spin_unlock_irq(&workers->lock);
 	return 0;
 }
 
@@ -270,6 +349,8 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
 	workers->idle_thresh = 32;
 	workers->name = name;
 	workers->ordered = 0;
+	workers->atomic_start_pending = 0;
+	workers->atomic_worker_start = 0;
 }
 
 /*
@@ -294,6 +375,7 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 		INIT_LIST_HEAD(&worker->worker_list);
 		spin_lock_init(&worker->lock);
 		atomic_set(&worker->num_pending, 0);
+		atomic_set(&worker->refs, 1);
 		worker->workers = workers;
 		worker->task = kthread_run(worker_loop, worker,
 					   "btrfs-%s-%d", workers->name,
@@ -303,7 +385,6 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 			kfree(worker);
 			goto fail;
 		}
-
 		spin_lock_irq(&workers->lock);
 		list_add_tail(&worker->worker_list, &workers->idle_list);
 		worker->idle = 1;
@@ -367,6 +448,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
+	struct list_head *fallback;
 
 again:
 	spin_lock_irqsave(&workers->lock, flags);
@@ -376,19 +458,10 @@ again:
 	if (!worker) {
 		spin_lock_irqsave(&workers->lock, flags);
 		if (workers->num_workers >= workers->max_workers) {
-			struct list_head *fallback = NULL;
-			/*
-			 * we have failed to find any workers, just
-			 * return the force one
-			 */
-			if (!list_empty(&workers->worker_list))
-				fallback = workers->worker_list.next;
-			if (!list_empty(&workers->idle_list))
-				fallback = workers->idle_list.next;
-			BUG_ON(!fallback);
-			worker = list_entry(fallback,
-				  struct btrfs_worker_thread, worker_list);
-			spin_unlock_irqrestore(&workers->lock, flags);
+			goto fallback;
+		} else if (workers->atomic_worker_start) {
+			workers->atomic_start_pending = 1;
+			goto fallback;
 		} else {
 			spin_unlock_irqrestore(&workers->lock, flags);
 			/* we're below the limit, start another worker */
@@ -397,6 +470,22 @@ again:
 		}
 	}
 	return worker;
+
+fallback:
+	fallback = NULL;
+	/*
+	 * we have failed to find any workers, just
+	 * return the first one we can find.
+	 */
+	if (!list_empty(&workers->worker_list))
+		fallback = workers->worker_list.next;
+	if (!list_empty(&workers->idle_list))
+		fallback = workers->idle_list.next;
+	BUG_ON(!fallback);
+	worker = list_entry(fallback,
+		  struct btrfs_worker_thread, worker_list);
+	spin_unlock_irqrestore(&workers->lock, flags);
+	return worker;
 }
 
 /*
@@ -435,9 +524,9 @@ int btrfs_requeue_work(struct btrfs_work *work)
 		worker->working = 1;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
 	if (wake)
 		wake_up_process(worker->task);
+	spin_unlock_irqrestore(&worker->lock, flags);
 out:
 
 	return 0;
@@ -492,10 +581,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 		wake = 1;
 	worker->working = 1;
 
-	spin_unlock_irqrestore(&worker->lock, flags);
-
 	if (wake)
 		wake_up_process(worker->task);
+	spin_unlock_irqrestore(&worker->lock, flags);
+
 out:
 	return 0;
 }
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 1b511c109db6..a562ad8d83aa 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -73,6 +73,15 @@ struct btrfs_workers {
 	/* force completions in the order they were queued */
 	int ordered;
 
+	/* more workers required, but in an interrupt handler */
+	int atomic_start_pending;
+
+	/*
+	 * are we allowed to sleep while starting workers or are we required
+	 * to start them at a later time?
+	 */
+	int atomic_worker_start;
+
 	/* list with all the work threads.  The workers on the idle thread
 	 * may be actively servicing jobs, but they haven't yet hit the
 	 * idle thresh limit above.
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3cf4cfa575c8..20cefc6f22c4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1682,7 +1682,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 		err = -EINVAL;
 		goto fail_iput;
 	}
-
+printk("thread pool is %d\n", fs_info->thread_pool_size);
 	/*
 	 * we need to start all the end_io workers up front because the
 	 * queue work function gets called at interrupt time, and so it
@@ -1727,20 +1727,22 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	fs_info->endio_workers.idle_thresh = 4;
 	fs_info->endio_meta_workers.idle_thresh = 4;
 
-	fs_info->endio_write_workers.idle_thresh = 64;
-	fs_info->endio_meta_write_workers.idle_thresh = 64;
+	fs_info->endio_write_workers.idle_thresh = 2;
+	fs_info->endio_meta_write_workers.idle_thresh = 2;
+
+	fs_info->endio_workers.atomic_worker_start = 1;
+	fs_info->endio_meta_workers.atomic_worker_start = 1;
+	fs_info->endio_write_workers.atomic_worker_start = 1;
+	fs_info->endio_meta_write_workers.atomic_worker_start = 1;
 
 	btrfs_start_workers(&fs_info->workers, 1);
 	btrfs_start_workers(&fs_info->submit_workers, 1);
 	btrfs_start_workers(&fs_info->delalloc_workers, 1);
 	btrfs_start_workers(&fs_info->fixup_workers, 1);
-	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
-	btrfs_start_workers(&fs_info->endio_meta_workers,
-			    fs_info->thread_pool_size);
-	btrfs_start_workers(&fs_info->endio_meta_write_workers,
-			    fs_info->thread_pool_size);
-	btrfs_start_workers(&fs_info->endio_write_workers,
-			    fs_info->thread_pool_size);
+	btrfs_start_workers(&fs_info->endio_workers, 1);
+	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
+	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
+	btrfs_start_workers(&fs_info->endio_write_workers, 1);
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
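
For the idle-exit half of the patch, a companion sketch (again userspace pthreads with invented names, not the kernel API): the worker waits with a timeout instead of sleeping forever, exits if the whole interval passed with nothing to do, and an atomic reference count decides who frees the struct, mirroring worker->refs and put_worker() above. The timeout is shortened from the patch's HZ * 120 so the example finishes quickly.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>
	#include <time.h>

	struct worker {
		pthread_mutex_t lock;
		pthread_cond_t wake;
		atomic_int refs;	/* starts at 1, like atomic_set(&worker->refs, 1) */
		int pending;		/* stand-in for the pending/prio_pending lists */
	};

	static void put_worker_ref(struct worker *w)
	{
		/* whoever drops the last reference frees the struct */
		if (atomic_fetch_sub(&w->refs, 1) == 1)
			free(w);
	}

	static void *worker_loop(void *arg)
	{
		struct worker *w = arg;
		struct timespec deadline;

		pthread_mutex_lock(&w->lock);
		for (;;) {
			while (w->pending)
				w->pending--;	/* "service" one queued item */

			clock_gettime(CLOCK_REALTIME, &deadline);
			deadline.tv_sec += 2;	/* the patch uses HZ * 120 */
			if (pthread_cond_timedwait(&w->wake, &w->lock,
						   &deadline) != 0 &&
			    !w->pending)
				break;	/* idle for the whole interval: shut down */
		}
		pthread_mutex_unlock(&w->lock);
		put_worker_ref(w);	/* drop the thread's own reference */
		return NULL;
	}

	int main(void)
	{
		struct worker *w = calloc(1, sizeof(*w));
		pthread_t t;

		pthread_mutex_init(&w->lock, NULL);
		pthread_cond_init(&w->wake, NULL);
		atomic_init(&w->refs, 1);

		atomic_fetch_add(&w->refs, 1);	/* the "stopper" keeps its own ref */
		pthread_create(&t, NULL, worker_loop, w);

		pthread_mutex_lock(&w->lock);
		w->pending = 1;			/* queue one item... */
		pthread_cond_signal(&w->wake);	/* ...and kick the worker */
		pthread_mutex_unlock(&w->lock);

		pthread_join(t, NULL);		/* worker exits after its idle timeout */
		put_worker_ref(w);		/* drop the stopper's ref; frees here */
		return 0;
	}

The refcount is what makes the reworked btrfs_stop_workers() safe: the exiting worker and the stopper can each hold a reference to the same struct for a moment, and the free happens exactly once, whichever side drops last.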