about summary refs log tree commit diff stats
path: root/fs/btrfs/async-thread.c
diff options
context:
space:
mode:
author Chris Mason <chris.mason@oracle.com> 2008-09-29 15:18:18 -0400
committer Chris Mason <chris.mason@oracle.com> 2008-09-29 15:18:18 -0400
commit d352ac68148b69937d39ca5d48bcc4478e118dbf (patch)
tree 7951dd7311999d9e77766acdc7f8e93de97874d8 /fs/btrfs/async-thread.c
parent 9a5e1ea1e1e539e244a54afffc330fc368376ab9 (diff)
Btrfs: add and improve comments
This improves the comments at the top of many functions. It didn't dive into the guts of functions because I was trying to avoid merging problems with the new allocator and back reference work. extent-tree.c and volumes.c were both skipped, and there is definitely more work to do in cleaning and commenting the code.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r-- fs/btrfs/async-thread.c | 10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 4e780b279de6..04fb9702d14c 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -231,17 +231,25 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
231 231
232 /* 232 /*
233 * if we pick a busy task, move the task to the end of the list. 233 * if we pick a busy task, move the task to the end of the list.
234 * hopefully this will keep things somewhat evenly balanced 234 * hopefully this will keep things somewhat evenly balanced.
235 * Do the move in batches based on the sequence number. This groups
236 * requests submitted at roughly the same time onto the same worker.
235 */ 237 */
236 next = workers->worker_list.next; 238 next = workers->worker_list.next;
237 worker = list_entry(next, struct btrfs_worker_thread, worker_list); 239 worker = list_entry(next, struct btrfs_worker_thread, worker_list);
238 atomic_inc(&worker->num_pending); 240 atomic_inc(&worker->num_pending);
239 worker->sequence++; 241 worker->sequence++;
242
240 if (worker->sequence % workers->idle_thresh == 0) 243 if (worker->sequence % workers->idle_thresh == 0)
241 list_move_tail(next, &workers->worker_list); 244 list_move_tail(next, &workers->worker_list);
242 return worker; 245 return worker;
243} 246}
244 247
248/*
249 * selects a worker thread to take the next job. This will either find
250 * an idle worker, start a new worker up to the max count, or just return
251 * one of the existing busy workers.
252 */
245static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers) 253static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
246{ 254{
247 struct btrfs_worker_thread *worker; 255 struct btrfs_worker_thread *worker;