author:    Chris Mason <chris.mason@oracle.com>  2008-06-11 16:50:36 -0400
committer: Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:03 -0400
commit:    8b7128429235d9bd72cfd5ed20c77c4f3118f744
tree:      982eda13094af1ccd46e8c3853559a0eb6e298f6 /fs/btrfs/ctree.h
parent:    43e570b08a6c6b1d75f218566a6240542a386fd9
Btrfs: Add async worker threads for pre and post IO checksumming
Btrfs has been using workqueues to spread the checksumming load across other CPUs in the system. But, workqueues only schedule work on the same CPU that queued the work, giving them a limited benefit for systems with higher CPU counts.

This code adds a generic facility to schedule work with pools of kthreads, and changes the bio submission code to queue bios up. The queueing is important to make sure large numbers of procs on the system don't turn streaming workloads into random workloads by sending IO down concurrently.

The end result of all of this is much higher performance (and CPU usage) when doing checksumming on large machines. Two worker pools are created, one for writes and one for endio processing. The two could deadlock if we tried to service both from a single pool.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
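Since the message describes the kthread-pool facility only in outline, here is a minimal sketch of the intended usage pattern, assuming the btrfs_workers/btrfs_work interface from the async-thread.h header this patch starts including; the exact field and function signatures are assumptions for illustration, not quoted from the patch:

/*
 * Illustrative only: hand a checksumming job to a pool of kthreads
 * instead of a per-CPU workqueue.  btrfs_queue_worker() and the
 * work->func callback field are assumed from the async-thread facility
 * introduced elsewhere in this series.
 */
#include "async-thread.h"

static void csum_one_bio(struct btrfs_work *work)
{
	/* checksum the bio attached to this work item, then submit it */
}

static int queue_one_csum(struct btrfs_workers *pool, struct btrfs_work *work)
{
	work->func = csum_one_bio;
	/*
	 * Queue rather than checksum inline: draining one ordered pending
	 * list keeps many submitting procs from turning a streaming write
	 * workload into random IO.
	 */
	return btrfs_queue_worker(pool, work);
}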
Diffstat (limited to 'fs/btrfs/ctree.h')
-rw-r--r--  fs/btrfs/ctree.h  14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 49cbc62b42f9..6c91a510c965 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -30,6 +30,7 @@
 #include "bit-radix.h"
 #include "extent_io.h"
 #include "extent_map.h"
+#include "async-thread.h"
 
 struct btrfs_trans_handle;
 struct btrfs_transaction;
@@ -518,13 +519,20 @@ struct btrfs_fs_info {
 	struct list_head hashers;
 	struct list_head dead_roots;
 	struct list_head end_io_work_list;
-	struct list_head async_submit_work_list;
 	struct work_struct end_io_work;
-	struct work_struct async_submit_work;
 	spinlock_t end_io_work_lock;
-	spinlock_t async_submit_work_lock;
 	atomic_t nr_async_submits;
 
+	/*
+	 * there is a pool of worker threads for checksumming during writes
+	 * and a pool for checksumming after reads. This is because readers
+	 * can run with FS locks held, and the writers may be waiting for
+	 * those locks. We don't want ordering in the pending list to cause
+	 * deadlocks, and so the two are serviced separately.
+	 */
+	struct btrfs_workers workers;
+	struct btrfs_workers endio_workers;
+
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
 	struct work_struct trans_work;
 #else
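To make the two-pool comment in the hunk above concrete, a hedged bring-up sketch follows. The helper name start_checksum_pools is hypothetical, and the btrfs_init_workers/btrfs_start_workers calls are assumed from the same async-thread facility, so treat this as a sketch of the design rather than the patch's actual code:

/* Hypothetical bring-up of the two pools added to btrfs_fs_info above. */
#include "ctree.h"
#include "async-thread.h"

static int start_checksum_pools(struct btrfs_fs_info *fs_info, int nthreads)
{
	int ret;

	/* pre-IO checksumming for writes */
	btrfs_init_workers(&fs_info->workers, nthreads);
	/* post-IO checksumming for reads (endio) */
	btrfs_init_workers(&fs_info->endio_workers, nthreads);

	ret = btrfs_start_workers(&fs_info->workers, nthreads);
	if (ret)
		return ret;
	/*
	 * endio work can run with FS locks that writers are waiting on,
	 * so it gets its own pool: one shared pending list could order a
	 * reader's item behind a blocked writer's and deadlock.
	 */
	return btrfs_start_workers(&fs_info->endio_workers, nthreads);
}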