aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_alloc.c
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2012-03-22 01:15:07 -0400
committerBen Myers <bpm@sgi.com>2012-03-22 17:12:24 -0400
commitc999a223c2f0d31c64ef7379814cea1378b2b800 (patch)
treeed699c3c98075bbfc4aed0ab22bd174e65e575a8 /fs/xfs/xfs_alloc.c
parent1a1d772433d42aaff7315b3468fef5951604f5c6 (diff)
xfs: introduce an allocation workqueue
We currently have significant issues with the amount of stack that allocation in XFS uses, especially in the writeback path. We can easily consume 4k of stack between mapping the page, manipulating the bmap btree and allocating blocks from the free list. Not to mention btree block readahead and other functionality that issues IO in the allocation path. As a result, we can no longer fit allocation in the writeback path in the stack space provided on x86_64. To alleviate this problem, introduce an allocation workqueue and move all allocations to a separate context. This can be easily added as an interposing layer into xfs_alloc_vextent(), which takes a single argument structure and does not return until the allocation is complete or has failed. To do this, add a work structure and a completion to the allocation args structure. This allows xfs_alloc_vextent to queue the args onto the workqueue and wait for it to be completed by the worker. This can be done completely transparently to the caller. The worker function needs to ensure that it sets and clears the PF_FSTRANS flag appropriately as it is being run in an active transaction context. Work can also be queued in a memory reclaim context, so a rescuer is needed for the workqueue. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_alloc.c')
-rw-r--r--fs/xfs/xfs_alloc.c34
1 file changed, 33 insertions, 1 deletion
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index ce84ffd0264c..31e90335b83d 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -35,6 +35,7 @@
35#include "xfs_error.h" 35#include "xfs_error.h"
36#include "xfs_trace.h" 36#include "xfs_trace.h"
37 37
38struct workqueue_struct *xfs_alloc_wq;
38 39
39#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b))) 40#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
40 41
@@ -2207,7 +2208,7 @@ xfs_alloc_read_agf(
2207 * group or loop over the allocation groups to find the result. 2208 * group or loop over the allocation groups to find the result.
2208 */ 2209 */
2209int /* error */ 2210int /* error */
2210xfs_alloc_vextent( 2211__xfs_alloc_vextent(
2211 xfs_alloc_arg_t *args) /* allocation argument structure */ 2212 xfs_alloc_arg_t *args) /* allocation argument structure */
2212{ 2213{
2213 xfs_agblock_t agsize; /* allocation group size */ 2214 xfs_agblock_t agsize; /* allocation group size */
@@ -2417,6 +2418,37 @@ error0:
2417 return error; 2418 return error;
2418} 2419}
2419 2420
2421static void
2422xfs_alloc_vextent_worker(
2423 struct work_struct *work)
2424{
2425 struct xfs_alloc_arg *args = container_of(work,
2426 struct xfs_alloc_arg, work);
2427 unsigned long pflags;
2428
2429 /* we are in a transaction context here */
2430 current_set_flags_nested(&pflags, PF_FSTRANS);
2431
2432 args->result = __xfs_alloc_vextent(args);
2433 complete(args->done);
2434
2435 current_restore_flags_nested(&pflags, PF_FSTRANS);
2436}
2437
2438
2439int /* error */
2440xfs_alloc_vextent(
2441 xfs_alloc_arg_t *args) /* allocation argument structure */
2442{
2443 DECLARE_COMPLETION_ONSTACK(done);
2444
2445 args->done = &done;
2446 INIT_WORK(&args->work, xfs_alloc_vextent_worker);
2447 queue_work(xfs_alloc_wq, &args->work);
2448 wait_for_completion(&done);
2449 return args->result;
2450}
2451
2420/* 2452/*
2421 * Free an extent. 2453 * Free an extent.
2422 * Just break up the extent address and hand off to xfs_free_ag_extent 2454 * Just break up the extent address and hand off to xfs_free_ag_extent