path: root/fs/btrfs/async-thread.c
author     Chris Mason <chris.mason@oracle.com>   2008-09-30 19:24:06 -0400
committer  Chris Mason <chris.mason@oracle.com>   2008-09-30 19:36:34 -0400
commit     75ccf47d13bfb66de7faf596bfe497b9af7aaa40 (patch)
tree       e1b06bc9afec8f6b48cc2fb00c5e1e4d4dda2e0b /fs/btrfs/async-thread.c
parent     45b8c9a8b1e15bf79c2c17ec217adf96785f8011 (diff)
Btrfs: fix multi-device code to use raid policies set by mkfs
When reading in block groups, a global mask of the available raid policies
should be adjusted based on the types of block groups found on disk. This
global mask is then used to decide which raid policy to use for new block
groups.

The recent allocator changes dropped the call that updated the global mask,
making all the block groups allocated at run time single striped onto a
single drive.

This also fixes the async worker threads to set any thread that uses the
requeue mechanism as busy. This allows us to avoid blocking on
get_request_wait for the async bio submission threads.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
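The extent-tree side of this fix (restoring the mask update while block groups
are read in) is not part of the diff shown below, which is limited to
async-thread.c. As a rough sketch only (the helper name set_avail_alloc_bits
and the avail_*_alloc_bits fields are assumptions here, not shown on this
page), the idea is to OR each on-disk block group's striping flags back into
the per-filesystem allocation masks as the block groups are read in:

	/*
	 * Sketch, not the exact patch: record which raid/dup policies are
	 * already in use on disk so later allocations reuse them.
	 */
	static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
	{
		u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
					   BTRFS_BLOCK_GROUP_RAID1 |
					   BTRFS_BLOCK_GROUP_DUP);

		if (extra_flags) {
			if (flags & BTRFS_BLOCK_GROUP_DATA)
				fs_info->avail_data_alloc_bits |= extra_flags;
			if (flags & BTRFS_BLOCK_GROUP_METADATA)
				fs_info->avail_metadata_alloc_bits |= extra_flags;
			if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
				fs_info->avail_system_alloc_bits |= extra_flags;
		}
	}

	/* called once per block group item found while scanning the extent tree */
	set_avail_alloc_bits(root->fs_info, cache->flags);

New chunk allocations can then pick their raid policy from the union of these
masks instead of defaulting to a single stripe on one drive.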
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--   fs/btrfs/async-thread.c   14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 04fb9702d14c..d82efd722a48 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -302,8 +302,20 @@ int btrfs_requeue_work(struct btrfs_work *work)
 	spin_lock_irqsave(&worker->lock, flags);
 	atomic_inc(&worker->num_pending);
 	list_add_tail(&work->list, &worker->pending);
-	check_busy_worker(worker);
+
+	/* by definition we're busy, take ourselves off the idle
+	 * list
+	 */
+	if (worker->idle) {
+		spin_lock_irqsave(&worker->workers->lock, flags);
+		worker->idle = 0;
+		list_move_tail(&worker->worker_list,
+			       &worker->workers->worker_list);
+		spin_unlock_irqrestore(&worker->workers->lock, flags);
+	}
+
 	spin_unlock_irqrestore(&worker->lock, flags);
+
 out:
 	return 0;
 }