about summary refs log tree commit diff stats
path: root/fs/btrfs/transaction.c
diff options
context:
space:
mode:
author: Chris Mason <chris.mason@oracle.com> 2008-07-29 16:15:18 -0400
committer: Chris Mason <chris.mason@oracle.com> 2008-09-25 11:04:05 -0400
commit: ab78c84de1ce4db1b2a2cef361625ad80abbab3f (patch)
tree: a9726645832b50da1da9a9690c4200c0f198ce7d /fs/btrfs/transaction.c
parent: 1a3f5d0400d786aec41ede15fec5710ad1a1d18b (diff)
Btrfs: Throttle operations if the reference cache gets too large
A large reference cache is directly related to a lot of work pending for the cleaner thread. This throttles back new operations based on the size of the reference cache so the cleaner thread will be able to keep up. Overall, this actually makes the FS faster because the cleaner thread will be more likely to find things in cache.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/transaction.c')
-rw-r--r-- fs/btrfs/transaction.c | 59
1 file changed, 44 insertions(+), 15 deletions(-)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index fcef3cae0c92..b8be6703189a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -202,35 +202,64 @@ static noinline int wait_for_commit(struct btrfs_root *root,
202 return 0; 202 return 0;
203} 203}
204 204
205void btrfs_throttle(struct btrfs_root *root)
206{
207 struct btrfs_fs_info *info = root->fs_info;
208
209harder:
210 if (atomic_read(&info->throttles)) {
211 DEFINE_WAIT(wait);
212 int thr;
213 int harder_count = 0;
214 thr = atomic_read(&info->throttle_gen);
215
216 do {
217 prepare_to_wait(&info->transaction_throttle,
218 &wait, TASK_UNINTERRUPTIBLE);
219 if (!atomic_read(&info->throttles)) {
220 finish_wait(&info->transaction_throttle, &wait);
221 break;
222 }
223 schedule();
224 finish_wait(&info->transaction_throttle, &wait);
225 } while (thr == atomic_read(&info->throttle_gen));
226
227 if (harder_count < 5 &&
228 info->total_ref_cache_size > 5 * 1024 * 1024) {
229 harder_count++;
230 goto harder;
231 }
232
233 if (harder_count < 10 &&
234 info->total_ref_cache_size > 10 * 1024 * 1024) {
235 harder_count++;
236 goto harder;
237 }
238 }
239}
240
/*
 * NOTE(review): this region is a cgit side-by-side diff rendering with the
 * two columns fused into single lines: the OLD version (line numbers
 * 205-235) is on the left, the NEW version (line numbers 241-264) on the
 * right.  Lines carrying only one column are additions (new-only) or
 * removals (old-only).
 *
 * __btrfs_end_transaction(trans, root, throttle): drop one writer from the
 * running transaction, wake anyone waiting on writer_wait, release the
 * handle, and (new version) throttle AFTER trans_mutex is dropped.
 *
 * Semantic change visible in this hunk:
 *  - a local 'info = root->fs_info' shortcut replaces repeated
 *    root->fs_info dereferences;
 *  - the old inline wait on transaction_throttle (old-only lines 219-228
 *    below) is deleted; the new version instead calls btrfs_throttle(root)
 *    once the handle has been freed and trans_mutex released.
 */
205static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, 241static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
206 struct btrfs_root *root, int throttle) 242 struct btrfs_root *root, int throttle)
207{ 243{
208 struct btrfs_transaction *cur_trans; 244 struct btrfs_transaction *cur_trans;
245 struct btrfs_fs_info *info = root->fs_info;
209 246
210 mutex_lock(&root->fs_info->trans_mutex); 247 mutex_lock(&info->trans_mutex);
211 cur_trans = root->fs_info->running_transaction; 248 cur_trans = info->running_transaction;
212 WARN_ON(cur_trans != trans->transaction); 249 WARN_ON(cur_trans != trans->transaction);
213 WARN_ON(cur_trans->num_writers < 1); 250 WARN_ON(cur_trans->num_writers < 1);
214 cur_trans->num_writers--; 251 cur_trans->num_writers--;
215 252
216 if (waitqueue_active(&cur_trans->writer_wait)) 253 if (waitqueue_active(&cur_trans->writer_wait))
217 wake_up(&cur_trans->writer_wait); 254 wake_up(&cur_trans->writer_wait);
/*
 * Old version only (removed): throttling was done inline here, while the
 * transaction handle was still live, by dropping trans_mutex, doing one
 * bounded sleep on transaction_throttle, then re-taking trans_mutex.
 */
218
219 if (throttle && atomic_read(&root->fs_info->throttles)) {
220 DEFINE_WAIT(wait);
221 mutex_unlock(&root->fs_info->trans_mutex);
222 prepare_to_wait(&root->fs_info->transaction_throttle, &wait,
223 TASK_UNINTERRUPTIBLE);
224 if (atomic_read(&root->fs_info->throttles))
225 schedule();
226 finish_wait(&root->fs_info->transaction_throttle, &wait);
227 mutex_lock(&root->fs_info->trans_mutex);
228 }
229
230 put_transaction(cur_trans); 255 put_transaction(cur_trans);
231 mutex_unlock(&root->fs_info->trans_mutex); 256 mutex_unlock(&info->trans_mutex);
232 memset(trans, 0, sizeof(*trans)); 257 memset(trans, 0, sizeof(*trans));
233 kmem_cache_free(btrfs_trans_handle_cachep, trans); 258 kmem_cache_free(btrfs_trans_handle_cachep, trans);
/*
 * New version only (added): throttle after every lock is released and the
 * handle freed, via the new btrfs_throttle() helper added by this commit.
 */
259
260 if (throttle)
261 btrfs_throttle(root);
262
234 return 0; 263 return 0;
235} 264}
236 265