author    | Jens Axboe <axboe@suse.de> | 2005-06-27 04:55:12 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-27 17:33:29 -0400
commit    | 22e2c507c301c3dbbcf91b4948b88f78842ee6c9 (patch)
tree      | 9a97c91d1362e69703aa286021daffb8a5456f4c /fs/reiserfs
parent    | 020f46a39eb7b99a575b9f4d105fce2b142acdf1 (diff)
[PATCH] Update cfq io scheduler to time sliced design
This updates the CFQ io scheduler to the new time sliced design (cfq
v3). It provides full process fairness, while giving excellent
aggregate system throughput even for many competing processes. It
supports io priorities, either inherited from the cpu nice value or set
directly with the ioprio_get/set syscalls. The latter closely mimic
set/getpriority.
This import is based on my latest from -mm.
Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
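As a user-space illustration of the ioprio interface described in the commit message, here is a minimal sketch of setting and reading a process's own I/O priority. It is not part of this patch: the IOPRIO_* helpers below are hand-rolled assumptions modeled on the documented interface, and syscall(2) is used directly since glibc provided no wrapper for these calls at the time.

```c
/*
 * Minimal sketch, not from this commit: put the current process in the
 * best-effort I/O class at priority 4, then read the value back.
 * SYS_ioprio_set/SYS_ioprio_get require headers that know these syscalls.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_SHIFT 13
#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | (data))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };
enum { IOPRIO_WHO_PROCESS = 1, IOPRIO_WHO_PGRP, IOPRIO_WHO_USER };

int main(void)
{
    int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4); /* 0 = highest, 7 = lowest */

    /* who = 0 means "the calling process", just like setpriority() */
    if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0) {
        perror("ioprio_set");
        return 1;
    }
    printf("ioprio is now %ld\n",
           (long)syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0));
    return 0;
}
```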
Diffstat (limited to 'fs/reiserfs')
-rw-r--r-- | fs/reiserfs/journal.c | 12
1 files changed, 12 insertions, 0 deletions
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 7b87707acc36..d1bcf0da6728 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -645,18 +645,22 @@ struct buffer_chunk {
 
 static void write_chunk(struct buffer_chunk *chunk) {
     int i;
+    get_fs_excl();
     for (i = 0; i < chunk->nr ; i++) {
         submit_logged_buffer(chunk->bh[i]) ;
     }
     chunk->nr = 0;
+    put_fs_excl();
 }
 
 static void write_ordered_chunk(struct buffer_chunk *chunk) {
     int i;
+    get_fs_excl();
     for (i = 0; i < chunk->nr ; i++) {
         submit_ordered_buffer(chunk->bh[i]) ;
     }
     chunk->nr = 0;
+    put_fs_excl();
 }
 
 static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
@@ -918,6 +922,8 @@ static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list
         return 0 ;
     }
 
+    get_fs_excl();
+
     /* before we can put our commit blocks on disk, we have to make sure everyone older than
     ** us is on disk too
     */
@@ -1055,6 +1061,7 @@ put_jl:
 
     if (retval)
         reiserfs_abort (s, retval, "Journal write error in %s", __FUNCTION__);
+    put_fs_excl();
     return retval;
 }
 
@@ -1251,6 +1258,8 @@ static int flush_journal_list(struct super_block *s,
         return 0 ;
     }
 
+    get_fs_excl();
+
     /* if all the work is already done, get out of here */
     if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
         atomic_read(&(jl->j_commit_left)) <= 0) {
@@ -1450,6 +1459,7 @@ flush_older_and_return:
     put_journal_list(s, jl);
     if (flushall)
         up(&journal->j_flush_sem);
+    put_fs_excl();
     return err ;
 }
 
@@ -2719,6 +2729,7 @@ relock:
     th->t_trans_id = journal->j_trans_id ;
     unlock_journal(p_s_sb) ;
     INIT_LIST_HEAD (&th->t_list);
+    get_fs_excl();
     return 0 ;
 
 out_fail:
@@ -3526,6 +3537,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b
     BUG_ON (th->t_refcount > 1);
     BUG_ON (!th->t_trans_id);
 
+    put_fs_excl();
     current->journal_info = th->t_handle_save;
     reiserfs_check_lock_depth(p_s_sb, "journal end");
     if (journal->j_len == 0) {
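Every hunk above does the same thing: it brackets a journal commit or flush path with get_fs_excl()/put_fs_excl(), so the io scheduler knows the task currently holds a filesystem-exclusive resource (here, the reiserfs journal) and can avoid stalling its io while other processes queue up behind it. A minimal sketch of what these helpers are assumed to expand to, based on the per-task counter introduced alongside this series:

```c
/*
 * Hedged sketch, not part of this diff: the helpers are assumed to be the
 * trivial per-task counters added alongside this series. current->fs_excl
 * is taken to be an atomic_t in task_struct counting how many fs-exclusive
 * resources (e.g. the journal lock above) the task holds; cfq can then
 * boost the io priority of any task with a non-zero count.
 */
#include <linux/sched.h>   /* current, task_struct */
#include <asm/atomic.h>    /* atomic_inc / atomic_dec / atomic_read */

#define get_fs_excl() atomic_inc(&current->fs_excl)
#define put_fs_excl() atomic_dec(&current->fs_excl)
#define has_fs_excl() atomic_read(&current->fs_excl)
```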