aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2012-03-13 22:45:25 -0400
committerTheodore Ts'o <tytso@mit.edu>2012-03-13 22:45:25 -0400
commit932bb305ba2a01cd62809644d569f004e77a4355 (patch)
treef03c1389c2902285e7e964210729bbbf2505d607 /fs
parentc254c9ec14d5c418c8f36ea7573edae2470a1dc1 (diff)
jbd2: remove bh_state lock from checkpointing code
All accesses to checkpointing entries in journal_head are protected by j_list_lock. Thus __jbd2_journal_remove_checkpoint() doesn't really need bh_state lock. Also the only part of journal head that the rest of checkpointing code needs to check is jh->b_transaction which is safe to read under j_list_lock. So we can safely remove bh_state lock from all of checkpointing code which makes it considerably prettier. Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs')
-rw-r--r--fs/jbd2/checkpoint.c59
1 file changed, 7 insertions, 52 deletions
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 546c3b300eef..c78841ee81cf 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -88,14 +88,13 @@ static inline void __buffer_relink_io(struct journal_head *jh)
88 * whole transaction. 88 * whole transaction.
89 * 89 *
90 * Requires j_list_lock 90 * Requires j_list_lock
91 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
92 */ 91 */
93static int __try_to_free_cp_buf(struct journal_head *jh) 92static int __try_to_free_cp_buf(struct journal_head *jh)
94{ 93{
95 int ret = 0; 94 int ret = 0;
96 struct buffer_head *bh = jh2bh(jh); 95 struct buffer_head *bh = jh2bh(jh);
97 96
98 if (jh->b_jlist == BJ_None && !buffer_locked(bh) && 97 if (jh->b_transaction == NULL && !buffer_locked(bh) &&
99 !buffer_dirty(bh) && !buffer_write_io_error(bh)) { 98 !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
100 /* 99 /*
101 * Get our reference so that bh cannot be freed before 100 * Get our reference so that bh cannot be freed before
@@ -104,11 +103,8 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
104 get_bh(bh); 103 get_bh(bh);
105 JBUFFER_TRACE(jh, "remove from checkpoint list"); 104 JBUFFER_TRACE(jh, "remove from checkpoint list");
106 ret = __jbd2_journal_remove_checkpoint(jh) + 1; 105 ret = __jbd2_journal_remove_checkpoint(jh) + 1;
107 jbd_unlock_bh_state(bh);
108 BUFFER_TRACE(bh, "release"); 106 BUFFER_TRACE(bh, "release");
109 __brelse(bh); 107 __brelse(bh);
110 } else {
111 jbd_unlock_bh_state(bh);
112 } 108 }
113 return ret; 109 return ret;
114} 110}
@@ -180,21 +176,6 @@ void __jbd2_log_wait_for_space(journal_t *journal)
180} 176}
181 177
182/* 178/*
183 * We were unable to perform jbd_trylock_bh_state() inside j_list_lock.
184 * The caller must restart a list walk. Wait for someone else to run
185 * jbd_unlock_bh_state().
186 */
187static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
188 __releases(journal->j_list_lock)
189{
190 get_bh(bh);
191 spin_unlock(&journal->j_list_lock);
192 jbd_lock_bh_state(bh);
193 jbd_unlock_bh_state(bh);
194 put_bh(bh);
195}
196
197/*
198 * Clean up transaction's list of buffers submitted for io. 179 * Clean up transaction's list of buffers submitted for io.
199 * We wait for any pending IO to complete and remove any clean 180 * We wait for any pending IO to complete and remove any clean
200 * buffers. Note that we take the buffers in the opposite ordering 181 * buffers. Note that we take the buffers in the opposite ordering
@@ -222,15 +203,9 @@ restart:
222 while (!released && transaction->t_checkpoint_io_list) { 203 while (!released && transaction->t_checkpoint_io_list) {
223 jh = transaction->t_checkpoint_io_list; 204 jh = transaction->t_checkpoint_io_list;
224 bh = jh2bh(jh); 205 bh = jh2bh(jh);
225 if (!jbd_trylock_bh_state(bh)) {
226 jbd_sync_bh(journal, bh);
227 spin_lock(&journal->j_list_lock);
228 goto restart;
229 }
230 get_bh(bh); 206 get_bh(bh);
231 if (buffer_locked(bh)) { 207 if (buffer_locked(bh)) {
232 spin_unlock(&journal->j_list_lock); 208 spin_unlock(&journal->j_list_lock);
233 jbd_unlock_bh_state(bh);
234 wait_on_buffer(bh); 209 wait_on_buffer(bh);
235 /* the journal_head may have gone by now */ 210 /* the journal_head may have gone by now */
236 BUFFER_TRACE(bh, "brelse"); 211 BUFFER_TRACE(bh, "brelse");
@@ -246,7 +221,6 @@ restart:
246 * it has been written out and so we can drop it from the list 221 * it has been written out and so we can drop it from the list
247 */ 222 */
248 released = __jbd2_journal_remove_checkpoint(jh); 223 released = __jbd2_journal_remove_checkpoint(jh);
249 jbd_unlock_bh_state(bh);
250 __brelse(bh); 224 __brelse(bh);
251 } 225 }
252 226
@@ -280,7 +254,6 @@ __flush_batch(journal_t *journal, int *batch_count)
280 * be written out. 254 * be written out.
281 * 255 *
282 * Called with j_list_lock held and drops it if 1 is returned 256 * Called with j_list_lock held and drops it if 1 is returned
283 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
284 */ 257 */
285static int __process_buffer(journal_t *journal, struct journal_head *jh, 258static int __process_buffer(journal_t *journal, struct journal_head *jh,
286 int *batch_count, transaction_t *transaction) 259 int *batch_count, transaction_t *transaction)
@@ -291,7 +264,6 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
291 if (buffer_locked(bh)) { 264 if (buffer_locked(bh)) {
292 get_bh(bh); 265 get_bh(bh);
293 spin_unlock(&journal->j_list_lock); 266 spin_unlock(&journal->j_list_lock);
294 jbd_unlock_bh_state(bh);
295 wait_on_buffer(bh); 267 wait_on_buffer(bh);
296 /* the journal_head may have gone by now */ 268 /* the journal_head may have gone by now */
297 BUFFER_TRACE(bh, "brelse"); 269 BUFFER_TRACE(bh, "brelse");
@@ -303,7 +275,6 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
303 275
304 transaction->t_chp_stats.cs_forced_to_close++; 276 transaction->t_chp_stats.cs_forced_to_close++;
305 spin_unlock(&journal->j_list_lock); 277 spin_unlock(&journal->j_list_lock);
306 jbd_unlock_bh_state(bh);
307 if (unlikely(journal->j_flags & JBD2_UNMOUNT)) 278 if (unlikely(journal->j_flags & JBD2_UNMOUNT))
308 /* 279 /*
309 * The journal thread is dead; so starting and 280 * The journal thread is dead; so starting and
@@ -322,11 +293,9 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
322 if (unlikely(buffer_write_io_error(bh))) 293 if (unlikely(buffer_write_io_error(bh)))
323 ret = -EIO; 294 ret = -EIO;
324 get_bh(bh); 295 get_bh(bh);
325 J_ASSERT_JH(jh, !buffer_jbddirty(bh));
326 BUFFER_TRACE(bh, "remove from checkpoint"); 296 BUFFER_TRACE(bh, "remove from checkpoint");
327 __jbd2_journal_remove_checkpoint(jh); 297 __jbd2_journal_remove_checkpoint(jh);
328 spin_unlock(&journal->j_list_lock); 298 spin_unlock(&journal->j_list_lock);
329 jbd_unlock_bh_state(bh);
330 __brelse(bh); 299 __brelse(bh);
331 } else { 300 } else {
332 /* 301 /*
@@ -341,7 +310,6 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
341 J_ASSERT_BH(bh, !buffer_jwrite(bh)); 310 J_ASSERT_BH(bh, !buffer_jwrite(bh));
342 journal->j_chkpt_bhs[*batch_count] = bh; 311 journal->j_chkpt_bhs[*batch_count] = bh;
343 __buffer_relink_io(jh); 312 __buffer_relink_io(jh);
344 jbd_unlock_bh_state(bh);
345 transaction->t_chp_stats.cs_written++; 313 transaction->t_chp_stats.cs_written++;
346 (*batch_count)++; 314 (*batch_count)++;
347 if (*batch_count == JBD2_NR_BATCH) { 315 if (*batch_count == JBD2_NR_BATCH) {
@@ -405,15 +373,7 @@ restart:
405 int retry = 0, err; 373 int retry = 0, err;
406 374
407 while (!retry && transaction->t_checkpoint_list) { 375 while (!retry && transaction->t_checkpoint_list) {
408 struct buffer_head *bh;
409
410 jh = transaction->t_checkpoint_list; 376 jh = transaction->t_checkpoint_list;
411 bh = jh2bh(jh);
412 if (!jbd_trylock_bh_state(bh)) {
413 jbd_sync_bh(journal, bh);
414 retry = 1;
415 break;
416 }
417 retry = __process_buffer(journal, jh, &batch_count, 377 retry = __process_buffer(journal, jh, &batch_count,
418 transaction); 378 transaction);
419 if (retry < 0 && !result) 379 if (retry < 0 && !result)
@@ -529,15 +489,12 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
529 do { 489 do {
530 jh = next_jh; 490 jh = next_jh;
531 next_jh = jh->b_cpnext; 491 next_jh = jh->b_cpnext;
532 /* Use trylock because of the ranking */ 492 ret = __try_to_free_cp_buf(jh);
533 if (jbd_trylock_bh_state(jh2bh(jh))) { 493 if (ret) {
534 ret = __try_to_free_cp_buf(jh); 494 freed++;
535 if (ret) { 495 if (ret == 2) {
536 freed++; 496 *released = 1;
537 if (ret == 2) { 497 return freed;
538 *released = 1;
539 return freed;
540 }
541 } 498 }
542 } 499 }
543 /* 500 /*
@@ -620,9 +577,7 @@ out:
620 * The function can free jh and bh. 577 * The function can free jh and bh.
621 * 578 *
622 * This function is called with j_list_lock held. 579 * This function is called with j_list_lock held.
623 * This function is called with jbd_lock_bh_state(jh2bh(jh))
624 */ 580 */
625
626int __jbd2_journal_remove_checkpoint(struct journal_head *jh) 581int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
627{ 582{
628 struct transaction_chp_stats_s *stats; 583 struct transaction_chp_stats_s *stats;