-rw-r--r--	fs/gfs2/incore.h	|   9
-rw-r--r--	fs/gfs2/log.c		| 145
-rw-r--r--	fs/gfs2/log.h		|   1
-rw-r--r--	fs/gfs2/lops.c		| 242
-rw-r--r--	fs/gfs2/meta_io.c	|  55
-rw-r--r--	fs/gfs2/meta_io.h	|   3
-rw-r--r--	fs/gfs2/ops_address.c	|  11
-rw-r--r--	fs/gfs2/ops_fstype.c	|   3
-rw-r--r--	fs/gfs2/ops_inode.c	|   7
-rw-r--r--	fs/gfs2/ops_super.c	|  13
10 files changed, 269 insertions(+), 220 deletions(-)
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 8aa5780862be..eaddfb5a8e6f 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -341,12 +341,6 @@ struct gfs2_quota_data {
 	unsigned long qd_last_touched;
 };
 
-struct gfs2_log_buf {
-	struct list_head lb_list;
-	struct buffer_head *lb_bh;
-	struct buffer_head *lb_real;
-};
-
 struct gfs2_trans {
 	unsigned long tr_ip;
 
@@ -631,7 +625,8 @@ struct gfs2_sbd {
 
 	unsigned long sd_log_flush_time;
 	struct rw_semaphore sd_log_flush_lock;
-	struct list_head sd_log_flush_list;
+	atomic_t sd_log_in_flight;
+	wait_queue_head_t sd_log_flush_wait;
 
 	unsigned int sd_log_flush_head;
 	u64 sd_log_flush_wrapped;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 4d04e6f19706..ee704676b2f1 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -104,11 +104,8 @@ static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
 		gfs2_assert(sdp, bd->bd_ail == ai);
 
 		if (!buffer_busy(bh)) {
-			if (!buffer_uptodate(bh)) {
-				gfs2_log_unlock(sdp);
+			if (!buffer_uptodate(bh))
 				gfs2_io_error_bh(sdp, bh);
-				gfs2_log_lock(sdp);
-			}
 			list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
 			continue;
 		}
@@ -118,9 +115,16 @@ static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
 
 		list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
 
+		get_bh(bh);
 		gfs2_log_unlock(sdp);
-		wait_on_buffer(bh);
-		ll_rw_block(WRITE, 1, &bh);
+		lock_buffer(bh);
+		if (test_clear_buffer_dirty(bh)) {
+			bh->b_end_io = end_buffer_write_sync;
+			submit_bh(WRITE, bh);
+		} else {
+			unlock_buffer(bh);
+			brelse(bh);
+		}
 		gfs2_log_lock(sdp);
 
 		retry = 1;
@@ -446,10 +450,10 @@ static unsigned int current_tail(struct gfs2_sbd *sdp)
 	return tail;
 }
 
-static inline void log_incr_head(struct gfs2_sbd *sdp)
+void gfs2_log_incr_head(struct gfs2_sbd *sdp)
 {
 	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
-		gfs2_assert_withdraw(sdp, sdp->sd_log_flush_head == sdp->sd_log_head);
+		BUG_ON(sdp->sd_log_flush_head != sdp->sd_log_head);
 
 	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
 		sdp->sd_log_flush_head = 0;
@@ -458,6 +462,23 @@ static inline void log_incr_head(struct gfs2_sbd *sdp)
 }
 
 /**
+ * gfs2_log_write_endio - End of I/O for a log buffer
+ * @bh: The buffer head
+ * @uptodate: I/O Status
+ *
+ */
+
+static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
+{
+	struct gfs2_sbd *sdp = bh->b_private;
+	bh->b_private = NULL;
+
+	end_buffer_write_sync(bh, uptodate);
+	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
+		wake_up(&sdp->sd_log_flush_wait);
+}
+
+/**
  * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
  * @sdp: The GFS2 superblock
  *
@@ -467,25 +488,42 @@ static inline void log_incr_head(struct gfs2_sbd *sdp)
 struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
 {
 	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
-	struct gfs2_log_buf *lb;
 	struct buffer_head *bh;
 
-	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
-	list_add(&lb->lb_list, &sdp->sd_log_flush_list);
-
-	bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
+	bh = sb_getblk(sdp->sd_vfs, blkno);
 	lock_buffer(bh);
 	memset(bh->b_data, 0, bh->b_size);
 	set_buffer_uptodate(bh);
 	clear_buffer_dirty(bh);
-	unlock_buffer(bh);
-
-	log_incr_head(sdp);
+	gfs2_log_incr_head(sdp);
+	atomic_inc(&sdp->sd_log_in_flight);
+	bh->b_private = sdp;
+	bh->b_end_io = gfs2_log_write_endio;
 
 	return bh;
 }
 
 /**
+ * gfs2_fake_write_endio -
+ * @bh: The buffer head
+ * @uptodate: The I/O Status
+ *
+ */
+
+static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
+{
+	struct buffer_head *real_bh = bh->b_private;
+	struct gfs2_sbd *sdp = GFS2_SB(real_bh->b_page->mapping->host);
+
+	end_buffer_write_sync(bh, uptodate);
+	free_buffer_head(bh);
+	unlock_buffer(real_bh);
+	brelse(real_bh);
+	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
+		wake_up(&sdp->sd_log_flush_wait);
+}
+
+/**
  * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
  * @sdp: the filesystem
  * @data: the data the buffer_head should point to
@@ -497,22 +535,20 @@ struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
 				      struct buffer_head *real)
 {
 	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
-	struct gfs2_log_buf *lb;
 	struct buffer_head *bh;
 
-	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
-	list_add(&lb->lb_list, &sdp->sd_log_flush_list);
-	lb->lb_real = real;
-
-	bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
+	bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
 	atomic_set(&bh->b_count, 1);
-	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
+	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
 	set_bh_page(bh, real->b_page, bh_offset(real));
 	bh->b_blocknr = blkno;
 	bh->b_size = sdp->sd_sb.sb_bsize;
 	bh->b_bdev = sdp->sd_vfs->s_bdev;
+	bh->b_private = real;
+	bh->b_end_io = gfs2_fake_write_endio;
 
-	log_incr_head(sdp);
+	gfs2_log_incr_head(sdp);
+	atomic_inc(&sdp->sd_log_in_flight);
 
 	return bh;
 }
@@ -579,45 +615,24 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
 	gfs2_assert_withdraw(sdp, !pull);
 
 	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
-	log_incr_head(sdp);
+	gfs2_log_incr_head(sdp);
 }
 
 static void log_flush_commit(struct gfs2_sbd *sdp)
 {
-	struct list_head *head = &sdp->sd_log_flush_list;
-	struct gfs2_log_buf *lb;
-	struct buffer_head *bh;
-	int flushcount = 0;
-
-	while (!list_empty(head)) {
-		lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
-		list_del(&lb->lb_list);
-		bh = lb->lb_bh;
-
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh))
-			gfs2_io_error_bh(sdp, bh);
-		if (lb->lb_real) {
-			while (atomic_read(&bh->b_count) != 1) /* Grrrr... */
-				schedule();
-			free_buffer_head(bh);
-		} else
-			brelse(bh);
-		kfree(lb);
-		flushcount++;
+	DEFINE_WAIT(wait);
+
+	if (atomic_read(&sdp->sd_log_in_flight)) {
+		do {
+			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (atomic_read(&sdp->sd_log_in_flight))
+				io_schedule();
+		} while(atomic_read(&sdp->sd_log_in_flight));
+		finish_wait(&sdp->sd_log_flush_wait, &wait);
 	}
 
-	/* If nothing was journaled, the header is unplanned and unwanted. */
-	if (flushcount) {
-		log_write_header(sdp, 0, 0);
-	} else {
-		unsigned int tail;
-		tail = current_tail(sdp);
-
-		gfs2_ail1_empty(sdp, 0);
-		if (sdp->sd_log_tail != tail)
-			log_pull_tail(sdp, tail);
-	}
+	log_write_header(sdp, 0, 0);
 }
 
 static void gfs2_ordered_write(struct gfs2_sbd *sdp)
@@ -698,10 +713,16 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 	INIT_LIST_HEAD(&ai->ai_ail1_list);
 	INIT_LIST_HEAD(&ai->ai_ail2_list);
 
-	gfs2_assert_withdraw(sdp,
-			sdp->sd_log_num_buf + sdp->sd_log_num_databuf ==
-			sdp->sd_log_commited_buf +
-			sdp->sd_log_commited_databuf);
+	if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
+		printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
+		       sdp->sd_log_commited_buf);
+		gfs2_assert_withdraw(sdp, 0);
+	}
+	if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
+		printk(KERN_INFO "GFS2: log databuf %u %u\n",
+		       sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
+		gfs2_assert_withdraw(sdp, 0);
+	}
 	gfs2_assert_withdraw(sdp,
 			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
 
@@ -713,7 +734,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 	lops_before_commit(sdp);
 	gfs2_ordered_wait(sdp);
 
-	if (!list_empty(&sdp->sd_log_flush_list))
+	if (sdp->sd_log_head != sdp->sd_log_flush_head)
 		log_flush_commit(sdp);
 	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
 		gfs2_log_lock(sdp);
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 639423561b2d..dae282400627 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -52,6 +52,7 @@ int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags);
 
 int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
 void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
+void gfs2_log_incr_head(struct gfs2_sbd *sdp);
 
 struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp);
 struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 342c10e12af2..6c27cea761c6 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -91,6 +91,39 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
 	unlock_buffer(bh);
 }
 
+
+static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
+{
+	return (struct gfs2_log_descriptor *)bh->b_data;
+}
+
+static inline __be64 *bh_log_ptr(struct buffer_head *bh)
+{
+	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
+	return (__force __be64 *)(ld + 1);
+}
+
+static inline __be64 *bh_ptr_end(struct buffer_head *bh)
+{
+	return (__force __be64 *)(bh->b_data + bh->b_size);
+}
+
+
+static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
+{
+	struct buffer_head *bh = gfs2_log_get_buf(sdp);
+	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
+	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
+	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
+	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
+	ld->ld_type = cpu_to_be32(ld_type);
+	ld->ld_length = 0;
+	ld->ld_data1 = 0;
+	ld->ld_data2 = 0;
+	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
+	return bh;
+}
+
 static void __glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
 {
 	struct gfs2_glock *gl;
@@ -181,7 +214,6 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 	struct gfs2_log_descriptor *ld;
 	struct gfs2_bufdata *bd1 = NULL, *bd2;
 	unsigned int total;
-	unsigned int offset = BUF_OFFSET;
 	unsigned int limit;
 	unsigned int num;
 	unsigned n;
@@ -198,18 +230,12 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 		if (total > limit)
 			num = limit;
 		gfs2_log_unlock(sdp);
-		bh = gfs2_log_get_buf(sdp);
+		bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
 		gfs2_log_lock(sdp);
-		ld = (struct gfs2_log_descriptor *)bh->b_data;
-		ptr = (__be64 *)(bh->b_data + offset);
-		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
-		ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
-		ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
-		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
+		ld = bh_log_desc(bh);
+		ptr = bh_log_ptr(bh);
 		ld->ld_length = cpu_to_be32(num + 1);
 		ld->ld_data1 = cpu_to_be32(num);
-		ld->ld_data2 = cpu_to_be32(0);
-		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
 
 		n = 0;
 		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
@@ -220,17 +246,17 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 		}
 
 		gfs2_log_unlock(sdp);
-		set_buffer_dirty(bh);
-		ll_rw_block(WRITE, 1, &bh);
+		submit_bh(WRITE, bh);
 		gfs2_log_lock(sdp);
 
 		n = 0;
 		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
 					     bd_le.le_list) {
+			get_bh(bd2->bd_bh);
 			gfs2_log_unlock(sdp);
+			lock_buffer(bd2->bd_bh);
 			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
-			set_buffer_dirty(bh);
-			ll_rw_block(WRITE, 1, &bh);
+			submit_bh(WRITE, bh);
 			gfs2_log_lock(sdp);
 			if (++n >= num)
 				break;
@@ -359,17 +385,11 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 	if (!sdp->sd_log_num_revoke)
 		return;
 
-	bh = gfs2_log_get_buf(sdp);
-	ld = (struct gfs2_log_descriptor *)bh->b_data;
-	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
-	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
-	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
-	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
+	bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
+	ld = bh_log_desc(bh);
 	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
 						    sizeof(u64)));
 	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
-	ld->ld_data2 = cpu_to_be32(0);
-	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
 	offset = sizeof(struct gfs2_log_descriptor);
 
 	while (!list_empty(head)) {
@@ -378,8 +398,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 		sdp->sd_log_num_revoke--;
 
 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
-			set_buffer_dirty(bh);
-			ll_rw_block(WRITE, 1, &bh);
+			submit_bh(WRITE, bh);
 
 			bh = gfs2_log_get_buf(sdp);
 			mh = (struct gfs2_meta_header *)bh->b_data;
@@ -396,8 +415,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 	}
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 
-	set_buffer_dirty(bh);
-	ll_rw_block(WRITE, 1, &bh);
+	submit_bh(WRITE, bh);
 }
 
 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
@@ -562,118 +580,110 @@ out:
 	unlock_buffer(bd->bd_bh);
 }
 
-static int gfs2_check_magic(struct buffer_head *bh)
+static void gfs2_check_magic(struct buffer_head *bh)
 {
-	struct page *page = bh->b_page;
 	void *kaddr;
 	__be32 *ptr;
-	int rv = 0;
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	clear_buffer_escaped(bh);
+	kaddr = kmap_atomic(bh->b_page, KM_USER0);
 	ptr = kaddr + bh_offset(bh);
 	if (*ptr == cpu_to_be32(GFS2_MAGIC))
-		rv = 1;
+		set_buffer_escaped(bh);
 	kunmap_atomic(kaddr, KM_USER0);
-
-	return rv;
 }
 
-/**
- * databuf_lo_before_commit - Scan the data buffers, writing as we go
- *
- */
-
-static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
+static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
+			      struct list_head *list, struct list_head *done,
+			      unsigned int n)
 {
-	struct gfs2_bufdata *bd1 = NULL, *bd2;
-	struct buffer_head *bh = NULL,*bh1 = NULL;
+	struct buffer_head *bh1;
 	struct gfs2_log_descriptor *ld;
-	unsigned int limit;
-	unsigned int total;
-	unsigned int num, n;
-	__be64 *ptr = NULL;
-	int magic;
+	struct gfs2_bufdata *bd;
+	__be64 *ptr;
 
+	if (!bh)
+		return;
 
-	limit = databuf_limit(sdp);
+	ld = bh_log_desc(bh);
+	ld->ld_length = cpu_to_be32(n + 1);
+	ld->ld_data1 = cpu_to_be32(n);
 
+	ptr = bh_log_ptr(bh);
+
+	get_bh(bh);
+	submit_bh(WRITE, bh);
 	gfs2_log_lock(sdp);
-	total = sdp->sd_log_num_databuf;
-	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
-				       bd_le.le_list);
-	while(total) {
-		num = total;
-		if (num > limit)
-			num = limit;
-
-		gfs2_log_unlock(sdp);
-		bh = gfs2_log_get_buf(sdp);
-		gfs2_log_lock(sdp);
-
-		ld = (struct gfs2_log_descriptor *)bh->b_data;
-		ptr = (__be64 *)(bh->b_data + DATABUF_OFFSET);
-		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
-		ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
-		ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
-		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_JDATA);
-		ld->ld_length = cpu_to_be32(num + 1);
-		ld->ld_data1 = cpu_to_be32(num);
-		ld->ld_data2 = cpu_to_be32(0);
-		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
-
-		n = 0;
-		list_for_each_entry_continue(bd1, &sdp->sd_log_le_databuf,
-					     bd_le.le_list) {
-			bh1 = bd1->bd_bh;
-
-			magic = gfs2_check_magic(bh1);
-			*ptr++ = cpu_to_be64(bh1->b_blocknr);
-			*ptr++ = cpu_to_be64((__u64)magic);
-			clear_buffer_escaped(bh1);
-			if (unlikely(magic != 0))
-				set_buffer_escaped(bh1);
-			if (++n >= num)
-				break;
+	while(!list_empty(list)) {
+		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
+		list_move_tail(&bd->bd_le.le_list, done);
+		get_bh(bd->bd_bh);
+		while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
+			gfs2_log_incr_head(sdp);
+			ptr += 2;
 		}
 		gfs2_log_unlock(sdp);
-		if (bh) {
-			set_buffer_dirty(bh);
-			ll_rw_block(WRITE, 1, &bh);
-			bh = NULL;
-			ptr = NULL;
+		lock_buffer(bd->bd_bh);
+		if (buffer_escaped(bd->bd_bh)) {
+			void *kaddr;
+			bh1 = gfs2_log_get_buf(sdp);
+			kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
+			memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
+			       bh1->b_size);
+			kunmap_atomic(kaddr, KM_USER0);
+			*(__be32 *)bh1->b_data = 0;
+			clear_buffer_escaped(bd->bd_bh);
+			unlock_buffer(bd->bd_bh);
+			brelse(bd->bd_bh);
+		} else {
+			bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
		}
-		n = 0;
+		submit_bh(WRITE, bh1);
 		gfs2_log_lock(sdp);
-		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
-					     bd_le.le_list) {
-			if (!bd2->bd_bh)
-				continue;
-			/* copy buffer if it needs escaping */
+		ptr += 2;
+	}
+	gfs2_log_unlock(sdp);
+	brelse(bh);
+}
+
+/**
+ * databuf_lo_before_commit - Scan the data buffers, writing as we go
+ *
+ */
+
+static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
+{
+	struct gfs2_bufdata *bd = NULL;
+	struct buffer_head *bh = NULL;
+	unsigned int n = 0;
+	__be64 *ptr = NULL, *end = NULL;
+	LIST_HEAD(processed);
+	LIST_HEAD(in_progress);
+
+	gfs2_log_lock(sdp);
+	while (!list_empty(&sdp->sd_log_le_databuf)) {
+		if (ptr == end) {
 			gfs2_log_unlock(sdp);
-			if (unlikely(buffer_escaped(bd2->bd_bh))) {
-				void *kaddr;
-				struct page *page = bd2->bd_bh->b_page;
-				bh = gfs2_log_get_buf(sdp);
-				kaddr = kmap_atomic(page, KM_USER0);
-				memcpy(bh->b_data,
-				       kaddr + bh_offset(bd2->bd_bh),
-				       sdp->sd_sb.sb_bsize);
-				kunmap_atomic(kaddr, KM_USER0);
-				*(__be32 *)bh->b_data = 0;
-			} else {
-				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
-			}
-			set_buffer_dirty(bh);
-			ll_rw_block(WRITE, 1, &bh);
+			gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
+			n = 0;
+			bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
+			ptr = bh_log_ptr(bh);
+			end = bh_ptr_end(bh) - 1;
 			gfs2_log_lock(sdp);
-			if (++n >= num)
-				break;
+			continue;
 		}
-		bh = NULL;
-		BUG_ON(total < num);
-		total -= num;
+		bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
+		list_move_tail(&bd->bd_le.le_list, &in_progress);
+		gfs2_check_magic(bd->bd_bh);
+		*ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
+		*ptr++ = cpu_to_be64(buffer_escaped(bh) ? 1 : 0);
+		n++;
 	}
 	gfs2_log_unlock(sdp);
+	gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
+	gfs2_log_lock(sdp);
+	list_splice(&processed, &sdp->sd_log_le_databuf);
+	gfs2_log_unlock(sdp);
 }
 
 static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
@@ -807,10 +817,10 @@ const struct gfs2_log_operations gfs2_databuf_lops = {
 
 const struct gfs2_log_operations *gfs2_log_ops[] = {
 	&gfs2_glock_lops,
+	&gfs2_databuf_lops,
 	&gfs2_buf_lops,
-	&gfs2_revoke_lops,
 	&gfs2_rg_lops,
-	&gfs2_databuf_lops,
+	&gfs2_revoke_lops,
 	NULL,
 };
 
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 19097bc7c81d..1d80f2d42122 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -297,6 +297,37 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
 	unlock_page(bh->b_page);
 }
 
+void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
+{
+	struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host);
+	struct gfs2_bufdata *bd = bh->b_private;
+	if (test_clear_buffer_pinned(bh)) {
+		list_del_init(&bd->bd_le.le_list);
+		if (meta) {
+			gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
+			sdp->sd_log_num_buf--;
+			tr->tr_num_buf_rm++;
+		} else {
+			gfs2_assert_warn(sdp, sdp->sd_log_num_databuf);
+			sdp->sd_log_num_databuf--;
+			tr->tr_num_databuf_rm++;
+		}
+		tr->tr_touched = 1;
+		brelse(bh);
+	}
+	if (bd) {
+		if (bd->bd_ail) {
+			gfs2_remove_from_ail(NULL, bd);
+			bh->b_private = NULL;
+			bd->bd_bh = NULL;
+			bd->bd_blkno = bh->b_blocknr;
+			gfs2_trans_add_revoke(sdp, bd);
+		}
+	}
+	clear_buffer_dirty(bh);
+	clear_buffer_uptodate(bh);
+}
+
 /**
  * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
  * @ip: the inode who owns the buffers
@@ -313,33 +344,11 @@ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
 	while (blen) {
 		bh = getbuf(ip->i_gl, bstart, NO_CREATE);
 		if (bh) {
-			struct gfs2_bufdata *bd;
-
 			lock_buffer(bh);
 			gfs2_log_lock(sdp);
-			bd = bh->b_private;
-			if (test_clear_buffer_pinned(bh)) {
-				struct gfs2_trans *tr = current->journal_info;
-				list_del_init(&bd->bd_le.le_list);
-				gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
-				sdp->sd_log_num_buf--;
-				tr->tr_num_buf_rm++;
-				brelse(bh);
-			}
-			if (bd) {
-				if (bd->bd_ail) {
-					gfs2_remove_from_ail(NULL, bd);
-					bh->b_private = NULL;
-					bd->bd_bh = NULL;
-					bd->bd_blkno = bh->b_blocknr;
-					gfs2_trans_add_revoke(sdp, bd);
-				}
-			}
-			clear_buffer_dirty(bh);
-			clear_buffer_uptodate(bh);
+			gfs2_remove_from_journal(bh, current->journal_info, 1);
 			gfs2_log_unlock(sdp);
 			unlock_buffer(bh);
-
 			brelse(bh);
 		}
 
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index fd67f07cd60e..b7048222ebb4 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -51,6 +51,9 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
 void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
 			 int meta);
 
+void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr,
+			      int meta);
+
 void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
 
 void gfs2_meta_cache_flush(struct gfs2_inode *ip);
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index dd1ea491ddcb..b7baf1831912 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -414,7 +414,8 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
 	if (ind_blocks || data_blocks)
 		rblocks += RES_STATFS + RES_QUOTA;
 
-	error = gfs2_trans_begin(sdp, rblocks, 0);
+	error = gfs2_trans_begin(sdp, rblocks,
+				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
 	if (error)
 		goto out_trans_fail;
 
@@ -625,10 +626,10 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
 	clear_buffer_dirty(bh);
 	bd = bh->b_private;
 	if (bd) {
-		if (!list_empty(&bd->bd_le.le_list)) {
-			if (!buffer_pinned(bh))
-				list_del_init(&bd->bd_le.le_list);
-		}
+		if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
+			list_del_init(&bd->bd_le.le_list);
+		else
+			gfs2_remove_from_journal(bh, current->journal_info, 0);
 	}
 	bh->b_bdev = NULL;
 	clear_buffer_mapped(bh);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 35f3dfa9bfb1..1ac9afa58c65 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -89,7 +89,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	INIT_LIST_HEAD(&sdp->sd_ail2_list);
 
 	init_rwsem(&sdp->sd_log_flush_lock);
-	INIT_LIST_HEAD(&sdp->sd_log_flush_list);
+	atomic_set(&sdp->sd_log_in_flight, 0);
+	init_waitqueue_head(&sdp->sd_log_flush_wait);
 
 	INIT_LIST_HEAD(&sdp->sd_revoke_list);
 
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 2cbe5a321e89..291f0c7eaa3b 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -905,12 +905,17 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
 static int setattr_size(struct inode *inode, struct iattr *attr)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	int error;
 
 	if (attr->ia_size != ip->i_di.di_size) {
-		error = vmtruncate(inode, attr->ia_size);
+		error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
 		if (error)
 			return error;
+		error = vmtruncate(inode, attr->ia_size);
+		gfs2_trans_end(sdp);
+		if (error)
+			return error;
 	}
 
 	error = gfs2_truncatei(ip, attr->ia_size);
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 4316690d86f6..950f31460e8b 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -455,12 +455,15 @@ static void gfs2_delete_inode(struct inode *inode)
 	}
 
 	error = gfs2_dinode_dealloc(ip);
-	/*
-	 * Must do this before unlock to avoid trying to write back
-	 * potentially dirty data now that inode no longer exists
-	 * on disk.
-	 */
+	if (error)
+		goto out_unlock;
+
+	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
+	if (error)
+		goto out_unlock;
+	/* Needs to be done before glock release & also in a transaction */
 	truncate_inode_pages(&inode->i_data, 0);
+	gfs2_trans_end(sdp);
 
 out_unlock:
 	gfs2_glock_dq(&ip->i_iopen_gh);