-rw-r--r--	fs/gfs2/lops.c	175
1 file changed, 59 insertions(+), 116 deletions(-)
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 872d3e6ae05e..7882671bfe09 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -372,7 +372,8 @@ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
 	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
 }
 
-static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
+static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
+				      u32 ld_length, u32 ld_data1)
 {
 	void *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 	struct gfs2_log_descriptor *ld = page_address(page);
@@ -381,8 +382,8 @@ static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
 	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
 	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
 	ld->ld_type = cpu_to_be32(ld_type);
-	ld->ld_length = 0;
-	ld->ld_data1 = 0;
+	ld->ld_length = cpu_to_be32(ld_length);
+	ld->ld_data1 = cpu_to_be32(ld_data1);
 	ld->ld_data2 = 0;
 	return page;
 }
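With the two new parameters, a caller can describe the log descriptor completely at allocation time instead of reaching back into the page to patch ld_length and ld_data1 afterwards. A minimal before/after sketch of the call pattern, condensed from the call sites updated later in this patch (not a verbatim excerpt):

	/* Before: allocate, then fix up the counts in the descriptor. */
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
	ld = page_address(page);
	ld->ld_length = cpu_to_be32(num + 1);	/* num buffers plus the descriptor block */
	ld->ld_data1 = cpu_to_be32(num);

	/* After: the counts travel with the allocation. */
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA, num + 1, num);
	ld = page_address(page);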
@@ -418,39 +419,49 @@ out:
 	unlock_buffer(bd->bd_bh);
 }
 
-static void buf_lo_before_commit(struct gfs2_sbd *sdp)
+static void gfs2_check_magic(struct buffer_head *bh)
+{
+	void *kaddr;
+	__be32 *ptr;
+
+	clear_buffer_escaped(bh);
+	kaddr = kmap_atomic(bh->b_page);
+	ptr = kaddr + bh_offset(bh);
+	if (*ptr == cpu_to_be32(GFS2_MAGIC))
+		set_buffer_escaped(bh);
+	kunmap_atomic(kaddr);
+}
+
+static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
+			       unsigned int total, struct list_head *blist,
+			       bool is_databuf)
 {
 	struct gfs2_log_descriptor *ld;
 	struct gfs2_bufdata *bd1 = NULL, *bd2;
 	struct page *page;
-	unsigned int total;
-	unsigned int limit;
 	unsigned int num;
 	unsigned n;
 	__be64 *ptr;
 
-	limit = buf_limit(sdp);
-	/* for 4k blocks, limit = 503 */
-
 	gfs2_log_lock(sdp);
-	total = sdp->sd_log_num_buf;
-	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
+	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_le.le_list);
 	while(total) {
 		num = total;
 		if (total > limit)
 			num = limit;
 		gfs2_log_unlock(sdp);
-		page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
+		page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA, num + 1, num);
 		ld = page_address(page);
 		gfs2_log_lock(sdp);
 		ptr = (__be64 *)(ld + 1);
-		ld->ld_length = cpu_to_be32(num + 1);
-		ld->ld_data1 = cpu_to_be32(num);
 
 		n = 0;
-		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
-					     bd_le.le_list) {
+		list_for_each_entry_continue(bd1, blist, bd_le.le_list) {
 			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
+			if (is_databuf) {
+				gfs2_check_magic(bd1->bd_bh);
+				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
+			}
 			if (++n >= num)
 				break;
 		}
@@ -460,12 +471,27 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 		gfs2_log_lock(sdp);
 
 		n = 0;
-		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
-					     bd_le.le_list) {
+		list_for_each_entry_continue(bd2, blist, bd_le.le_list) {
 			get_bh(bd2->bd_bh);
 			gfs2_log_unlock(sdp);
 			lock_buffer(bd2->bd_bh);
-			gfs2_log_write_bh(sdp, bd2->bd_bh);
+
+			if (buffer_escaped(bd2->bd_bh)) {
+				void *kaddr;
+				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+				ptr = page_address(page);
+				kaddr = kmap_atomic(bd2->bd_bh->b_page);
+				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
+				       bd2->bd_bh->b_size);
+				kunmap_atomic(kaddr);
+				*(__be32 *)ptr = 0;
+				clear_buffer_escaped(bd2->bd_bh);
+				unlock_buffer(bd2->bd_bh);
+				brelse(bd2->bd_bh);
+				gfs2_log_write_page(sdp, page);
+			} else {
+				gfs2_log_write_bh(sdp, bd2->bd_bh);
+			}
 			gfs2_log_lock(sdp);
 			if (++n >= num)
 				break;
@@ -477,6 +503,14 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 	gfs2_log_unlock(sdp);
 }
 
+static void buf_lo_before_commit(struct gfs2_sbd *sdp)
+{
+	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
+
+	gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
+			   &sdp->sd_log_le_buf, 0);
+}
+
 static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
 {
 	struct list_head *head = &sdp->sd_log_le_buf;
@@ -594,15 +628,14 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 	struct list_head *head = &sdp->sd_log_le_revoke;
 	struct gfs2_bufdata *bd;
 	struct page *page;
+	unsigned int length;
 
 	if (!sdp->sd_log_num_revoke)
 		return;
 
-	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
+	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
+	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
 	ld = page_address(page);
-	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
-						    sizeof(u64)));
-	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
 	offset = sizeof(struct gfs2_log_descriptor);
 
 	list_for_each_entry(bd, head, bd_le.le_list) {
@@ -775,66 +808,6 @@ out:
 	unlock_buffer(bd->bd_bh);
 }
 
-static void gfs2_check_magic(struct buffer_head *bh)
-{
-	void *kaddr;
-	__be32 *ptr;
-
-	clear_buffer_escaped(bh);
-	kaddr = kmap_atomic(bh->b_page);
-	ptr = kaddr + bh_offset(bh);
-	if (*ptr == cpu_to_be32(GFS2_MAGIC))
-		set_buffer_escaped(bh);
-	kunmap_atomic(kaddr);
-}
-
-static void gfs2_write_blocks(struct gfs2_sbd *sdp,
-			      struct gfs2_log_descriptor *ld,
-			      struct page *page,
-			      struct list_head *list, struct list_head *done,
-			      unsigned int n)
-{
-	struct gfs2_bufdata *bd;
-	__be64 *ptr;
-
-	if (!ld)
-		return;
-
-	ld->ld_length = cpu_to_be32(n + 1);
-	ld->ld_data1 = cpu_to_be32(n);
-	ptr = (__force __be64 *)(ld + 1);
-
-	gfs2_log_write_page(sdp, page);
-	gfs2_log_lock(sdp);
-	while (!list_empty(list)) {
-		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
-		list_move_tail(&bd->bd_le.le_list, done);
-		get_bh(bd->bd_bh);
-		gfs2_log_unlock(sdp);
-		lock_buffer(bd->bd_bh);
-		if (buffer_escaped(bd->bd_bh)) {
-			void *kaddr;
-			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
-			ptr = page_address(page);
-			kaddr = kmap_atomic(bd->bd_bh->b_page);
-			memcpy(ptr, kaddr + bh_offset(bd->bd_bh),
-			       bd->bd_bh->b_size);
-			kunmap_atomic(kaddr);
-			*(__be32 *)ptr = 0;
-			clear_buffer_escaped(bd->bd_bh);
-			unlock_buffer(bd->bd_bh);
-			brelse(bd->bd_bh);
-			gfs2_log_write_page(sdp, page);
-		} else {
-			gfs2_log_write_bh(sdp, bd->bd_bh);
-		}
-		n--;
-		gfs2_log_lock(sdp);
-	}
-	gfs2_log_unlock(sdp);
-	BUG_ON(n != 0);
-}
-
 /**
  * databuf_lo_before_commit - Scan the data buffers, writing as we go
  *
@@ -842,40 +815,10 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp,
 
 static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 {
-	struct gfs2_bufdata *bd = NULL;
-	struct gfs2_log_descriptor *ld = NULL;
-	struct page *page = NULL;
-	unsigned int n = 0;
-	__be64 *ptr = NULL, *end = NULL;
-	LIST_HEAD(processed);
-	LIST_HEAD(in_progress);
+	unsigned int limit = buf_limit(sdp) / 2;
 
-	gfs2_log_lock(sdp);
-	while (!list_empty(&sdp->sd_log_le_databuf)) {
-		if (ptr == end) {
-			gfs2_log_unlock(sdp);
-			gfs2_write_blocks(sdp, ld, page, &in_progress, &processed, n);
-			n = 0;
-			page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
-			ld = page_address(page);
-			ptr = (__force __be64 *)(ld + 1);
-			end = (__force __be64 *)(page_address(page) + sdp->sd_vfs->s_blocksize);
-			end--;
-			gfs2_log_lock(sdp);
-			continue;
-		}
-		bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
-		list_move_tail(&bd->bd_le.le_list, &in_progress);
-		gfs2_check_magic(bd->bd_bh);
-		*ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
-		*ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
-		n++;
-	}
-	gfs2_log_unlock(sdp);
-	gfs2_write_blocks(sdp, ld, page, &in_progress, &processed, n);
-	gfs2_log_lock(sdp);
-	list_splice(&processed, &sdp->sd_log_le_databuf);
-	gfs2_log_unlock(sdp);
+	gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
+			   &sdp->sd_log_le_databuf, 1);
 }
 
 static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
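Taken together, the two before_commit hooks now differ only in their parameters: the journaled-data path halves the per-descriptor limit because each data buffer consumes two __be64 slots in the descriptor (block number plus escaped flag) instead of one, and it passes is_databuf so gfs2_check_magic() and the escaping copy are applied. For readability, here are the resulting wrappers reassembled verbatim from the added lines above:

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */

	gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
			   &sdp->sd_log_le_buf, 0);
}

static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	unsigned int limit = buf_limit(sdp) / 2;

	gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
			   &sdp->sd_log_le_databuf, 1);
}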