author    Bob Peterson <rpeterso@redhat.com>        2017-02-16 10:27:16 -0500
committer Andreas Gruenbacher <agruenba@redhat.com> 2017-10-31 09:26:33 -0400
commit    3974320ca6aa68d479051f208d5c95afd1e47a4c
tree      6ffea2a1d15f260142da64d7fa410baaa93b809b
parent    5f8bd4440d94729d1977fba6ca0b4875c2ee1515
GFS2: Implement iomap for block_map
This patch implements iomap for block mapping, and switches the
block_map function to use it under the covers. The additional
IOMAP_F_BOUNDARY iomap flag indicates when iomap has reached a
"metadata boundary" and fetching the next mapping is likely to incur
an additional I/O. This flag is used for setting the bh buffer
boundary flag.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
-rw-r--r--  fs/gfs2/bmap.c          273
-rw-r--r--  fs/gfs2/bmap.h            4
-rw-r--r--  fs/gfs2/trace_gfs2.h     65
-rw-r--r--  include/linux/iomap.h     3
4 files changed, 276 insertions(+), 69 deletions(-)
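As a reading aid (not part of the patch), the sketch below condenses how a get_block-style caller is expected to consume the mapping that gfs2_iomap_begin() returns, mirroring the rewritten gfs2_block_map() wrapper in the diff that follows. The function name example_get_block() is made up for illustration, and the -ENOENT and length-clamping handling of the real wrapper is trimmed.

/* Sketch only: mirrors the gfs2_block_map() wrapper below, simplified. */
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/iomap.h>
#include "bmap.h"	/* gfs2_iomap_begin() prototype added by this patch */

static int example_get_block(struct inode *inode, sector_t lblock,
			     struct buffer_head *bh_map, int create)
{
	struct iomap iomap = { };
	unsigned flags = create ? IOMAP_WRITE : 0;
	int ret;

	ret = gfs2_iomap_begin(inode, (loff_t)lblock << inode->i_blkbits,
			       bh_map->b_size, flags, &iomap);
	if (ret)
		return ret;

	if (iomap.addr != IOMAP_NULL_ADDR)	/* a real extent was mapped */
		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
	bh_map->b_size = iomap.length;		/* extent length, in bytes */
	if (iomap.flags & IOMAP_F_BOUNDARY)	/* next mapping needs a metadata read */
		set_buffer_boundary(bh_map);
	if (iomap.flags & IOMAP_F_NEW)		/* blocks were freshly allocated */
		set_buffer_new(bh_map);
	return 0;
}

The real wrapper additionally clamps iomap.length to the requested bh_map->b_size (clearing IOMAP_F_BOUNDARY when it does) and converts -ENOENT beyond end of file into an unmapped buffer; see the bmap.c hunks below.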
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 03badc8417d7..d5f0d96169c5 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -13,6 +13,7 @@
 #include <linux/blkdev.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
+#include <linux/iomap.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -505,10 +506,8 @@ static inline unsigned int hptrs(struct gfs2_sbd *sdp, const unsigned int hgt)
  * Returns: errno on error
  */
 
-static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
-			   bool zero_new, struct metapath *mp,
-			   const size_t maxlen, sector_t *dblock,
-			   unsigned *dblks)
+static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
+			    unsigned flags, struct metapath *mp)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -516,36 +515,37 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
 	struct buffer_head *dibh = mp->mp_bh[0];
 	u64 bn;
 	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
+	unsigned dblks = 0;
 	unsigned ptrs_per_blk;
 	const unsigned end_of_metadata = mp->mp_fheight - 1;
 	int ret;
-	int eob = 0;
 	enum alloc_state state;
 	__be64 *ptr;
 	__be64 zero_bn = 0;
+	size_t maxlen = iomap->length >> inode->i_blkbits;
 
 	BUG_ON(mp->mp_aheight < 1);
 	BUG_ON(dibh == NULL);
 
-	*dblock = 0;
-	*dblks = 0;
 	gfs2_trans_add_meta(ip->i_gl, dibh);
 
 	if (mp->mp_fheight == mp->mp_aheight) {
 		struct buffer_head *bh;
+		int eob;
+
 		/* Bottom indirect block exists, find unalloced extent size */
 		ptr = metapointer(end_of_metadata, mp);
 		bh = mp->mp_bh[end_of_metadata];
-		*dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr,
+		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr,
 					   maxlen, &eob);
-		BUG_ON(*dblks < 1);
+		BUG_ON(dblks < 1);
 		state = ALLOC_DATA;
 	} else {
 		/* Need to allocate indirect blocks */
 		ptrs_per_blk = mp->mp_fheight > 1 ? sdp->sd_inptrs :
 			sdp->sd_diptrs;
-		*dblks = min(maxlen, (size_t)(ptrs_per_blk -
+		dblks = min(maxlen, (size_t)(ptrs_per_blk -
 			    mp->mp_list[end_of_metadata]));
 		if (mp->mp_fheight == ip->i_height) {
 			/* Writing into existing tree, extend tree down */
 			iblks = mp->mp_fheight - mp->mp_aheight;
@@ -561,7 +561,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
 
 	/* start of the second part of the function (state machine) */
 
-	blks = *dblks + iblks;
+	blks = dblks + iblks;
 	i = mp->mp_aheight;
 	do {
 		int error;
@@ -618,26 +618,29 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
 			break;
 		/* Tree complete, adding data blocks */
 		case ALLOC_DATA:
-			BUG_ON(n > *dblks);
+			BUG_ON(n > dblks);
 			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
 			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
-			*dblks = n;
+			dblks = n;
 			ptr = metapointer(end_of_metadata, mp);
-			*dblock = bn;
+			iomap->addr = bn << inode->i_blkbits;
+			iomap->flags |= IOMAP_F_NEW;
 			while (n-- > 0)
 				*ptr++ = cpu_to_be64(bn++);
-			if (zero_new) {
-				ret = sb_issue_zeroout(sb, *dblock, *dblks,
-						       GFP_NOFS);
+			if (flags & IOMAP_ZERO) {
+				ret = sb_issue_zeroout(sb, iomap->addr >> inode->i_blkbits,
+						       dblks, GFP_NOFS);
 				if (ret) {
 					fs_err(sdp,
 					       "Failed to zero data buffers\n");
+					flags &= ~IOMAP_ZERO;
 				}
 			}
 			break;
 		}
-	} while ((state != ALLOC_DATA) || !(*dblock));
+	} while (iomap->addr == IOMAP_NULL_ADDR);
 
+	iomap->length = (u64)dblks << inode->i_blkbits;
 	ip->i_height = mp->mp_fheight;
 	gfs2_add_inode_blocks(&ip->i_inode, alloced);
 	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
@@ -645,47 +648,123 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
 }
 
 /**
- * gfs2_block_map - Map a block from an inode to a disk block
+ * hole_size - figure out the size of a hole
  * @inode: The inode
- * @lblock: The logical block number
- * @bh_map: The bh to be mapped
- * @create: True if its ok to alloc blocks to satify the request
+ * @lblock: The logical starting block number
+ * @mp: The metapath
  *
- * Sets buffer_mapped() if successful, sets buffer_boundary() if a
- * read of metadata will be required before the next block can be
- * mapped. Sets buffer_new() if new blocks were allocated.
+ * Returns: The hole size in bytes
  *
- * Returns: errno
  */
+static u64 hole_size(struct inode *inode, sector_t lblock, struct metapath *mp)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct metapath mp_eof;
+	u64 factor = 1;
+	int hgt;
+	u64 holesz = 0;
+	const __be64 *first, *end, *ptr;
+	const struct buffer_head *bh;
+	u64 lblock_stop = (i_size_read(inode) - 1) >> inode->i_blkbits;
+	int zeroptrs;
+	bool done = false;
+
+	/* Get another metapath, to the very last byte */
+	find_metapath(sdp, lblock_stop, &mp_eof, ip->i_height);
+	for (hgt = ip->i_height - 1; hgt >= 0 && !done; hgt--) {
+		bh = mp->mp_bh[hgt];
+		if (bh) {
+			zeroptrs = 0;
+			first = metapointer(hgt, mp);
+			end = (const __be64 *)(bh->b_data + bh->b_size);
+
+			for (ptr = first; ptr < end; ptr++) {
+				if (*ptr) {
+					done = true;
+					break;
+				} else {
+					zeroptrs++;
+				}
+			}
+		} else {
+			zeroptrs = sdp->sd_inptrs;
+		}
+		if (factor * zeroptrs >= lblock_stop - lblock + 1) {
+			holesz = lblock_stop - lblock + 1;
+			break;
+		}
+		holesz += factor * zeroptrs;
 
-int gfs2_block_map(struct inode *inode, sector_t lblock,
-		   struct buffer_head *bh_map, int create)
+		factor *= sdp->sd_inptrs;
+		if (hgt && (mp->mp_list[hgt - 1] < mp_eof.mp_list[hgt - 1]))
+			(mp->mp_list[hgt - 1])++;
+	}
+	return holesz << inode->i_blkbits;
+}
+
+static void gfs2_stuffed_iomap(struct inode *inode, struct iomap *iomap)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+
+	iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
+		      sizeof(struct gfs2_dinode);
+	iomap->offset = 0;
+	iomap->length = i_size_read(inode);
+	iomap->type = IOMAP_MAPPED;
+	iomap->flags = IOMAP_F_DATA_INLINE;
+}
+
+/**
+ * gfs2_iomap_begin - Map blocks from an inode to disk blocks
+ * @inode: The inode
+ * @pos: Starting position in bytes
+ * @length: Length to map, in bytes
+ * @flags: iomap flags
+ * @iomap: The iomap structure
+ *
+ * Returns: errno
+ */
+int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
+		     unsigned flags, struct iomap *iomap)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct metapath mp = { .mp_aheight = 1, };
 	unsigned int factor = sdp->sd_sb.sb_bsize;
-	const size_t maxlen = bh_map->b_size >> inode->i_blkbits;
 	const u64 *arr = sdp->sd_heightsize;
 	__be64 *ptr;
-	u64 size;
-	struct metapath mp;
+	sector_t lblock;
+	sector_t lend;
 	int ret;
 	int eob;
 	unsigned int len;
 	struct buffer_head *bh;
 	u8 height;
-	bool zero_new = false;
-	sector_t dblock = 0;
-	unsigned dblks;
 
-	BUG_ON(maxlen == 0);
+	trace_gfs2_iomap_start(ip, pos, length, flags);
+	if (!length) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-	memset(&mp, 0, sizeof(mp));
-	bmap_lock(ip, create);
-	clear_buffer_mapped(bh_map);
-	clear_buffer_new(bh_map);
-	clear_buffer_boundary(bh_map);
-	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
+	if ((flags & IOMAP_REPORT) && gfs2_is_stuffed(ip)) {
+		gfs2_stuffed_iomap(inode, iomap);
+		if (pos >= iomap->length)
+			return -ENOENT;
+		ret = 0;
+		goto out;
+	}
+
+	lblock = pos >> inode->i_blkbits;
+	lend = (pos + length + sdp->sd_sb.sb_bsize - 1) >> inode->i_blkbits;
+
+	iomap->offset = lblock << inode->i_blkbits;
+	iomap->addr = IOMAP_NULL_ADDR;
+	iomap->type = IOMAP_HOLE;
+	iomap->length = (u64)(lend - lblock) << inode->i_blkbits;
+	iomap->flags = IOMAP_F_MERGED;
+	bmap_lock(ip, 0);
 
 	/*
 	 * Directory data blocks have a struct gfs2_meta_header header, so the
@@ -699,56 +778,114 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
 
 	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
 	if (ret)
-		goto out;
+		goto out_release;
 
 	height = ip->i_height;
-	size = (lblock + 1) * factor;
-	while (size > arr[height])
+	while ((lblock + 1) * factor > arr[height])
 		height++;
 	find_metapath(sdp, lblock, &mp, height);
-	mp.mp_aheight = 1;
 	if (height > ip->i_height || gfs2_is_stuffed(ip))
 		goto do_alloc;
+
 	ret = lookup_metapath(ip, &mp);
 	if (ret < 0)
-		goto out;
+		goto out_release;
+
 	if (mp.mp_aheight != ip->i_height)
 		goto do_alloc;
+
 	ptr = metapointer(ip->i_height - 1, &mp);
 	if (*ptr == 0)
 		goto do_alloc;
-	map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
+
+	iomap->type = IOMAP_MAPPED;
+	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
+
 	bh = mp.mp_bh[ip->i_height - 1];
-	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
-	bh_map->b_size = (len << inode->i_blkbits);
+	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, lend - lblock, &eob);
 	if (eob)
-		set_buffer_boundary(bh_map);
+		iomap->flags |= IOMAP_F_BOUNDARY;
+	iomap->length = (u64)len << inode->i_blkbits;
+
 	ret = 0;
-out:
+
+out_release:
 	release_metapath(&mp);
-	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
-	bmap_unlock(ip, create);
+	bmap_unlock(ip, 0);
+out:
+	trace_gfs2_iomap_end(ip, iomap, ret);
 	return ret;
 
 do_alloc:
-	/* All allocations are done here, firstly check create flag */
-	if (!create) {
-		BUG_ON(gfs2_is_stuffed(ip));
+	if (!(flags & IOMAP_WRITE)) {
+		if (pos >= i_size_read(inode)) {
+			ret = -ENOENT;
+			goto out_release;
+		}
 		ret = 0;
-		goto out;
+		iomap->length = hole_size(inode, lblock, &mp);
+		goto out_release;
 	}
 
-	/* At this point ret is the tree depth of already allocated blocks */
+	ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
+	goto out_release;
+}
+
+/**
+ * gfs2_block_map - Map a block from an inode to a disk block
+ * @inode: The inode
+ * @lblock: The logical block number
+ * @bh_map: The bh to be mapped
+ * @create: True if its ok to alloc blocks to satify the request
+ *
+ * Sets buffer_mapped() if successful, sets buffer_boundary() if a
+ * read of metadata will be required before the next block can be
+ * mapped. Sets buffer_new() if new blocks were allocated.
+ *
+ * Returns: errno
+ */
+
+int gfs2_block_map(struct inode *inode, sector_t lblock,
+		   struct buffer_head *bh_map, int create)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct iomap iomap;
+	int ret, flags = 0;
+
+	clear_buffer_mapped(bh_map);
+	clear_buffer_new(bh_map);
+	clear_buffer_boundary(bh_map);
+	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
+
+	if (create)
+		flags |= IOMAP_WRITE;
 	if (buffer_zeronew(bh_map))
-		zero_new = true;
-	ret = gfs2_bmap_alloc(inode, lblock, zero_new, &mp, maxlen, &dblock,
-			      &dblks);
-	if (ret == 0) {
-		map_bh(bh_map, inode->i_sb, dblock);
-		bh_map->b_size = dblks << inode->i_blkbits;
-		set_buffer_new(bh_map);
+		flags |= IOMAP_ZERO;
+	ret = gfs2_iomap_begin(inode, (loff_t)lblock << inode->i_blkbits,
+			       bh_map->b_size, flags, &iomap);
+	if (ret) {
+		if (!create && ret == -ENOENT) {
+			/* Return unmapped buffer beyond the end of file. */
+			ret = 0;
+		}
+		goto out;
+	}
+
+	if (iomap.length > bh_map->b_size) {
+		iomap.length = bh_map->b_size;
+		iomap.flags &= ~IOMAP_F_BOUNDARY;
 	}
-	goto out;
+	if (iomap.addr != IOMAP_NULL_ADDR)
+		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
+	bh_map->b_size = iomap.length;
+	if (iomap.flags & IOMAP_F_BOUNDARY)
+		set_buffer_boundary(bh_map);
+	if (iomap.flags & IOMAP_F_NEW)
+		set_buffer_new(bh_map);
+
+out:
+	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
+	return ret;
 }
 
 /*
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index 81ded5e2aaa2..443cc182cf18 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -10,6 +10,8 @@
 #ifndef __BMAP_DOT_H__
 #define __BMAP_DOT_H__
 
+#include <linux/iomap.h>
+
 #include "inode.h"
 
 struct inode;
@@ -47,6 +49,8 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
 extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
 extern int gfs2_block_map(struct inode *inode, sector_t lblock,
 			  struct buffer_head *bh, int create);
+extern int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
+			    unsigned flags, struct iomap *iomap);
 extern int gfs2_extent_map(struct inode *inode, u64 lblock, int *new,
 			   u64 *dblock, unsigned *extlen);
 extern int gfs2_setattr_size(struct inode *inode, u64 size);
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 49ac55da4e33..3c91ae3cf0b2 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -12,6 +12,7 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/writeback.h>
 #include <linux/ktime.h>
+#include <linux/iomap.h>
 #include "incore.h"
 #include "glock.h"
 #include "rgrp.h"
@@ -469,6 +470,70 @@ TRACE_EVENT(gfs2_bmap,
 		  __entry->errno)
 );
 
+TRACE_EVENT(gfs2_iomap_start,
+
+	TP_PROTO(const struct gfs2_inode *ip, loff_t pos, ssize_t length,
+		 u16 flags),
+
+	TP_ARGS(ip, pos, length, flags),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,	dev	)
+		__field(	u64,	inum	)
+		__field(	loff_t,	pos	)
+		__field(	ssize_t, length	)
+		__field(	u16,	flags	)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+		__entry->inum	= ip->i_no_addr;
+		__entry->pos	= pos;
+		__entry->length	= length;
+		__entry->flags	= flags;
+	),
+
+	TP_printk("%u,%u bmap %llu iomap start %llu/%lu flags:%08x",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long long)__entry->inum,
+		  (unsigned long long)__entry->pos,
+		  (unsigned long)__entry->length, (u16)__entry->flags)
+);
+
+TRACE_EVENT(gfs2_iomap_end,
+
+	TP_PROTO(const struct gfs2_inode *ip, struct iomap *iomap, int ret),
+
+	TP_ARGS(ip, iomap, ret),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,	dev	)
+		__field(	u64,	inum	)
+		__field(	loff_t,	offset	)
+		__field(	ssize_t, length	)
+		__field(	u16,	flags	)
+		__field(	u16,	type	)
+		__field(	int,	ret	)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+		__entry->inum	= ip->i_no_addr;
+		__entry->offset	= iomap->offset;
+		__entry->length	= iomap->length;
+		__entry->flags	= iomap->flags;
+		__entry->type	= iomap->type;
+		__entry->ret	= ret;
+	),
+
+	TP_printk("%u,%u bmap %llu iomap end %llu/%lu ty:%d flags:%08x rc:%d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long long)__entry->inum,
+		  (unsigned long long)__entry->offset,
+		  (unsigned long)__entry->length, (u16)__entry->type,
+		  (u16)__entry->flags, __entry->ret)
+);
+
 /* Keep track of blocks as they are allocated/freed */
 TRACE_EVENT(gfs2_block_alloc,
 
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 2b0790dbd6ea..a61be86710b5 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -21,7 +21,8 @@ struct vm_fault;
 /*
  * Flags for all iomap mappings:
  */
 #define IOMAP_F_NEW		0x01	/* blocks have been newly allocated */
+#define IOMAP_F_BOUNDARY	0x02	/* mapping ends at metadata boundary */
 
 /*
  * Flags that only need to be reported for IOMAP_REPORT requests: