Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_bmap.c	162
1 file changed, 79 insertions(+), 83 deletions(-)
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3a6ed426327a..abe42448b1c0 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -5880,7 +5880,7 @@ xfs_getbmap(
 	void			*arg)		/* formatter arg */
 {
 	__int64_t		bmvend;		/* last block requested */
-	int			error;		/* return value */
+	int			error = 0;	/* return value */
 	__int64_t		fixlen;		/* length for -1 case */
 	int			i;		/* extent number */
 	int			lock;		/* lock state */
@@ -5899,30 +5899,8 @@ xfs_getbmap(
 
 	mp = ip->i_mount;
 	iflags = bmv->bmv_iflags;
-
 	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
 
-	/* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not
-	 * generate a DMAPI read event.  Otherwise, if the DM_EVENT_READ
-	 * bit is set for the file, generate a read event in order
-	 * that the DMAPI application may do its thing before we return
-	 * the extents.  Usually this means restoring user file data to
-	 * regions of the file that look like holes.
-	 *
-	 * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
-	 * BMV_IF_NO_DMAPI_READ so that read events are generated.
-	 * If this were not true, callers of ioctl( XFS_IOC_GETBMAP )
-	 * could misinterpret holes in a DMAPI file as true holes,
-	 * when in fact they may represent offline user data.
-	 */
-	if ((iflags & BMV_IF_NO_DMAPI_READ) == 0 &&
-	    DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
-	    whichfork == XFS_DATA_FORK) {
-		error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL);
-		if (error)
-			return XFS_ERROR(error);
-	}
-
 	if (whichfork == XFS_ATTR_FORK) {
 		if (XFS_IFORK_Q(ip)) {
 			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
@@ -5936,11 +5914,37 @@ xfs_getbmap(
 					 ip->i_mount);
 			return XFS_ERROR(EFSCORRUPTED);
 		}
-	} else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
-		   ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
-		   ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
-		return XFS_ERROR(EINVAL);
-	if (whichfork == XFS_DATA_FORK) {
+
+		prealloced = 0;
+		fixlen = 1LL << 32;
+	} else {
+		/*
+		 * If the BMV_IF_NO_DMAPI_READ interface bit specified, do
+		 * not generate a DMAPI read event.  Otherwise, if the
+		 * DM_EVENT_READ bit is set for the file, generate a read
+		 * event in order that the DMAPI application may do its thing
+		 * before we return the extents.  Usually this means restoring
+		 * user file data to regions of the file that look like holes.
+		 *
+		 * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
+		 * BMV_IF_NO_DMAPI_READ so that read events are generated.
+		 * If this were not true, callers of ioctl(XFS_IOC_GETBMAP)
+		 * could misinterpret holes in a DMAPI file as true holes,
+		 * when in fact they may represent offline user data.
+		 */
+		if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
+		    !(iflags & BMV_IF_NO_DMAPI_READ)) {
+			error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip,
+					      0, 0, 0, NULL);
+			if (error)
+				return XFS_ERROR(error);
+		}
+
+		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
+		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
+		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+			return XFS_ERROR(EINVAL);
+
 		if (xfs_get_extsz_hint(ip) ||
 		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
 			prealloced = 1;
@@ -5949,42 +5953,34 @@ xfs_getbmap(
 			prealloced = 0;
 			fixlen = ip->i_size;
 		}
-	} else {
-		prealloced = 0;
-		fixlen = 1LL << 32;
 	}
 
 	if (bmv->bmv_length == -1) {
 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
-		bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset),
-					(__int64_t)0);
-	} else if (bmv->bmv_length < 0)
-		return XFS_ERROR(EINVAL);
-	if (bmv->bmv_length == 0) {
+		bmv->bmv_length =
+			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
+	} else if (bmv->bmv_length == 0) {
 		bmv->bmv_entries = 0;
 		return 0;
+	} else if (bmv->bmv_length < 0) {
+		return XFS_ERROR(EINVAL);
 	}
+
 	nex = bmv->bmv_count - 1;
 	if (nex <= 0)
 		return XFS_ERROR(EINVAL);
 	bmvend = bmv->bmv_offset + bmv->bmv_length;
 
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
-
-	if (((iflags & BMV_IF_DELALLOC) == 0) &&
-	    (whichfork == XFS_DATA_FORK) &&
-	    (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size)) {
-		/* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
-		error = xfs_flush_pages(ip, (xfs_off_t)0,
-					-1, 0, FI_REMAPF);
-		if (error) {
-			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-			return error;
+	if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
+		if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) {
+			error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
+			if (error)
+				goto out_unlock_iolock;
 		}
-	}
 
-	ASSERT(whichfork == XFS_ATTR_FORK || (iflags & BMV_IF_DELALLOC) ||
-	       ip->i_delayed_blks == 0);
+		ASSERT(ip->i_delayed_blks == 0);
+	}
 
 	lock = xfs_ilock_map_shared(ip);
 
@@ -5995,23 +5991,25 @@ xfs_getbmap(
 	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
 		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
 
-	bmapi_flags = xfs_bmapi_aflag(whichfork) |
-			((iflags & BMV_IF_PREALLOC) ? 0 : XFS_BMAPI_IGSTATE);
+	bmapi_flags = xfs_bmapi_aflag(whichfork);
+	if (!(iflags & BMV_IF_PREALLOC))
+		bmapi_flags |= XFS_BMAPI_IGSTATE;
 
 	/*
 	 * Allocate enough space to handle "subnex" maps at a time.
 	 */
+	error = ENOMEM;
 	subnex = 16;
-	map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP);
+	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL);
+	if (!map)
+		goto out_unlock_ilock;
 
 	bmv->bmv_entries = 0;
 
-	if ((XFS_IFORK_NEXTENTS(ip, whichfork) == 0)) {
-		if (((iflags & BMV_IF_DELALLOC) == 0) ||
-		    whichfork == XFS_ATTR_FORK) {
-			error = 0;
-			goto unlock_and_return;
-		}
+	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
+	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
+		error = 0;
+		goto out_free_map;
 	}
 
 	nexleft = nex;
@@ -6023,10 +6021,12 @@ xfs_getbmap(
 				bmapi_flags, NULL, 0, map, &nmap,
 				NULL, NULL);
 		if (error)
-			goto unlock_and_return;
+			goto out_free_map;
 		ASSERT(nmap <= subnex);
 
 		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
+			int full = 0;	/* user array is full */
+
 			out.bmv_oflags = 0;
 			if (map[i].br_state == XFS_EXT_UNWRITTEN)
 				out.bmv_oflags |= BMV_OF_PREALLOC;
@@ -6041,36 +6041,32 @@ xfs_getbmap(
 			    whichfork == XFS_ATTR_FORK) {
 				/* came to the end of attribute fork */
 				out.bmv_oflags |= BMV_OF_LAST;
-				goto unlock_and_return;
-			} else {
-				int full = 0;	/* user array is full */
-
-				if (!xfs_getbmapx_fix_eof_hole(ip, &out,
-							prealloced, bmvend,
-							map[i].br_startblock)) {
-					goto unlock_and_return;
-				}
-
-				/* format results & advance arg */
-				error = formatter(&arg, &out, &full);
-				if (error || full)
-					goto unlock_and_return;
-				nexleft--;
-				bmv->bmv_offset =
-					out.bmv_offset + out.bmv_length;
-				bmv->bmv_length = MAX((__int64_t)0,
-					(__int64_t)(bmvend - bmv->bmv_offset));
-				bmv->bmv_entries++;
+				goto out_free_map;
 			}
+
+			if (!xfs_getbmapx_fix_eof_hole(ip, &out, prealloced,
+					bmvend, map[i].br_startblock))
+				goto out_free_map;
+
+			/* format results & advance arg */
+			error = formatter(&arg, &out, &full);
+			if (error || full)
+				goto out_free_map;
+			nexleft--;
+			bmv->bmv_offset =
+				out.bmv_offset + out.bmv_length;
+			bmv->bmv_length =
+				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
+			bmv->bmv_entries++;
 		}
 	} while (nmap && nexleft && bmv->bmv_length);
 
-unlock_and_return:
+ out_free_map:
+	kmem_free(map);
+ out_unlock_ilock:
 	xfs_iunlock_map_shared(ip, lock);
+ out_unlock_iolock:
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-
-	kmem_free(map);
-
 	return error;
 }
 
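The hunks above rework xfs_getbmap(), the kernel backend of the XFS_IOC_GETBMAP/XFS_IOC_GETBMAPX ioctls, consolidating its exit paths into the out_free_map, out_unlock_ilock and out_unlock_iolock labels. Purely as an illustration, and not part of the patch, the sketch below shows how userspace typically drives that interface. The <xfs/xfs.h> header path and the 32-extent buffer size are assumptions; the conventions it relies on (bmv_count includes the leading header element, bmv_length == -1 means "map through end of file", offsets and lengths are in 512-byte blocks) match the checks visible in the code above.

/*
 * Illustrative userspace sketch, not part of the patch: list the extents of
 * a file on XFS via XFS_IOC_GETBMAPX.  struct getbmapx and the BMV_* flags
 * come from the xfsprogs development headers; the include path may vary.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* struct getbmapx, XFS_IOC_GETBMAPX, BMV_* */

int main(int argc, char **argv)
{
	struct getbmapx	*bmx;
	int		nmax = 32;	/* extents requested per call (assumption) */
	int		fd, i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file-on-xfs>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* element 0 is the request header; results land in elements 1..n */
	bmx = calloc(nmax + 1, sizeof(*bmx));
	if (!bmx) {
		close(fd);
		return 1;
	}

	bmx->bmv_offset = 0;		/* start of file, in 512-byte blocks */
	bmx->bmv_length = -1;		/* -1: map through end of file */
	bmx->bmv_count = nmax + 1;	/* includes the header element */
	bmx->bmv_iflags = BMV_IF_PREALLOC;	/* report unwritten extents too */

	if (ioctl(fd, XFS_IOC_GETBMAPX, bmx) < 0) {
		perror("XFS_IOC_GETBMAPX");
		free(bmx);
		close(fd);
		return 1;
	}

	for (i = 1; i <= bmx->bmv_entries; i++)
		printf("extent %d: offset %lld len %lld block %lld oflags 0x%x\n",
		       i, (long long)bmx[i].bmv_offset,
		       (long long)bmx[i].bmv_length,
		       (long long)bmx[i].bmv_block,
		       bmx[i].bmv_oflags);

	free(bmx);
	close(fd);
	return 0;
}

With BMV_IF_PREALLOC set, unwritten preallocated ranges come back with BMV_OF_PREALLOC in bmv_oflags, which is the same distinction the BMV_IF_PREALLOC/XFS_BMAPI_IGSTATE and XFS_EXT_UNWRITTEN handling in the hunks above implements on the kernel side.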