Diffstat (limited to 'fs/xfs/xfs_inode.c')
-rw-r--r--  fs/xfs/xfs_inode.c  209
1 file changed, 86 insertions, 123 deletions
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 0ffd56447045..68415cb4f23c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -27,13 +27,10 @@
 #include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
 #include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
@@ -44,7 +41,6 @@
 #include "xfs_alloc.h"
 #include "xfs_ialloc.h"
 #include "xfs_bmap.h"
-#include "xfs_rw.h"
 #include "xfs_error.h"
 #include "xfs_utils.h"
 #include "xfs_quota.h"
@@ -177,7 +173,7 @@ xfs_imap_to_bp(
 	if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
 				XFS_ERRTAG_ITOBP_INOTOBP,
 				XFS_RANDOM_ITOBP_INOTOBP))) {
-		if (iget_flags & XFS_IGET_BULKSTAT) {
+		if (iget_flags & XFS_IGET_UNTRUSTED) {
 			xfs_trans_brelse(tp, bp);
 			return XFS_ERROR(EINVAL);
 		}
@@ -426,7 +422,7 @@ xfs_iformat(
 	if (!XFS_DFORK_Q(dip))
 		return 0;
 	ASSERT(ip->i_afp == NULL);
-	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
+	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
 	ip->i_afp->if_ext_max =
 		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
 	switch (dip->di_aformat) {
@@ -509,7 +505,7 @@ xfs_iformat_local(
 		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
 	else {
 		real_size = roundup(size, 4);
-		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
+		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
 	}
 	ifp->if_bytes = size;
 	ifp->if_real_bytes = real_size;
@@ -636,7 +632,7 @@ xfs_iformat_btree(
 	}
 
 	ifp->if_broot_bytes = size;
-	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
+	ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
 	ASSERT(ifp->if_broot != NULL);
 	/*
 	 * Copy and convert from the on-disk structure
@@ -787,7 +783,6 @@ xfs_iread(
 	xfs_mount_t	*mp,
 	xfs_trans_t	*tp,
 	xfs_inode_t	*ip,
-	xfs_daddr_t	bno,
 	uint		iget_flags)
 {
 	xfs_buf_t	*bp;
@@ -797,11 +792,9 @@ xfs_iread(
 	/*
 	 * Fill in the location information in the in-core inode.
 	 */
-	ip->i_imap.im_blkno = bno;
 	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
 	if (error)
 		return error;
-	ASSERT(bno == 0 || bno == ip->i_imap.im_blkno);
 
 	/*
 	 * Get pointers to the on-disk inode and the buffer containing it.
@@ -925,7 +918,6 @@ xfs_iread_extents(
 	int		error;
 	xfs_ifork_t	*ifp;
 	xfs_extnum_t	nextents;
-	size_t		size;
 
 	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
 		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
@@ -933,7 +925,6 @@ xfs_iread_extents(
 		return XFS_ERROR(EFSCORRUPTED);
 	}
 	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
-	size = nextents * sizeof(xfs_bmbt_rec_t);
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 
 	/*
@@ -1229,7 +1220,7 @@ xfs_isize_check(
 			  (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
 			  map_first),
 			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
-			 NULL, NULL))
+			 NULL))
 	    return;
 	ASSERT(nimaps == 1);
 	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
@@ -1463,7 +1454,7 @@ xfs_itruncate_finish(
 	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
 	ASSERT(ip->i_transp == *tp);
 	ASSERT(ip->i_itemp != NULL);
-	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
+	ASSERT(ip->i_itemp->ili_lock_flags == 0);
 
 
 	ntp = *tp;
@@ -1592,11 +1583,10 @@ xfs_itruncate_finish(
 		xfs_bmap_init(&free_list, &first_block);
 		error = xfs_bunmapi(ntp, ip,
 				    first_unmap_block, unmap_len,
-				    xfs_bmapi_aflag(fork) |
-				    (sync ? 0 : XFS_BMAPI_ASYNC),
+				    xfs_bmapi_aflag(fork),
 				    XFS_ITRUNC_MAX_EXTENTS,
 				    &first_block, &free_list,
-				    NULL, &done);
+				    &done);
 		if (error) {
 			/*
 			 * If the bunmapi call encounters an error,
@@ -1615,12 +1605,8 @@ xfs_itruncate_finish(
 		 */
 		error = xfs_bmap_finish(tp, &free_list, &committed);
 		ntp = *tp;
-		if (committed) {
-			/* link the inode into the next xact in the chain */
-			xfs_trans_ijoin(ntp, ip,
-					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-			xfs_trans_ihold(ntp, ip);
-		}
+		if (committed)
+			xfs_trans_ijoin(ntp, ip);
 
 		if (error) {
 			/*
@@ -1649,9 +1635,7 @@ xfs_itruncate_finish(
 	error = xfs_trans_commit(*tp, 0);
 	*tp = ntp;
 
-	/* link the inode into the next transaction in the chain */
-	xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-	xfs_trans_ihold(ntp, ip);
+	xfs_trans_ijoin(ntp, ip);
 
 	if (error)
 		return error;
@@ -1940,10 +1924,10 @@ xfs_ifree_cluster(
 	int			blks_per_cluster;
 	int			nbufs;
 	int			ninodes;
-	int			i, j, found, pre_flushed;
+	int			i, j;
 	xfs_daddr_t		blkno;
 	xfs_buf_t		*bp;
-	xfs_inode_t		*ip, **ip_found;
+	xfs_inode_t		*ip;
 	xfs_inode_log_item_t	*iip;
 	xfs_log_item_t		*lip;
 	struct xfs_perag	*pag;
@@ -1960,114 +1944,97 @@ xfs_ifree_cluster(
 		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
 	}
 
-	ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
-
 	for (j = 0; j < nbufs; j++, inum += ninodes) {
+		int	found = 0;
+
 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
 					 XFS_INO_TO_AGBNO(mp, inum));
 
+		/*
+		 * We obtain and lock the backing buffer first in the process
+		 * here, as we have to ensure that any dirty inode that we
+		 * can't get the flush lock on is attached to the buffer.
+		 * If we scan the in-memory inodes first, then buffer IO can
+		 * complete before we get a lock on it, and hence we may fail
+		 * to mark all the active inodes on the buffer stale.
+		 */
+		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
+					mp->m_bsize * blks_per_cluster,
+					XBF_LOCK);
+
+		/*
+		 * Walk the inodes already attached to the buffer and mark them
+		 * stale. These will all have the flush locks held, so an
+		 * in-memory inode walk can't lock them.
+		 */
+		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+		while (lip) {
+			if (lip->li_type == XFS_LI_INODE) {
+				iip = (xfs_inode_log_item_t *)lip;
+				ASSERT(iip->ili_logged == 1);
+				lip->li_cb = xfs_istale_done;
+				xfs_trans_ail_copy_lsn(mp->m_ail,
+							&iip->ili_flush_lsn,
+							&iip->ili_item.li_lsn);
+				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
+				found++;
+			}
+			lip = lip->li_bio_list;
+		}
 
 		/*
-		 * Look for each inode in memory and attempt to lock it,
-		 * we can be racing with flush and tail pushing here.
-		 * any inode we get the locks on, add to an array of
-		 * inode items to process later.
+		 * For each inode in memory attempt to add it to the inode
+		 * buffer and set it up for being staled on buffer IO
+		 * completion. This is safe as we've locked out tail pushing
+		 * and flushing by locking the buffer.
 		 *
-		 * The get the buffer lock, we could beat a flush
-		 * or tail pushing thread to the lock here, in which
-		 * case they will go looking for the inode buffer
-		 * and fail, we need some other form of interlock
-		 * here.
+		 * We have already marked every inode that was part of a
+		 * transaction stale above, which means there is no point in
+		 * even trying to lock them.
 		 */
-		found = 0;
 		for (i = 0; i < ninodes; i++) {
 			read_lock(&pag->pag_ici_lock);
 			ip = radix_tree_lookup(&pag->pag_ici_root,
 					XFS_INO_TO_AGINO(mp, (inum + i)));
 
-			/* Inode not in memory or we found it already,
-			 * nothing to do
-			 */
+			/* Inode not in memory or stale, nothing to do */
 			if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
 				read_unlock(&pag->pag_ici_lock);
 				continue;
 			}
 
-			if (xfs_inode_clean(ip)) {
-				read_unlock(&pag->pag_ici_lock);
-				continue;
-			}
-
-			/* If we can get the locks then add it to the
-			 * list, otherwise by the time we get the bp lock
-			 * below it will already be attached to the
-			 * inode buffer.
-			 */
-
-			/* This inode will already be locked - by us, lets
-			 * keep it that way.
-			 */
-
-			if (ip == free_ip) {
-				if (xfs_iflock_nowait(ip)) {
-					xfs_iflags_set(ip, XFS_ISTALE);
-					if (xfs_inode_clean(ip)) {
-						xfs_ifunlock(ip);
-					} else {
-						ip_found[found++] = ip;
-					}
-				}
+			/* don't try to lock/unlock the current inode */
+			if (ip != free_ip &&
+			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 				read_unlock(&pag->pag_ici_lock);
 				continue;
 			}
+			read_unlock(&pag->pag_ici_lock);
 
-			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
-				if (xfs_iflock_nowait(ip)) {
-					xfs_iflags_set(ip, XFS_ISTALE);
-
-					if (xfs_inode_clean(ip)) {
-						xfs_ifunlock(ip);
-						xfs_iunlock(ip, XFS_ILOCK_EXCL);
-					} else {
-						ip_found[found++] = ip;
-					}
-				} else {
+			if (!xfs_iflock_nowait(ip)) {
+				if (ip != free_ip)
 					xfs_iunlock(ip, XFS_ILOCK_EXCL);
-				}
+				continue;
 			}
-			read_unlock(&pag->pag_ici_lock);
-		}
 
-		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
-					mp->m_bsize * blks_per_cluster,
-					XBF_LOCK);
-
-		pre_flushed = 0;
-		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
-		while (lip) {
-			if (lip->li_type == XFS_LI_INODE) {
-				iip = (xfs_inode_log_item_t *)lip;
-				ASSERT(iip->ili_logged == 1);
-				lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
-				xfs_trans_ail_copy_lsn(mp->m_ail,
-							&iip->ili_flush_lsn,
-							&iip->ili_item.li_lsn);
-				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
-				pre_flushed++;
+			xfs_iflags_set(ip, XFS_ISTALE);
+			if (xfs_inode_clean(ip)) {
+				ASSERT(ip != free_ip);
+				xfs_ifunlock(ip);
+				xfs_iunlock(ip, XFS_ILOCK_EXCL);
+				continue;
 			}
-			lip = lip->li_bio_list;
-		}
 
-		for (i = 0; i < found; i++) {
-			ip = ip_found[i];
 			iip = ip->i_itemp;
-
 			if (!iip) {
+				/* inode with unlogged changes only */
+				ASSERT(ip != free_ip);
 				ip->i_update_core = 0;
 				xfs_ifunlock(ip);
 				xfs_iunlock(ip, XFS_ILOCK_EXCL);
 				continue;
 			}
+			found++;
 
 			iip->ili_last_fields = iip->ili_format.ilf_fields;
 			iip->ili_format.ilf_fields = 0;
@@ -2075,20 +2042,18 @@ xfs_ifree_cluster(
 			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
 						&iip->ili_item.li_lsn);
 
-			xfs_buf_attach_iodone(bp,
-				(void(*)(xfs_buf_t*,xfs_log_item_t*))
-				xfs_istale_done, (xfs_log_item_t *)iip);
-			if (ip != free_ip) {
+			xfs_buf_attach_iodone(bp, xfs_istale_done,
+						&iip->ili_item);
+
+			if (ip != free_ip)
 				xfs_iunlock(ip, XFS_ILOCK_EXCL);
-			}
 		}
 
-		if (found || pre_flushed)
+		if (found)
 			xfs_trans_stale_inode_buf(tp, bp);
 		xfs_trans_binval(tp, bp);
 	}
 
-	kmem_free(ip_found);
 	xfs_perag_put(pag);
 }
 
@@ -2224,7 +2189,7 @@ xfs_iroot_realloc(
 	 */
 	if (ifp->if_broot_bytes == 0) {
 		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
-		ifp->if_broot = kmem_alloc(new_size, KM_SLEEP);
+		ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
 		ifp->if_broot_bytes = (int)new_size;
 		return;
 	}
@@ -2240,7 +2205,7 @@ xfs_iroot_realloc(
 		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
 		ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
 				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
-				KM_SLEEP);
+				KM_SLEEP | KM_NOFS);
 		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
 						     ifp->if_broot_bytes);
 		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
@@ -2266,7 +2231,7 @@ xfs_iroot_realloc(
 	else
 		new_size = 0;
 	if (new_size > 0) {
-		new_broot = kmem_alloc(new_size, KM_SLEEP);
+		new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
 		/*
 		 * First copy over the btree block header.
 		 */
@@ -2370,7 +2335,8 @@ xfs_idata_realloc(
 		real_size = roundup(new_size, 4);
 		if (ifp->if_u1.if_data == NULL) {
 			ASSERT(ifp->if_real_bytes == 0);
-			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
+			ifp->if_u1.if_data = kmem_alloc(real_size,
+							KM_SLEEP | KM_NOFS);
 		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
 			/*
 			 * Only do the realloc if the underlying size
@@ -2381,11 +2347,12 @@ xfs_idata_realloc(
 					kmem_realloc(ifp->if_u1.if_data,
 							real_size,
 							ifp->if_real_bytes,
-							KM_SLEEP);
+							KM_SLEEP | KM_NOFS);
 			}
 		} else {
 			ASSERT(ifp->if_real_bytes == 0);
-			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
+			ifp->if_u1.if_data = kmem_alloc(real_size,
+							KM_SLEEP | KM_NOFS);
 			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
 			       ifp->if_bytes);
 		}
@@ -2449,6 +2416,8 @@ xfs_iunpin_nowait(
 {
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 
+	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
+
 	/* Give the log a push to start the unpinning I/O */
 	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
 
@@ -2647,8 +2616,6 @@ xfs_iflush_cluster(
 	int			i;
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
-	ASSERT(pag->pagi_inodeok);
-	ASSERT(pag->pag_ici_init);
 
 	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
 	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
@@ -2752,7 +2719,6 @@ cluster_corrupt_out:
 	 * mark it as stale and brelse.
 	 */
 	if (XFS_BUF_IODONE_FUNC(bp)) {
-		XFS_BUF_CLR_BDSTRAT_FUNC(bp);
 		XFS_BUF_UNDONE(bp);
 		XFS_BUF_STALE(bp);
 		XFS_BUF_ERROR(bp,EIO);
@@ -3090,8 +3056,7 @@ xfs_iflush_int(
 	 * and unlock the inode's flush lock when the inode is
 	 * completely written to disk.
 	 */
-	xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
-				xfs_iflush_done, (xfs_log_item_t *)iip);
+	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
 
 	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
 	ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
@@ -3535,13 +3500,11 @@ xfs_iext_remove_indirect(
 	xfs_extnum_t	ext_diff;	/* extents to remove in current list */
 	xfs_extnum_t	nex1;		/* number of extents before idx */
 	xfs_extnum_t	nex2;		/* extents after idx + count */
-	int		nlists;		/* entries in indirection array */
 	int		page_idx = idx;	/* index in target extent list */
 
 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
 	erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
 	ASSERT(erp != NULL);
-	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
 	nex1 = page_idx;
 	ext_cnt = count;
 	while (ext_cnt) {