author    | Dave Chinner <dchinner@redhat.com> | 2010-03-22 20:29:44 -0400
committer | Alex Elder <aelder@sgi.com>        | 2010-05-19 10:58:09 -0400
commit    | b5203cd0a43c17dfb9d498bc9e3146624e8c9622 (patch)
tree      | ce779712f4419aa238e0bf3a919ecc4010953fea /fs/xfs
parent    | 9b9fc2b7602ed671d1a8524d4c31302b89554947 (diff)
xfs: factor xlog_write
xlog_write is a mess that takes a lot of effort to understand. It is
a mass of nested loops with 4-space indents to get it to fit in 80 columns,
and lots of funky variables whose meaning and purpose are not obvious.
Break it down into understandable chunks.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
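
The helpers added below keep all of the split-copy bookkeeping in the caller and hand it to the helper through pointers, which is what makes the per-region loop readable. The following stand-alone toy program is a minimal sketch of that pattern as used by xlog_write_setup_copy() in the diff; it is not kernel code, and the names setup_copy, region_len and buf_free are invented for illustration. The helper only computes the offset and length for one copy pass over a region, while the caller owns the loop and the partial-copy state.

/*
 * Toy illustration only -- NOT the kernel code.  It mimics the
 * "keep the split-copy state outside the helper" pattern used by
 * xlog_write_setup_copy(): the helper computes the parameters for
 * one copy pass, the caller owns the loop and the state.
 */
#include <stdio.h>

static int
setup_copy(
	int	space_available,	/* bytes left in the current buffer */
	int	space_required,		/* total size of the region */
	int	*copy_off,		/* out: offset into the region */
	int	*copy_len,		/* out: bytes to copy this pass */
	int	*last_was_partial_copy,	/* in/out: did the last pass split? */
	int	*bytes_consumed)	/* in/out: region bytes already copied */
{
	int	still_to_copy = space_required - *bytes_consumed;

	*copy_off = *bytes_consumed;

	if (still_to_copy <= space_available) {
		/* the region finishes in this buffer */
		*copy_len = still_to_copy;
		*last_was_partial_copy = 0;
		*bytes_consumed = 0;
		return 0;		/* no extra header needed */
	}

	/* partial copy: fill the buffer and remember where we stopped */
	*copy_len = space_available;
	*bytes_consumed += *copy_len;
	(*last_was_partial_copy)++;
	return 1;			/* caller accounts for a new op header */
}

int main(void)
{
	int	region_len = 700;	/* pretend log region size */
	int	buf_free = 256;		/* pretend free space per buffer */
	int	copy_off, copy_len;
	int	partial = 0, consumed = 0;

	do {
		setup_copy(buf_free, region_len, &copy_off, &copy_len,
			   &partial, &consumed);
		printf("copy %d bytes at offset %d (partial=%d)\n",
		       copy_len, copy_off, partial);
	} while (partial);

	return 0;
}

With region_len = 700 and buf_free = 256 this prints three passes: 256 bytes at offset 0 and 256 at offset 256 (both flagged partial), then a final 188 bytes at offset 512, mirroring how a log region is split across iclogs.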
Diffstat (limited to 'fs/xfs')
-rw-r--r-- | fs/xfs/xfs_log.c | 339
1 file changed, 226 insertions(+), 113 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 81323d73a4ee..4a0ec592564c 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1630,6 +1630,193 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
 }
 
 /*
+ * Calculate the potential space needed by the log vector.  Each region gets
+ * its own xlog_op_header_t and may need to be double word aligned.
+ */
+static int
+xlog_write_calc_vec_length(
+	struct xlog_ticket	*ticket,
+	struct xfs_log_iovec	reg[],
+	int			nentries)
+{
+	int			headers = 0;
+	int			len = 0;
+	int			i;
+
+	/* acct for start rec of xact */
+	if (ticket->t_flags & XLOG_TIC_INITED)
+		headers++;
+
+	for (i = 0; i < nentries; i++) {
+		/* each region gets >= 1 */
+		headers++;
+
+		len += reg[i].i_len;
+		xlog_tic_add_region(ticket, reg[i].i_len, reg[i].i_type);
+	}
+
+	ticket->t_res_num_ophdrs += headers;
+	len += headers * sizeof(struct xlog_op_header);
+
+	return len;
+}
+
+/*
+ * If first write for transaction, insert start record.  We can't be trying to
+ * commit if we are inited.  We can't have any "partial_copy" if we are inited.
+ */
+static int
+xlog_write_start_rec(
+	__psint_t		ptr,
+	struct xlog_ticket	*ticket)
+{
+	struct xlog_op_header	*ophdr = (struct xlog_op_header *)ptr;
+
+	if (!(ticket->t_flags & XLOG_TIC_INITED))
+		return 0;
+
+	ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
+	ophdr->oh_clientid = ticket->t_clientid;
+	ophdr->oh_len = 0;
+	ophdr->oh_flags = XLOG_START_TRANS;
+	ophdr->oh_res2 = 0;
+
+	ticket->t_flags &= ~XLOG_TIC_INITED;
+
+	return sizeof(struct xlog_op_header);
+}
+
+static xlog_op_header_t *
+xlog_write_setup_ophdr(
+	struct log		*log,
+	__psint_t		ptr,
+	struct xlog_ticket	*ticket,
+	uint			flags)
+{
+	struct xlog_op_header	*ophdr = (struct xlog_op_header *)ptr;
+
+	ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
+	ophdr->oh_clientid = ticket->t_clientid;
+	ophdr->oh_res2 = 0;
+
+	/* are we copying a commit or unmount record? */
+	ophdr->oh_flags = flags;
+
+	/*
+	 * We've seen logs corrupted with bad transaction client ids.  This
+	 * makes sure that XFS doesn't generate them on.  Turn this into an EIO
+	 * and shut down the filesystem.
+	 */
+	switch (ophdr->oh_clientid) {
+	case XFS_TRANSACTION:
+	case XFS_VOLUME:
+	case XFS_LOG:
+		break;
+	default:
+		xfs_fs_cmn_err(CE_WARN, log->l_mp,
+			"Bad XFS transaction clientid 0x%x in ticket 0x%p",
+			ophdr->oh_clientid, ticket);
+		return NULL;
+	}
+
+	return ophdr;
+}
+
+/*
+ * Set up the parameters of the region copy into the log. This has
+ * to handle region write split across multiple log buffers - this
+ * state is kept external to this function so that this code can
+ * be written in an obvious, self documenting manner.
+ */
+static int
+xlog_write_setup_copy(
+	struct xlog_ticket	*ticket,
+	struct xlog_op_header	*ophdr,
+	int			space_available,
+	int			space_required,
+	int			*copy_off,
+	int			*copy_len,
+	int			*last_was_partial_copy,
+	int			*bytes_consumed)
+{
+	int			still_to_copy;
+
+	still_to_copy = space_required - *bytes_consumed;
+	*copy_off = *bytes_consumed;
+
+	if (still_to_copy <= space_available) {
+		/* write of region completes here */
+		*copy_len = still_to_copy;
+		ophdr->oh_len = cpu_to_be32(*copy_len);
+		if (*last_was_partial_copy)
+			ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
+		*last_was_partial_copy = 0;
+		*bytes_consumed = 0;
+		return 0;
+	}
+
+	/* partial write of region, needs extra log op header reservation */
+	*copy_len = space_available;
+	ophdr->oh_len = cpu_to_be32(*copy_len);
+	ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
+	if (*last_was_partial_copy)
+		ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
+	*bytes_consumed += *copy_len;
+	(*last_was_partial_copy)++;
+
+	/* account for new log op header */
+	ticket->t_curr_res -= sizeof(struct xlog_op_header);
+	ticket->t_res_num_ophdrs++;
+
+	return sizeof(struct xlog_op_header);
+}
+
+static int
+xlog_write_copy_finish(
+	struct log		*log,
+	struct xlog_in_core	*iclog,
+	uint			flags,
+	int			*record_cnt,
+	int			*data_cnt,
+	int			*partial_copy,
+	int			*partial_copy_len,
+	int			log_offset,
+	struct xlog_in_core	**commit_iclog)
+{
+	if (*partial_copy) {
+		/*
+		 * This iclog has already been marked WANT_SYNC by
+		 * xlog_state_get_iclog_space.
+		 */
+		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
+		*record_cnt = 0;
+		*data_cnt = 0;
+		return xlog_state_release_iclog(log, iclog);
+	}
+
+	*partial_copy = 0;
+	*partial_copy_len = 0;
+
+	if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
+		/* no more space in this iclog - push it. */
+		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
+		*record_cnt = 0;
+		*data_cnt = 0;
+
+		spin_lock(&log->l_icloglock);
+		xlog_state_want_sync(log, iclog);
+		spin_unlock(&log->l_icloglock);
+
+		if (!commit_iclog)
+			return xlog_state_release_iclog(log, iclog);
+		ASSERT(flags & XLOG_COMMIT_TRANS);
+		*commit_iclog = iclog;
+	}
+
+	return 0;
+}
+
+/*
  * Write some region out to in-core log
  *
  * This will be called when writing externally provided regions or when
@@ -1689,7 +1876,6 @@ xlog_write(
 	int		start_rec_copy;	/* # bytes to copy for start record */
 	int		partial_copy;	/* did we split a region? */
 	int		partial_copy_len;/* # bytes copied if split region */
-	int		need_copy;	/* # bytes need to memcpy this region */
 	int		copy_len;	/* # bytes actually memcpy'ing */
 	int		copy_off;	/* # bytes from entry start */
 	int		contwr;		/* continued write of in-core log? */
@@ -1697,24 +1883,9 @@ xlog_write(
 	int		record_cnt = 0, data_cnt = 0;
 
 	partial_copy_len = partial_copy = 0;
-
-	/* Calculate potential maximum space.  Each region gets its own
-	 * xlog_op_header_t and may need to be double word aligned.
-	 */
-	len = 0;
-	if (ticket->t_flags & XLOG_TIC_INITED) {    /* acct for start rec of xact */
-		len += sizeof(xlog_op_header_t);
-		ticket->t_res_num_ophdrs++;
-	}
-
-	for (index = 0; index < nentries; index++) {
-		len += sizeof(xlog_op_header_t);    /* each region gets >= 1 */
-		ticket->t_res_num_ophdrs++;
-		len += reg[index].i_len;
-		xlog_tic_add_region(ticket, reg[index].i_len, reg[index].i_type);
-	}
 	contwr = *start_lsn = 0;
 
+	len = xlog_write_calc_vec_length(ticket, reg, nentries);
 	if (ticket->t_curr_res < len) {
 		xlog_print_tic_res(mp, ticket);
 #ifdef DEBUG
@@ -1748,81 +1919,23 @@ xlog_write(
 		while (index < nentries) {
 			ASSERT(reg[index].i_len % sizeof(__int32_t) == 0);
 			ASSERT((__psint_t)ptr % sizeof(__int32_t) == 0);
-			start_rec_copy = 0;
 
-			/* If first write for transaction, insert start record.
-			 * We can't be trying to commit if we are inited.  We can't
-			 * have any "partial_copy" if we are inited.
-			 */
-			if (ticket->t_flags & XLOG_TIC_INITED) {
-				logop_head = (xlog_op_header_t *)ptr;
-				logop_head->oh_tid = cpu_to_be32(ticket->t_tid);
-				logop_head->oh_clientid = ticket->t_clientid;
-				logop_head->oh_len = 0;
-				logop_head->oh_flags = XLOG_START_TRANS;
-				logop_head->oh_res2 = 0;
-				ticket->t_flags &= ~XLOG_TIC_INITED;	/* clear bit */
+			start_rec_copy = xlog_write_start_rec(ptr, ticket);
+			if (start_rec_copy) {
 				record_cnt++;
-
-				start_rec_copy = sizeof(xlog_op_header_t);
 				xlog_write_adv_cnt(ptr, len, log_offset, start_rec_copy);
 			}
 
-			/* Copy log operation header directly into data section */
-			logop_head = (xlog_op_header_t *)ptr;
-			logop_head->oh_tid = cpu_to_be32(ticket->t_tid);
-			logop_head->oh_clientid = ticket->t_clientid;
-			logop_head->oh_res2 = 0;
-
-			/* header copied directly */
+			logop_head = xlog_write_setup_ophdr(log, ptr, ticket, flags);
+			if (!logop_head)
+				return XFS_ERROR(EIO);
 			xlog_write_adv_cnt(ptr, len, log_offset, sizeof(xlog_op_header_t));
 
-			/* are we copying a commit or unmount record? */
-			logop_head->oh_flags = flags;
-
-			/*
-			 * We've seen logs corrupted with bad transaction client
-			 * ids.  This makes sure that XFS doesn't generate them on.
-			 * Turn this into an EIO and shut down the filesystem.
-			 */
-			switch (logop_head->oh_clientid) {
-			case XFS_TRANSACTION:
-			case XFS_VOLUME:
-			case XFS_LOG:
-				break;
-			default:
-				xfs_fs_cmn_err(CE_WARN, mp,
-					"Bad XFS transaction clientid 0x%x in ticket 0x%p",
-					logop_head->oh_clientid, ticket);
-				return XFS_ERROR(EIO);
-			}
-
-			/* Partial write last time? => (partial_copy != 0)
-			 * need_copy is the amount we'd like to copy if everything could
-			 * fit in the current memcpy.
-			 */
-			need_copy = reg[index].i_len - partial_copy_len;
-
-			copy_off = partial_copy_len;
-			if (need_copy <= iclog->ic_size - log_offset) { /* complete write */
-				copy_len = need_copy;
-				logop_head->oh_len = cpu_to_be32(copy_len);
-				if (partial_copy)
-					logop_head->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
-				partial_copy_len = partial_copy = 0;
-			} else {				/* partial write */
-				copy_len = iclog->ic_size - log_offset;
-				logop_head->oh_len = cpu_to_be32(copy_len);
-				logop_head->oh_flags |= XLOG_CONTINUE_TRANS;
-				if (partial_copy)
-					logop_head->oh_flags |= XLOG_WAS_CONT_TRANS;
-				partial_copy_len += copy_len;
-				partial_copy++;
-				len += sizeof(xlog_op_header_t); /* from splitting of region */
-				/* account for new log op header */
-				ticket->t_curr_res -= sizeof(xlog_op_header_t);
-				ticket->t_res_num_ophdrs++;
-			}
+			len += xlog_write_setup_copy(ticket, logop_head,
+						     iclog->ic_size - log_offset,
+						     reg[index].i_len, &copy_off,
+						     &copy_len, &partial_copy,
+						     &partial_copy_len);
 			xlog_verify_dest_ptr(log, ptr);
 
 			/* copy region */
@@ -1834,34 +1947,34 @@ xlog_write(
 			copy_len += start_rec_copy + sizeof(xlog_op_header_t);
 			record_cnt++;
 			data_cnt += contwr ? copy_len : 0;
-			if (partial_copy) {		/* copied partial region */
-				/* already marked WANT_SYNC by xlog_state_get_iclog_space */
-				xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
-				record_cnt = data_cnt = 0;
-				if ((error = xlog_state_release_iclog(log, iclog)))
-					return error;
-				break;			/* don't increment index */
-			} else {			/* copied entire region */
-				index++;
-				partial_copy_len = partial_copy = 0;
-
-				if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
-					xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
-					record_cnt = data_cnt = 0;
-					spin_lock(&log->l_icloglock);
-					xlog_state_want_sync(log, iclog);
-					spin_unlock(&log->l_icloglock);
-					if (commit_iclog) {
-						ASSERT(flags & XLOG_COMMIT_TRANS);
-						*commit_iclog = iclog;
-					} else if ((error = xlog_state_release_iclog(log, iclog)))
-						return error;
-					if (index == nentries)
-						return 0;	/* we are done */
-					else
-						break;
-				}
-			} /* if (partial_copy) */
+
+			error = xlog_write_copy_finish(log, iclog, flags,
+						       &record_cnt, &data_cnt,
+						       &partial_copy, &partial_copy_len,
+						       log_offset, commit_iclog);
+			if (error)
+				return error;
+
+			/*
+			 * if we had a partial copy, we need to get more iclog
+			 * space but we don't want to increment the region
+			 * index because there is still more in this region to write.
+			 *
+			 * If we completed writing this region, and we flushed
+			 * the iclog (indicated by resetting of the record
+			 * count), then we also need to get more log space. If
+			 * this was the last record, though, we are done and
+			 * can just return.
+			 */
+			if (partial_copy)
+				break;
+
+			index++;
+			if (record_cnt == 0) {
+				if (index == nentries)
+					return 0;
+				break;
+			}
 		} /* while (index < nentries) */
 	} /* for (index = 0; index < nentries; ) */
 	ASSERT(len == 0);