Diffstat (limited to 'fs/ntfs/runlist.c')
 -rw-r--r--  fs/ntfs/runlist.c  | 374
 1 file changed, 353 insertions(+), 21 deletions(-)
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 758855b0414e..f5b2ac929081 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -35,7 +35,7 @@ static inline void ntfs_rl_mm(runlist_element *base, int dst, int src,
 		int size)
 {
 	if (likely((dst != src) && (size > 0)))
-		memmove(base + dst, base + src, size * sizeof (*base));
+		memmove(base + dst, base + src, size * sizeof(*base));
 }
 
 /**
@@ -95,6 +95,51 @@ static inline runlist_element *ntfs_rl_realloc(runlist_element *rl,
 }
 
 /**
+ * ntfs_rl_realloc_nofail - Reallocate memory for runlists
+ * @rl:		original runlist
+ * @old_size:	number of runlist elements in the original runlist @rl
+ * @new_size:	number of runlist elements we need space for
+ *
+ * As the runlists grow, more memory will be required. To prevent the
+ * kernel having to allocate and reallocate large numbers of small bits of
+ * memory, this function returns an entire page of memory.
+ *
+ * This function guarantees that the allocation will succeed. It will sleep
+ * for as long as it takes to complete the allocation.
+ *
+ * It is up to the caller to serialize access to the runlist @rl.
+ *
+ * N.B. If the new allocation doesn't require a different number of pages in
+ * memory, the function will return the original pointer.
+ *
+ * On success, return a pointer to the newly allocated, or recycled, memory.
+ * On error, return -errno. The following error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate runlist array.
+ *	-EINVAL	- Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl,
+		int old_size, int new_size)
+{
+	runlist_element *new_rl;
+
+	old_size = PAGE_ALIGN(old_size * sizeof(*rl));
+	new_size = PAGE_ALIGN(new_size * sizeof(*rl));
+	if (old_size == new_size)
+		return rl;
+
+	new_rl = ntfs_malloc_nofs_nofail(new_size);
+	BUG_ON(!new_rl);
+
+	if (likely(rl != NULL)) {
+		if (unlikely(old_size > new_size))
+			old_size = new_size;
+		memcpy(new_rl, rl, old_size);
+		ntfs_free(rl);
+	}
+	return new_rl;
+}
+
+/**
  * ntfs_are_rl_mergeable - test if two runlists can be joined together
  * @dst:	original runlist
  * @src:	new runlist to test for mergeability with @dst
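
For context on the hunk above: runlist buffers are sized in whole pages, so a grow or shrink that rounds to the same number of pages can hand back the original pointer without copying. Below is a minimal user-space sketch of that rounding behaviour; the names rl_realloc_sketch and struct rl_elem, the fixed PAGE_SIZE, and plain malloc/free are stand-ins for illustration, not the kernel's nofail allocator.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

struct rl_elem { long long vcn, lcn, length; };	/* stand-in for runlist_element */

/* Grow (or shrink) a page-sized runlist buffer, reusing it when the
 * rounded size is unchanged -- mirrors the logic of the new helper. */
static struct rl_elem *rl_realloc_sketch(struct rl_elem *rl,
		size_t old_count, size_t new_count)
{
	size_t old_size = PAGE_ALIGN(old_count * sizeof(*rl));
	size_t new_size = PAGE_ALIGN(new_count * sizeof(*rl));
	struct rl_elem *new_rl;

	if (old_size == new_size)
		return rl;		/* same number of pages: reuse the buffer */
	new_rl = malloc(new_size);	/* the kernel code uses a nofail allocator here */
	if (!new_rl)
		abort();
	if (rl) {
		memcpy(new_rl, rl, old_size < new_size ? old_size : new_size);
		free(rl);
	}
	return new_rl;
}

int main(void)
{
	struct rl_elem *rl = rl_realloc_sketch(NULL, 0, 10);
	struct rl_elem *rl2 = rl_realloc_sketch(rl, 10, 100);

	/* 10 and 100 elements both fit in one 4096-byte page, so the buffer
	 * is reused and no copy happens. */
	printf("reused buffer: %s\n", rl2 == rl ? "yes" : "no");
	free(rl2);
	return 0;
}

With 24-byte elements, growing from 10 to 100 elements stays within one page, so the sketch returns the original pointer, which is the case the "N.B." in the kerneldoc comment describes.
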
@@ -497,6 +542,7 @@ runlist_element *ntfs_runlists_merge(runlist_element *drl,
 			/* Scan to the end of the source runlist. */
 			for (dend = 0; likely(drl[dend].length); dend++)
 				;
+			dend++;
 			drl = ntfs_rl_realloc(drl, dend, dend + 1);
 			if (IS_ERR(drl))
 				return drl;
@@ -566,8 +612,8 @@ runlist_element *ntfs_runlists_merge(runlist_element *drl,
 		((drl[dins].vcn + drl[dins].length) <=	/* End of hole */
 		(srl[send - 1].vcn + srl[send - 1].length)));
 
-	/* Or we'll lose an end marker */
-	if (start && finish && (drl[dins].length == 0))
+	/* Or we will lose an end marker. */
+	if (finish && !drl[dins].length)
 		ss++;
 	if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
 		finish = FALSE;
@@ -621,11 +667,8 @@ runlist_element *ntfs_runlists_merge(runlist_element *drl,
 		if (drl[ds].lcn != LCN_RL_NOT_MAPPED) {
 			/* Add an unmapped runlist element. */
 			if (!slots) {
-				/* FIXME/TODO: We need to have the
-				 * extra memory already! (AIA) */
-				drl = ntfs_rl_realloc(drl, ds, ds + 2);
-				if (!drl)
-					goto critical_error;
+				drl = ntfs_rl_realloc_nofail(drl, ds,
+						ds + 2);
 				slots = 2;
 			}
 			ds++;
@@ -640,13 +683,8 @@ runlist_element *ntfs_runlists_merge(runlist_element *drl,
 			drl[ds].length = marker_vcn - drl[ds].vcn;
 		/* Finally add the ENOENT terminator. */
 		ds++;
-		if (!slots) {
-			/* FIXME/TODO: We need to have the extra
-			 * memory already! (AIA) */
-			drl = ntfs_rl_realloc(drl, ds, ds + 1);
-			if (!drl)
-				goto critical_error;
-		}
+		if (!slots)
+			drl = ntfs_rl_realloc_nofail(drl, ds, ds + 1);
 		drl[ds].vcn = marker_vcn;
 		drl[ds].lcn = LCN_ENOENT;
 		drl[ds].length = (s64)0;
@@ -659,11 +697,6 @@ finished:
 	ntfs_debug("Merged runlist:");
 	ntfs_debug_dump_runlist(drl);
 	return drl;
-
-critical_error:
-	/* Critical error! We cannot afford to fail here. */
-	ntfs_error(NULL, "Critical error! Not enough memory.");
-	panic("NTFS: Cannot continue.");
 }
 
 /**
@@ -727,6 +760,9 @@ runlist_element *ntfs_mapping_pairs_decompress(const ntfs_volume *vol,
 		ntfs_error(vol->sb, "Corrupt attribute.");
 		return ERR_PTR(-EIO);
 	}
+	/* If the mapping pairs array is valid but empty, nothing to do. */
+	if (!vcn && !*buf)
+		return old_rl;
 	/* Current position in runlist array. */
 	rlpos = 0;
 	/* Allocate first page and set current runlist size to one page. */
@@ -1419,6 +1455,7 @@ err_out:
 
 /**
  * ntfs_rl_truncate_nolock - truncate a runlist starting at a specified vcn
+ * @vol:	ntfs volume (needed for error output)
  * @runlist:	runlist to truncate
  * @new_length:	the new length of the runlist in VCNs
  *
@@ -1426,12 +1463,16 @@ err_out:
  * holding the runlist elements to a length of @new_length VCNs.
  *
  * If @new_length lies within the runlist, the runlist elements with VCNs of
- * @new_length and above are discarded.
+ * @new_length and above are discarded. As a special case if @new_length is
+ * zero, the runlist is discarded and set to NULL.
  *
  * If @new_length lies beyond the runlist, a sparse runlist element is added to
  * the end of the runlist @runlist or if the last runlist element is a sparse
  * one already, this is extended.
  *
+ * Note, no checking is done for unmapped runlist elements. It is assumed that
+ * the caller has mapped any elements that need to be mapped already.
+ *
  * Return 0 on success and -errno on error.
  *
  * Locking: The caller must hold @runlist->lock for writing.
@@ -1446,6 +1487,13 @@ int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist,
 	BUG_ON(!runlist);
 	BUG_ON(new_length < 0);
 	rl = runlist->rl;
+	if (!new_length) {
+		ntfs_debug("Freeing runlist.");
+		runlist->rl = NULL;
+		if (rl)
+			ntfs_free(rl);
+		return 0;
+	}
 	if (unlikely(!rl)) {
 		/*
 		 * Create a runlist consisting of a sparse runlist element of
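
The new special case in this hunk detaches the runlist array from the shared structure before freeing it. A small user-space model of that ordering follows; the struct names and plain free() are illustrative stand-ins, not the NTFS types or ntfs_free().

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins, not the kernel's runlist types. */
struct rl_elem { long long vcn, lcn, length; };
struct runlist { struct rl_elem *rl; /* lock omitted in this sketch */ };

/* Truncate-to-zero: clear the pointer in the shared structure first,
 * then free the detached array, mirroring the order in the hunk above. */
static int rl_truncate_to_zero(struct runlist *runlist)
{
	struct rl_elem *rl = runlist->rl;

	runlist->rl = NULL;
	if (rl)
		free(rl);
	return 0;
}

int main(void)
{
	struct runlist r = { .rl = calloc(4, sizeof(struct rl_elem)) };

	rl_truncate_to_zero(&r);
	printf("runlist is now %s\n", r.rl ? "still allocated" : "empty (NULL)");
	return 0;
}
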
@@ -1553,4 +1601,288 @@ int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist,
 	return 0;
 }
 
+/**
+ * ntfs_rl_punch_nolock - punch a hole into a runlist
+ * @vol:	ntfs volume (needed for error output)
+ * @runlist:	runlist to punch a hole into
+ * @start:	starting VCN of the hole to be created
+ * @length:	size of the hole to be created in units of clusters
+ *
+ * Punch a hole into the runlist @runlist starting at VCN @start and of size
+ * @length clusters.
+ *
+ * Return 0 on success and -errno on error, in which case @runlist has not been
+ * modified.
+ *
+ * If @start and/or @start + @length are outside the runlist return error code
+ * -ENOENT.
+ *
+ * If the runlist contains unmapped or error elements between @start and @start
+ * + @length return error code -EINVAL.
+ *
+ * Locking: The caller must hold @runlist->lock for writing.
+ */
+int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist,
+		const VCN start, const s64 length)
+{
+	const VCN end = start + length;
+	s64 delta;
+	runlist_element *rl, *rl_end, *rl_real_end, *trl;
+	int old_size;
+	BOOL lcn_fixup = FALSE;
+
+	ntfs_debug("Entering for start 0x%llx, length 0x%llx.",
+			(long long)start, (long long)length);
+	BUG_ON(!runlist);
+	BUG_ON(start < 0);
+	BUG_ON(length < 0);
+	BUG_ON(end < 0);
+	rl = runlist->rl;
+	if (unlikely(!rl)) {
+		if (likely(!start && !length))
+			return 0;
+		return -ENOENT;
+	}
+	/* Find @start in the runlist. */
+	while (likely(rl->length && start >= rl[1].vcn))
+		rl++;
+	rl_end = rl;
+	/* Find @end in the runlist. */
+	while (likely(rl_end->length && end >= rl_end[1].vcn)) {
+		/* Verify there are no unmapped or error elements. */
+		if (unlikely(rl_end->lcn < LCN_HOLE))
+			return -EINVAL;
+		rl_end++;
+	}
+	/* Check the last element. */
+	if (unlikely(rl_end->length && rl_end->lcn < LCN_HOLE))
+		return -EINVAL;
+	/* This covers @start being out of bounds, too. */
+	if (!rl_end->length && end > rl_end->vcn)
+		return -ENOENT;
+	if (!length)
+		return 0;
+	if (!rl->length)
+		return -ENOENT;
+	rl_real_end = rl_end;
+	/* Determine the runlist size. */
+	while (likely(rl_real_end->length))
+		rl_real_end++;
+	old_size = rl_real_end - runlist->rl + 1;
+	/* If @start is in a hole simply extend the hole. */
+	if (rl->lcn == LCN_HOLE) {
+		/*
+		 * If both @start and @end are in the same sparse run, we are
+		 * done.
+		 */
+		if (end <= rl[1].vcn) {
+			ntfs_debug("Done (requested hole is already sparse).");
+			return 0;
+		}
+extend_hole:
+		/* Extend the hole. */
+		rl->length = end - rl->vcn;
+		/* If @end is in a hole, merge it with the current one. */
+		if (rl_end->lcn == LCN_HOLE) {
+			rl_end++;
+			rl->length = rl_end->vcn - rl->vcn;
+		}
+		/* We have done the hole. Now deal with the remaining tail. */
+		rl++;
+		/* Cut out all runlist elements up to @end. */
+		if (rl < rl_end)
+			memmove(rl, rl_end, (rl_real_end - rl_end + 1) *
+					sizeof(*rl));
+		/* Adjust the beginning of the tail if necessary. */
+		if (end > rl->vcn) {
+			s64 delta = end - rl->vcn;
+			rl->vcn = end;
+			rl->length -= delta;
+			/* Only adjust the lcn if it is real. */
+			if (rl->lcn >= 0)
+				rl->lcn += delta;
+		}
+shrink_allocation:
+		/* Reallocate memory if the allocation changed. */
+		if (rl < rl_end) {
+			rl = ntfs_rl_realloc(runlist->rl, old_size,
+					old_size - (rl_end - rl));
+			if (IS_ERR(rl))
+				ntfs_warning(vol->sb, "Failed to shrink "
+						"runlist buffer. This just "
+						"wastes a bit of memory "
+						"temporarily so we ignore it "
+						"and return success.");
+			else
+				runlist->rl = rl;
+		}
+		ntfs_debug("Done (extend hole).");
+		return 0;
+	}
+	/*
+	 * If @start is at the beginning of a run things are easier as there is
+	 * no need to split the first run.
+	 */
+	if (start == rl->vcn) {
+		/*
+		 * @start is at the beginning of a run.
+		 *
+		 * If the previous run is sparse, extend its hole.
+		 *
+		 * If @end is not in the same run, switch the run to be sparse
+		 * and extend the newly created hole.
+		 *
+		 * Thus both of these cases reduce the problem to the above
+		 * case of "@start is in a hole".
+		 */
+		if (rl > runlist->rl && (rl - 1)->lcn == LCN_HOLE) {
+			rl--;
+			goto extend_hole;
+		}
+		if (end >= rl[1].vcn) {
+			rl->lcn = LCN_HOLE;
+			goto extend_hole;
+		}
+		/*
+		 * The final case is when @end is in the same run as @start.
+		 * For this need to split the run into two. One run for the
+		 * sparse region between the beginning of the old run, i.e.
+		 * @start, and @end and one for the remaining non-sparse
+		 * region, i.e. between @end and the end of the old run.
+		 */
+		trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1);
+		if (IS_ERR(trl))
+			goto enomem_out;
+		old_size++;
+		if (runlist->rl != trl) {
+			rl = trl + (rl - runlist->rl);
+			rl_end = trl + (rl_end - runlist->rl);
+			rl_real_end = trl + (rl_real_end - runlist->rl);
+			runlist->rl = trl;
+		}
+split_end:
+		/* Shift all the runs up by one. */
+		memmove(rl + 1, rl, (rl_real_end - rl + 1) * sizeof(*rl));
+		/* Finally, setup the two split runs. */
+		rl->lcn = LCN_HOLE;
+		rl->length = length;
+		rl++;
+		rl->vcn += length;
+		/* Only adjust the lcn if it is real. */
+		if (rl->lcn >= 0 || lcn_fixup)
+			rl->lcn += length;
+		rl->length -= length;
+		ntfs_debug("Done (split one).");
+		return 0;
+	}
+	/*
+	 * @start is neither in a hole nor at the beginning of a run.
+	 *
+	 * If @end is in a hole, things are easier as simply truncating the run
+	 * @start is in to end at @start - 1, deleting all runs after that up
+	 * to @end, and finally extending the beginning of the run @end is in
+	 * to be @start is all that is needed.
+	 */
+	if (rl_end->lcn == LCN_HOLE) {
+		/* Truncate the run containing @start. */
+		rl->length = start - rl->vcn;
+		rl++;
+		/* Cut out all runlist elements up to @end. */
+		if (rl < rl_end)
+			memmove(rl, rl_end, (rl_real_end - rl_end + 1) *
+					sizeof(*rl));
+		/* Extend the beginning of the run @end is in to be @start. */
+		rl->vcn = start;
+		rl->length = rl[1].vcn - start;
+		goto shrink_allocation;
+	}
+	/*
+	 * If @end is not in a hole there are still two cases to distinguish.
+	 * Either @end is or is not in the same run as @start.
+	 *
+	 * The second case is easier as it can be reduced to an already solved
+	 * problem by truncating the run @start is in to end at @start - 1.
+	 * Then, if @end is in the next run need to split the run into a sparse
+	 * run followed by a non-sparse run (already covered above) and if @end
+	 * is not in the next run switching it to be sparse, again reduces the
+	 * problem to the already covered case of "@start is in a hole".
+	 */
+	if (end >= rl[1].vcn) {
+		/*
+		 * If @end is not in the next run, reduce the problem to the
+		 * case of "@start is in a hole".
+		 */
+		if (rl[1].length && end >= rl[2].vcn) {
+			/* Truncate the run containing @start. */
+			rl->length = start - rl->vcn;
+			rl++;
+			rl->vcn = start;
+			rl->lcn = LCN_HOLE;
+			goto extend_hole;
+		}
+		trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1);
+		if (IS_ERR(trl))
+			goto enomem_out;
+		old_size++;
+		if (runlist->rl != trl) {
+			rl = trl + (rl - runlist->rl);
+			rl_end = trl + (rl_end - runlist->rl);
+			rl_real_end = trl + (rl_real_end - runlist->rl);
+			runlist->rl = trl;
+		}
+		/* Truncate the run containing @start. */
+		rl->length = start - rl->vcn;
+		rl++;
+		/*
+		 * @end is in the next run, reduce the problem to the case
+		 * where "@start is at the beginning of a run and @end is in
+		 * the same run as @start".
+		 */
+		delta = rl->vcn - start;
+		rl->vcn = start;
+		if (rl->lcn >= 0) {
+			rl->lcn -= delta;
+			/* Need this in case the lcn just became negative. */
+			lcn_fixup = TRUE;
+		}
+		rl->length += delta;
+		goto split_end;
+	}
+	/*
+	 * The first case from above, i.e. @end is in the same run as @start.
+	 * We need to split the run into three. One run for the non-sparse
+	 * region between the beginning of the old run and @start, one for the
+	 * sparse region between @start and @end, and one for the remaining
+	 * non-sparse region, i.e. between @end and the end of the old run.
+	 */
+	trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 2);
+	if (IS_ERR(trl))
+		goto enomem_out;
+	old_size += 2;
+	if (runlist->rl != trl) {
+		rl = trl + (rl - runlist->rl);
+		rl_end = trl + (rl_end - runlist->rl);
+		rl_real_end = trl + (rl_real_end - runlist->rl);
+		runlist->rl = trl;
+	}
+	/* Shift all the runs up by two. */
+	memmove(rl + 2, rl, (rl_real_end - rl + 1) * sizeof(*rl));
+	/* Finally, setup the three split runs. */
+	rl->length = start - rl->vcn;
+	rl++;
+	rl->vcn = start;
+	rl->lcn = LCN_HOLE;
+	rl->length = length;
+	rl++;
+	delta = end - rl->vcn;
+	rl->vcn = end;
+	rl->lcn += delta;
+	rl->length -= delta;
+	ntfs_debug("Done (split both).");
+	return 0;
+enomem_out:
+	ntfs_error(vol->sb, "Not enough memory to extend runlist buffer.");
+	return -ENOMEM;
+}
+
 #endif /* NTFS_RW */
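
To make the final "split both" branch of ntfs_rl_punch_nolock concrete, here is a small user-space model of the vcn/lcn/length arithmetic when @start and @end both fall inside one mapped run. The struct run type and the LCN_HOLE value below are stand-ins for illustration, not the kernel definitions.

#include <stdio.h>

/* Stand-in for the kernel's runlist_element; LCN_HOLE marks a sparse run. */
typedef long long s64;
enum { LCN_HOLE = -1 };
struct run { s64 vcn, lcn, length; };

int main(void)
{
	/* One mapped run: VCNs 100..199 stored at LCNs 5000..5099. */
	struct run orig = { .vcn = 100, .lcn = 5000, .length = 100 };
	s64 start = 130, length = 20, end = start + length;
	struct run out[3];

	/* The "split both" case from the hunk above: head, hole, tail. */
	out[0] = orig;
	out[0].length = start - orig.vcn;			/* 100..129 stays mapped */
	out[1] = (struct run){ start, LCN_HOLE, length };	/* 130..149 becomes sparse */
	out[2] = orig;
	out[2].vcn = end;					/* 150..199 keeps its */
	out[2].lcn += end - orig.vcn;				/* original disk location */
	out[2].length -= end - orig.vcn;

	for (int i = 0; i < 3; i++)
		printf("run %d: vcn %lld lcn %lld length %lld\n",
				i, out[i].vcn, out[i].lcn, out[i].length);
	return 0;
}

Punching clusters 130-149 out of a run mapping VCNs 100-199 to LCNs 5000-5099 yields a 30-cluster head, a 20-cluster sparse run, and a 50-cluster tail whose lcn is advanced by the same delta as its vcn, which is what the shift-by-two memmove and the assignments at the end of the function achieve.
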