author	Anton Altaparmakov <aia21@cantab.net>	2005-10-04 10:18:56 -0400
committer	Anton Altaparmakov <aia21@cantab.net>	2005-10-04 10:18:56 -0400
commit	2d86829b846d1447a6ab5af4060fc9f301521317 (patch)
tree	deec965c0fb9681376fc4317846eaf5d0e922f21 /fs/ntfs/attrib.c
parent	2a6fc4e1b0f7d2ec3711d5b1782fb30f78cca765 (diff)
NTFS: Add fs/ntfs/attrib.[hc]::ntfs_attr_extend_allocation(), a function to
extend the allocation of an attribute. Optionally, the data size, but not
the initialized size, can be extended, too.

Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
Diffstat (limited to 'fs/ntfs/attrib.c')
-rw-r--r--	fs/ntfs/attrib.c	634
1 file changed, 634 insertions(+), 0 deletions(-)
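As a rough sketch of how a caller might use the new helper (not part of the patch: the wrapper name example_prepare_write, its parameters and the include choices are illustrative, and the i_mutex/page locking a real write path needs is left out):

#include <linux/fs.h>
#include <linux/errno.h>

#include "attrib.h"
#include "inode.h"

/*
 * Sketch: make sure a write of @count bytes at byte offset @pos into the
 * unnamed $DATA attribute described by @ni is fully backed by allocated
 * space.  Not for $MFT/$DATA or compressed/encrypted attributes.
 */
static int example_prepare_write(ntfs_inode *ni, loff_t pos, size_t count)
{
	struct inode *vi = VFS_I(ni);
	unsigned long flags;
	s64 allocated_size, new_size, end = pos + count;

	read_lock_irqsave(&ni->size_lock, flags);
	allocated_size = ni->allocated_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Nothing to do if the write fits inside the current allocation. */
	if (end <= allocated_size)
		return 0;
	/*
	 * Passing @pos as @data_start asks for real clusters from @pos
	 * onwards, so the region being written cannot end up sparse.
	 */
	new_size = ntfs_attr_extend_allocation(ni, end, end, pos);
	if (new_size < 0)
		return (int)new_size;	/* -errno */
	/*
	 * With @data_start >= 0 the extension may be partial: compare the
	 * returned allocated size with what was asked for.  A real caller
	 * would shorten the write rather than fail outright.
	 */
	if (new_size < end)
		return -ENOSPC;
	/*
	 * ntfs_attr_extend_allocation() updates the data size in the base
	 * attribute record but neither i_size nor the initialized size;
	 * the caller deals with those.
	 */
	if (end > i_size_read(vi))
		i_size_write(vi, end);
	return 0;
}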
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 8821e2d088b7..bc25e88ad468 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1835,6 +1835,640 @@ page_err_out:
1835}
1836
1837/**
1838 * ntfs_attr_extend_allocation - extend the allocated space of an attribute
1839 * @ni: ntfs inode of the attribute whose allocation to extend
1840 * @new_alloc_size: new size in bytes to which to extend the allocation
1841 * @new_data_size: new size in bytes to which to extend the data
1842 * @data_start: beginning of region which is required to be non-sparse
1843 *
1844 * Extend the allocated space of an attribute described by the ntfs inode @ni
1845 * to @new_alloc_size bytes. If @data_start is -1, the whole extension may be
1846 * implemented as a hole in the file (as long as both the volume and the ntfs
1847 * inode @ni have sparse support enabled). If @data_start is >= 0, then the
1848 * region between the old allocated size and @data_start - 1 may be made sparse
1849 * but the region between @data_start and @new_alloc_size must be backed by
1850 * actual clusters.
1851 *
1852 * If @new_data_size is -1, it is ignored. If it is >= 0, then the data size
1853 * of the attribute is extended to @new_data_size. Note that the i_size of the
1854 * vfs inode is not updated. Only the data size in the base attribute record
1855 * is updated. The caller has to update i_size separately if this is required.
1856 * WARNING: It is a BUG() for @new_data_size to be smaller than the old data
1857 * size as well as for @new_data_size to be greater than @new_alloc_size.
1858 *
1859 * For resident attributes this involves resizing the attribute record and if
1860 * necessary moving it and/or other attributes into extent mft records and/or
1861 * converting the attribute to a non-resident attribute which in turn involves
1862 * extending the allocation of a non-resident attribute as described below.
1863 *
1864 * For non-resident attributes this involves allocating clusters in the data
1865 * zone on the volume (except for regions that are being made sparse) and
1866 * extending the run list to describe the allocated clusters as well as
1867 * updating the mapping pairs array of the attribute. This in turn involves
1868 * resizing the attribute record and if necessary moving it and/or other
1869 * attributes into extent mft records and/or splitting the attribute record
1870 * into multiple extent attribute records.
1871 *
1872 * Also, the attribute list attribute is updated if present and in some of the
1873 * above cases (the ones where extent mft records/attributes come into play),
1874 * an attribute list attribute is created if not already present.
1875 *
1876 * Return the new allocated size on success and -errno on error. In the case
1877 * that an error is encountered but a partial extension at least up to
1878 * @data_start (if present) is possible, the allocation is partially extended
1879 * and this is returned. This means the caller must check the returned size to
1880 * determine if the extension was partial. If @data_start is -1 then partial
1881 * allocations are not performed.
1882 *
1883 * WARNING: Do not call ntfs_attr_extend_allocation() for $MFT/$DATA.
1884 *
1885 * Locking: This function takes the runlist lock of @ni for writing as well as
1886 * locking the mft record of the base ntfs inode. These locks are maintained
1887 * throughout execution of the function. These locks are required so that the
1888 * attribute can be resized safely and so that it can for example be converted
1889 * from resident to non-resident safely.
1890 *
1891 * TODO: At present attribute list attribute handling is not implemented.
1892 *
1893 * TODO: At present it is not safe to call this function for anything other
1894 * than the $DATA attribute(s) of an uncompressed and unencrypted file.
1895 */
1896s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
1897 const s64 new_data_size, const s64 data_start)
1898{
1899 VCN vcn;
1900 s64 ll, allocated_size, start = data_start;
1901 struct inode *vi = VFS_I(ni);
1902 ntfs_volume *vol = ni->vol;
1903 ntfs_inode *base_ni;
1904 MFT_RECORD *m;
1905 ATTR_RECORD *a;
1906 ntfs_attr_search_ctx *ctx;
1907 runlist_element *rl, *rl2;
1908 unsigned long flags;
1909 int err, mp_size;
1910 u32 attr_len = 0; /* Silence stupid gcc warning. */
1911 BOOL mp_rebuilt;
1912
1913#ifdef NTFS_DEBUG
1914 read_lock_irqsave(&ni->size_lock, flags);
1915 allocated_size = ni->allocated_size;
1916 read_unlock_irqrestore(&ni->size_lock, flags);
1917 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1918 "old_allocated_size 0x%llx, "
1919 "new_allocated_size 0x%llx, new_data_size 0x%llx, "
1920 "data_start 0x%llx.", vi->i_ino,
1921 (unsigned)le32_to_cpu(ni->type),
1922 (unsigned long long)allocated_size,
1923 (unsigned long long)new_alloc_size,
1924 (unsigned long long)new_data_size,
1925 (unsigned long long)start);
1926#endif
1927retry_extend:
1928 /*
1929 * For non-resident attributes, @start and @new_alloc_size need to be aligned
1930 * to cluster boundaries for allocation purposes.
1931 */
1932 if (NInoNonResident(ni)) {
1933 if (start > 0)
1934 start &= ~(s64)vol->cluster_size_mask;
1935 new_alloc_size = (new_alloc_size + vol->cluster_size - 1) &
1936 ~(s64)vol->cluster_size_mask;
1937 }
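	/*
	 * Example: with a 4096-byte cluster size (cluster_size_mask 0xfff),
	 * a @new_alloc_size of 0x2001 rounds up to 0x3000 while a @start of
	 * 0x2001 rounds down to 0x2000.
	 */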
1938 BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size);
1939 /* Check if new size is allowed in $AttrDef. */
1940 err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
1941 if (unlikely(err)) {
1942 /* Only emit errors when the write will fail completely. */
1943 read_lock_irqsave(&ni->size_lock, flags);
1944 allocated_size = ni->allocated_size;
1945 read_unlock_irqrestore(&ni->size_lock, flags);
1946 if (start < 0 || start >= allocated_size) {
1947 if (err == -ERANGE) {
1948 ntfs_error(vol->sb, "Cannot extend allocation "
1949 "of inode 0x%lx, attribute "
1950 "type 0x%x, because the new "
1951 "allocation would exceed the "
1952 "maximum allowed size for "
1953 "this attribute type.",
1954 vi->i_ino, (unsigned)
1955 le32_to_cpu(ni->type));
1956 } else {
1957 ntfs_error(vol->sb, "Cannot extend allocation "
1958 "of inode 0x%lx, attribute "
1959 "type 0x%x, because this "
1960 "attribute type is not "
1961 "defined on the NTFS volume. "
1962 "Possible corruption! You "
1963 "should run chkdsk!",
1964 vi->i_ino, (unsigned)
1965 le32_to_cpu(ni->type));
1966 }
1967 }
1968 /* Translate error code to be POSIX conformant for write(2). */
1969 if (err == -ERANGE)
1970 err = -EFBIG;
1971 else
1972 err = -EIO;
1973 return err;
1974 }
1975 if (!NInoAttr(ni))
1976 base_ni = ni;
1977 else
1978 base_ni = ni->ext.base_ntfs_ino;
1979 /*
1980 * We will be modifying both the runlist (if non-resident) and the mft
1981 * record so lock them both down.
1982 */
1983 down_write(&ni->runlist.lock);
1984 m = map_mft_record(base_ni);
1985 if (IS_ERR(m)) {
1986 err = PTR_ERR(m);
1987 m = NULL;
1988 ctx = NULL;
1989 goto err_out;
1990 }
1991 ctx = ntfs_attr_get_search_ctx(base_ni, m);
1992 if (unlikely(!ctx)) {
1993 err = -ENOMEM;
1994 goto err_out;
1995 }
1996 read_lock_irqsave(&ni->size_lock, flags);
1997 allocated_size = ni->allocated_size;
1998 read_unlock_irqrestore(&ni->size_lock, flags);
1999 /*
2000 * If non-resident, seek to the last extent. If resident, there is
2001 * only one extent, so seek to that.
2002 */
2003 vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits :
2004 0;
2005 /*
2006 * Abort if someone did the work whilst we waited for the locks. If we
2007 * just converted the attribute from resident to non-resident it is
2008 * likely that exactly this has happened already. We cannot quite
2009 * abort if we need to update the data size.
2010 */
2011 if (unlikely(new_alloc_size <= allocated_size)) {
2012 ntfs_debug("Allocated size already exceeds requested size.");
2013 new_alloc_size = allocated_size;
2014 if (new_data_size < 0)
2015 goto done;
2016 /*
2017 * We want the first attribute extent so that we can update the
2018 * data size.
2019 */
2020 vcn = 0;
2021 }
2022 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2023 CASE_SENSITIVE, vcn, NULL, 0, ctx);
2024 if (unlikely(err)) {
2025 if (err == -ENOENT)
2026 err = -EIO;
2027 goto err_out;
2028 }
2029 m = ctx->mrec;
2030 a = ctx->attr;
2031 /* Use goto to reduce indentation. */
2032 if (a->non_resident)
2033 goto do_non_resident_extend;
2034 BUG_ON(NInoNonResident(ni));
2035 /* The total length of the attribute value. */
2036 attr_len = le32_to_cpu(a->data.resident.value_length);
2037 /*
2038 * Extend the attribute record to be able to store the new attribute
2039 * size. ntfs_attr_record_resize() will not do anything if the size is
2040 * not changing.
2041 */
2042 if (new_alloc_size < vol->mft_record_size &&
2043 !ntfs_attr_record_resize(m, a,
2044 le16_to_cpu(a->data.resident.value_offset) +
2045 new_alloc_size)) {
2046 /* The resize succeeded! */
2047 write_lock_irqsave(&ni->size_lock, flags);
2048 ni->allocated_size = le32_to_cpu(a->length) -
2049 le16_to_cpu(a->data.resident.value_offset);
2050 write_unlock_irqrestore(&ni->size_lock, flags);
2051 if (new_data_size >= 0) {
2052 BUG_ON(new_data_size < attr_len);
2053 a->data.resident.value_length =
2054 cpu_to_le32((u32)new_data_size);
2055 }
2056 goto flush_done;
2057 }
2058 /*
2059 * We have to drop all the locks so we can call
2060 * ntfs_attr_make_non_resident(). This could be optimised by try-
2061 * locking the first page cache page and only if that fails dropping
2062 * the locks, locking the page, and redoing all the locking and
2063 * lookups. While this would be a huge optimisation, it is not worth
2064 * it as this is definitely a slow code path.
2065 */
2066 ntfs_attr_put_search_ctx(ctx);
2067 unmap_mft_record(base_ni);
2068 up_write(&ni->runlist.lock);
2069 /*
2070 * Not enough space in the mft record, try to make the attribute
2071 * non-resident and if successful restart the extension process.
2072 */
2073 err = ntfs_attr_make_non_resident(ni, attr_len);
2074 if (likely(!err))
2075 goto retry_extend;
2076 /*
2077 * Could not make non-resident. If this is due to this not being
2078 * permitted for this attribute type or there not being enough space,
2079 * try to make other attributes non-resident. Otherwise fail.
2080 */
2081 if (unlikely(err != -EPERM && err != -ENOSPC)) {
2082 /* Only emit errors when the write will fail completely. */
2083 read_lock_irqsave(&ni->size_lock, flags);
2084 allocated_size = ni->allocated_size;
2085 read_unlock_irqrestore(&ni->size_lock, flags);
2086 if (start < 0 || start >= allocated_size)
2087 ntfs_error(vol->sb, "Cannot extend allocation of "
2088 "inode 0x%lx, attribute type 0x%x, "
2089 "because the conversion from resident "
2090 "to non-resident attribute failed "
2091 "with error code %i.", vi->i_ino,
2092 (unsigned)le32_to_cpu(ni->type), err);
2093 if (err != -ENOMEM)
2094 err = -EIO;
2095 goto conv_err_out;
2096 }
2097 /* TODO: Not implemented from here, abort. */
2098 read_lock_irqsave(&ni->size_lock, flags);
2099 allocated_size = ni->allocated_size;
2100 read_unlock_irqrestore(&ni->size_lock, flags);
2101 if (start < 0 || start >= allocated_size) {
2102 if (err == -ENOSPC)
2103 ntfs_error(vol->sb, "Not enough space in the mft "
2104 "record/on disk for the non-resident "
2105 "attribute value. This case is not "
2106 "implemented yet.");
2107 else /* if (err == -EPERM) */
2108 ntfs_error(vol->sb, "This attribute type may not be "
2109 "non-resident. This case is not "
2110 "implemented yet.");
2111 }
2112 err = -EOPNOTSUPP;
2113 goto conv_err_out;
2114#if 0
2115 // TODO: Attempt to make other attributes non-resident.
2116 if (!err)
2117 goto do_resident_extend;
2118 /*
2119 * Both the attribute list attribute and the standard information
2120 * attribute must remain in the base inode. Thus, if this is one of
2121 * these attributes, we have to try to move other attributes out into
2122 * extent mft records instead.
2123 */
2124 if (ni->type == AT_ATTRIBUTE_LIST ||
2125 ni->type == AT_STANDARD_INFORMATION) {
2126 // TODO: Attempt to move other attributes into extent mft
2127 // records.
2128 err = -EOPNOTSUPP;
2129 if (!err)
2130 goto do_resident_extend;
2131 goto err_out;
2132 }
2133 // TODO: Attempt to move this attribute to an extent mft record, but
2134 // only if it is not already the only attribute in an mft record in
2135 // which case there would be nothing to gain.
2136 err = -EOPNOTSUPP;
2137 if (!err)
2138 goto do_resident_extend;
2139 /* There is nothing we can do to make enough space. )-: */
2140 goto err_out;
2141#endif
2142do_non_resident_extend:
2143 BUG_ON(!NInoNonResident(ni));
2144 if (new_alloc_size == allocated_size) {
2145 BUG_ON(vcn);
2146 goto alloc_done;
2147 }
2148 /*
2149 * If the data starts after the end of the old allocation, this is a
2150 * $DATA attribute and sparse attributes are enabled on the volume and
2151 * for this inode, then create a sparse region between the old
2152 * allocated size and the start of the data. Otherwise simply proceed
2153 * with filling the whole space between the old allocated size and the
2154 * new allocated size with clusters.
2155 */
2156 if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
2157 !NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
2158 goto skip_sparse;
2159 // TODO: This is not implemented yet. We just fill in with real
2160 // clusters for now...
2161 ntfs_debug("Inserting holes is not-implemented yet. Falling back to "
2162 "allocating real clusters instead.");
2163skip_sparse:
2164 rl = ni->runlist.rl;
2165 if (likely(rl)) {
2166 /* Seek to the end of the runlist. */
2167 while (rl->length)
2168 rl++;
2169 }
2170 /* If this attribute extent is not mapped, map it now. */
2171 if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
2172 (rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
2173 (rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
2174 if (!rl && !allocated_size)
2175 goto first_alloc;
2176 rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
2177 if (IS_ERR(rl)) {
2178 err = PTR_ERR(rl);
2179 if (start < 0 || start >= allocated_size)
2180 ntfs_error(vol->sb, "Cannot extend allocation "
2181 "of inode 0x%lx, attribute "
2182 "type 0x%x, because the "
2183 "mapping of a runlist "
2184 "fragment failed with error "
2185 "code %i.", vi->i_ino,
2186 (unsigned)le32_to_cpu(ni->type),
2187 err);
2188 if (err != -ENOMEM)
2189 err = -EIO;
2190 goto err_out;
2191 }
2192 ni->runlist.rl = rl;
2193 /* Seek to the end of the runlist. */
2194 while (rl->length)
2195 rl++;
2196 }
2197 /*
2198 * We now know the runlist of the last extent is mapped and @rl is at
2199 * the end of the runlist. We want to begin allocating clusters
2200 * starting at the last allocated cluster to reduce fragmentation. If
2201 * there are no valid LCNs in the attribute we let the cluster
2202 * allocator choose the starting cluster.
2203 */
2204 /* If the last LCN is a hole or similar, seek back to the last real LCN. */
2205 while (rl->lcn < 0 && rl > ni->runlist.rl)
2206 rl--;
2207first_alloc:
2208 // FIXME: Need to implement partial allocations so at least part of the
2209 // write can be performed when start >= 0. (Needed for POSIX write(2)
2210 // conformance.)
2211 rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
2212 (new_alloc_size - allocated_size) >>
2213 vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
2214 rl->lcn + rl->length : -1, DATA_ZONE, TRUE);
2215 if (IS_ERR(rl2)) {
2216 err = PTR_ERR(rl2);
2217 if (start < 0 || start >= allocated_size)
2218 ntfs_error(vol->sb, "Cannot extend allocation of "
2219 "inode 0x%lx, attribute type 0x%x, "
2220 "because the allocation of clusters "
2221 "failed with error code %i.", vi->i_ino,
2222 (unsigned)le32_to_cpu(ni->type), err);
2223 if (err != -ENOMEM && err != -ENOSPC)
2224 err = -EIO;
2225 goto err_out;
2226 }
2227 rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
2228 if (IS_ERR(rl)) {
2229 err = PTR_ERR(rl);
2230 if (start < 0 || start >= allocated_size)
2231 ntfs_error(vol->sb, "Cannot extend allocation of "
2232 "inode 0x%lx, attribute type 0x%x, "
2233 "because the runlist merge failed "
2234 "with error code %i.", vi->i_ino,
2235 (unsigned)le32_to_cpu(ni->type), err);
2236 if (err != -ENOMEM)
2237 err = -EIO;
2238 if (ntfs_cluster_free_from_rl(vol, rl2)) {
2239 ntfs_error(vol->sb, "Failed to release allocated "
2240 "cluster(s) in error code path. Run "
2241 "chkdsk to recover the lost "
2242 "cluster(s).");
2243 NVolSetErrors(vol);
2244 }
2245 ntfs_free(rl2);
2246 goto err_out;
2247 }
2248 ni->runlist.rl = rl;
2249 ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size -
2250 allocated_size) >> vol->cluster_size_bits);
2251 /* Find the runlist element with which the attribute extent starts. */
2252 ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
2253 rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
2254 BUG_ON(!rl2);
2255 BUG_ON(!rl2->length);
2256 BUG_ON(rl2->lcn < LCN_HOLE);
2257 mp_rebuilt = FALSE;
2258 /* Get the size for the new mapping pairs array for this extent. */
2259 mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
2260 if (unlikely(mp_size <= 0)) {
2261 err = mp_size;
2262 if (start < 0 || start >= allocated_size)
2263 ntfs_error(vol->sb, "Cannot extend allocation of "
2264 "inode 0x%lx, attribute type 0x%x, "
2265 "because determining the size for the "
2266 "mapping pairs failed with error code "
2267 "%i.", vi->i_ino,
2268 (unsigned)le32_to_cpu(ni->type), err);
2269 err = -EIO;
2270 goto undo_alloc;
2271 }
2272 /* Extend the attribute record to fit the bigger mapping pairs array. */
2273 attr_len = le32_to_cpu(a->length);
2274 err = ntfs_attr_record_resize(m, a, mp_size +
2275 le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
2276 if (unlikely(err)) {
2277 BUG_ON(err != -ENOSPC);
2278 // TODO: Deal with this by moving this extent to a new mft
2279 // record or by starting a new extent in a new mft record,
2280 // possibly by extending this extent partially and filling it
2281 // and creating a new extent for the remainder, or by making
2282 // other attributes non-resident and/or by moving other
2283 // attributes out of this mft record.
2284 if (start < 0 || start >= allocated_size)
2285 ntfs_error(vol->sb, "Not enough space in the mft "
2286 "record for the extended attribute "
2287 "record. This case is not "
2288 "implemented yet.");
2289 err = -EOPNOTSUPP;
2290 goto undo_alloc;
2291 }
2292 mp_rebuilt = TRUE;
2293 /* Generate the mapping pairs array directly into the attr record. */
2294 err = ntfs_mapping_pairs_build(vol, (u8*)a +
2295 le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
2296 mp_size, rl2, ll, -1, NULL);
2297 if (unlikely(err)) {
2298 if (start < 0 || start >= allocated_size)
2299 ntfs_error(vol->sb, "Cannot extend allocation of "
2300 "inode 0x%lx, attribute type 0x%x, "
2301 "because building the mapping pairs "
2302 "failed with error code %i.", vi->i_ino,
2303 (unsigned)le32_to_cpu(ni->type), err);
2304 err = -EIO;
2305 goto undo_alloc;
2306 }
2307 /* Update the highest_vcn. */
2308 a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
2309 vol->cluster_size_bits) - 1);
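	/*
	 * Example: with 4096-byte clusters, a new_alloc_size of 0x3000 bytes
	 * gives a highest_vcn of 2, i.e. VCN 2 is the last cluster belonging
	 * to the attribute.
	 */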
2310 /*
2311 * We now have extended the allocated size of the attribute. Reflect
2312 * this in the ntfs_inode structure and the attribute record.
2313 */
2314 if (a->data.non_resident.lowest_vcn) {
2315 /*
2316 * We are not in the first attribute extent, switch to it, but
2317 * first ensure the changes will make it to disk later.
2318 */
2319 flush_dcache_mft_record_page(ctx->ntfs_ino);
2320 mark_mft_record_dirty(ctx->ntfs_ino);
2321 ntfs_attr_reinit_search_ctx(ctx);
2322 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2323 CASE_SENSITIVE, 0, NULL, 0, ctx);
2324 if (unlikely(err))
2325 goto restore_undo_alloc;
2326 /* @m is not used any more so no need to set it. */
2327 a = ctx->attr;
2328 }
2329 write_lock_irqsave(&ni->size_lock, flags);
2330 ni->allocated_size = new_alloc_size;
2331 a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
2332 /*
2333 * FIXME: This would fail if @ni is a directory, $MFT, or an index,
2334 * since those can have sparse/compressed set. For example, a directory can
2335 * be set compressed even though it is not compressed itself, and in that
2336 * case the bit means that files are to be created compressed in the
2337 * directory... At present this is ok as this code is only called for
2338 * regular files, and only for their $DATA attribute(s).
2339 * FIXME: The calculation is wrong if we created a hole above. For now
2340 * it does not matter as we never create holes.
2341 */
2342 if (NInoSparse(ni) || NInoCompressed(ni)) {
2343 ni->itype.compressed.size += new_alloc_size - allocated_size;
2344 a->data.non_resident.compressed_size =
2345 cpu_to_sle64(ni->itype.compressed.size);
2346 vi->i_blocks = ni->itype.compressed.size >> 9;
2347 } else
2348 vi->i_blocks = new_alloc_size >> 9;
2349 write_unlock_irqrestore(&ni->size_lock, flags);
2350alloc_done:
2351 if (new_data_size >= 0) {
2352 BUG_ON(new_data_size <
2353 sle64_to_cpu(a->data.non_resident.data_size));
2354 a->data.non_resident.data_size = cpu_to_sle64(new_data_size);
2355 }
2356flush_done:
2357 /* Ensure the changes make it to disk. */
2358 flush_dcache_mft_record_page(ctx->ntfs_ino);
2359 mark_mft_record_dirty(ctx->ntfs_ino);
2360done:
2361 ntfs_attr_put_search_ctx(ctx);
2362 unmap_mft_record(base_ni);
2363 up_write(&ni->runlist.lock);
2364 ntfs_debug("Done, new_allocated_size 0x%llx.",
2365 (unsigned long long)new_alloc_size);
2366 return new_alloc_size;
2367restore_undo_alloc:
2368 if (start < 0 || start >= allocated_size)
2369 ntfs_error(vol->sb, "Cannot complete extension of allocation "
2370 "of inode 0x%lx, attribute type 0x%x, because "
2371 "lookup of first attribute extent failed with "
2372 "error code %i.", vi->i_ino,
2373 (unsigned)le32_to_cpu(ni->type), err);
2374 if (err == -ENOENT)
2375 err = -EIO;
2376 ntfs_attr_reinit_search_ctx(ctx);
2377 if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE,
2378 allocated_size >> vol->cluster_size_bits, NULL, 0,
2379 ctx)) {
2380 ntfs_error(vol->sb, "Failed to find last attribute extent of "
2381 "attribute in error code path. Run chkdsk to "
2382 "recover.");
2383 write_lock_irqsave(&ni->size_lock, flags);
2384 ni->allocated_size = new_alloc_size;
2385 /*
2386 * FIXME: This would fail if @ni is a directory... See above.
2387 * FIXME: The calculation is wrong if we created a hole above.
2388 * For now it does not matter as we never create holes.
2389 */
2390 if (NInoSparse(ni) || NInoCompressed(ni)) {
2391 ni->itype.compressed.size += new_alloc_size -
2392 allocated_size;
2393 vi->i_blocks = ni->itype.compressed.size >> 9;
2394 } else
2395 vi->i_blocks = new_alloc_size >> 9;
2396 write_unlock_irqrestore(&ni->size_lock, flags);
2397 ntfs_attr_put_search_ctx(ctx);
2398 unmap_mft_record(base_ni);
2399 up_write(&ni->runlist.lock);
2400 /*
2401 * The only thing that is now wrong is the allocated size of the
2402 * base attribute extent which chkdsk should be able to fix.
2403 */
2404 NVolSetErrors(vol);
2405 return err;
2406 }
2407 ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64(
2408 (allocated_size >> vol->cluster_size_bits) - 1);
2409undo_alloc:
2410 ll = allocated_size >> vol->cluster_size_bits;
2411 if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) {
2412 ntfs_error(vol->sb, "Failed to release allocated cluster(s) "
2413 "in error code path. Run chkdsk to recover "
2414 "the lost cluster(s).");
2415 NVolSetErrors(vol);
2416 }
2417 m = ctx->mrec;
2418 a = ctx->attr;
2419 /*
2420 * If the runlist truncation fails and/or the search context is no
2421 * longer valid, we cannot resize the attribute record or build the
2422 * mapping pairs array, thus we mark the inode bad so that no access to
2423 * the freed clusters can happen.
2424 */
2425 if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) {
2426 ntfs_error(vol->sb, "Failed to %s in error code path. Run "
2427 "chkdsk to recover.", IS_ERR(m) ?
2428 "restore attribute search context" :
2429 "truncate attribute runlist");
2430 make_bad_inode(vi);
2431 make_bad_inode(VFS_I(base_ni));
2432 NVolSetErrors(vol);
2433 } else if (mp_rebuilt) {
2434 if (ntfs_attr_record_resize(m, a, attr_len)) {
2435 ntfs_error(vol->sb, "Failed to restore attribute "
2436 "record in error code path. Run "
2437 "chkdsk to recover.");
2438 make_bad_inode(vi);
2439 make_bad_inode(VFS_I(base_ni));
2440 NVolSetErrors(vol);
2441 } else /* if (success) */ {
2442 if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
2443 a->data.non_resident.
2444 mapping_pairs_offset), attr_len -
2445 le16_to_cpu(a->data.non_resident.
2446 mapping_pairs_offset), rl2, ll, -1,
2447 NULL)) {
2448 ntfs_error(vol->sb, "Failed to restore "
2449 "mapping pairs array in error "
2450 "code path. Run chkdsk to "
2451 "recover.");
2452 make_bad_inode(vi);
2453 make_bad_inode(VFS_I(base_ni));
2454 NVolSetErrors(vol);
2455 }
2456 flush_dcache_mft_record_page(ctx->ntfs_ino);
2457 mark_mft_record_dirty(ctx->ntfs_ino);
2458 }
2459 }
2460err_out:
2461 if (ctx)
2462 ntfs_attr_put_search_ctx(ctx);
2463 if (m)
2464 unmap_mft_record(base_ni);
2465 up_write(&ni->runlist.lock);
2466conv_err_out:
2467 ntfs_debug("Failed. Returning error code %i.", err);
2468 return err;
2469}
2470
2471/**
2472 * ntfs_attr_set - fill (a part of) an attribute with a byte
2473 * @ni: ntfs inode describing the attribute to fill
2474 * @ofs: offset inside the attribute at which to start to fill