author    Nathan Scott <nathans@sgi.com>  2006-01-10 23:28:28 -0500
committer Nathan Scott <nathans@sgi.com>  2006-01-10 23:28:28 -0500
commit    dd9f438e32900d67def49fa1b8961b3e19b6fefc (patch)
tree      a0a51110a13445f3a27b72303e36ef6ef48db0eb /fs/xfs
parent    061f7209bdfb0193b306f88b4ff36b2574b001d3 (diff)
[XFS] Implement the di_extsize allocator hint for non-realtime files as
well. Also provides a mechanism for inheriting this property from the
parent directory for new files.

SGI-PV: 945264
SGI-Modid: xfs-linux-melb:xfs-kern:24367a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_bmap.c     | 373
-rw-r--r--  fs/xfs/xfs_bmap.h     |   7
-rw-r--r--  fs/xfs/xfs_dinode.h   |  11
-rw-r--r--  fs/xfs/xfs_fs.h       |   2
-rw-r--r--  fs/xfs/xfs_inode.c    |  16
-rw-r--r--  fs/xfs/xfs_iomap.c    | 390
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 158
7 files changed, 547 insertions(+), 410 deletions(-)
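
For context, a minimal userspace sketch (not part of this patch) of how the new
hint is expected to be driven: the fsxattr ioctls carry fsx_extsize and
fsx_xflags, and the XFS_XFLAG_EXTSIZE / XFS_XFLAG_EXTSZINHERIT bits added to
fs/xfs/xfs_fs.h below map onto the new on-disk di_flags. The header path and
the example paths are assumptions, not from the patch.

/*
 * Sketch only: set a 1 MiB extent size hint on a regular file, and mark a
 * directory so newly created children inherit the hint (see the xfs_ialloc()
 * hunk in fs/xfs/xfs_inode.c below).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>		/* struct fsxattr, XFS_IOC_FS[GS]ETXATTR (xfsprogs) */

static int set_extsize(const char *path, unsigned int bytes, unsigned int xflag)
{
	struct fsxattr fsx;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || ioctl(fd, XFS_IOC_FSGETXATTR, &fsx) < 0) {
		perror(path);
		return -1;
	}
	fsx.fsx_extsize = bytes;	/* hint is given in bytes */
	fsx.fsx_xflags |= xflag;	/* EXTSIZE for files, EXTSZINHERIT for dirs */
	if (ioctl(fd, XFS_IOC_FSSETXATTR, &fsx) < 0) {
		perror(path);
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* hypothetical paths, for illustration only */
	set_extsize("/mnt/xfs/datafile", 1 << 20, XFS_XFLAG_EXTSIZE);
	set_extsize("/mnt/xfs/newdir",   1 << 20, XFS_XFLAG_EXTSZINHERIT);
	return 0;
}

With EXTSZINHERIT set on the directory, regular files created inside it pick up
di_extsize and XFS_DIFLAG_EXTSIZE automatically.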
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index e415a4698e9c..8a32d65211b0 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2146,13 +2146,176 @@ xfs_bmap_add_extent_hole_real(
2146 return 0; /* keep gcc quite */ 2146 return 0; /* keep gcc quite */
2147} 2147}
2148 2148
2149/*
2150 * Adjust the size of the new extent based on di_extsize and rt extsize.
2151 */
2152STATIC int
2153xfs_bmap_extsize_align(
2154 xfs_mount_t *mp,
2155 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2156 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2157 xfs_extlen_t extsz, /* align to this extent size */
2158 int rt, /* is this a realtime inode? */
2159 int eof, /* is extent at end-of-file? */
2160 int delay, /* creating delalloc extent? */
2161 int convert, /* overwriting unwritten extent? */
2162 xfs_fileoff_t *offp, /* in/out: aligned offset */
2163 xfs_extlen_t *lenp) /* in/out: aligned length */
2164{
2165 xfs_fileoff_t orig_off; /* original offset */
2166 xfs_extlen_t orig_alen; /* original length */
2167 xfs_fileoff_t orig_end; /* original off+len */
2168 xfs_fileoff_t nexto; /* next file offset */
2169 xfs_fileoff_t prevo; /* previous file offset */
2170 xfs_fileoff_t align_off; /* temp for offset */
2171 xfs_extlen_t align_alen; /* temp for length */
2172 xfs_extlen_t temp; /* temp for calculations */
2173
2174 if (convert)
2175 return 0;
2176
2177 orig_off = align_off = *offp;
2178 orig_alen = align_alen = *lenp;
2179 orig_end = orig_off + orig_alen;
2180
2181 /*
2182 * If this request overlaps an existing extent, then don't
2183 * attempt to perform any additional alignment.
2184 */
2185 if (!delay && !eof &&
2186 (orig_off >= gotp->br_startoff) &&
2187 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2188 return 0;
2189 }
2190
2191 /*
2192 * If the file offset is unaligned vs. the extent size
2193 * we need to align it. This will be possible unless
2194 * the file was previously written with a kernel that didn't
2195 * perform this alignment, or if a truncate shot us in the
2196 * foot.
2197 */
2198 temp = do_mod(orig_off, extsz);
2199 if (temp) {
2200 align_alen += temp;
2201 align_off -= temp;
2202 }
2203 /*
2204 * Same adjustment for the end of the requested area.
2205 */
2206 if ((temp = (align_alen % extsz))) {
2207 align_alen += extsz - temp;
2208 }
2209 /*
2210 * If the previous block overlaps with this proposed allocation
2211 * then move the start forward without adjusting the length.
2212 */
2213 if (prevp->br_startoff != NULLFILEOFF) {
2214 if (prevp->br_startblock == HOLESTARTBLOCK)
2215 prevo = prevp->br_startoff;
2216 else
2217 prevo = prevp->br_startoff + prevp->br_blockcount;
2218 } else
2219 prevo = 0;
2220 if (align_off != orig_off && align_off < prevo)
2221 align_off = prevo;
2222 /*
2223 * If the next block overlaps with this proposed allocation
2224 * then move the start back without adjusting the length,
2225 * but not before offset 0.
2226 * This may of course make the start overlap previous block,
2227 * and if we hit the offset 0 limit then the next block
2228 * can still overlap too.
2229 */
2230 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2231 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2232 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2233 nexto = gotp->br_startoff + gotp->br_blockcount;
2234 else
2235 nexto = gotp->br_startoff;
2236 } else
2237 nexto = NULLFILEOFF;
2238 if (!eof &&
2239 align_off + align_alen != orig_end &&
2240 align_off + align_alen > nexto)
2241 align_off = nexto > align_alen ? nexto - align_alen : 0;
2242 /*
2243 * If we're now overlapping the next or previous extent that
2244 * means we can't fit an extsz piece in this hole. Just move
2245 * the start forward to the first valid spot and set
2246 * the length so we hit the end.
2247 */
2248 if (align_off != orig_off && align_off < prevo)
2249 align_off = prevo;
2250 if (align_off + align_alen != orig_end &&
2251 align_off + align_alen > nexto &&
2252 nexto != NULLFILEOFF) {
2253 ASSERT(nexto > prevo);
2254 align_alen = nexto - align_off;
2255 }
2256
2257 /*
2258 * If realtime, and the result isn't a multiple of the realtime
2259 * extent size we need to remove blocks until it is.
2260 */
2261 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2262 /*
2263 * We're not covering the original request, or
2264 * we won't be able to once we fix the length.
2265 */
2266 if (orig_off < align_off ||
2267 orig_end > align_off + align_alen ||
2268 align_alen - temp < orig_alen)
2269 return XFS_ERROR(EINVAL);
2270 /*
2271 * Try to fix it by moving the start up.
2272 */
2273 if (align_off + temp <= orig_off) {
2274 align_alen -= temp;
2275 align_off += temp;
2276 }
2277 /*
2278 * Try to fix it by moving the end in.
2279 */
2280 else if (align_off + align_alen - temp >= orig_end)
2281 align_alen -= temp;
2282 /*
2283 * Set the start to the minimum then trim the length.
2284 */
2285 else {
2286 align_alen -= orig_off - align_off;
2287 align_off = orig_off;
2288 align_alen -= align_alen % mp->m_sb.sb_rextsize;
2289 }
2290 /*
2291 * Result doesn't cover the request, fail it.
2292 */
2293 if (orig_off < align_off || orig_end > align_off + align_alen)
2294 return XFS_ERROR(EINVAL);
2295 } else {
2296 ASSERT(orig_off >= align_off);
2297 ASSERT(orig_end <= align_off + align_alen);
2298 }
2299
2300#ifdef DEBUG
2301 if (!eof && gotp->br_startoff != NULLFILEOFF)
2302 ASSERT(align_off + align_alen <= gotp->br_startoff);
2303 if (prevp->br_startoff != NULLFILEOFF)
2304 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
2305#endif
2306
2307 *lenp = align_alen;
2308 *offp = align_off;
2309 return 0;
2310}
2311
2149#define XFS_ALLOC_GAP_UNITS 4 2312#define XFS_ALLOC_GAP_UNITS 4
2150 2313
2151/* 2314/*
2152 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 2315 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2153 * It figures out where to ask the underlying allocator to put the new extent. 2316 * It figures out where to ask the underlying allocator to put the new extent.
2154 */ 2317 */
2155STATIC int /* error */ 2318STATIC int
2156xfs_bmap_alloc( 2319xfs_bmap_alloc(
2157 xfs_bmalloca_t *ap) /* bmap alloc argument struct */ 2320 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2158{ 2321{
@@ -2163,10 +2326,10 @@ xfs_bmap_alloc(
2163 xfs_mount_t *mp; /* mount point structure */ 2326 xfs_mount_t *mp; /* mount point structure */
2164 int nullfb; /* true if ap->firstblock isn't set */ 2327 int nullfb; /* true if ap->firstblock isn't set */
2165 int rt; /* true if inode is realtime */ 2328 int rt; /* true if inode is realtime */
2166#ifdef __KERNEL__ 2329 xfs_extlen_t prod = 0; /* product factor for allocators */
2167 xfs_extlen_t prod=0; /* product factor for allocators */ 2330 xfs_extlen_t ralen = 0; /* realtime allocation length */
2168 xfs_extlen_t ralen=0; /* realtime allocation length */ 2331 xfs_extlen_t align; /* minimum allocation alignment */
2169#endif 2332 xfs_rtblock_t rtx;
2170 2333
2171#define ISVALID(x,y) \ 2334#define ISVALID(x,y) \
2172 (rt ? \ 2335 (rt ? \
@@ -2182,125 +2345,25 @@ xfs_bmap_alloc(
2182 nullfb = ap->firstblock == NULLFSBLOCK; 2345 nullfb = ap->firstblock == NULLFSBLOCK;
2183 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; 2346 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
2184 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); 2347 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2185#ifdef __KERNEL__
2186 if (rt) { 2348 if (rt) {
2187 xfs_extlen_t extsz; /* file extent size for rt */ 2349 align = ap->ip->i_d.di_extsize ?
2188 xfs_fileoff_t nexto; /* next file offset */ 2350 ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize;
2189 xfs_extlen_t orig_alen; /* original ap->alen */ 2351 /* Set prod to match the extent size */
2190 xfs_fileoff_t orig_end; /* original off+len */ 2352 prod = align / mp->m_sb.sb_rextsize;
2191 xfs_fileoff_t orig_off; /* original ap->off */ 2353
2192 xfs_extlen_t mod_off; /* modulus calculations */ 2354 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2193 xfs_fileoff_t prevo; /* previous file offset */ 2355 align, rt, ap->eof, 0,
2194 xfs_rtblock_t rtx; /* realtime extent number */ 2356 ap->conv, &ap->off, &ap->alen);
2195 xfs_extlen_t temp; /* temp for rt calculations */ 2357 if (error)
2196 2358 return error;
2197 /* 2359 ASSERT(ap->alen);
2198 * Set prod to match the realtime extent size.
2199 */
2200 if (!(extsz = ap->ip->i_d.di_extsize))
2201 extsz = mp->m_sb.sb_rextsize;
2202 prod = extsz / mp->m_sb.sb_rextsize;
2203 orig_off = ap->off;
2204 orig_alen = ap->alen;
2205 orig_end = orig_off + orig_alen;
2206 /*
2207 * If the file offset is unaligned vs. the extent size
2208 * we need to align it. This will be possible unless
2209 * the file was previously written with a kernel that didn't
2210 * perform this alignment.
2211 */
2212 mod_off = do_mod(orig_off, extsz);
2213 if (mod_off) {
2214 ap->alen += mod_off;
2215 ap->off -= mod_off;
2216 }
2217 /*
2218 * Same adjustment for the end of the requested area.
2219 */
2220 if ((temp = (ap->alen % extsz)))
2221 ap->alen += extsz - temp;
2222 /*
2223 * If the previous block overlaps with this proposed allocation
2224 * then move the start forward without adjusting the length.
2225 */
2226 prevo =
2227 ap->prevp->br_startoff == NULLFILEOFF ?
2228 0 :
2229 (ap->prevp->br_startoff +
2230 ap->prevp->br_blockcount);
2231 if (ap->off != orig_off && ap->off < prevo)
2232 ap->off = prevo;
2233 /*
2234 * If the next block overlaps with this proposed allocation
2235 * then move the start back without adjusting the length,
2236 * but not before offset 0.
2237 * This may of course make the start overlap previous block,
2238 * and if we hit the offset 0 limit then the next block
2239 * can still overlap too.
2240 */
2241 nexto = (ap->eof || ap->gotp->br_startoff == NULLFILEOFF) ?
2242 NULLFILEOFF : ap->gotp->br_startoff;
2243 if (!ap->eof &&
2244 ap->off + ap->alen != orig_end &&
2245 ap->off + ap->alen > nexto)
2246 ap->off = nexto > ap->alen ? nexto - ap->alen : 0;
2247 /*
2248 * If we're now overlapping the next or previous extent that
2249 * means we can't fit an extsz piece in this hole. Just move
2250 * the start forward to the first valid spot and set
2251 * the length so we hit the end.
2252 */
2253 if ((ap->off != orig_off && ap->off < prevo) ||
2254 (ap->off + ap->alen != orig_end &&
2255 ap->off + ap->alen > nexto)) {
2256 ap->off = prevo;
2257 ap->alen = nexto - prevo;
2258 }
2259 /*
2260 * If the result isn't a multiple of rtextents we need to
2261 * remove blocks until it is.
2262 */
2263 if ((temp = (ap->alen % mp->m_sb.sb_rextsize))) {
2264 /*
2265 * We're not covering the original request, or
2266 * we won't be able to once we fix the length.
2267 */
2268 if (orig_off < ap->off ||
2269 orig_end > ap->off + ap->alen ||
2270 ap->alen - temp < orig_alen)
2271 return XFS_ERROR(EINVAL);
2272 /*
2273 * Try to fix it by moving the start up.
2274 */
2275 if (ap->off + temp <= orig_off) {
2276 ap->alen -= temp;
2277 ap->off += temp;
2278 }
2279 /*
2280 * Try to fix it by moving the end in.
2281 */
2282 else if (ap->off + ap->alen - temp >= orig_end)
2283 ap->alen -= temp;
2284 /*
2285 * Set the start to the minimum then trim the length.
2286 */
2287 else {
2288 ap->alen -= orig_off - ap->off;
2289 ap->off = orig_off;
2290 ap->alen -= ap->alen % mp->m_sb.sb_rextsize;
2291 }
2292 /*
2293 * Result doesn't cover the request, fail it.
2294 */
2295 if (orig_off < ap->off || orig_end > ap->off + ap->alen)
2296 return XFS_ERROR(EINVAL);
2297 }
2298 ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0); 2360 ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
2361
2299 /* 2362 /*
2300 * If the offset & length are not perfectly aligned 2363 * If the offset & length are not perfectly aligned
2301 * then kill prod, it will just get us in trouble. 2364 * then kill prod, it will just get us in trouble.
2302 */ 2365 */
2303 if (do_mod(ap->off, extsz) || ap->alen % extsz) 2366 if (do_mod(ap->off, align) || ap->alen % align)
2304 prod = 1; 2367 prod = 1;
2305 /* 2368 /*
2306 * Set ralen to be the actual requested length in rtextents. 2369 * Set ralen to be the actual requested length in rtextents.
@@ -2326,15 +2389,24 @@ xfs_bmap_alloc(
2326 ap->rval = rtx * mp->m_sb.sb_rextsize; 2389 ap->rval = rtx * mp->m_sb.sb_rextsize;
2327 } else 2390 } else
2328 ap->rval = 0; 2391 ap->rval = 0;
2392 } else {
2393 align = (ap->userdata && ap->ip->i_d.di_extsize &&
2394 (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ?
2395 ap->ip->i_d.di_extsize : 0;
2396 if (unlikely(align)) {
2397 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2398 align, rt,
2399 ap->eof, 0, ap->conv,
2400 &ap->off, &ap->alen);
2401 ASSERT(!error);
2402 ASSERT(ap->alen);
2403 }
2404 if (nullfb)
2405 ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2406 else
2407 ap->rval = ap->firstblock;
2329 } 2408 }
2330#else 2409
2331 if (rt)
2332 ap->rval = 0;
2333#endif /* __KERNEL__ */
2334 else if (nullfb)
2335 ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2336 else
2337 ap->rval = ap->firstblock;
2338 /* 2410 /*
2339 * If allocating at eof, and there's a previous real block, 2411 * If allocating at eof, and there's a previous real block,
2340 * try to use it's last block as our starting point. 2412 * try to use it's last block as our starting point.
@@ -2598,11 +2670,12 @@ xfs_bmap_alloc(
2598 args.total = ap->total; 2670 args.total = ap->total;
2599 args.minlen = ap->minlen; 2671 args.minlen = ap->minlen;
2600 } 2672 }
2601 if (ap->ip->i_d.di_extsize) { 2673 if (unlikely(ap->userdata && ap->ip->i_d.di_extsize &&
2674 (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) {
2602 args.prod = ap->ip->i_d.di_extsize; 2675 args.prod = ap->ip->i_d.di_extsize;
2603 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) 2676 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2604 args.mod = (xfs_extlen_t)(args.prod - args.mod); 2677 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2605 } else if (mp->m_sb.sb_blocksize >= NBPP) { 2678 } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) {
2606 args.prod = 1; 2679 args.prod = 1;
2607 args.mod = 0; 2680 args.mod = 0;
2608 } else { 2681 } else {
@@ -4590,6 +4663,7 @@ xfs_bmapi(
4590 char contig; /* allocation must be one extent */ 4663 char contig; /* allocation must be one extent */
4591 char delay; /* this request is for delayed alloc */ 4664 char delay; /* this request is for delayed alloc */
4592 char exact; /* don't do all of wasdelayed extent */ 4665 char exact; /* don't do all of wasdelayed extent */
4666 char convert; /* unwritten extent I/O completion */
4593 xfs_bmbt_rec_t *ep; /* extent list entry pointer */ 4667 xfs_bmbt_rec_t *ep; /* extent list entry pointer */
4594 int error; /* error return */ 4668 int error; /* error return */
4595 xfs_bmbt_irec_t got; /* current extent list record */ 4669 xfs_bmbt_irec_t got; /* current extent list record */
@@ -4643,7 +4717,7 @@ xfs_bmapi(
4643 } 4717 }
4644 if (XFS_FORCED_SHUTDOWN(mp)) 4718 if (XFS_FORCED_SHUTDOWN(mp))
4645 return XFS_ERROR(EIO); 4719 return XFS_ERROR(EIO);
4646 rt = XFS_IS_REALTIME_INODE(ip); 4720 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4647 ifp = XFS_IFORK_PTR(ip, whichfork); 4721 ifp = XFS_IFORK_PTR(ip, whichfork);
4648 ASSERT(ifp->if_ext_max == 4722 ASSERT(ifp->if_ext_max ==
4649 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); 4723 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
@@ -4654,6 +4728,7 @@ xfs_bmapi(
4654 delay = (flags & XFS_BMAPI_DELAY) != 0; 4728 delay = (flags & XFS_BMAPI_DELAY) != 0;
4655 trim = (flags & XFS_BMAPI_ENTIRE) == 0; 4729 trim = (flags & XFS_BMAPI_ENTIRE) == 0;
4656 userdata = (flags & XFS_BMAPI_METADATA) == 0; 4730 userdata = (flags & XFS_BMAPI_METADATA) == 0;
4731 convert = (flags & XFS_BMAPI_CONVERT) != 0;
4657 exact = (flags & XFS_BMAPI_EXACT) != 0; 4732 exact = (flags & XFS_BMAPI_EXACT) != 0;
4658 rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; 4733 rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
4659 contig = (flags & XFS_BMAPI_CONTIG) != 0; 4734 contig = (flags & XFS_BMAPI_CONTIG) != 0;
@@ -4748,16 +4823,26 @@ xfs_bmapi(
4748 } 4823 }
4749 minlen = contig ? alen : 1; 4824 minlen = contig ? alen : 1;
4750 if (delay) { 4825 if (delay) {
4751 xfs_extlen_t extsz = 0; 4826 xfs_extlen_t extsz;
4752 4827
4753 /* Figure out the extent size, adjust alen */ 4828 /* Figure out the extent size, adjust alen */
4754 if (rt) { 4829 if (rt) {
4755 if (!(extsz = ip->i_d.di_extsize)) 4830 if (!(extsz = ip->i_d.di_extsize))
4756 extsz = mp->m_sb.sb_rextsize; 4831 extsz = mp->m_sb.sb_rextsize;
4757 alen = roundup(alen, extsz); 4832 } else {
4758 extsz = alen / mp->m_sb.sb_rextsize; 4833 extsz = ip->i_d.di_extsize;
4834 }
4835 if (extsz) {
4836 error = xfs_bmap_extsize_align(mp,
4837 &got, &prev, extsz,
4838 rt, eof, delay, convert,
4839 &aoff, &alen);
4840 ASSERT(!error);
4759 } 4841 }
4760 4842
4843 if (rt)
4844 extsz = alen / mp->m_sb.sb_rextsize;
4845
4761 /* 4846 /*
4762 * Make a transaction-less quota reservation for 4847 * Make a transaction-less quota reservation for
4763 * delayed allocation blocks. This number gets 4848 * delayed allocation blocks. This number gets
@@ -4785,14 +4870,15 @@ xfs_bmapi(
4785 xfs_bmap_worst_indlen(ip, alen); 4870 xfs_bmap_worst_indlen(ip, alen);
4786 ASSERT(indlen > 0); 4871 ASSERT(indlen > 0);
4787 4872
4788 if (rt) 4873 if (rt) {
4789 error = xfs_mod_incore_sb(mp, 4874 error = xfs_mod_incore_sb(mp,
4790 XFS_SBS_FREXTENTS, 4875 XFS_SBS_FREXTENTS,
4791 -(extsz), rsvd); 4876 -(extsz), rsvd);
4792 else 4877 } else {
4793 error = xfs_mod_incore_sb(mp, 4878 error = xfs_mod_incore_sb(mp,
4794 XFS_SBS_FDBLOCKS, 4879 XFS_SBS_FDBLOCKS,
4795 -(alen), rsvd); 4880 -(alen), rsvd);
4881 }
4796 if (!error) { 4882 if (!error) {
4797 error = xfs_mod_incore_sb(mp, 4883 error = xfs_mod_incore_sb(mp,
4798 XFS_SBS_FDBLOCKS, 4884 XFS_SBS_FDBLOCKS,
@@ -4811,6 +4897,7 @@ xfs_bmapi(
4811 if (error) { 4897 if (error) {
4812 if (XFS_IS_QUOTA_ON(ip->i_mount)) 4898 if (XFS_IS_QUOTA_ON(ip->i_mount))
4813 /* unreserve the blocks now */ 4899 /* unreserve the blocks now */
4900 (void)
4814 XFS_TRANS_UNRESERVE_QUOTA_NBLKS( 4901 XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
4815 mp, NULL, ip, 4902 mp, NULL, ip,
4816 (long)alen, 0, rt ? 4903 (long)alen, 0, rt ?
@@ -4849,6 +4936,7 @@ xfs_bmapi(
4849 bma.firstblock = *firstblock; 4936 bma.firstblock = *firstblock;
4850 bma.alen = alen; 4937 bma.alen = alen;
4851 bma.off = aoff; 4938 bma.off = aoff;
4939 bma.conv = convert;
4852 bma.wasdel = wasdelay; 4940 bma.wasdel = wasdelay;
4853 bma.minlen = minlen; 4941 bma.minlen = minlen;
4854 bma.low = flist->xbf_low; 4942 bma.low = flist->xbf_low;
@@ -5270,8 +5358,7 @@ xfs_bunmapi(
5270 return 0; 5358 return 0;
5271 } 5359 }
5272 XFS_STATS_INC(xs_blk_unmap); 5360 XFS_STATS_INC(xs_blk_unmap);
5273 isrt = (whichfork == XFS_DATA_FORK) && 5361 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5274 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME);
5275 start = bno; 5362 start = bno;
5276 bno = start + len - 1; 5363 bno = start + len - 1;
5277 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, 5364 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
@@ -5443,7 +5530,7 @@ xfs_bunmapi(
5443 } 5530 }
5444 if (wasdel) { 5531 if (wasdel) {
5445 ASSERT(STARTBLOCKVAL(del.br_startblock) > 0); 5532 ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
5446 /* Update realtim/data freespace, unreserve quota */ 5533 /* Update realtime/data freespace, unreserve quota */
5447 if (isrt) { 5534 if (isrt) {
5448 xfs_filblks_t rtexts; 5535 xfs_filblks_t rtexts;
5449 5536
@@ -5451,14 +5538,14 @@ xfs_bunmapi(
5451 do_div(rtexts, mp->m_sb.sb_rextsize); 5538 do_div(rtexts, mp->m_sb.sb_rextsize);
5452 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, 5539 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5453 (int)rtexts, rsvd); 5540 (int)rtexts, rsvd);
5454 XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip, 5541 (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
5455 -((long)del.br_blockcount), 0, 5542 NULL, ip, -((long)del.br_blockcount), 0,
5456 XFS_QMOPT_RES_RTBLKS); 5543 XFS_QMOPT_RES_RTBLKS);
5457 } else { 5544 } else {
5458 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, 5545 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
5459 (int)del.br_blockcount, rsvd); 5546 (int)del.br_blockcount, rsvd);
5460 XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip, 5547 (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
5461 -((long)del.br_blockcount), 0, 5548 NULL, ip, -((long)del.br_blockcount), 0,
5462 XFS_QMOPT_RES_REGBLKS); 5549 XFS_QMOPT_RES_REGBLKS);
5463 } 5550 }
5464 ip->i_delayed_blks -= del.br_blockcount; 5551 ip->i_delayed_blks -= del.br_blockcount;
@@ -5652,7 +5739,9 @@ xfs_getbmap(
5652 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) 5739 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5653 return XFS_ERROR(EINVAL); 5740 return XFS_ERROR(EINVAL);
5654 if (whichfork == XFS_DATA_FORK) { 5741 if (whichfork == XFS_DATA_FORK) {
5655 if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) { 5742 if ((ip->i_d.di_extsize && (ip->i_d.di_flags &
5743 (XFS_DIFLAG_REALTIME|XFS_DIFLAG_EXTSIZE))) ||
5744 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
5656 prealloced = 1; 5745 prealloced = 1;
5657 fixlen = XFS_MAXIOFFSET(mp); 5746 fixlen = XFS_MAXIOFFSET(mp);
5658 } else { 5747 } else {
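
The core of xfs_bmap_extsize_align() added above is a round-down of the start
offset and a round-up of the length to extent-size multiples; the
neighbour-extent checks and realtime trimming then constrain that result. A
standalone sketch of just the rounding step follows (names are illustrative,
not from the patch):

/* Illustrative only: align an (off, len) request to extsz-sized boundaries. */
typedef unsigned long long xfs_off64_t;

static void extsize_round(xfs_off64_t *off, xfs_off64_t *len, xfs_off64_t extsz)
{
	xfs_off64_t temp = *off % extsz;	/* do_mod(orig_off, extsz) */

	if (temp) {				/* pull the start back to a boundary */
		*len += temp;
		*off -= temp;
	}
	temp = *len % extsz;			/* then pad the end out */
	if (temp)
		*len += extsz - temp;
}

For example, with extsz = 16 a request at offset 10 for 20 blocks becomes
offset 0, length 32, covering the original range with whole extsz units.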
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 2e0717a01309..12cc63dfc2c4 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -62,6 +62,10 @@ typedef struct xfs_bmap_free
62#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */ 62#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */
63 /* combine contig. space */ 63 /* combine contig. space */
64#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */ 64#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */
65/* XFS_BMAPI_DIRECT_IO 0x800 */
66#define XFS_BMAPI_CONVERT 0x1000 /* unwritten extent conversion - */
67 /* need write cache flushing and no */
68 /* additional allocation alignments */
65 69
66#define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w) 70#define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w)
67static inline int xfs_bmapi_aflag(int w) 71static inline int xfs_bmapi_aflag(int w)
@@ -101,7 +105,8 @@ typedef struct xfs_bmalloca {
101 char wasdel; /* replacing a delayed allocation */ 105 char wasdel; /* replacing a delayed allocation */
102 char userdata;/* set if is user data */ 106 char userdata;/* set if is user data */
103 char low; /* low on space, using seq'l ags */ 107 char low; /* low on space, using seq'l ags */
104 char aeof; /* allocated space at eof */ 108 char aeof; /* allocated space at eof */
109 char conv; /* overwriting unwritten extents */
105} xfs_bmalloca_t; 110} xfs_bmalloca_t;
106 111
107#ifdef __KERNEL__ 112#ifdef __KERNEL__
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index c5a0e537ff1a..f697aab8a3d2 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -246,8 +246,10 @@ typedef enum xfs_dinode_fmt
246#define XFS_DIFLAG_NOATIME_BIT 6 /* do not update atime */ 246#define XFS_DIFLAG_NOATIME_BIT 6 /* do not update atime */
247#define XFS_DIFLAG_NODUMP_BIT 7 /* do not dump */ 247#define XFS_DIFLAG_NODUMP_BIT 7 /* do not dump */
248#define XFS_DIFLAG_RTINHERIT_BIT 8 /* create with realtime bit set */ 248#define XFS_DIFLAG_RTINHERIT_BIT 8 /* create with realtime bit set */
249#define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */ 249#define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */
250#define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */ 250#define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */
251#define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */
252#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
251#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) 253#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
252#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) 254#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
253#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) 255#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
@@ -259,11 +261,14 @@ typedef enum xfs_dinode_fmt
259#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT) 261#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT)
260#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT) 262#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT)
261#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT) 263#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
264#define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT)
265#define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
262 266
263#define XFS_DIFLAG_ANY \ 267#define XFS_DIFLAG_ANY \
264 (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \ 268 (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
265 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \ 269 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
266 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \ 270 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
267 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS) 271 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
272 XFS_DIFLAG_EXTSZINHERIT)
268 273
269#endif /* __XFS_DINODE_H__ */ 274#endif /* __XFS_DINODE_H__ */
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index ba096f80f48d..3280f49496ba 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -65,6 +65,8 @@ struct fsxattr {
65#define XFS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */ 65#define XFS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */
66#define XFS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */ 66#define XFS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */
67#define XFS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */ 67#define XFS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */
68#define XFS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */
69#define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */
68#define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ 70#define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
69 71
70/* 72/*
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index df0d4572d70a..e486c7d244c2 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -809,6 +809,10 @@ _xfs_dic2xflags(
809 flags |= XFS_XFLAG_PROJINHERIT; 809 flags |= XFS_XFLAG_PROJINHERIT;
810 if (di_flags & XFS_DIFLAG_NOSYMLINKS) 810 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
811 flags |= XFS_XFLAG_NOSYMLINKS; 811 flags |= XFS_XFLAG_NOSYMLINKS;
812 if (di_flags & XFS_DIFLAG_EXTSIZE)
813 flags |= XFS_XFLAG_EXTSIZE;
814 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
815 flags |= XFS_XFLAG_EXTSZINHERIT;
812 } 816 }
813 817
814 return flags; 818 return flags;
@@ -1192,11 +1196,19 @@ xfs_ialloc(
1192 if ((mode & S_IFMT) == S_IFDIR) { 1196 if ((mode & S_IFMT) == S_IFDIR) {
1193 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 1197 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1194 di_flags |= XFS_DIFLAG_RTINHERIT; 1198 di_flags |= XFS_DIFLAG_RTINHERIT;
1195 } else { 1199 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1200 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1201 ip->i_d.di_extsize = pip->i_d.di_extsize;
1202 }
1203 } else if ((mode & S_IFMT) == S_IFREG) {
1196 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) { 1204 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
1197 di_flags |= XFS_DIFLAG_REALTIME; 1205 di_flags |= XFS_DIFLAG_REALTIME;
1198 ip->i_iocore.io_flags |= XFS_IOCORE_RT; 1206 ip->i_iocore.io_flags |= XFS_IOCORE_RT;
1199 } 1207 }
1208 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1209 di_flags |= XFS_DIFLAG_EXTSIZE;
1210 ip->i_d.di_extsize = pip->i_d.di_extsize;
1211 }
1200 } 1212 }
1201 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) && 1213 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1202 xfs_inherit_noatime) 1214 xfs_inherit_noatime)
@@ -1262,7 +1274,7 @@ xfs_isize_check(
1262 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) 1274 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
1263 return; 1275 return;
1264 1276
1265 if ( ip->i_d.di_flags & XFS_DIFLAG_REALTIME ) 1277 if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
1266 return; 1278 return;
1267 1279
1268 nimaps = 2; 1280 nimaps = 2;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 45a77a3a6c07..5ecf3e3e86aa 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -263,7 +263,7 @@ phase2:
263 case BMAPI_WRITE: 263 case BMAPI_WRITE:
264 /* If we found an extent, return it */ 264 /* If we found an extent, return it */
265 if (nimaps && 265 if (nimaps &&
266 (imap.br_startblock != HOLESTARTBLOCK) && 266 (imap.br_startblock != HOLESTARTBLOCK) &&
267 (imap.br_startblock != DELAYSTARTBLOCK)) { 267 (imap.br_startblock != DELAYSTARTBLOCK)) {
268 xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io, 268 xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io,
269 offset, count, iomapp, &imap, flags); 269 offset, count, iomapp, &imap, flags);
@@ -318,6 +318,58 @@ out:
318} 318}
319 319
320STATIC int 320STATIC int
321xfs_iomap_eof_align_last_fsb(
322 xfs_mount_t *mp,
323 xfs_iocore_t *io,
324 xfs_fsize_t isize,
325 xfs_extlen_t extsize,
326 xfs_fileoff_t *last_fsb)
327{
328 xfs_fileoff_t new_last_fsb = 0;
329 xfs_extlen_t align;
330 int eof, error;
331
332 if (io->io_flags & XFS_IOCORE_RT)
333 ;
334 /*
335 * If mounted with the "-o swalloc" option, roundup the allocation
336 * request to a stripe width boundary if the file size is >=
337 * stripe width and we are allocating past the allocation eof.
338 */
339 else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) &&
340 (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)))
341 new_last_fsb = roundup_64(*last_fsb, mp->m_swidth);
342 /*
343 * Roundup the allocation request to a stripe unit (m_dalign) boundary
344 * if the file size is >= stripe unit size, and we are allocating past
345 * the allocation eof.
346 */
347 else if (mp->m_dalign && (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)))
348 new_last_fsb = roundup_64(*last_fsb, mp->m_dalign);
349
350 /*
351 * Always round up the allocation request to an extent boundary
352 * (when file on a real-time subvolume or has di_extsize hint).
353 */
354 if (extsize) {
355 if (new_last_fsb)
356 align = roundup_64(new_last_fsb, extsize);
357 else
358 align = extsize;
359 new_last_fsb = roundup_64(*last_fsb, align);
360 }
361
362 if (new_last_fsb) {
363 error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
364 if (error)
365 return error;
366 if (eof)
367 *last_fsb = new_last_fsb;
368 }
369 return 0;
370}
371
372STATIC int
321xfs_flush_space( 373xfs_flush_space(
322 xfs_inode_t *ip, 374 xfs_inode_t *ip,
323 int *fsynced, 375 int *fsynced,
@@ -363,19 +415,20 @@ xfs_iomap_write_direct(
363 xfs_iocore_t *io = &ip->i_iocore; 415 xfs_iocore_t *io = &ip->i_iocore;
364 xfs_fileoff_t offset_fsb; 416 xfs_fileoff_t offset_fsb;
365 xfs_fileoff_t last_fsb; 417 xfs_fileoff_t last_fsb;
366 xfs_filblks_t count_fsb; 418 xfs_filblks_t count_fsb, resaligned;
367 xfs_fsblock_t firstfsb; 419 xfs_fsblock_t firstfsb;
420 xfs_extlen_t extsz, temp;
421 xfs_fsize_t isize;
368 int nimaps; 422 int nimaps;
369 int error;
370 int bmapi_flag; 423 int bmapi_flag;
371 int quota_flag; 424 int quota_flag;
372 int rt; 425 int rt;
373 xfs_trans_t *tp; 426 xfs_trans_t *tp;
374 xfs_bmbt_irec_t imap; 427 xfs_bmbt_irec_t imap;
375 xfs_bmap_free_t free_list; 428 xfs_bmap_free_t free_list;
376 xfs_filblks_t qblocks, resblks; 429 uint qblocks, resblks, resrtextents;
377 int committed; 430 int committed;
378 int resrtextents; 431 int error;
379 432
380 /* 433 /*
381 * Make sure that the dquots are there. This doesn't hold 434 * Make sure that the dquots are there. This doesn't hold
@@ -385,37 +438,52 @@ xfs_iomap_write_direct(
385 if (error) 438 if (error)
386 return XFS_ERROR(error); 439 return XFS_ERROR(error);
387 440
388 offset_fsb = XFS_B_TO_FSBT(mp, offset); 441 rt = XFS_IS_REALTIME_INODE(ip);
389 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 442 if (unlikely(rt)) {
390 count_fsb = last_fsb - offset_fsb; 443 if (!(extsz = ip->i_d.di_extsize))
391 if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) { 444 extsz = mp->m_sb.sb_rextsize;
392 xfs_fileoff_t map_last_fsb; 445 } else {
393 446 extsz = ip->i_d.di_extsize;
394 map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
395 if (map_last_fsb < last_fsb) {
396 last_fsb = map_last_fsb;
397 count_fsb = last_fsb - offset_fsb;
398 }
399 ASSERT(count_fsb > 0);
400 } 447 }
401 448
402 /* 449 isize = ip->i_d.di_size;
403 * Determine if reserving space on the data or realtime partition. 450 if (io->io_new_size > isize)
404 */ 451 isize = io->io_new_size;
405 if ((rt = XFS_IS_REALTIME_INODE(ip))) {
406 xfs_extlen_t extsz;
407 452
408 if (!(extsz = ip->i_d.di_extsize)) 453 offset_fsb = XFS_B_TO_FSBT(mp, offset);
409 extsz = mp->m_sb.sb_rextsize; 454 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
410 resrtextents = qblocks = (count_fsb + extsz - 1); 455 if ((offset + count) > isize) {
411 do_div(resrtextents, mp->m_sb.sb_rextsize); 456 error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
412 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 457 &last_fsb);
413 quota_flag = XFS_QMOPT_RES_RTBLKS; 458 if (error)
459 goto error_out;
414 } else { 460 } else {
415 resrtextents = 0; 461 if (found && (ret_imap->br_startblock == HOLESTARTBLOCK))
416 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, count_fsb); 462 last_fsb = MIN(last_fsb, (xfs_fileoff_t)
417 quota_flag = XFS_QMOPT_RES_REGBLKS; 463 ret_imap->br_blockcount +
464 ret_imap->br_startoff);
418 } 465 }
466 count_fsb = last_fsb - offset_fsb;
467 ASSERT(count_fsb > 0);
468
469 resaligned = count_fsb;
470 if (unlikely(extsz)) {
471 if ((temp = do_mod(offset_fsb, extsz)))
472 resaligned += temp;
473 if ((temp = do_mod(resaligned, extsz)))
474 resaligned += extsz - temp;
475 }
476
477 if (unlikely(rt)) {
478 resrtextents = qblocks = resaligned;
479 resrtextents /= mp->m_sb.sb_rextsize;
480 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
481 quota_flag = XFS_QMOPT_RES_RTBLKS;
482 } else {
483 resrtextents = 0;
484 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
485 quota_flag = XFS_QMOPT_RES_REGBLKS;
486 }
419 487
420 /* 488 /*
421 * Allocate and setup the transaction 489 * Allocate and setup the transaction
@@ -426,7 +494,6 @@ xfs_iomap_write_direct(
426 XFS_WRITE_LOG_RES(mp), resrtextents, 494 XFS_WRITE_LOG_RES(mp), resrtextents,
427 XFS_TRANS_PERM_LOG_RES, 495 XFS_TRANS_PERM_LOG_RES,
428 XFS_WRITE_LOG_COUNT); 496 XFS_WRITE_LOG_COUNT);
429
430 /* 497 /*
431 * Check for running out of space, note: need lock to return 498 * Check for running out of space, note: need lock to return
432 */ 499 */
@@ -436,20 +503,20 @@ xfs_iomap_write_direct(
436 if (error) 503 if (error)
437 goto error_out; 504 goto error_out;
438 505
439 if (XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag)) { 506 error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
440 error = (EDQUOT); 507 qblocks, 0, quota_flag);
508 if (error)
441 goto error1; 509 goto error1;
442 }
443 510
444 bmapi_flag = XFS_BMAPI_WRITE;
445 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 511 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
446 xfs_trans_ihold(tp, ip); 512 xfs_trans_ihold(tp, ip);
447 513
448 if (!(flags & BMAPI_MMAP) && (offset < ip->i_d.di_size || rt)) 514 bmapi_flag = XFS_BMAPI_WRITE;
515 if ((flags & BMAPI_DIRECT) && (offset < ip->i_d.di_size || extsz))
449 bmapi_flag |= XFS_BMAPI_PREALLOC; 516 bmapi_flag |= XFS_BMAPI_PREALLOC;
450 517
451 /* 518 /*
452 * Issue the bmapi() call to allocate the blocks 519 * Issue the xfs_bmapi() call to allocate the blocks
453 */ 520 */
454 XFS_BMAP_INIT(&free_list, &firstfsb); 521 XFS_BMAP_INIT(&free_list, &firstfsb);
455 nimaps = 1; 522 nimaps = 1;
@@ -501,6 +568,62 @@ error_out:
501 return XFS_ERROR(error); 568 return XFS_ERROR(error);
502} 569}
503 570
571/*
572 * If the caller is doing a write at the end of the file,
573 * then extend the allocation out to the file system's write
574 * iosize. We clean up any extra space left over when the
575 * file is closed in xfs_inactive().
576 *
577 * For sync writes, we are flushing delayed allocate space to
578 * try to make additional space available for allocation near
579 * the filesystem full boundary - preallocation hurts in that
580 * situation, of course.
581 */
582STATIC int
583xfs_iomap_eof_want_preallocate(
584 xfs_mount_t *mp,
585 xfs_iocore_t *io,
586 xfs_fsize_t isize,
587 xfs_off_t offset,
588 size_t count,
589 int ioflag,
590 xfs_bmbt_irec_t *imap,
591 int nimaps,
592 int *prealloc)
593{
594 xfs_fileoff_t start_fsb;
595 xfs_filblks_t count_fsb;
596 xfs_fsblock_t firstblock;
597 int n, error, imaps;
598
599 *prealloc = 0;
600 if ((ioflag & BMAPI_SYNC) || (offset + count) <= isize)
601 return 0;
602
603 /*
604 * If there are any real blocks past eof, then don't
605 * do any speculative allocation.
606 */
607 start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
608 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
609 while (count_fsb > 0) {
610 imaps = nimaps;
611 error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
612 0, &firstblock, 0, imap, &imaps, NULL);
613 if (error)
614 return error;
615 for (n = 0; n < imaps; n++) {
616 if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
617 (imap[n].br_startblock != DELAYSTARTBLOCK))
618 return 0;
619 start_fsb += imap[n].br_blockcount;
620 count_fsb -= imap[n].br_blockcount;
621 }
622 }
623 *prealloc = 1;
624 return 0;
625}
626
504int 627int
505xfs_iomap_write_delay( 628xfs_iomap_write_delay(
506 xfs_inode_t *ip, 629 xfs_inode_t *ip,
@@ -514,13 +637,15 @@ xfs_iomap_write_delay(
514 xfs_iocore_t *io = &ip->i_iocore; 637 xfs_iocore_t *io = &ip->i_iocore;
515 xfs_fileoff_t offset_fsb; 638 xfs_fileoff_t offset_fsb;
516 xfs_fileoff_t last_fsb; 639 xfs_fileoff_t last_fsb;
517 xfs_fsize_t isize; 640 xfs_off_t aligned_offset;
641 xfs_fileoff_t ioalign;
518 xfs_fsblock_t firstblock; 642 xfs_fsblock_t firstblock;
643 xfs_extlen_t extsz;
644 xfs_fsize_t isize;
519 int nimaps; 645 int nimaps;
520 int error;
521 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; 646 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
522 int aeof; 647 int prealloc, fsynced = 0;
523 int fsynced = 0; 648 int error;
524 649
525 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); 650 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
526 651
@@ -528,152 +653,57 @@ xfs_iomap_write_delay(
528 * Make sure that the dquots are there. This doesn't hold 653 * Make sure that the dquots are there. This doesn't hold
529 * the ilock across a disk read. 654 * the ilock across a disk read.
530 */ 655 */
531
532 error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED); 656 error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
533 if (error) 657 if (error)
534 return XFS_ERROR(error); 658 return XFS_ERROR(error);
535 659
660 if (XFS_IS_REALTIME_INODE(ip)) {
661 if (!(extsz = ip->i_d.di_extsize))
662 extsz = mp->m_sb.sb_rextsize;
663 } else {
664 extsz = ip->i_d.di_extsize;
665 }
666
667 offset_fsb = XFS_B_TO_FSBT(mp, offset);
668
536retry: 669retry:
537 isize = ip->i_d.di_size; 670 isize = ip->i_d.di_size;
538 if (io->io_new_size > isize) { 671 if (io->io_new_size > isize)
539 isize = io->io_new_size; 672 isize = io->io_new_size;
540 }
541 673
542 aeof = 0; 674 error = xfs_iomap_eof_want_preallocate(mp, io, isize, offset, count,
543 offset_fsb = XFS_B_TO_FSBT(mp, offset); 675 ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
544 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 676 if (error)
545 /* 677 return error;
546 * If the caller is doing a write at the end of the file,
547 * then extend the allocation (and the buffer used for the write)
548 * out to the file system's write iosize. We clean up any extra
549 * space left over when the file is closed in xfs_inactive().
550 *
551 * For sync writes, we are flushing delayed allocate space to
552 * try to make additional space available for allocation near
553 * the filesystem full boundary - preallocation hurts in that
554 * situation, of course.
555 */
556 if (!(ioflag & BMAPI_SYNC) && ((offset + count) > ip->i_d.di_size)) {
557 xfs_off_t aligned_offset;
558 xfs_filblks_t count_fsb;
559 unsigned int iosize;
560 xfs_fileoff_t ioalign;
561 int n;
562 xfs_fileoff_t start_fsb;
563 678
564 /* 679 if (prealloc) {
565 * If there are any real blocks past eof, then don't
566 * do any speculative allocation.
567 */
568 start_fsb = XFS_B_TO_FSBT(mp,
569 ((xfs_ufsize_t)(offset + count - 1)));
570 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
571 while (count_fsb > 0) {
572 nimaps = XFS_WRITE_IMAPS;
573 error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
574 0, &firstblock, 0, imap, &nimaps, NULL);
575 if (error) {
576 return error;
577 }
578 for (n = 0; n < nimaps; n++) {
579 if ( !(io->io_flags & XFS_IOCORE_RT) &&
580 !imap[n].br_startblock) {
581 cmn_err(CE_PANIC,"Access to block "
582 "zero: fs <%s> inode: %lld "
583 "start_block : %llx start_off "
584 ": %llx blkcnt : %llx "
585 "extent-state : %x \n",
586 (ip->i_mount)->m_fsname,
587 (long long)ip->i_ino,
588 imap[n].br_startblock,
589 imap[n].br_startoff,
590 imap[n].br_blockcount,
591 imap[n].br_state);
592 }
593 if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
594 (imap[n].br_startblock != DELAYSTARTBLOCK)) {
595 goto write_map;
596 }
597 start_fsb += imap[n].br_blockcount;
598 count_fsb -= imap[n].br_blockcount;
599 }
600 }
601 iosize = mp->m_writeio_blocks;
602 aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); 680 aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
603 ioalign = XFS_B_TO_FSBT(mp, aligned_offset); 681 ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
604 last_fsb = ioalign + iosize; 682 last_fsb = ioalign + mp->m_writeio_blocks;
605 aeof = 1; 683 } else {
684 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
606 } 685 }
607write_map:
608 nimaps = XFS_WRITE_IMAPS;
609 firstblock = NULLFSBLOCK;
610 686
611 /* 687 if (prealloc || extsz) {
612 * If mounted with the "-o swalloc" option, roundup the allocation 688 error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
613 * request to a stripe width boundary if the file size is >= 689 &last_fsb);
614 * stripe width and we are allocating past the allocation eof. 690 if (error)
615 */
616 if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_swidth
617 && (mp->m_flags & XFS_MOUNT_SWALLOC)
618 && (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)) && aeof) {
619 int eof;
620 xfs_fileoff_t new_last_fsb;
621
622 new_last_fsb = roundup_64(last_fsb, mp->m_swidth);
623 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
624 if (error) {
625 return error;
626 }
627 if (eof) {
628 last_fsb = new_last_fsb;
629 }
630 /*
631 * Roundup the allocation request to a stripe unit (m_dalign) boundary
632 * if the file size is >= stripe unit size, and we are allocating past
633 * the allocation eof.
634 */
635 } else if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_dalign &&
636 (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)) && aeof) {
637 int eof;
638 xfs_fileoff_t new_last_fsb;
639 new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
640 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
641 if (error) {
642 return error;
643 }
644 if (eof) {
645 last_fsb = new_last_fsb;
646 }
647 /*
648 * Round up the allocation request to a real-time extent boundary
649 * if the file is on the real-time subvolume.
650 */
651 } else if (io->io_flags & XFS_IOCORE_RT && aeof) {
652 int eof;
653 xfs_fileoff_t new_last_fsb;
654
655 new_last_fsb = roundup_64(last_fsb, mp->m_sb.sb_rextsize);
656 error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
657 if (error) {
658 return error; 691 return error;
659 }
660 if (eof)
661 last_fsb = new_last_fsb;
662 } 692 }
693
694 nimaps = XFS_WRITE_IMAPS;
695 firstblock = NULLFSBLOCK;
663 error = xfs_bmapi(NULL, ip, offset_fsb, 696 error = xfs_bmapi(NULL, ip, offset_fsb,
664 (xfs_filblks_t)(last_fsb - offset_fsb), 697 (xfs_filblks_t)(last_fsb - offset_fsb),
665 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | 698 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
666 XFS_BMAPI_ENTIRE, &firstblock, 1, imap, 699 XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
667 &nimaps, NULL); 700 &nimaps, NULL);
668 /* 701 if (error && (error != ENOSPC))
669 * This can be EDQUOT, if nimaps == 0
670 */
671 if (error && (error != ENOSPC)) {
672 return XFS_ERROR(error); 702 return XFS_ERROR(error);
673 } 703
674 /* 704 /*
675 * If bmapi returned us nothing, and if we didn't get back EDQUOT, 705 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
676 * then we must have run out of space. 706 * then we must have run out of space - flush delalloc, and retry..
677 */ 707 */
678 if (nimaps == 0) { 708 if (nimaps == 0) {
679 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, 709 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
@@ -685,9 +715,7 @@ write_map:
685 goto retry; 715 goto retry;
686 } 716 }
687 717
688 *ret_imap = imap[0]; 718 if (!(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
689 *nmaps = 1;
690 if ( !(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
691 cmn_err(CE_PANIC,"Access to block zero: fs <%s> inode: %lld " 719 cmn_err(CE_PANIC,"Access to block zero: fs <%s> inode: %lld "
692 "start_block : %llx start_off : %llx blkcnt : %llx " 720 "start_block : %llx start_off : %llx blkcnt : %llx "
693 "extent-state : %x \n", 721 "extent-state : %x \n",
@@ -696,6 +724,10 @@ write_map:
696 ret_imap->br_startblock, ret_imap->br_startoff, 724 ret_imap->br_startblock, ret_imap->br_startoff,
697 ret_imap->br_blockcount,ret_imap->br_state); 725 ret_imap->br_blockcount,ret_imap->br_state);
698 } 726 }
727
728 *ret_imap = imap[0];
729 *nmaps = 1;
730
699 return 0; 731 return 0;
700} 732}
701 733
@@ -868,17 +900,17 @@ xfs_iomap_write_unwritten(
868{ 900{
869 xfs_mount_t *mp = ip->i_mount; 901 xfs_mount_t *mp = ip->i_mount;
870 xfs_iocore_t *io = &ip->i_iocore; 902 xfs_iocore_t *io = &ip->i_iocore;
871 xfs_trans_t *tp;
872 xfs_fileoff_t offset_fsb; 903 xfs_fileoff_t offset_fsb;
873 xfs_filblks_t count_fsb; 904 xfs_filblks_t count_fsb;
874 xfs_filblks_t numblks_fsb; 905 xfs_filblks_t numblks_fsb;
875 xfs_bmbt_irec_t imap; 906 xfs_fsblock_t firstfsb;
907 int nimaps;
908 xfs_trans_t *tp;
909 xfs_bmbt_irec_t imap;
910 xfs_bmap_free_t free_list;
911 uint resblks;
876 int committed; 912 int committed;
877 int error; 913 int error;
878 int nres;
879 int nimaps;
880 xfs_fsblock_t firstfsb;
881 xfs_bmap_free_t free_list;
882 914
883 xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, 915 xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN,
884 &ip->i_iocore, offset, count); 916 &ip->i_iocore, offset, count);
@@ -887,9 +919,9 @@ xfs_iomap_write_unwritten(
887 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 919 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
888 count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); 920 count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
889 921
890 do { 922 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
891 nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);
892 923
924 do {
893 /* 925 /*
894 * set up a transaction to convert the range of extents 926 * set up a transaction to convert the range of extents
895 * from unwritten to real. Do allocations in a loop until 927 * from unwritten to real. Do allocations in a loop until
@@ -897,7 +929,7 @@ xfs_iomap_write_unwritten(
897 */ 929 */
898 930
899 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); 931 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
900 error = xfs_trans_reserve(tp, nres, 932 error = xfs_trans_reserve(tp, resblks,
901 XFS_WRITE_LOG_RES(mp), 0, 933 XFS_WRITE_LOG_RES(mp), 0,
902 XFS_TRANS_PERM_LOG_RES, 934 XFS_TRANS_PERM_LOG_RES,
903 XFS_WRITE_LOG_COUNT); 935 XFS_WRITE_LOG_COUNT);
@@ -916,7 +948,7 @@ xfs_iomap_write_unwritten(
916 XFS_BMAP_INIT(&free_list, &firstfsb); 948 XFS_BMAP_INIT(&free_list, &firstfsb);
917 nimaps = 1; 949 nimaps = 1;
918 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, 950 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
919 XFS_BMAPI_WRITE, &firstfsb, 951 XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
920 1, &imap, &nimaps, &free_list); 952 1, &imap, &nimaps, &free_list);
921 if (error) 953 if (error)
922 goto error_on_bmapi_transaction; 954 goto error_on_bmapi_transaction;
@@ -930,7 +962,7 @@ xfs_iomap_write_unwritten(
930 xfs_iunlock(ip, XFS_ILOCK_EXCL); 962 xfs_iunlock(ip, XFS_ILOCK_EXCL);
931 if (error) 963 if (error)
932 goto error0; 964 goto error0;
933 965
934 if ( !(io->io_flags & XFS_IOCORE_RT) && !imap.br_startblock) { 966 if ( !(io->io_flags & XFS_IOCORE_RT) && !imap.br_startblock) {
935 cmn_err(CE_PANIC,"Access to block zero: fs <%s> " 967 cmn_err(CE_PANIC,"Access to block zero: fs <%s> "
936 "inode: %lld start_block : %llx start_off : " 968 "inode: %lld start_block : %llx start_off : "
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 5f6dce3b4fd6..a2b422c984f2 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -541,24 +541,6 @@ xfs_setattr(
541 } 541 }
542 542
543 /* 543 /*
544 * Can't set extent size unless the file is marked, or
545 * about to be marked as a realtime file.
546 *
547 * This check will be removed when fixed size extents
548 * with buffered data writes is implemented.
549 *
550 */
551 if ((mask & XFS_AT_EXTSIZE) &&
552 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
553 vap->va_extsize) &&
554 (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
555 ((mask & XFS_AT_XFLAGS) &&
556 (vap->va_xflags & XFS_XFLAG_REALTIME))))) {
557 code = XFS_ERROR(EINVAL);
558 goto error_return;
559 }
560
561 /*
562 * Can't change realtime flag if any extents are allocated. 544 * Can't change realtime flag if any extents are allocated.
563 */ 545 */
564 if ((ip->i_d.di_nextents || ip->i_delayed_blks) && 546 if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
@@ -820,13 +802,17 @@ xfs_setattr(
820 di_flags |= XFS_DIFLAG_RTINHERIT; 802 di_flags |= XFS_DIFLAG_RTINHERIT;
821 if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS) 803 if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS)
822 di_flags |= XFS_DIFLAG_NOSYMLINKS; 804 di_flags |= XFS_DIFLAG_NOSYMLINKS;
823 } else { 805 if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT)
806 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
807 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
824 if (vap->va_xflags & XFS_XFLAG_REALTIME) { 808 if (vap->va_xflags & XFS_XFLAG_REALTIME) {
825 di_flags |= XFS_DIFLAG_REALTIME; 809 di_flags |= XFS_DIFLAG_REALTIME;
826 ip->i_iocore.io_flags |= XFS_IOCORE_RT; 810 ip->i_iocore.io_flags |= XFS_IOCORE_RT;
827 } else { 811 } else {
828 ip->i_iocore.io_flags &= ~XFS_IOCORE_RT; 812 ip->i_iocore.io_flags &= ~XFS_IOCORE_RT;
829 } 813 }
814 if (vap->va_xflags & XFS_XFLAG_EXTSIZE)
815 di_flags |= XFS_DIFLAG_EXTSIZE;
830 } 816 }
831 ip->i_d.di_flags = di_flags; 817 ip->i_d.di_flags = di_flags;
832 } 818 }
@@ -1568,7 +1554,8 @@ xfs_release(
1568 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 1554 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
1569 ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && 1555 ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) &&
1570 (ip->i_df.if_flags & XFS_IFEXTENTS)) && 1556 (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
1571 (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)))) { 1557 (!(ip->i_d.di_flags &
1558 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
1572 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1559 if ((error = xfs_inactive_free_eofblocks(mp, ip)))
1573 return (error); 1560 return (error);
1574 /* Update linux inode block count after free above */ 1561 /* Update linux inode block count after free above */
@@ -1644,9 +1631,10 @@ xfs_inactive(
1644 if (ip->i_d.di_nlink != 0) { 1631 if (ip->i_d.di_nlink != 0) {
1645 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 1632 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
1646 ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && 1633 ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) &&
1647 (ip->i_df.if_flags & XFS_IFEXTENTS)) && 1634 (ip->i_df.if_flags & XFS_IFEXTENTS) &&
1648 (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)) || 1635 (!(ip->i_d.di_flags &
1649 (ip->i_delayed_blks != 0))) { 1636 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
1637 (ip->i_delayed_blks != 0)))) {
1650 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1638 if ((error = xfs_inactive_free_eofblocks(mp, ip)))
1651 return (VN_INACTIVE_CACHE); 1639 return (VN_INACTIVE_CACHE);
1652 /* Update linux inode block count after free above */ 1640 /* Update linux inode block count after free above */
@@ -3998,42 +3986,36 @@ xfs_alloc_file_space(
3998 int alloc_type, 3986 int alloc_type,
3999 int attr_flags) 3987 int attr_flags)
4000{ 3988{
3989 xfs_mount_t *mp = ip->i_mount;
3990 xfs_off_t count;
4001 xfs_filblks_t allocated_fsb; 3991 xfs_filblks_t allocated_fsb;
4002 xfs_filblks_t allocatesize_fsb; 3992 xfs_filblks_t allocatesize_fsb;
4003 int committed; 3993 xfs_extlen_t extsz, temp;
4004 xfs_off_t count; 3994 xfs_fileoff_t startoffset_fsb;
4005 xfs_filblks_t datablocks;
4006 int error;
4007 xfs_fsblock_t firstfsb; 3995 xfs_fsblock_t firstfsb;
4008 xfs_bmap_free_t free_list; 3996 int nimaps;
4009 xfs_bmbt_irec_t *imapp; 3997 int bmapi_flag;
4010 xfs_bmbt_irec_t imaps[1]; 3998 int quota_flag;
4011 xfs_mount_t *mp;
4012 int numrtextents;
4013 int reccount;
4014 uint resblks;
4015 int rt; 3999 int rt;
4016 int rtextsize;
4017 xfs_fileoff_t startoffset_fsb;
4018 xfs_trans_t *tp; 4000 xfs_trans_t *tp;
4019 int xfs_bmapi_flags; 4001 xfs_bmbt_irec_t imaps[1], *imapp;
4002 xfs_bmap_free_t free_list;
4003 uint qblocks, resblks, resrtextents;
4004 int committed;
4005 int error;
4020 4006
4021 vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); 4007 vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
4022 mp = ip->i_mount;
4023 4008
4024 if (XFS_FORCED_SHUTDOWN(mp)) 4009 if (XFS_FORCED_SHUTDOWN(mp))
4025 return XFS_ERROR(EIO); 4010 return XFS_ERROR(EIO);
4026 4011
4027 /* 4012 rt = XFS_IS_REALTIME_INODE(ip);
4028 * determine if this is a realtime file 4013 if (unlikely(rt)) {
4029 */ 4014 if (!(extsz = ip->i_d.di_extsize))
4030 if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) { 4015 extsz = mp->m_sb.sb_rextsize;
4031 if (ip->i_d.di_extsize) 4016 } else {
4032 rtextsize = ip->i_d.di_extsize; 4017 extsz = ip->i_d.di_extsize;
4033 else 4018 }
4034 rtextsize = mp->m_sb.sb_rextsize;
4035 } else
4036 rtextsize = 0;
4037 4019
4038 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 4020 if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
4039 return error; 4021 return error;
@@ -4044,8 +4026,8 @@ xfs_alloc_file_space(
4044 count = len; 4026 count = len;
4045 error = 0; 4027 error = 0;
4046 imapp = &imaps[0]; 4028 imapp = &imaps[0];
4047 reccount = 1; 4029 nimaps = 1;
4048 xfs_bmapi_flags = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0); 4030 bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
4049 startoffset_fsb = XFS_B_TO_FSBT(mp, offset); 4031 startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
4050 allocatesize_fsb = XFS_B_TO_FSB(mp, count); 4032 allocatesize_fsb = XFS_B_TO_FSB(mp, count);
4051 4033
@@ -4066,43 +4048,51 @@ xfs_alloc_file_space(
4066 } 4048 }
4067 4049
4068 /* 4050 /*
4069 * allocate file space until done or until there is an error 4051 * Allocate file space until done or until there is an error
4070 */ 4052 */
4071retry: 4053retry:
4072 while (allocatesize_fsb && !error) { 4054 while (allocatesize_fsb && !error) {
4055 xfs_fileoff_t s, e;
4056
4073 /* 4057 /*
4074 * determine if reserving space on 4058 * Determine space reservations for data/realtime,
4075 * the data or realtime partition.
4076 */ 4059 */
4077 if (rt) { 4060 if (unlikely(extsz)) {
4078 xfs_fileoff_t s, e;
4079
4080 s = startoffset_fsb; 4061 s = startoffset_fsb;
4081 do_div(s, rtextsize); 4062 do_div(s, extsz);
4082 s *= rtextsize; 4063 s *= extsz;
4083 e = roundup_64(startoffset_fsb + allocatesize_fsb, 4064 e = startoffset_fsb + allocatesize_fsb;
4084 rtextsize); 4065 if ((temp = do_mod(startoffset_fsb, extsz)))
4085 numrtextents = (int)(e - s) / mp->m_sb.sb_rextsize; 4066 e += temp;
4086 datablocks = 0; 4067 if ((temp = do_mod(e, extsz)))
4068 e += extsz - temp;
4069 } else {
4070 s = 0;
4071 e = allocatesize_fsb;
4072 }
4073
4074 if (unlikely(rt)) {
4075 resrtextents = qblocks = (uint)(e - s);
4076 resrtextents /= mp->m_sb.sb_rextsize;
4077 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
4078 quota_flag = XFS_QMOPT_RES_RTBLKS;
4087 } else { 4079 } else {
4088 datablocks = allocatesize_fsb; 4080 resrtextents = 0;
4089 numrtextents = 0; 4081 resblks = qblocks = \
4082 XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s));
4083 quota_flag = XFS_QMOPT_RES_REGBLKS;
4090 } 4084 }
4091 4085
4092 /* 4086 /*
4093 * allocate and setup the transaction 4087 * Allocate and setup the transaction.
4094 */ 4088 */
4095 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); 4089 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
4096 resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks); 4090 error = xfs_trans_reserve(tp, resblks,
4097 error = xfs_trans_reserve(tp, 4091 XFS_WRITE_LOG_RES(mp), resrtextents,
4098 resblks,
4099 XFS_WRITE_LOG_RES(mp),
4100 numrtextents,
4101 XFS_TRANS_PERM_LOG_RES, 4092 XFS_TRANS_PERM_LOG_RES,
4102 XFS_WRITE_LOG_COUNT); 4093 XFS_WRITE_LOG_COUNT);
4103
4104 /* 4094 /*
4105 * check for running out of space 4095 * Check for running out of space
4106 */ 4096 */
4107 if (error) { 4097 if (error) {
4108 /* 4098 /*
@@ -4113,8 +4103,8 @@ retry:
4113 break; 4103 break;
4114 } 4104 }
4115 xfs_ilock(ip, XFS_ILOCK_EXCL); 4105 xfs_ilock(ip, XFS_ILOCK_EXCL);
4116 error = XFS_TRANS_RESERVE_QUOTA(mp, tp, 4106 error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
4117 ip->i_udquot, ip->i_gdquot, resblks, 0, 0); 4107 qblocks, 0, quota_flag);
4118 if (error) 4108 if (error)
4119 goto error1; 4109 goto error1;
4120 4110
@@ -4122,19 +4112,19 @@ retry:
4122 xfs_trans_ihold(tp, ip); 4112 xfs_trans_ihold(tp, ip);
4123 4113
4124 /* 4114 /*
4125 * issue the bmapi() call to allocate the blocks 4115 * Issue the xfs_bmapi() call to allocate the blocks
4126 */ 4116 */
4127 XFS_BMAP_INIT(&free_list, &firstfsb); 4117 XFS_BMAP_INIT(&free_list, &firstfsb);
4128 error = xfs_bmapi(tp, ip, startoffset_fsb, 4118 error = xfs_bmapi(tp, ip, startoffset_fsb,
4129 allocatesize_fsb, xfs_bmapi_flags, 4119 allocatesize_fsb, bmapi_flag,
4130 &firstfsb, 0, imapp, &reccount, 4120 &firstfsb, 0, imapp, &nimaps,
4131 &free_list); 4121 &free_list);
4132 if (error) { 4122 if (error) {
4133 goto error0; 4123 goto error0;
4134 } 4124 }
4135 4125
4136 /* 4126 /*
4137 * complete the transaction 4127 * Complete the transaction
4138 */ 4128 */
4139 error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); 4129 error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
4140 if (error) { 4130 if (error) {
@@ -4149,7 +4139,7 @@ retry:
4149 4139
4150 allocated_fsb = imapp->br_blockcount; 4140 allocated_fsb = imapp->br_blockcount;
4151 4141
4152 if (reccount == 0) { 4142 if (nimaps == 0) {
4153 error = XFS_ERROR(ENOSPC); 4143 error = XFS_ERROR(ENOSPC);
4154 break; 4144 break;
4155 } 4145 }
@@ -4172,9 +4162,11 @@ dmapi_enospc_check:
4172 4162
4173 return error; 4163 return error;
4174 4164
4175 error0: 4165error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
4176 xfs_bmap_cancel(&free_list); 4166 xfs_bmap_cancel(&free_list);
4177 error1: 4167 XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);
4168
4169error1: /* Just cancel transaction */
4178 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); 4170 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
4179 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4171 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4180 goto dmapi_enospc_check; 4172 goto dmapi_enospc_check;
@@ -4419,8 +4411,8 @@ xfs_free_file_space(
4419 } 4411 }
4420 xfs_ilock(ip, XFS_ILOCK_EXCL); 4412 xfs_ilock(ip, XFS_ILOCK_EXCL);
4421 error = XFS_TRANS_RESERVE_QUOTA(mp, tp, 4413 error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
4422 ip->i_udquot, ip->i_gdquot, resblks, 0, rt ? 4414 ip->i_udquot, ip->i_gdquot, resblks, 0,
4423 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4415 XFS_QMOPT_RES_REGBLKS);
4424 if (error) 4416 if (error)
4425 goto error1; 4417 goto error1;
4426 4418