path: root/fs/xfs/xfs_alloc.c
author     Jiri Kosina <jkosina@suse.cz>   2013-01-29 04:48:30 -0500
committer  Jiri Kosina <jkosina@suse.cz>   2013-01-29 04:48:30 -0500
commit     617677295b53a40d0e54aac4cbbc216ffbc755dd (patch)
tree       51b9e87213243ed5efff252c8e8d8fec4eebc588 /fs/xfs/xfs_alloc.c
parent     5c8d1b68e01a144813e38795fe6dbe7ebb506131 (diff)
parent     6abb7c25775b7fb2225ad0508236d63ca710e65f (diff)
Merge branch 'master' into for-next
Conflicts:
        drivers/devfreq/exynos4_bus.c

Sync with Linus' tree to be able to apply patches that are against newer code (mvneta).
Diffstat (limited to 'fs/xfs/xfs_alloc.c')
-rw-r--r--   fs/xfs/xfs_alloc.c   183
1 file changed, 115 insertions(+), 68 deletions(-)
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 4f33c32affe3..393055fe3aef 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -430,6 +430,60 @@ xfs_alloc_fixup_trees(
 	return 0;
 }
 
+static void
+xfs_agfl_verify(
+	struct xfs_buf	*bp)
+{
+#ifdef WHEN_CRCS_COME_ALONG
+	/*
+	 * we cannot actually do any verification of the AGFL because mkfs does
+	 * not initialise the AGFL to zero or NULL. Hence the only valid part of
+	 * the AGFL is what the AGF says is active. We can't get to the AGF, so
+	 * we can't verify just those entries are valid.
+	 *
+	 * This problem goes away when the CRC format change comes along as that
+	 * requires the AGFL to be initialised by mkfs. At that point, we can
+	 * verify the blocks in the agfl -active or not- lie within the bounds
+	 * of the AG. Until then, just leave this check ifdef'd out.
+	 */
+	struct xfs_mount *mp = bp->b_target->bt_mount;
+	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
+	int		agfl_ok = 1;
+
+	int		i;
+
+	for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
+		if (be32_to_cpu(agfl->agfl_bno[i]) == NULLAGBLOCK ||
+		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
+			agfl_ok = 0;
+	}
+
+	if (!agfl_ok) {
+		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, agfl);
+		xfs_buf_ioerror(bp, EFSCORRUPTED);
+	}
+#endif
+}
+
+static void
+xfs_agfl_write_verify(
+	struct xfs_buf	*bp)
+{
+	xfs_agfl_verify(bp);
+}
+
+static void
+xfs_agfl_read_verify(
+	struct xfs_buf	*bp)
+{
+	xfs_agfl_verify(bp);
+}
+
+const struct xfs_buf_ops xfs_agfl_buf_ops = {
+	.verify_read = xfs_agfl_read_verify,
+	.verify_write = xfs_agfl_write_verify,
+};
+
 /*
  * Read in the allocation group free block array.
  */
@@ -447,7 +501,7 @@ xfs_alloc_read_agfl(
 	error = xfs_trans_read_buf(
 			mp, tp, mp->m_ddev_targp,
 			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
-			XFS_FSS_TO_BB(mp, 1), 0, &bp);
+			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
 	if (error)
 		return error;
 	ASSERT(!xfs_buf_geterror(bp));
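Reviewer note on the two hunks above: rather than every caller open-coding sanity checks after a read, the buffer is now read with a const ops table (xfs_agfl_buf_ops) whose verify_read/verify_write callbacks the buffer layer runs when I/O completes or is about to be submitted, and a failed check marks the buffer as corrupted instead of the caller returning the error itself. The standalone C sketch below illustrates that ops-table pattern only; every name in it (demo_buf, demo_buf_ops, demo_read_buf) is invented for the illustration and is not the kernel API.

#include <stdio.h>

struct demo_buf;

struct demo_buf_ops {
        void (*verify_read)(struct demo_buf *bp);
        void (*verify_write)(struct demo_buf *bp);
};

struct demo_buf {
        const struct demo_buf_ops *ops;
        int error;                      /* set by a verifier on corruption */
        unsigned char data[64];         /* stand-in for on-disk contents */
};

/* A verifier flags the buffer rather than returning an error itself. */
static void demo_verify(struct demo_buf *bp)
{
        if (bp->data[0] != 0x58)        /* pretend 0x58 is the magic byte */
                bp->error = -1;
}

static void demo_read_verify(struct demo_buf *bp)  { demo_verify(bp); }
static void demo_write_verify(struct demo_buf *bp) { demo_verify(bp); }

static const struct demo_buf_ops demo_buf_ops = {
        .verify_read  = demo_read_verify,
        .verify_write = demo_write_verify,
};

/* The "read" path runs the attached read verifier once I/O has completed. */
static int demo_read_buf(struct demo_buf *bp, const struct demo_buf_ops *ops)
{
        bp->ops = ops;
        /* ... actual I/O would fill bp->data here ... */
        if (bp->ops && bp->ops->verify_read)
                bp->ops->verify_read(bp);
        return bp->error;
}

int main(void)
{
        struct demo_buf bp = { 0 };

        bp.data[0] = 0x58;
        printf("good buffer -> %d\n", demo_read_buf(&bp, &demo_buf_ops));

        bp.data[0] = 0;
        bp.error = 0;
        printf("bad buffer  -> %d\n", demo_read_buf(&bp, &demo_buf_ops));
        return 0;
}

The payoff shows up later in this diff, where xfs_read_agf drops its inline validation because the same checks now run inside a verifier.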
@@ -1866,6 +1920,7 @@ xfs_alloc_fix_freelist(
 	/*
 	 * Initialize the args structure.
 	 */
+	memset(&targs, 0, sizeof(targs));
 	targs.tp = tp;
 	targs.mp = mp;
 	targs.agbp = agbp;
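The one-line memset above matters because targs is a stack variable and only a subset of its fields is assigned afterwards; zeroing it first keeps the untouched fields at 0/NULL rather than leftover stack contents. A tiny, self-contained example of the zero-then-fill pattern follows; the struct and values are hypothetical, not the XFS xfs_alloc_arg layout.

#include <stdio.h>
#include <string.h>

/* Hypothetical argument structure with more fields than this caller sets. */
struct alloc_args {
        int   agno;
        int   minlen;
        int   maxlen;
        int   flags;            /* left at 0 by this caller */
        void *private;          /* left at NULL by this caller */
};

int main(void)
{
        struct alloc_args targs;

        /* Zero everything first so the fields we do not assign are 0/NULL
         * instead of whatever happened to be on the stack. */
        memset(&targs, 0, sizeof(targs));
        targs.agno = 3;
        targs.minlen = 1;
        targs.maxlen = 16;

        printf("flags=%d private=%p\n", targs.flags, (void *)targs.private);
        return 0;
}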
@@ -2090,6 +2145,63 @@ xfs_alloc_put_freelist(
 	return 0;
 }
 
+static void
+xfs_agf_verify(
+	struct xfs_buf	*bp)
+ {
+	struct xfs_mount *mp = bp->b_target->bt_mount;
+	struct xfs_agf	*agf;
+	int		agf_ok;
+
+	agf = XFS_BUF_TO_AGF(bp);
+
+	agf_ok = agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
+		XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
+		be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
+		be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
+		be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
+		be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp);
+
+	/*
+	 * during growfs operations, the perag is not fully initialised,
+	 * so we can't use it for any useful checking. growfs ensures we can't
+	 * use it by using uncached buffers that don't have the perag attached
+	 * so we can detect and avoid this problem.
+	 */
+	if (bp->b_pag)
+		agf_ok = agf_ok && be32_to_cpu(agf->agf_seqno) ==
+						bp->b_pag->pag_agno;
+
+	if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+		agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
+						be32_to_cpu(agf->agf_length);
+
+	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
+			XFS_RANDOM_ALLOC_READ_AGF))) {
+		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, agf);
+		xfs_buf_ioerror(bp, EFSCORRUPTED);
+	}
+}
+
+static void
+xfs_agf_read_verify(
+	struct xfs_buf	*bp)
+{
+	xfs_agf_verify(bp);
+}
+
+static void
+xfs_agf_write_verify(
+	struct xfs_buf	*bp)
+{
+	xfs_agf_verify(bp);
+}
+
+const struct xfs_buf_ops xfs_agf_buf_ops = {
+	.verify_read = xfs_agf_read_verify,
+	.verify_write = xfs_agf_write_verify,
+};
+
 /*
  * Read in the allocation group header (free/alloc section).
  */
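One detail worth calling out in the verifier above: the AGF fields are stored big-endian on disk, so every comparison first converts with be32_to_cpu(). The userspace sketch below shows the same idea using ntohl(), which likewise converts a big-endian 32-bit value to host byte order; the struct, magic value, and numbers are invented for the illustration.

#include <arpa/inet.h>   /* ntohl()/htonl(): big-endian <-> host byte order */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical on-disk header: fields are stored big-endian. */
struct disk_hdr {
        uint32_t magic;         /* big-endian on disk */
        uint32_t length;        /* big-endian on disk */
        uint32_t freeblks;      /* big-endian on disk */
};

#define DEMO_MAGIC 0x58414746u  /* invented magic value for the sketch */

static int hdr_ok(const struct disk_hdr *h)
{
        /* Convert each field to host order before comparing, the same way
         * the verifier above wraps every field in be32_to_cpu(). */
        return ntohl(h->magic) == DEMO_MAGIC &&
               ntohl(h->freeblks) <= ntohl(h->length);
}

int main(void)
{
        struct disk_hdr h = {
                .magic    = htonl(DEMO_MAGIC),
                .length   = htonl(1000),
                .freeblks = htonl(250),
        };

        printf("header ok: %d\n", hdr_ok(&h));
        return 0;
}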
@@ -2101,44 +2213,19 @@ xfs_read_agf(
 	int		flags,	/* XFS_BUF_ */
 	struct xfs_buf	**bpp)	/* buffer for the ag freelist header */
 {
-	struct xfs_agf	*agf;		/* ag freelist header */
-	int		agf_ok;		/* set if agf is consistent */
 	int		error;
 
 	ASSERT(agno != NULLAGNUMBER);
 	error = xfs_trans_read_buf(
 			mp, tp, mp->m_ddev_targp,
 			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
-			XFS_FSS_TO_BB(mp, 1), flags, bpp);
+			XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
 	if (error)
 		return error;
 	if (!*bpp)
 		return 0;
 
 	ASSERT(!(*bpp)->b_error);
-	agf = XFS_BUF_TO_AGF(*bpp);
-
-	/*
-	 * Validate the magic number of the agf block.
-	 */
-	agf_ok =
-		agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
-		XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
-		be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
-		be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
-		be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
-		be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp) &&
-		be32_to_cpu(agf->agf_seqno) == agno;
-	if (xfs_sb_version_haslazysbcount(&mp->m_sb))
-		agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
-				be32_to_cpu(agf->agf_length);
-	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
-			XFS_RANDOM_ALLOC_READ_AGF))) {
-		XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
-				     XFS_ERRLEVEL_LOW, mp, agf);
-		xfs_trans_brelse(tp, *bpp);
-		return XFS_ERROR(EFSCORRUPTED);
-	}
 	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
 	return 0;
 }
@@ -2207,7 +2294,7 @@ xfs_alloc_read_agf(
  * group or loop over the allocation groups to find the result.
  */
 int				/* error */
-__xfs_alloc_vextent(
+xfs_alloc_vextent(
 	xfs_alloc_arg_t	*args)	/* allocation argument structure */
 {
 	xfs_agblock_t	agsize;	/* allocation group size */
@@ -2417,46 +2504,6 @@ error0:
 	return error;
 }
 
-static void
-xfs_alloc_vextent_worker(
-	struct work_struct	*work)
-{
-	struct xfs_alloc_arg	*args = container_of(work,
-						struct xfs_alloc_arg, work);
-	unsigned long		pflags;
-
-	/* we are in a transaction context here */
-	current_set_flags_nested(&pflags, PF_FSTRANS);
-
-	args->result = __xfs_alloc_vextent(args);
-	complete(args->done);
-
-	current_restore_flags_nested(&pflags, PF_FSTRANS);
-}
-
-/*
- * Data allocation requests often come in with little stack to work on. Push
- * them off to a worker thread so there is lots of stack to use. Metadata
- * requests, OTOH, are generally from low stack usage paths, so avoid the
- * context switch overhead here.
- */
-int
-xfs_alloc_vextent(
-	struct xfs_alloc_arg	*args)
-{
-	DECLARE_COMPLETION_ONSTACK(done);
-
-	if (!args->userdata)
-		return __xfs_alloc_vextent(args);
-
-
-	args->done = &done;
-	INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
-	queue_work(xfs_alloc_wq, &args->work);
-	wait_for_completion(&done);
-	return args->result;
-}
-
 /*
  * Free an extent.
  * Just break up the extent address and hand off to xfs_free_ag_extent
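The block removed in the last hunk existed only to run data allocations on a fresh stack: it queued the work to a workqueue and then blocked on an on-stack completion until the worker filled in args->result, and with that indirection gone xfs_alloc_vextent is again the direct entry point (hence the rename in the earlier hunk). The standalone sketch below shows the same hand-off-and-wait shape with a plain thread and a join; it is an illustration of the pattern only, not the kernel workqueue/completion API, and all names in it are invented.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical argument block playing the role of struct xfs_alloc_arg:
 * inputs plus a slot for the result that the worker fills in. */
struct work_args {
        int input;
        int result;
};

/* Runs on the worker thread, i.e. on its own, fresh stack. */
static void *worker(void *arg)
{
        struct work_args *args = arg;

        args->result = args->input * 2; /* stand-in for the real allocation */
        return NULL;
}

int main(void)
{
        struct work_args args = { .input = 21, .result = 0 };
        pthread_t tid;

        /* Hand the work off, then block until it finishes -- analogous to
         * queue_work() followed by wait_for_completion() in the removed code. */
        if (pthread_create(&tid, NULL, worker, &args) != 0)
                return 1;
        pthread_join(tid, NULL);

        printf("result = %d\n", args.result);
        return 0;
}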