path: root/fs/xfs
author     Ben Myers <bpm@sgi.com>    2013-12-13 15:15:33 -0500
committer  Ben Myers <bpm@sgi.com>    2013-12-13 15:15:33 -0500
commit     46f23adf78545c49591619a615edeec41ed5a549 (patch)
tree       d9bbcb204be3f9875b708a7ec847fc99a572f4f3 /fs/xfs
parent     ffda4e83aa107ff55345dc583efdb24fca486fb5 (diff)
parent     f9e5abcfc5b299a988cf8f9d0ad11e03da14806b (diff)
Merge branch 'xfs-factor-icluster-macros' into for-next
Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/xfs_ialloc.c        53
-rw-r--r--   fs/xfs/xfs_ialloc.h        21
-rw-r--r--   fs/xfs/xfs_inode.c         23
-rw-r--r--   fs/xfs/xfs_itable.c        22
-rw-r--r--   fs/xfs/xfs_log_recover.c   16
-rw-r--r--   fs/xfs/xfs_trans_resv.c    10
-rw-r--r--   fs/xfs/xfs_trans_space.h    2
7 files changed, 65 insertions, 82 deletions
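
The common thread across these files is that the open-coded inode cluster geometry (the XFS_IALLOC_INODES, XFS_IALLOC_BLOCKS and XFS_INODE_CLUSTER_SIZE macros) gives way to the mount fields m_ialloc_inos, m_ialloc_blks and m_inode_cluster_size plus a new xfs_icluster_size_fsb() helper. A minimal standalone sketch of the arithmetic involved, using a simplified mock of the geometry fields and hypothetical sizes rather than the real struct xfs_mount:

#include <stdio.h>

/*
 * Simplified stand-in for the handful of geometry fields used below; the
 * real struct xfs_mount / struct xfs_sb carry far more state.
 */
struct mock_mount {
        unsigned int    sb_blocksize;           /* bytes per filesystem block */
        unsigned int    sb_blocklog;            /* log2(sb_blocksize) */
        unsigned int    sb_inopblog;            /* log2(inodes per block) */
        unsigned int    m_inode_cluster_size;   /* bytes per inode cluster */
};

/* Mirrors the new xfs_icluster_size_fsb() helper introduced below. */
static int
icluster_size_fsb(const struct mock_mount *mp)
{
        if (mp->sb_blocksize >= mp->m_inode_cluster_size)
                return 1;
        return mp->m_inode_cluster_size >> mp->sb_blocklog;
}

int
main(void)
{
        /* Hypothetical geometry: 4k blocks, 8k clusters, 512-byte inodes. */
        struct mock_mount mp = {
                .sb_blocksize           = 4096,
                .sb_blocklog            = 12,
                .sb_inopblog            = 3,    /* 8 inodes per 4k block */
                .m_inode_cluster_size   = 8192,
        };
        int blks_per_cluster = icluster_size_fsb(&mp);
        /*
         * Caller-side pattern used by xfs_ialloc_inode_init() and
         * xfs_ifree_cluster() in the hunks below.
         */
        int inodes_per_cluster = blks_per_cluster << mp.sb_inopblog;

        printf("blks_per_cluster=%d inodes_per_cluster=%d\n",
               blks_per_cluster, inodes_per_cluster);   /* prints 2 and 16 */
        return 0;
}

With 4096-byte blocks and an 8192-byte inode cluster this prints blks_per_cluster=2 and inodes_per_cluster=16, i.e. each inode buffer spans two filesystem blocks and sixteen inodes.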
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 7a728f9fc0be..5d7f105a1c82 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -52,7 +52,7 @@ xfs_ialloc_cluster_alignment(
 {
 	if (xfs_sb_version_hasalign(&args->mp->m_sb) &&
 	    args->mp->m_sb.sb_inoalignmt >=
-	    XFS_B_TO_FSBT(args->mp, XFS_INODE_CLUSTER_SIZE(args->mp)))
+	    XFS_B_TO_FSBT(args->mp, args->mp->m_inode_cluster_size))
 		return args->mp->m_sb.sb_inoalignmt;
 	return 1;
 }
@@ -170,27 +170,20 @@ xfs_ialloc_inode_init(
 {
 	struct xfs_buf		*fbuf;
 	struct xfs_dinode	*free;
-	int			blks_per_cluster, nbufs, ninodes;
+	int			nbufs, blks_per_cluster, inodes_per_cluster;
 	int			version;
 	int			i, j;
 	xfs_daddr_t		d;
 	xfs_ino_t		ino = 0;
 
 	/*
-	 * Loop over the new block(s), filling in the inodes.
-	 * For small block sizes, manipulate the inodes in buffers
-	 * which are multiples of the blocks size.
+	 * Loop over the new block(s), filling in the inodes.  For small block
+	 * sizes, manipulate the inodes in buffers which are multiples of the
+	 * blocks size.
 	 */
-	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
-		blks_per_cluster = 1;
-		nbufs = length;
-		ninodes = mp->m_sb.sb_inopblock;
-	} else {
-		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
-				   mp->m_sb.sb_blocksize;
-		nbufs = length / blks_per_cluster;
-		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
-	}
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+	nbufs = length / blks_per_cluster;
 
 	/*
 	 * Figure out what version number to use in the inodes we create.  If
@@ -225,7 +218,7 @@ xfs_ialloc_inode_init(
 		 * they track in the AIL as if they were physically logged.
 		 */
 		if (tp)
-			xfs_icreate_log(tp, agno, agbno, XFS_IALLOC_INODES(mp),
+			xfs_icreate_log(tp, agno, agbno, mp->m_ialloc_inos,
 					mp->m_sb.sb_inodesize, length, gen);
 	} else if (xfs_sb_version_hasnlink(&mp->m_sb))
 		version = 2;
@@ -246,7 +239,7 @@ xfs_ialloc_inode_init(
 		/* Initialize the inode buffers and log them appropriately. */
 		fbuf->b_ops = &xfs_inode_buf_ops;
 		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
-		for (i = 0; i < ninodes; i++) {
+		for (i = 0; i < inodes_per_cluster; i++) {
 			int	ioffset = i << mp->m_sb.sb_inodelog;
 			uint	isize = xfs_dinode_size(version);
 
@@ -329,11 +322,11 @@ xfs_ialloc_ag_alloc(
 	 * Locking will ensure that we don't have two callers in here
 	 * at one time.
 	 */
-	newlen = XFS_IALLOC_INODES(args.mp);
+	newlen = args.mp->m_ialloc_inos;
 	if (args.mp->m_maxicount &&
 	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
 		return XFS_ERROR(ENOSPC);
-	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
+	args.minlen = args.maxlen = args.mp->m_ialloc_blks;
 	/*
 	 * First try to allocate inodes contiguous with the last-allocated
 	 * chunk of inodes.  If the filesystem is striped, this will fill
@@ -343,7 +336,7 @@ xfs_ialloc_ag_alloc(
 	newino = be32_to_cpu(agi->agi_newino);
 	agno = be32_to_cpu(agi->agi_seqno);
 	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
-		     XFS_IALLOC_BLOCKS(args.mp);
+		     args.mp->m_ialloc_blks;
 	if (likely(newino != NULLAGINO &&
 		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
 		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
@@ -585,7 +578,7 @@ xfs_ialloc_ag_select(
 		 * Is there enough free space for the file plus a block of
 		 * inodes? (if we need to allocate some)?
 		 */
-		ineed = XFS_IALLOC_BLOCKS(mp);
+		ineed = mp->m_ialloc_blks;
 		longest = pag->pagf_longest;
 		if (!longest)
 			longest = pag->pagf_flcount > 0;
@@ -999,7 +992,7 @@ xfs_dialloc(
 	 * inode.
 	 */
 	if (mp->m_maxicount &&
-	    mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
+	    mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) {
 		noroom = 1;
 		okalloc = 0;
 	}
@@ -1202,7 +1195,7 @@ xfs_difree(
 	 * When an inode cluster is free, it becomes eligible for removal
 	 */
 	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
-	    (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {
+	    (rec.ir_freecount == mp->m_ialloc_inos)) {
 
 		*delete = 1;
 		*first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
@@ -1212,7 +1205,7 @@ xfs_difree(
 		 * AGI and Superblock inode counts, and mark the disk space
 		 * to be freed when the transaction is committed.
 		 */
-		ilen = XFS_IALLOC_INODES(mp);
+		ilen = mp->m_ialloc_inos;
 		be32_add_cpu(&agi->agi_count, -ilen);
 		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
 		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
@@ -1228,9 +1221,9 @@ xfs_difree(
 			goto error0;
 		}
 
-		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp,
-				agno, XFS_AGINO_TO_AGBNO(mp, rec.ir_startino)),
-				XFS_IALLOC_BLOCKS(mp), flist, mp);
+		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno,
+				  XFS_AGINO_TO_AGBNO(mp, rec.ir_startino)),
+				  mp->m_ialloc_blks, flist, mp);
 	} else {
 		*delete = 0;
 
@@ -1311,7 +1304,7 @@ xfs_imap_lookup(
 
 	/* check that the returned record contains the required inode */
 	if (rec.ir_startino > agino ||
-	    rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
+	    rec.ir_startino + mp->m_ialloc_inos <= agino)
 		return EINVAL;
 
 	/* for untrusted inodes check it is allocated first */
@@ -1384,7 +1377,7 @@ xfs_imap(
 		return XFS_ERROR(EINVAL);
 	}
 
-	blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
 
 	/*
 	 * For bulkstat and handle lookups, we have an untrusted inode number
@@ -1405,7 +1398,7 @@ xfs_imap(
 	 * If the inode cluster size is the same as the blocksize or
 	 * smaller we get to the buffer by simple arithmetics.
 	 */
-	if (XFS_INODE_CLUSTER_SIZE(mp) <= mp->m_sb.sb_blocksize) {
+	if (blks_per_cluster == 1) {
 		offset = XFS_INO_TO_OFFSET(mp, ino);
 		ASSERT(offset < mp->m_sb.sb_inopblock);
 
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index a8f76a5ff418..812365d17e67 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -25,17 +25,18 @@ struct xfs_mount;
 struct xfs_trans;
 struct xfs_btree_cur;
 
-/*
- * Allocation parameters for inode allocation.
- */
-#define	XFS_IALLOC_INODES(mp)	(mp)->m_ialloc_inos
-#define	XFS_IALLOC_BLOCKS(mp)	(mp)->m_ialloc_blks
-
-/*
- * Move inodes in clusters of this size.
- */
+/* Move inodes in clusters of this size */
 #define	XFS_INODE_BIG_CLUSTER_SIZE	8192
-#define	XFS_INODE_CLUSTER_SIZE(mp)	(mp)->m_inode_cluster_size
+
+/* Calculate and return the number of filesystem blocks per inode cluster */
+static inline int
+xfs_icluster_size_fsb(
+	struct xfs_mount	*mp)
+{
+	if (mp->m_sb.sb_blocksize >= mp->m_inode_cluster_size)
+		return 1;
+	return mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
+}
 
 /*
  * Make an inode pointer out of the buffer/offset.
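
The inline helper added above replaces the blocksize-versus-cluster-size branch that each caller used to open-code. As a quick standalone sanity check (mock fields and hypothetical geometries, not the kernel structures) that the old per-call-site arithmetic and the new helper-based form agree:

#include <assert.h>
#include <stdio.h>

/* Same simplified mock fields as the sketch after the diffstat. */
struct mock_mount {
        unsigned int    sb_blocksize;           /* bytes per filesystem block */
        unsigned int    sb_blocklog;            /* log2(sb_blocksize) */
        unsigned int    sb_inopblock;           /* inodes per block */
        unsigned int    sb_inopblog;            /* log2(sb_inopblock) */
        unsigned int    m_inode_cluster_size;   /* bytes per inode cluster */
};

/* New single computation, as in xfs_icluster_size_fsb() above. */
static int
icluster_size_fsb(const struct mock_mount *mp)
{
        if (mp->sb_blocksize >= mp->m_inode_cluster_size)
                return 1;
        return mp->m_inode_cluster_size >> mp->sb_blocklog;
}

/* Old two-branch computation removed from the call sites by this series. */
static void
old_way(const struct mock_mount *mp, int *blks, int *inodes)
{
        if (mp->sb_blocksize >= mp->m_inode_cluster_size) {
                *blks = 1;
                *inodes = mp->sb_inopblock;
        } else {
                *blks = mp->m_inode_cluster_size / mp->sb_blocksize;
                *inodes = *blks * mp->sb_inopblock;
        }
}

static void
check(struct mock_mount mp)
{
        int blks, inodes;

        old_way(&mp, &blks, &inodes);
        assert(blks == icluster_size_fsb(&mp));
        assert(inodes == (icluster_size_fsb(&mp) << mp.sb_inopblog));
}

int
main(void)
{
        /* 4k blocks, 8k clusters, 512-byte inodes: two blocks per cluster. */
        check((struct mock_mount){ 4096, 12, 8, 3, 8192 });
        /* 64k blocks, 8k clusters: the whole cluster fits in one block. */
        check((struct mock_mount){ 65536, 16, 128, 7, 8192 });
        printf("old and new cluster geometry computations agree\n");
        return 0;
}

Both geometries satisfy the asserts, matching the one-for-one substitution made at the call sites in the hunks above and below.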
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 001aa893ed59..833028cf205f 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2141,8 +2141,8 @@ xfs_ifree_cluster(
 {
 	xfs_mount_t		*mp = free_ip->i_mount;
 	int			blks_per_cluster;
+	int			inodes_per_cluster;
 	int			nbufs;
-	int			ninodes;
 	int			i, j;
 	xfs_daddr_t		blkno;
 	xfs_buf_t		*bp;
@@ -2152,18 +2152,11 @@ xfs_ifree_cluster(
 	struct xfs_perag	*pag;
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
-	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
-		blks_per_cluster = 1;
-		ninodes = mp->m_sb.sb_inopblock;
-		nbufs = XFS_IALLOC_BLOCKS(mp);
-	} else {
-		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
-					mp->m_sb.sb_blocksize;
-		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
-		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
-	}
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+	nbufs = mp->m_ialloc_blks / blks_per_cluster;
 
-	for (j = 0; j < nbufs; j++, inum += ninodes) {
+	for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
 					 XFS_INO_TO_AGBNO(mp, inum));
 
@@ -2225,7 +2218,7 @@ xfs_ifree_cluster(
 		 * transaction stale above, which means there is no point in
 		 * even trying to lock them.
 		 */
-		for (i = 0; i < ninodes; i++) {
+		for (i = 0; i < inodes_per_cluster; i++) {
 retry:
 			rcu_read_lock();
 			ip = radix_tree_lookup(&pag->pag_ici_root,
@@ -2906,13 +2899,13 @@ xfs_iflush_cluster(
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 
-	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
+	inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
 	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
 	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
 	if (!ilist)
 		goto out_put;
 
-	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
+	mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
 	rcu_read_lock();
 	/* really need a gang lookup range call here */
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index c237ad15d500..f46338285152 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -209,9 +209,8 @@ xfs_bulkstat(
 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
 	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
 	xfs_ino_t		lastino; /* last inode number returned */
-	int			nbcluster; /* # of blocks in a cluster */
-	int			nicluster; /* # of inodes in a cluster */
-	int			nimask;	/* mask for inode clusters */
+	int			blks_per_cluster; /* # of blocks per cluster */
+	int			inodes_per_cluster;/* # of inodes per cluster */
 	int			nirbuf;	/* size of irbuf */
 	int			rval;	/* return value error code */
 	int			tmp;	/* result value from btree calls */
@@ -243,11 +242,8 @@ xfs_bulkstat(
 	*done = 0;
 	fmterror = 0;
 	ubufp = ubuffer;
-	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
-		mp->m_sb.sb_inopblock :
-		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
-	nimask = ~(nicluster - 1);
-	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
 	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
 	if (!irbuf)
 		return ENOMEM;
@@ -390,12 +386,12 @@ xfs_bulkstat(
 			agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
 			for (chunkidx = 0;
 			     chunkidx < XFS_INODES_PER_CHUNK;
-			     chunkidx += nicluster,
-			     agbno += nbcluster) {
-				if (xfs_inobt_maskn(chunkidx, nicluster)
-						& ~r.ir_free)
+			     chunkidx += inodes_per_cluster,
+			     agbno += blks_per_cluster) {
+				if (xfs_inobt_maskn(chunkidx,
+						inodes_per_cluster) & ~r.ir_free)
 					xfs_btree_reada_bufs(mp, agno,
-						agbno, nbcluster,
+						agbno, blks_per_cluster,
 						&xfs_inode_buf_ops);
 			}
 			blk_finish_plug(&plug);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 07ab52ca8aba..22b6f35765c1 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2523,19 +2523,19 @@ xlog_recover_buffer_pass2(
 	 *
 	 * Also make sure that only inode buffers with good sizes stay in
 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
-	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
+	 * or mp->m_inode_cluster_size bytes, whichever is bigger.  The inode
 	 * buffers in the log can be a different size if the log was generated
 	 * by an older kernel using unclustered inode buffers or a newer kernel
 	 * running with a different inode cluster size.  Regardless, if the
-	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
-	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
+	 * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
+	 * for *our* value of mp->m_inode_cluster_size, then we need to keep
 	 * the buffer out of the buffer cache so that the buffer won't
 	 * overlap with future reads of those inodes.
 	 */
 	if (XFS_DINODE_MAGIC ==
 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
-			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
+			(__uint32_t)log->l_mp->m_inode_cluster_size))) {
 		xfs_buf_stale(bp);
 		error = xfs_bwrite(bp);
 	} else {
@@ -3208,10 +3208,10 @@ xlog_recover_do_icreate_pass2(
 	}
 
 	/* existing allocation is fixed value */
-	ASSERT(count == XFS_IALLOC_INODES(mp));
-	ASSERT(length == XFS_IALLOC_BLOCKS(mp));
-	if (count != XFS_IALLOC_INODES(mp) ||
-	     length != XFS_IALLOC_BLOCKS(mp)) {
+	ASSERT(count == mp->m_ialloc_inos);
+	ASSERT(length == mp->m_ialloc_blks);
+	if (count != mp->m_ialloc_inos ||
+	     length != mp->m_ialloc_blks) {
 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
 		return EINVAL;
 	}
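
The size check rewritten in the first xfs_log_recover.c hunk above compares the logged buffer length against MAX(blocksize, m_inode_cluster_size). A minimal standalone illustration of that comparison, using hypothetical sizes rather than values from any particular filesystem:

#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	/* Hypothetical geometry: 4k blocks, 8k inode clusters. */
	unsigned int sb_blocksize = 4096;
	unsigned int m_inode_cluster_size = 8192;

	/*
	 * Per the comment in xlog_recover_buffer_pass2() above, an inode
	 * buffer may only stay in the buffer cache after recovery if it is
	 * exactly MAX(blocksize, m_inode_cluster_size) bytes long.
	 */
	unsigned int want = MAX(sb_blocksize, m_inode_cluster_size);

	/*
	 * e.g. a 4096-byte inode buffer logged by an older, unclustered
	 * kernel fails the check and is written out and marked stale.
	 */
	unsigned int logged_len = 4096;

	printf("expected %u bytes, logged %u bytes: %s\n", want, logged_len,
	       logged_len != want ? "write back and mark stale" : "keep cached");
	return 0;
}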
diff --git a/fs/xfs/xfs_trans_resv.c b/fs/xfs/xfs_trans_resv.c
index 2fd59c0dae66..2ffd3e331b49 100644
--- a/fs/xfs/xfs_trans_resv.c
+++ b/fs/xfs/xfs_trans_resv.c
@@ -174,7 +174,7 @@ xfs_calc_itruncate_reservation(
 		     xfs_calc_buf_res(5, 0) +
 		     xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
 				      XFS_FSB_TO_B(mp, 1)) +
-		     xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
+		     xfs_calc_buf_res(2 + mp->m_ialloc_blks +
 				      mp->m_in_maxlevels, 0)));
 }
 
@@ -282,7 +282,7 @@ xfs_calc_create_resv_modify(
  * For create we can allocate some inodes giving:
  *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
  *    the superblock for the nlink flag: sector size
- *    the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
+ *    the inode blocks allocated: mp->m_ialloc_blks * blocksize
  *    the inode btree: max depth * blocksize
  *    the allocation btrees: 2 trees * (max depth - 1) * block size
  */
@@ -292,7 +292,7 @@ xfs_calc_create_resv_alloc(
 {
 	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
 		mp->m_sb.sb_sectsize +
-		xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp), XFS_FSB_TO_B(mp, 1)) +
+		xfs_calc_buf_res(mp->m_ialloc_blks, XFS_FSB_TO_B(mp, 1)) +
 		xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
 		xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
 				 XFS_FSB_TO_B(mp, 1));
@@ -385,9 +385,9 @@ xfs_calc_ifree_reservation(
 		xfs_calc_inode_res(mp, 1) +
 		xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
 		xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
-		max_t(uint, XFS_FSB_TO_B(mp, 1), XFS_INODE_CLUSTER_SIZE(mp)) +
+		max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size) +
 		xfs_calc_buf_res(1, 0) +
-		xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
+		xfs_calc_buf_res(2 + mp->m_ialloc_blks +
 				 mp->m_in_maxlevels, 0) +
 		xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
 				 XFS_FSB_TO_B(mp, 1));
diff --git a/fs/xfs/xfs_trans_space.h b/fs/xfs/xfs_trans_space.h
index 7d2c920dfb9c..af5dbe06cb65 100644
--- a/fs/xfs/xfs_trans_space.h
+++ b/fs/xfs/xfs_trans_space.h
@@ -47,7 +47,7 @@
 #define	XFS_DIRREMOVE_SPACE_RES(mp)	\
 	XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK)
 #define	XFS_IALLOC_SPACE_RES(mp)	\
-	(XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels - 1)
+	((mp)->m_ialloc_blks + (mp)->m_in_maxlevels - 1)
 
 /*
  * Space reservation values for various transactions.