summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2018-06-08 12:54:22 -0400
committerDarrick J. Wong <darrick.wong@oracle.com>2018-06-08 13:07:52 -0400
commit0703a8e1c17e2cba742eafe640be3b60f77352c4 (patch)
treef52e7880cb8e77ef601a5937b1586271e2389b9b
parentbb3d48dcf86a97dc25fe9fc2c11938e19cb4399a (diff)
xfs: replace do_mod with native operations
do_mod() is a hold-over from when we had different sizes for file offsets and other internal values for 40 bit XFS filesystems. Hence, depending on build flags, variables passed to do_mod() could change size. We no longer support those small format filesystems and hence everything is of fixed size these days, even on 32 bit platforms. As such, we can convert all the do_mod() callers to platform optimised modulus operations as defined by linux/math64.h. Individual conversions depend on the types of variables being used. Signed-Off-By: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c37
-rw-r--r--fs/xfs/xfs_bmap_util.c12
-rw-r--r--fs/xfs/xfs_inode.c2
-rw-r--r--fs/xfs/xfs_iomap.h4
-rw-r--r--fs/xfs/xfs_linux.h19
-rw-r--r--fs/xfs/xfs_log_recover.c32
-rw-r--r--fs/xfs/xfs_rtalloc.c10
7 files changed, 66 insertions, 50 deletions
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 6a79a07528cf..01628f0c9a0c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -2923,7 +2923,7 @@ xfs_bmap_extsize_align(
2923 * perform this alignment, or if a truncate shot us in the 2923 * perform this alignment, or if a truncate shot us in the
2924 * foot. 2924 * foot.
2925 */ 2925 */
2926 temp = do_mod(orig_off, extsz); 2926 div_u64_rem(orig_off, extsz, &temp);
2927 if (temp) { 2927 if (temp) {
2928 align_alen += temp; 2928 align_alen += temp;
2929 align_off -= temp; 2929 align_off -= temp;
@@ -3497,15 +3497,17 @@ xfs_bmap_btalloc(
3497 /* apply extent size hints if obtained earlier */ 3497 /* apply extent size hints if obtained earlier */
3498 if (align) { 3498 if (align) {
3499 args.prod = align; 3499 args.prod = align;
3500 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) 3500 div_u64_rem(ap->offset, args.prod, &args.mod);
3501 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3501 if (args.mod)
3502 args.mod = args.prod - args.mod;
3502 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3503 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3503 args.prod = 1; 3504 args.prod = 1;
3504 args.mod = 0; 3505 args.mod = 0;
3505 } else { 3506 } else {
3506 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3507 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3507 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod)))) 3508 div_u64_rem(ap->offset, args.prod, &args.mod);
3508 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3509 if (args.mod)
3510 args.mod = args.prod - args.mod;
3509 } 3511 }
3510 /* 3512 /*
3511 * If we are not low on available data blocks, and the 3513 * If we are not low on available data blocks, and the
@@ -4953,13 +4955,15 @@ xfs_bmap_del_extent_real(
4953 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 4955 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4954 xfs_fsblock_t bno; 4956 xfs_fsblock_t bno;
4955 xfs_filblks_t len; 4957 xfs_filblks_t len;
4958 xfs_extlen_t mod;
4959
4960 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
4961 &mod);
4962 ASSERT(mod == 0);
4963 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
4964 &mod);
4965 ASSERT(mod == 0);
4956 4966
4957 ASSERT(do_mod(del->br_blockcount, mp->m_sb.sb_rextsize) == 0);
4958 ASSERT(do_mod(del->br_startblock, mp->m_sb.sb_rextsize) == 0);
4959 bno = del->br_startblock;
4960 len = del->br_blockcount;
4961 do_div(bno, mp->m_sb.sb_rextsize);
4962 do_div(len, mp->m_sb.sb_rextsize);
4963 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 4967 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
4964 if (error) 4968 if (error)
4965 goto done; 4969 goto done;
@@ -5296,9 +5300,12 @@ __xfs_bunmapi(
5296 del.br_blockcount = max_len; 5300 del.br_blockcount = max_len;
5297 } 5301 }
5298 5302
5303 if (!isrt)
5304 goto delete;
5305
5299 sum = del.br_startblock + del.br_blockcount; 5306 sum = del.br_startblock + del.br_blockcount;
5300 if (isrt && 5307 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
5301 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) { 5308 if (mod) {
5302 /* 5309 /*
5303 * Realtime extent not lined up at the end. 5310 * Realtime extent not lined up at the end.
5304 * The extent could have been split into written 5311 * The extent could have been split into written
@@ -5345,7 +5352,8 @@ __xfs_bunmapi(
5345 goto error0; 5352 goto error0;
5346 goto nodelete; 5353 goto nodelete;
5347 } 5354 }
5348 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) { 5355 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
5356 if (mod) {
5349 /* 5357 /*
5350 * Realtime extent is lined up at the end but not 5358 * Realtime extent is lined up at the end but not
5351 * at the front. We'll get rid of full extents if 5359 * at the front. We'll get rid of full extents if
@@ -5414,6 +5422,7 @@ __xfs_bunmapi(
5414 } 5422 }
5415 } 5423 }
5416 5424
5425delete:
5417 if (wasdel) { 5426 if (wasdel) {
5418 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur, 5427 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5419 &got, &del); 5428 &got, &del);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 7d26933a542f..c35009a86699 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -80,6 +80,7 @@ xfs_bmap_rtalloc(
80 int error; /* error return value */ 80 int error; /* error return value */
81 xfs_mount_t *mp; /* mount point structure */ 81 xfs_mount_t *mp; /* mount point structure */
82 xfs_extlen_t prod = 0; /* product factor for allocators */ 82 xfs_extlen_t prod = 0; /* product factor for allocators */
83 xfs_extlen_t mod = 0; /* product factor for allocators */
83 xfs_extlen_t ralen = 0; /* realtime allocation length */ 84 xfs_extlen_t ralen = 0; /* realtime allocation length */
84 xfs_extlen_t align; /* minimum allocation alignment */ 85 xfs_extlen_t align; /* minimum allocation alignment */
85 xfs_rtblock_t rtb; 86 xfs_rtblock_t rtb;
@@ -99,7 +100,8 @@ xfs_bmap_rtalloc(
99 * If the offset & length are not perfectly aligned 100 * If the offset & length are not perfectly aligned
100 * then kill prod, it will just get us in trouble. 101 * then kill prod, it will just get us in trouble.
101 */ 102 */
102 if (do_mod(ap->offset, align) || ap->length % align) 103 div_u64_rem(ap->offset, align, &mod);
104 if (mod || ap->length % align)
103 prod = 1; 105 prod = 1;
104 /* 106 /*
105 * Set ralen to be the actual requested length in rtextents. 107 * Set ralen to be the actual requested length in rtextents.
@@ -936,9 +938,11 @@ xfs_alloc_file_space(
936 do_div(s, extsz); 938 do_div(s, extsz);
937 s *= extsz; 939 s *= extsz;
938 e = startoffset_fsb + allocatesize_fsb; 940 e = startoffset_fsb + allocatesize_fsb;
939 if ((temp = do_mod(startoffset_fsb, extsz))) 941 div_u64_rem(startoffset_fsb, extsz, &temp);
942 if (temp)
940 e += temp; 943 e += temp;
941 if ((temp = do_mod(e, extsz))) 944 div_u64_rem(e, extsz, &temp);
945 if (temp)
942 e += extsz - temp; 946 e += extsz - temp;
943 } else { 947 } else {
944 s = 0; 948 s = 0;
@@ -1099,7 +1103,7 @@ xfs_adjust_extent_unmap_boundaries(
1099 1103
1100 if (nimap && imap.br_startblock != HOLESTARTBLOCK) { 1104 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1101 ASSERT(imap.br_startblock != DELAYSTARTBLOCK); 1105 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1102 mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize); 1106 div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
1103 if (mod) 1107 if (mod)
1104 *startoffset_fsb += mp->m_sb.sb_rextsize - mod; 1108 *startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1105 } 1109 }
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 6cda0f08b045..4a2e5e13c569 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2258,7 +2258,7 @@ xfs_ifree_cluster(
2258 */ 2258 */
2259 ioffset = inum - xic->first_ino; 2259 ioffset = inum - xic->first_ino;
2260 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) { 2260 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2261 ASSERT(do_mod(ioffset, inodes_per_cluster) == 0); 2261 ASSERT(ioffset % inodes_per_cluster == 0);
2262 continue; 2262 continue;
2263 } 2263 }
2264 2264
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index b0c98d4faa5b..83474c9cede9 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -30,10 +30,10 @@ xfs_aligned_fsb_count(
30 if (extsz) { 30 if (extsz) {
31 xfs_extlen_t align; 31 xfs_extlen_t align;
32 32
33 align = do_mod(offset_fsb, extsz); 33 div_u64_rem(offset_fsb, extsz, &align);
34 if (align) 34 if (align)
35 count_fsb += align; 35 count_fsb += align;
36 align = do_mod(count_fsb, extsz); 36 div_u64_rem(count_fsb, extsz, &align);
37 if (align) 37 if (align)
38 count_fsb += extsz - align; 38 count_fsb += extsz - align;
39 } 39 }
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 0fcb6295aa5d..edbd5a210df2 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -207,25 +207,6 @@ static inline xfs_dev_t linux_to_xfs_dev_t(dev_t dev)
207#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL) 207#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL)
208#define xfs_stack_trace() dump_stack() 208#define xfs_stack_trace() dump_stack()
209 209
210/* Side effect free 64 bit mod operation */
211static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
212{
213 switch (n) {
214 case 4:
215 return *(__u32 *)a % b;
216 case 8:
217 {
218 __u64 c = *(__u64 *)a;
219 return do_div(c, b);
220 }
221 }
222
223 /* NOTREACHED */
224 return 0;
225}
226
227#define do_mod(a, b) xfs_do_mod(&(a), (b), sizeof(a))
228
229static inline uint64_t roundup_64(uint64_t x, uint32_t y) 210static inline uint64_t roundup_64(uint64_t x, uint32_t y)
230{ 211{
231 x += y - 1; 212 x += y - 1;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b1aedf73d09d..b181b5f57a19 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1236,6 +1236,25 @@ xlog_verify_head(
1236} 1236}
1237 1237
1238/* 1238/*
1239 * We need to make sure we handle log wrapping properly, so we can't use the
1240 * calculated logbno directly. Make sure it wraps to the correct bno inside the
1241 * log.
1242 *
1243 * The log is limited to 32 bit sizes, so we use the appropriate modulus
1244 * operation here and cast it back to a 64 bit daddr on return.
1245 */
1246static inline xfs_daddr_t
1247xlog_wrap_logbno(
1248 struct xlog *log,
1249 xfs_daddr_t bno)
1250{
1251 int mod;
1252
1253 div_s64_rem(bno, log->l_logBBsize, &mod);
1254 return mod;
1255}
1256
1257/*
1239 * Check whether the head of the log points to an unmount record. In other 1258 * Check whether the head of the log points to an unmount record. In other
1240 * words, determine whether the log is clean. If so, update the in-core state 1259 * words, determine whether the log is clean. If so, update the in-core state
1241 * appropriately. 1260 * appropriately.
@@ -1283,12 +1302,13 @@ xlog_check_unmount_rec(
1283 } else { 1302 } else {
1284 hblks = 1; 1303 hblks = 1;
1285 } 1304 }
1286 after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)); 1305
1287 after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize); 1306 after_umount_blk = xlog_wrap_logbno(log,
1307 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1308
1288 if (*head_blk == after_umount_blk && 1309 if (*head_blk == after_umount_blk &&
1289 be32_to_cpu(rhead->h_num_logops) == 1) { 1310 be32_to_cpu(rhead->h_num_logops) == 1) {
1290 umount_data_blk = rhead_blk + hblks; 1311 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1291 umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
1292 error = xlog_bread(log, umount_data_blk, 1, bp, &offset); 1312 error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1293 if (error) 1313 if (error)
1294 return error; 1314 return error;
@@ -5459,9 +5479,7 @@ xlog_do_recovery_pass(
5459 */ 5479 */
5460 if (blk_no + bblks <= log->l_logBBsize || 5480 if (blk_no + bblks <= log->l_logBBsize ||
5461 blk_no >= log->l_logBBsize) { 5481 blk_no >= log->l_logBBsize) {
5462 /* mod blk_no in case the header wrapped and 5482 rblk_no = xlog_wrap_logbno(log, blk_no);
5463 * pushed it beyond the end of the log */
5464 rblk_no = do_mod(blk_no, log->l_logBBsize);
5465 error = xlog_bread(log, rblk_no, bblks, dbp, 5483 error = xlog_bread(log, rblk_no, bblks, dbp,
5466 &offset); 5484 &offset);
5467 if (error) 5485 if (error)
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 80bbfe604ce0..329d4d26c13e 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -301,8 +301,12 @@ xfs_rtallocate_extent_block(
301 /* 301 /*
302 * If size should be a multiple of prod, make that so. 302 * If size should be a multiple of prod, make that so.
303 */ 303 */
304 if (prod > 1 && (p = do_mod(bestlen, prod))) 304 if (prod > 1) {
305 bestlen -= p; 305 div_u64_rem(bestlen, prod, &p);
306 if (p)
307 bestlen -= p;
308 }
309
306 /* 310 /*
307 * Allocate besti for bestlen & return that. 311 * Allocate besti for bestlen & return that.
308 */ 312 */
@@ -1263,7 +1267,7 @@ xfs_rtpick_extent(
1263 b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >> 1267 b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >>
1264 (log2 + 1); 1268 (log2 + 1);
1265 if (b >= mp->m_sb.sb_rextents) 1269 if (b >= mp->m_sb.sb_rextents)
1266 b = do_mod(b, mp->m_sb.sb_rextents); 1270 div64_u64_rem(b, mp->m_sb.sb_rextents, &b);
1267 if (b + len > mp->m_sb.sb_rextents) 1271 if (b + len > mp->m_sb.sb_rextents)
1268 b = mp->m_sb.sb_rextents - len; 1272 b = mp->m_sb.sb_rextents - len;
1269 } 1273 }