author      Dave Chinner <david@fromorbit.com>   2015-08-19 19:28:45 -0400
committer   Dave Chinner <david@fromorbit.com>   2015-08-19 19:28:45 -0400
commit      aa493382cb8c5768ba452d87f175fc2aff63911d (patch)
tree        85896c37038d3f5eda1283d090eeb95ee6ec9c82
parent      5be203ad115c1d8294e8685253e05fcea0202e04 (diff)
parent      3403ccc0c9f069c40ea751a93ac6746f5ef2116a (diff)
Merge branch 'xfs-misc-fixes-for-4.3-2' into for-next
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c       |   2
-rw-r--r--  fs/xfs/libxfs/xfs_da_btree.c   |  23
-rw-r--r--  fs/xfs/libxfs/xfs_da_format.h  |  11
-rw-r--r--  fs/xfs/libxfs/xfs_dir2.c       |   3
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_data.c  |   3
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c         |   3
-rw-r--r--  fs/xfs/xfs_dir2_readdir.c      |  11
-rw-r--r--  fs/xfs/xfs_dquot.c             |   2
-rw-r--r--  fs/xfs/xfs_file.c              |  51
-rw-r--r--  fs/xfs/xfs_fsops.c             |   6
-rw-r--r--  fs/xfs/xfs_inode.c             | 116
-rw-r--r--  fs/xfs/xfs_inode.h             |  85
-rw-r--r--  fs/xfs/xfs_log_recover.c       |  14
-rw-r--r--  fs/xfs/xfs_super.c             |   4
-rw-r--r--  fs/xfs/xfs_symlink.c           |   7
15 files changed, 235 insertions, 106 deletions
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 3349c9a1e845..ff065578969f 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -139,6 +139,8 @@ xfs_attr_get(
 
         args.value = value;
         args.valuelen = *valuelenp;
+        /* Entirely possible to look up a name which doesn't exist */
+        args.op_flags = XFS_DA_OP_OKNOENT;
 
         lock_mode = xfs_ilock_attr_map_shared(ip);
         if (!xfs_inode_hasattr(ip))
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 3264d81488db..cd2201f5ab52 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -1822,6 +1822,7 @@ xfs_da3_path_shift(
         struct xfs_da_args *args;
         struct xfs_da_node_entry *btree;
         struct xfs_da3_icnode_hdr nodehdr;
+        struct xfs_buf *bp;
         xfs_dablk_t blkno = 0;
         int level;
         int error;
@@ -1866,20 +1867,24 @@
          */
         for (blk++, level++; level < path->active; blk++, level++) {
                 /*
-                 * Release the old block.
-                 * (if it's dirty, trans won't actually let go)
+                 * Read the next child block into a local buffer.
                  */
-                if (release)
-                        xfs_trans_brelse(args->trans, blk->bp);
+                error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
+                                          args->whichfork);
+                if (error)
+                        return error;
 
                 /*
-                 * Read the next child block.
+                 * Release the old block (if it's dirty, the trans doesn't
+                 * actually let go) and swap the local buffer into the path
+                 * structure. This ensures failure of the above read doesn't set
+                 * a NULL buffer in an active slot in the path.
                  */
+                if (release)
+                        xfs_trans_brelse(args->trans, blk->bp);
                 blk->blkno = blkno;
-                error = xfs_da3_node_read(args->trans, dp, blkno, -1,
-                                          &blk->bp, args->whichfork);
-                if (error)
-                        return error;
+                blk->bp = bp;
+
                 info = blk->bp->b_addr;
                 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
                        info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
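The restructured loop above is a general error-safety pattern: read the replacement buffer into a local variable, and only after the read succeeds release the old buffer and swap the new one into the long-lived path slot, so a failed read can never leave a NULL buffer in an active slot. A self-contained sketch of the pattern with made-up types:

```c
#include <stdio.h>
#include <stdlib.h>

struct block { int blkno; };
struct path_slot { struct block *bp; int blkno; };

/* May fail; on failure *out is left untouched. */
static int read_block(int blkno, struct block **out)
{
        if (blkno < 0)
                return -1;              /* simulated I/O error */
        *out = malloc(sizeof(**out));
        if (!*out)
                return -1;
        (*out)->blkno = blkno;
        return 0;
}

/* Replace the buffer held in @slot with the one backing @new_blkno. */
static int path_shift(struct path_slot *slot, int new_blkno)
{
        struct block *bp;
        int error;

        /* read the new block into a local first ... */
        error = read_block(new_blkno, &bp);
        if (error)
                return error;           /* slot->bp is still valid here */

        /* ... and only then release the old one and swap in the new one */
        free(slot->bp);
        slot->bp = bp;
        slot->blkno = new_blkno;
        return 0;
}

int main(void)
{
        struct path_slot slot = { NULL, 0 };

        read_block(1, &slot.bp);
        path_shift(&slot, -1);          /* fails, slot.bp stays intact */
        path_shift(&slot, 2);           /* succeeds */
        printf("slot now holds block %d\n", slot.bp->blkno);
        free(slot.bp);
        return 0;
}
```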
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 74bcbabfa523..b14bbd6bb05f 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -680,8 +680,15 @@ typedef struct xfs_attr_leaf_name_remote {
 typedef struct xfs_attr_leafblock {
         xfs_attr_leaf_hdr_t hdr;        /* constant-structure header block */
         xfs_attr_leaf_entry_t entries[1];       /* sorted on key, not name */
-        xfs_attr_leaf_name_local_t namelist;    /* grows from bottom of buf */
-        xfs_attr_leaf_name_remote_t valuelist;  /* grows from bottom of buf */
+        /*
+         * The rest of the block contains the following structures after the
+         * leaf entries, growing from the bottom up. The variables are never
+         * referenced and defining them can actually make gcc optimize away
+         * accesses to the 'entries' array above index 0 so don't do that.
+         *
+         * xfs_attr_leaf_name_local_t namelist;
+         * xfs_attr_leaf_name_remote_t valuelist;
+         */
 } xfs_attr_leafblock_t;
 
 /*
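Dropping the trailing namelist/valuelist members matters because, with other members declared after entries[1], gcc may legitimately assume entries is never indexed past 0 and optimise accordingly; the name records are instead found by byte offset from the start of the block. A rough userspace illustration of that offset-based addressing (the structures are simplified stand-ins, not the on-disk format):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCKSIZE 4096

struct leaf_entry {
        uint16_t nameidx;               /* byte offset of the name record */
};

struct leaf_hdr {
        uint16_t count;                 /* number of entries in use */
};

struct leaf_block {
        struct leaf_hdr   hdr;
        struct leaf_entry entries[1];   /* really hdr.count entries */
        /* name records live at the end of the block; not declared here */
};

static union {
        struct leaf_block leaf;
        char              bytes[BLOCKSIZE];
} blk;

/* Locate entry @idx's name record by offset instead of via a struct member. */
static char *leaf_name(int idx)
{
        return blk.bytes + blk.leaf.entries[idx].nameidx;
}

int main(void)
{
        blk.leaf.hdr.count = 1;
        blk.leaf.entries[0].nameidx = BLOCKSIZE - 16;   /* grows from the bottom */
        strcpy(leaf_name(0), "user.comment");

        printf("entry 0 name: %s\n", leaf_name(0));
        return 0;
}
```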
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index e0ba97610f01..9de401d297e5 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -362,6 +362,7 @@ xfs_dir_lookup(
         struct xfs_da_args *args;
         int rval;
         int v;          /* type-checking value */
+        int lock_mode;
 
         ASSERT(S_ISDIR(dp->i_d.di_mode));
         XFS_STATS_INC(xs_dir_lookup);
@@ -387,6 +388,7 @@ xfs_dir_lookup(
         if (ci_name)
                 args->op_flags |= XFS_DA_OP_CILOOKUP;
 
+        lock_mode = xfs_ilock_data_map_shared(dp);
         if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
                 rval = xfs_dir2_sf_lookup(args);
                 goto out_check_rval;
@@ -419,6 +421,7 @@ out_check_rval:
                 }
         }
 out_free:
+        xfs_iunlock(dp, lock_mode);
         kmem_free(args);
         return rval;
 }
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index 6a57fdbc63ef..824131e71bc5 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -252,7 +252,8 @@ xfs_dir3_data_reada_verify(
                 return;
         case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
         case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
-                xfs_dir3_data_verify(bp);
+                bp->b_ops = &xfs_dir3_data_buf_ops;
+                bp->b_ops->verify_read(bp);
                 return;
         default:
                 xfs_buf_ioerror(bp, -EFSCORRUPTED);
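The readahead verifier now installs the real buffer ops and invokes their normal read verifier, so a block pulled in by readahead gets the same verification as one read directly. A tiny sketch of that delegation with invented types (not the XFS buffer API):

```c
#include <stdio.h>

struct buf;

struct buf_ops {
        const char *name;
        void (*verify_read)(struct buf *bp);
};

struct buf {
        const struct buf_ops *ops;
        int error;
};

static void data_verify_read(struct buf *bp)
{
        /* stands in for CRC plus structure verification */
        printf("full read verify via %s\n", bp->ops->name);
}

static const struct buf_ops data_buf_ops = {
        .name        = "data_buf_ops",
        .verify_read = data_verify_read,
};

/* Readahead verifier: adopt the real ops, then defer to them. */
static void data_reada_verify(struct buf *bp)
{
        bp->ops = &data_buf_ops;
        bp->ops->verify_read(bp);
}

int main(void)
{
        struct buf bp = { 0 };

        data_reada_verify(&bp);
        return 0;
}
```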
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 21009dbdc21d..47425140f343 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -186,9 +186,6 @@ xfs_mount_validate_sb(
         if (xfs_sb_version_hassparseinodes(sbp)) {
                 uint32_t align;
 
-                xfs_alert(mp,
-        "EXPERIMENTAL sparse inode feature enabled. Use at your own risk!");
-
                 align = XFS_INODES_PER_CHUNK * sbp->sb_inodesize
                                 >> sbp->sb_blocklog;
                 if (sbp->sb_inoalignmt != align) {
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 098cd78fe708..a989a9c7edb7 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -171,6 +171,7 @@ xfs_dir2_block_getdents(
         int wantoff;            /* starting block offset */
         xfs_off_t cook;
         struct xfs_da_geometry *geo = args->geo;
+        int lock_mode;
 
         /*
          * If the block number in the offset is out of range, we're done.
@@ -178,7 +179,9 @@
         if (xfs_dir2_dataptr_to_db(geo, ctx->pos) > geo->datablk)
                 return 0;
 
+        lock_mode = xfs_ilock_data_map_shared(dp);
         error = xfs_dir3_block_read(NULL, dp, &bp);
+        xfs_iunlock(dp, lock_mode);
         if (error)
                 return error;
 
@@ -529,9 +532,12 @@ xfs_dir2_leaf_getdents(
                  * current buffer, need to get another one.
                  */
                 if (!bp || ptr >= (char *)bp->b_addr + geo->blksize) {
+                        int lock_mode;
 
+                        lock_mode = xfs_ilock_data_map_shared(dp);
                         error = xfs_dir2_leaf_readbuf(args, bufsize, map_info,
                                                       &curoff, &bp);
+                        xfs_iunlock(dp, lock_mode);
                         if (error || !map_info->map_valid)
                                 break;
 
@@ -653,7 +659,6 @@ xfs_readdir(
         struct xfs_da_args args = { NULL };
         int rval;
         int v;
-        uint lock_mode;
 
         trace_xfs_readdir(dp);
 
@@ -666,7 +671,7 @@ xfs_readdir(
         args.dp = dp;
         args.geo = dp->i_mount->m_dir_geo;
 
-        lock_mode = xfs_ilock_data_map_shared(dp);
+        xfs_ilock(dp, XFS_IOLOCK_SHARED);
         if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
                 rval = xfs_dir2_sf_getdents(&args, ctx);
         else if ((rval = xfs_dir2_isblock(&args, &v)))
@@ -675,7 +680,7 @@
                 rval = xfs_dir2_block_getdents(&args, ctx);
         else
                 rval = xfs_dir2_leaf_getdents(&args, ctx, bufsize);
-        xfs_iunlock(dp, lock_mode);
+        xfs_iunlock(dp, XFS_IOLOCK_SHARED);
 
         return rval;
 }
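After this change the directory's IOLOCK is held shared for the whole readdir call (taken in xfs_readdir), while the data-map ILOCK is taken and dropped only around the individual block/leaf buffer reads. A compact sketch of that lock-scoping pattern, using pthread rwlocks as stand-ins for the two inode locks (illustrative only):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t iolock = PTHREAD_RWLOCK_INITIALIZER;    /* dir I/O lock */
static pthread_rwlock_t ilock  = PTHREAD_RWLOCK_INITIALIZER;    /* mapping lock */

static int read_dir_block(int blkno)
{
        int error = 0;

        /* the mapping lock is only needed while looking up and reading blocks */
        pthread_rwlock_rdlock(&ilock);
        printf("reading directory block %d\n", blkno);
        pthread_rwlock_unlock(&ilock);
        return error;
}

static int getdents(void)
{
        int error = 0;

        /* serialise against directory modification for the whole operation */
        pthread_rwlock_rdlock(&iolock);
        for (int blkno = 0; blkno < 3 && !error; blkno++)
                error = read_dir_block(blkno);
        pthread_rwlock_unlock(&iolock);
        return error;
}

int main(void)
{
        return getdents();
}
```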
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 6964d7ceba96..30cb3afb67f0 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -251,7 +251,7 @@ xfs_qm_init_dquot_blk(
                 d->dd_diskdq.d_id = cpu_to_be32(curid);
                 d->dd_diskdq.d_flags = type;
                 if (xfs_sb_version_hascrc(&mp->m_sb)) {
-                        uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+                        uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
                         xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
                                          XFS_DQUOT_CRC_OFF);
                 }
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index db4acc1c3e73..de2c2376242b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -317,24 +317,33 @@ xfs_file_read_iter(
                 return -EIO;
 
         /*
-         * Locking is a bit tricky here. If we take an exclusive lock
-         * for direct IO, we effectively serialise all new concurrent
-         * read IO to this file and block it behind IO that is currently in
-         * progress because IO in progress holds the IO lock shared. We only
-         * need to hold the lock exclusive to blow away the page cache, so
-         * only take lock exclusively if the page cache needs invalidation.
-         * This allows the normal direct IO case of no page cache pages to
-         * proceeed concurrently without serialisation.
+         * Locking is a bit tricky here. If we take an exclusive lock for direct
+         * IO, we effectively serialise all new concurrent read IO to this file
+         * and block it behind IO that is currently in progress because IO in
+         * progress holds the IO lock shared. We only need to hold the lock
+         * exclusive to blow away the page cache, so only take lock exclusively
+         * if the page cache needs invalidation. This allows the normal direct
+         * IO case of no page cache pages to proceed concurrently without
+         * serialisation.
          */
         xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
         if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
                 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
                 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 
+                /*
+                 * The generic dio code only flushes the range of the particular
+                 * I/O. Because we take an exclusive lock here, this whole
+                 * sequence is considerably more expensive for us. This has a
+                 * noticeable performance impact for any file with cached pages,
+                 * even when outside of the range of the particular I/O.
+                 *
+                 * Hence, amortize the cost of the lock against a full file
+                 * flush and reduce the chances of repeated iolock cycles going
+                 * forward.
+                 */
                 if (inode->i_mapping->nrpages) {
-                        ret = filemap_write_and_wait_range(
-                                                        VFS_I(ip)->i_mapping,
-                                                        pos, pos + size - 1);
+                        ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
                         if (ret) {
                                 xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
                                 return ret;
@@ -345,9 +354,7 @@ xfs_file_read_iter(
                          * we fail to invalidate a page, but this should never
                          * happen on XFS. Warn if it does fail.
                          */
-                        ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
-                                        pos >> PAGE_CACHE_SHIFT,
-                                        (pos + size - 1) >> PAGE_CACHE_SHIFT);
+                        ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
                         WARN_ON_ONCE(ret);
                         ret = 0;
                 }
@@ -733,19 +740,19 @@ xfs_file_dio_aio_write(
         pos = iocb->ki_pos;
         end = pos + count - 1;
 
+        /*
+         * See xfs_file_read_iter() for why we do a full-file flush here.
+         */
         if (mapping->nrpages) {
-                ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-                                                   pos, end);
+                ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
                 if (ret)
                         goto out;
                 /*
-                 * Invalidate whole pages. This can return an error if
-                 * we fail to invalidate a page, but this should never
-                 * happen on XFS. Warn if it does fail.
+                 * Invalidate whole pages. This can return an error if we fail
+                 * to invalidate a page, but this should never happen on XFS.
+                 * Warn if it does fail.
                  */
-                ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
-                                        pos >> PAGE_CACHE_SHIFT,
-                                        end >> PAGE_CACHE_SHIFT);
+                ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
                 WARN_ON_ONCE(ret);
                 ret = 0;
         }
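The trade-off here: filemap_write_and_wait_range()/invalidate_inode_pages2_range() only touch the pages backing the current direct I/O, while the whole-file variants cost more up front but leave nrpages at zero so later direct I/O can stay on the shared iolock. The surrounding lock dance is also worth seeing in isolation: take the lock shared, and only if cached pages exist drop it, retake it exclusive, and re-check before flushing. A hedged userspace sketch of that upgrade-and-recheck pattern (placeholder helpers; pthread rwlocks cannot downgrade atomically the way the real iolock can):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t iolock = PTHREAD_RWLOCK_INITIALIZER;
static int cached_pages = 42;           /* stands in for mapping->nrpages */

/* placeholders for the real page-cache operations */
static int  flush_whole_file(void)      { cached_pages = 0; return 0; }
static void invalidate_whole_file(void) { cached_pages = 0; }

static int dio_read(void)
{
        pthread_rwlock_rdlock(&iolock);
        if (cached_pages) {
                /*
                 * Upgrade: drop shared, take exclusive, then re-check, since
                 * another task may have emptied the page cache in between.
                 */
                pthread_rwlock_unlock(&iolock);
                pthread_rwlock_wrlock(&iolock);

                if (cached_pages) {
                        int ret = flush_whole_file();
                        if (ret) {
                                pthread_rwlock_unlock(&iolock);
                                return ret;
                        }
                        invalidate_whole_file();
                }
                /* "demote" back to shared for the actual I/O */
                pthread_rwlock_unlock(&iolock);
                pthread_rwlock_rdlock(&iolock);
        }

        printf("issuing direct read with an empty page cache\n");
        pthread_rwlock_unlock(&iolock);
        return 0;
}

int main(void)
{
        return dio_read();
}
```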
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 9b3438a7680f..ee3aaa0a5317 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -250,7 +250,7 @@ xfs_growfs_data_private(
                 agf->agf_freeblks = cpu_to_be32(tmpsize);
                 agf->agf_longest = cpu_to_be32(tmpsize);
                 if (xfs_sb_version_hascrc(&mp->m_sb))
-                        uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_uuid);
+                        uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
 
                 error = xfs_bwrite(bp);
                 xfs_buf_relse(bp);
@@ -273,7 +273,7 @@ xfs_growfs_data_private(
                 if (xfs_sb_version_hascrc(&mp->m_sb)) {
                         agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
                         agfl->agfl_seqno = cpu_to_be32(agno);
-                        uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
+                        uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
                 }
 
                 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
@@ -309,7 +309,7 @@ xfs_growfs_data_private(
                 agi->agi_newino = cpu_to_be32(NULLAGINO);
                 agi->agi_dirino = cpu_to_be32(NULLAGINO);
                 if (xfs_sb_version_hascrc(&mp->m_sb))
-                        uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_uuid);
+                        uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
                 if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
                         agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
                         agi->agi_free_level = cpu_to_be32(1);
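These one-line conversions are part of the metadata UUID separation: v5 metadata is stamped and verified with sb_meta_uuid, which stays fixed, while the user-visible sb_uuid may be changed later without rewriting every block; when the meta_uuid feature bit is absent, the in-core sb_meta_uuid is simply a copy of sb_uuid. A small sketch of a verifier comparing against the metadata UUID (simplified types, not the kernel structures):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct { unsigned char b[16]; } fsuuid_t;

struct sb {
        fsuuid_t sb_uuid;       /* user-visible UUID, may be changed later */
        fsuuid_t sb_meta_uuid;  /* UUID stamped into all metadata */
};

static bool uuid_equal(const fsuuid_t *a, const fsuuid_t *b)
{
        return memcmp(a, b, sizeof(*a)) == 0;
}

/* Metadata blocks are always stamped and checked with the metadata UUID. */
static bool verify_block_uuid(const struct sb *sb, const fsuuid_t *blk_uuid)
{
        return uuid_equal(&sb->sb_meta_uuid, blk_uuid);
}

int main(void)
{
        struct sb sb;
        fsuuid_t blk;

        memset(&sb.sb_meta_uuid, 0xab, sizeof(sb.sb_meta_uuid));
        memset(&sb.sb_uuid, 0xcd, sizeof(sb.sb_uuid));  /* admin changed it */
        blk = sb.sb_meta_uuid;

        printf("block matches metadata uuid: %d\n",
               verify_block_uuid(&sb, &blk));
        return 0;
}
```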
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index d8230ba1b471..30555f8fd44b 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -164,7 +164,7 @@ xfs_ilock(
                (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
         ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
                (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 
         if (lock_flags & XFS_IOLOCK_EXCL)
                 mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
@@ -212,7 +212,7 @@ xfs_ilock_nowait(
                (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
         ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
                (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 
         if (lock_flags & XFS_IOLOCK_EXCL) {
                 if (!mrtryupdate(&ip->i_iolock))
@@ -281,7 +281,7 @@ xfs_iunlock(
                (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
         ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
                (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
         ASSERT(lock_flags != 0);
 
         if (lock_flags & XFS_IOLOCK_EXCL)
@@ -362,32 +362,52 @@ int xfs_lots_retries;
 int xfs_lock_delays;
 #endif
 
+#ifdef CONFIG_LOCKDEP
+static bool
+xfs_lockdep_subclass_ok(
+        int subclass)
+{
+        return subclass < MAX_LOCKDEP_SUBCLASSES;
+}
+#else
+#define xfs_lockdep_subclass_ok(subclass)       (true)
+#endif
+
 /*
  * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
- * value. This shouldn't be called for page fault locking, but we also need to
- * ensure we don't overrun the number of lockdep subclasses for the iolock or
- * mmaplock as that is limited to 12 by the mmap lock lockdep annotations.
+ * value. This can be called for any type of inode lock combination, including
+ * parent locking. Care must be taken to ensure we don't overrun the subclass
+ * storage fields in the class mask we build.
  */
 static inline int
 xfs_lock_inumorder(int lock_mode, int subclass)
 {
+        int class = 0;
+
+        ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
+                              XFS_ILOCK_RTSUM)));
+        ASSERT(xfs_lockdep_subclass_ok(subclass));
+
         if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
-                ASSERT(subclass + XFS_LOCK_INUMORDER <
-                        (1 << (XFS_MMAPLOCK_SHIFT - XFS_IOLOCK_SHIFT)));
-                lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
+                ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
+                ASSERT(xfs_lockdep_subclass_ok(subclass +
+                                                XFS_IOLOCK_PARENT_VAL));
+                class += subclass << XFS_IOLOCK_SHIFT;
+                if (lock_mode & XFS_IOLOCK_PARENT)
+                        class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
         }
 
         if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
-                ASSERT(subclass + XFS_LOCK_INUMORDER <
-                        (1 << (XFS_ILOCK_SHIFT - XFS_MMAPLOCK_SHIFT)));
-                lock_mode |= (subclass + XFS_LOCK_INUMORDER) <<
-                                                        XFS_MMAPLOCK_SHIFT;
+                ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
+                class += subclass << XFS_MMAPLOCK_SHIFT;
         }
 
-        if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
-                lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
+        if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
+                ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
+                class += subclass << XFS_ILOCK_SHIFT;
+        }
 
-        return lock_mode;
+        return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
 }
 
 /*
@@ -399,6 +419,11 @@ xfs_lock_inumorder(int lock_mode, int subclass)
  * transaction (such as truncate). This can result in deadlock since the long
  * running trans might need to wait for the inode we just locked in order to
  * push the tail and free space in the log.
+ *
+ * xfs_lock_inodes() can only be used to lock one type of lock at a time -
+ * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
+ * lock more than one at a time, lockdep will report false positives saying we
+ * have violated locking orders.
  */
 void
 xfs_lock_inodes(
@@ -409,8 +434,29 @@ xfs_lock_inodes(
         int attempts = 0, i, j, try_lock;
         xfs_log_item_t *lp;
 
-        /* currently supports between 2 and 5 inodes */
+        /*
+         * Currently supports between 2 and 5 inodes with exclusive locking. We
+         * support an arbitrary depth of locking here, but absolute limits on
+         * inodes depend on the type of locking and the limits placed by
+         * lockdep annotations in xfs_lock_inumorder. These are all checked by
+         * the asserts.
+         */
         ASSERT(ips && inodes >= 2 && inodes <= 5);
+        ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
+                            XFS_ILOCK_EXCL));
+        ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
+                              XFS_ILOCK_SHARED)));
+        ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
+               inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
+        ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
+               inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
+        ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
+               inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
+
+        if (lock_mode & XFS_IOLOCK_EXCL) {
+                ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
+        } else if (lock_mode & XFS_MMAPLOCK_EXCL)
+                ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
 
         try_lock = 0;
         i = 0;
@@ -629,30 +675,29 @@ xfs_lookup(
 {
         xfs_ino_t inum;
         int error;
-        uint lock_mode;
 
         trace_xfs_lookup(dp, name);
 
         if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                 return -EIO;
 
-        lock_mode = xfs_ilock_data_map_shared(dp);
+        xfs_ilock(dp, XFS_IOLOCK_SHARED);
         error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
-        xfs_iunlock(dp, lock_mode);
-
         if (error)
-                goto out;
+                goto out_unlock;
 
         error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
         if (error)
                 goto out_free_name;
 
+        xfs_iunlock(dp, XFS_IOLOCK_SHARED);
         return 0;
 
 out_free_name:
         if (ci_name)
                 kmem_free(ci_name->name);
-out:
+out_unlock:
+        xfs_iunlock(dp, XFS_IOLOCK_SHARED);
         *ipp = NULL;
         return error;
 }
@@ -787,7 +832,7 @@ xfs_ialloc(
 
         if (ip->i_d.di_version == 3) {
                 ASSERT(ip->i_d.di_ino == ino);
-                ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
+                ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid));
                 ip->i_d.di_crc = 0;
                 ip->i_d.di_changecount = 1;
                 ip->i_d.di_lsn = 0;
@@ -1149,7 +1194,8 @@ xfs_create(
                 goto out_trans_cancel;
 
 
-        xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
+        xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
+                      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
         unlock_dp_on_error = true;
 
         xfs_bmap_init(&free_list, &first_block);
@@ -1185,7 +1231,7 @@
          * the transaction cancel unlocking dp so don't do it explicitly in the
          * error path.
          */
-        xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+        xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
         unlock_dp_on_error = false;
 
         error = xfs_dir_createname(tp, dp, name, ip->i_ino,
@@ -1258,7 +1304,7 @@
         xfs_qm_dqrele(pdqp);
 
         if (unlock_dp_on_error)
-                xfs_iunlock(dp, XFS_ILOCK_EXCL);
+                xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
         return error;
 }
 
@@ -1403,10 +1449,11 @@ xfs_link(
         if (error)
                 goto error_return;
 
+        xfs_ilock(tdp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
         xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
 
         xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
-        xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
+        xfs_trans_ijoin(tp, tdp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 
         /*
          * If we are using project inheritance, we only allow hard link
@@ -2510,9 +2557,10 @@ xfs_remove(
                 goto out_trans_cancel;
         }
 
+        xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
         xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
 
-        xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+        xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
         /*
@@ -2893,6 +2941,12 @@ xfs_rename(
          * whether the target directory is the same as the source
          * directory, we can lock from 2 to 4 inodes.
          */
+        if (!new_parent)
+                xfs_ilock(src_dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
+        else
+                xfs_lock_two_inodes(src_dp, target_dp,
+                                    XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
+
         xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
 
         /*
@@ -2900,9 +2954,9 @@
          * we can rely on either trans_commit or trans_cancel to unlock
          * them.
          */
-        xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
+        xfs_trans_ijoin(tp, src_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
         if (new_parent)
-                xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
+                xfs_trans_ijoin(tp, target_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
         xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
         if (target_ip)
                 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
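Across create, remove, link, rename and symlink the parent directory is now locked with XFS_IOLOCK_EXCL (plus the IOLOCK_PARENT lockdep subclass) and then joined to the transaction together with the ILOCK, so commit or cancel releases both. A toy model of the "join locks to a transaction so completion unlocks them" idea (plain mutexes and invented helpers, not the XFS transaction API):

```c
#include <pthread.h>
#include <stdio.h>

#define MAX_JOINED 4

/* Toy transaction that remembers which locks to drop at commit/cancel time. */
struct trans {
        pthread_mutex_t *joined[MAX_JOINED];
        int              njoined;
};

static void trans_ijoin(struct trans *tp, pthread_mutex_t *lock)
{
        /* analogous to passing lock flags to xfs_trans_ijoin(): the
         * transaction now owns the eventual unlock */
        if (tp->njoined < MAX_JOINED)
                tp->joined[tp->njoined++] = lock;
}

static void trans_finish(struct trans *tp)
{
        /* both commit and cancel paths release every joined lock */
        for (int i = 0; i < tp->njoined; i++)
                pthread_mutex_unlock(tp->joined[i]);
        tp->njoined = 0;
}

int main(void)
{
        pthread_mutex_t dir_iolock = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t dir_ilock  = PTHREAD_MUTEX_INITIALIZER;
        struct trans tp = { .njoined = 0 };

        /* lock the parent directory, then let the transaction own the unlock */
        pthread_mutex_lock(&dir_iolock);
        pthread_mutex_lock(&dir_ilock);
        trans_ijoin(&tp, &dir_iolock);
        trans_ijoin(&tp, &dir_ilock);

        printf("directory update would happen here\n");

        trans_finish(&tp);      /* commit or cancel: unlocks both */
        return 0;
}
```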
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 8f22d20368d8..ca9e11989cbd 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -284,9 +284,9 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
  * Flags for lockdep annotations.
  *
  * XFS_LOCK_PARENT - for directory operations that require locking a
- * parent directory inode and a child entry inode. The parent gets locked
- * with this flag so it gets a lockdep subclass of 1 and the child entry
- * lock will have a lockdep subclass of 0.
+ * parent directory inode and a child entry inode. IOLOCK requires nesting,
+ * MMAPLOCK does not support this class, ILOCK requires a single subclass
+ * to differentiate parent from child.
  *
  * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
  * inodes do not participate in the normal lock order, and thus have their
@@ -295,30 +295,63 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
  * XFS_LOCK_INUMORDER - for locking several inodes at the some time
  * with xfs_lock_inodes(). This flag is used as the starting subclass
  * and each subsequent lock acquired will increment the subclass by one.
- * So the first lock acquired will have a lockdep subclass of 4, the
- * second lock will have a lockdep subclass of 5, and so on. It is
- * the responsibility of the class builder to shift this to the correct
- * portion of the lock_mode lockdep mask.
+ * However, MAX_LOCKDEP_SUBCLASSES == 8, which means we are greatly
+ * limited to the subclasses we can represent via nesting. We need at least
+ * 5 inodes nest depth for the ILOCK through rename, and we also have to support
+ * XFS_ILOCK_PARENT, which gives 6 subclasses. Then we have XFS_ILOCK_RTBITMAP
+ * and XFS_ILOCK_RTSUM, which are another 2 unique subclasses, so that's all
+ * 8 subclasses supported by lockdep.
+ *
+ * This also means we have to number the sub-classes in the lowest bits of
+ * the mask we keep, and we have to ensure we never exceed 3 bits of lockdep
+ * mask and we can't use bit-masking to build the subclasses. What a mess.
+ *
+ * Bit layout:
+ *
+ * Bit          Lock Region
+ * 16-19        XFS_IOLOCK_SHIFT dependencies
+ * 20-23        XFS_MMAPLOCK_SHIFT dependencies
+ * 24-31        XFS_ILOCK_SHIFT dependencies
+ *
+ * IOLOCK values
+ *
+ * 0-3          subclass value
+ * 4-7          PARENT subclass values
+ *
+ * MMAPLOCK values
+ *
+ * 0-3          subclass value
+ * 4-7          unused
+ *
+ * ILOCK values
+ * 0-4          subclass values
+ * 5            PARENT subclass (not nestable)
+ * 6            RTBITMAP subclass (not nestable)
+ * 7            RTSUM subclass (not nestable)
+ *
  */
-#define XFS_LOCK_PARENT         1
-#define XFS_LOCK_RTBITMAP       2
-#define XFS_LOCK_RTSUM          3
-#define XFS_LOCK_INUMORDER      4
-
-#define XFS_IOLOCK_SHIFT        16
-#define XFS_IOLOCK_PARENT       (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
-
-#define XFS_MMAPLOCK_SHIFT      20
-
-#define XFS_ILOCK_SHIFT         24
-#define XFS_ILOCK_PARENT        (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
-#define XFS_ILOCK_RTBITMAP      (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
-#define XFS_ILOCK_RTSUM         (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
-
-#define XFS_IOLOCK_DEP_MASK     0x000f0000
-#define XFS_MMAPLOCK_DEP_MASK   0x00f00000
-#define XFS_ILOCK_DEP_MASK      0xff000000
-#define XFS_LOCK_DEP_MASK       (XFS_IOLOCK_DEP_MASK | \
+#define XFS_IOLOCK_SHIFT                16
+#define XFS_IOLOCK_PARENT_VAL           4
+#define XFS_IOLOCK_MAX_SUBCLASS         (XFS_IOLOCK_PARENT_VAL - 1)
+#define XFS_IOLOCK_DEP_MASK             0x000f0000
+#define XFS_IOLOCK_PARENT               (XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)
+
+#define XFS_MMAPLOCK_SHIFT              20
+#define XFS_MMAPLOCK_NUMORDER           0
+#define XFS_MMAPLOCK_MAX_SUBCLASS       3
+#define XFS_MMAPLOCK_DEP_MASK           0x00f00000
+
+#define XFS_ILOCK_SHIFT                 24
+#define XFS_ILOCK_PARENT_VAL            5
+#define XFS_ILOCK_MAX_SUBCLASS          (XFS_ILOCK_PARENT_VAL - 1)
+#define XFS_ILOCK_RTBITMAP_VAL          6
+#define XFS_ILOCK_RTSUM_VAL             7
+#define XFS_ILOCK_DEP_MASK              0xff000000
+#define XFS_ILOCK_PARENT                (XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)
+#define XFS_ILOCK_RTBITMAP              (XFS_ILOCK_RTBITMAP_VAL << XFS_ILOCK_SHIFT)
+#define XFS_ILOCK_RTSUM                 (XFS_ILOCK_RTSUM_VAL << XFS_ILOCK_SHIFT)
+
+#define XFS_LOCK_SUBCLASS_MASK  (XFS_IOLOCK_DEP_MASK | \
                                  XFS_MMAPLOCK_DEP_MASK | \
                                  XFS_ILOCK_DEP_MASK)
 
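With only MAX_LOCKDEP_SUBCLASSES (8) values available per lock class, each inode lock gets its own region of the flags word and a small subclass number packed into it with the shifts above. A standalone userspace check of that encoding, reusing the values introduced in this header:

```c
#include <stdio.h>

#define MAX_LOCKDEP_SUBCLASSES  8UL

#define XFS_IOLOCK_SHIFT        16
#define XFS_IOLOCK_PARENT_VAL   4
#define XFS_IOLOCK_DEP_MASK     0x000f0000
#define XFS_IOLOCK_PARENT       (XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)

#define XFS_ILOCK_SHIFT         24
#define XFS_ILOCK_PARENT_VAL    5
#define XFS_ILOCK_RTSUM_VAL     7
#define XFS_ILOCK_DEP_MASK      0xff000000
#define XFS_ILOCK_PARENT        (XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)

int main(void)
{
        /* e.g. the third inode locked by xfs_lock_inodes() gets subclass 2 */
        int subclass = 2;
        unsigned int class = subclass << XFS_ILOCK_SHIFT;

        printf("ILOCK subclass 2 encodes as 0x%08x\n", class);
        printf("XFS_ILOCK_PARENT encodes as 0x%08x (subclass %u)\n",
               (unsigned int)XFS_ILOCK_PARENT,
               (unsigned int)((XFS_ILOCK_PARENT & XFS_ILOCK_DEP_MASK) >>
                              XFS_ILOCK_SHIFT));
        printf("XFS_IOLOCK_PARENT encodes as 0x%08x (subclass %u)\n",
               (unsigned int)XFS_IOLOCK_PARENT,
               (unsigned int)((XFS_IOLOCK_PARENT & XFS_IOLOCK_DEP_MASK) >>
                              XFS_IOLOCK_SHIFT));

        /* every subclass value must stay below MAX_LOCKDEP_SUBCLASSES */
        printf("all ILOCK subclasses < 8: %d\n",
               XFS_ILOCK_RTSUM_VAL < MAX_LOCKDEP_SUBCLASSES);
        return 0;
}
```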
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 2e40f5e3cdf2..512a0945d52a 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1895,15 +1895,25 @@ xlog_recover_get_buf_lsn(
                  */
                 goto recover_immediately;
         case XFS_SB_MAGIC:
+                /*
+                 * superblock uuids are magic. We may or may not have a
+                 * sb_meta_uuid on disk, but it will be set in the in-core
+                 * superblock. We set the uuid pointer for verification
+                 * according to the superblock feature mask to ensure we check
+                 * the relevant UUID in the superblock.
+                 */
                 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
-                uuid = &((struct xfs_dsb *)blk)->sb_uuid;
+                if (xfs_sb_version_hasmetauuid(&mp->m_sb))
+                        uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
+                else
+                        uuid = &((struct xfs_dsb *)blk)->sb_uuid;
                 break;
         default:
                 break;
         }
 
         if (lsn != (xfs_lsn_t)-1) {
-                if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+                if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
                         goto recover_immediately;
                 return lsn;
         }
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 1fb16562c159..f98ce83b7bc4 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1528,6 +1528,10 @@ xfs_fs_fill_super(
                 }
         }
 
+        if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+                xfs_alert(mp,
+        "EXPERIMENTAL sparse inode feature enabled. Use at your own risk!");
+
         error = xfs_mountfs(mp);
         if (error)
                 goto out_filestream_unmount;
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 05c44bf51b5f..996481eeb491 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -240,7 +240,8 @@ xfs_symlink(
         if (error)
                 goto out_trans_cancel;
 
-        xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
+        xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
+                      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
         unlock_dp_on_error = true;
 
         /*
@@ -288,7 +289,7 @@ xfs_symlink(
          * the transaction cancel unlocking dp so don't do it explicitly in the
          * error path.
          */
-        xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+        xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
         unlock_dp_on_error = false;
 
         /*
@@ -421,7 +422,7 @@ out_release_inode:
         xfs_qm_dqrele(pdqp);
 
         if (unlock_dp_on_error)
-                xfs_iunlock(dp, XFS_ILOCK_EXCL);
+                xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
         return error;
 }
 
427 | 428 | ||