author     Christoph Hellwig <hch@lst.de>      2016-11-29 22:33:25 -0500
committer  Dave Chinner <david@fromorbit.com>  2016-11-29 22:33:25 -0500
commit     6552321831dce87ff5c466a55b58d472732caadc
tree       84f3de7b89690c84fd13e2efa7a85d4918d342e3
parent     f8319483f57f1ca22370f4150bb990aca7728a67
xfs: remove i_iolock and use i_rwsem in the VFS inode instead
This patch drops XFS's own i_iolock and instead uses the VFS i_rwsem, which recently replaced i_mutex. This means we only have to take one lock instead of two in many fast path operations, and we can also shrink the xfs_inode structure. Thanks to the xfs_ilock family there is very little churn; the only thing of note is that in a few places we need to switch to the lock_two_nondirectories() helper for taking the i_rwsem on two inodes, to make sure our lock order matches the one used in the VFS.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
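The lock-order point above is the subtle part of the change: once the IO lock is the VFS i_rwsem, any place that locks two inodes must acquire the two rwsems in the same canonical order the VFS helper picks, or an ABBA deadlock against concurrent VFS callers becomes possible. Below is a minimal userspace sketch of that idea, using pthread rwlocks in place of i_rwsem. It is not kernel code and not part of this patch; fake_inode, lock_pair() and unlock_pair() are made-up illustrative names, and the sketch orders by a fake inode number purely for demonstration.

/* Sketch: always lock a pair of "inodes" in one agreed order, so that
 * callers locking (a, b) and (b, a) concurrently cannot deadlock.
 * Build with: cc -pthread order.c -o order
 */
#include <pthread.h>
#include <stdio.h>

struct fake_inode {
	unsigned long		ino;	/* stand-in for an inode identity */
	pthread_rwlock_t	rwsem;	/* stand-in for inode->i_rwsem */
};

/* Take both rwsems for exclusive ("write") access, lower ino first. */
static void lock_pair(struct fake_inode *a, struct fake_inode *b)
{
	if (a->ino > b->ino) {
		struct fake_inode *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_rwlock_wrlock(&a->rwsem);
	if (a != b)
		pthread_rwlock_wrlock(&b->rwsem);
}

static void unlock_pair(struct fake_inode *a, struct fake_inode *b)
{
	pthread_rwlock_unlock(&a->rwsem);
	if (a != b)
		pthread_rwlock_unlock(&b->rwsem);
}

int main(void)
{
	struct fake_inode src  = { .ino = 42, .rwsem = PTHREAD_RWLOCK_INITIALIZER };
	struct fake_inode dest = { .ino =  7, .rwsem = PTHREAD_RWLOCK_INITIALIZER };

	/* Same acquisition order regardless of argument order. */
	lock_pair(&src, &dest);
	printf("both inodes locked in canonical order\n");
	unlock_pair(&src, &dest);
	return 0;
}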
 fs/xfs/xfs_aops.c         |  7
 fs/xfs/xfs_bmap_util.c    | 12
 fs/xfs/xfs_dir2_readdir.c |  2
 fs/xfs/xfs_file.c         | 79
 fs/xfs/xfs_icache.c       |  6
 fs/xfs/xfs_inode.c        | 82
 fs/xfs/xfs_inode.h        |  7
 fs/xfs/xfs_ioctl.c        |  2
 fs/xfs/xfs_iops.c         | 14
 fs/xfs/xfs_pnfs.c         |  7
 fs/xfs/xfs_pnfs.h         |  4
 fs/xfs/xfs_reflink.c      | 14
 fs/xfs/xfs_super.c        |  2
 fs/xfs/xfs_symlink.c      |  7
 14 files changed, 86 insertions(+), 159 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index ab266d66124d..e8f6c2bcd4a4 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1585,7 +1585,6 @@ xfs_vm_bmap(
 	struct xfs_inode	*ip = XFS_I(inode);
 
 	trace_xfs_vm_bmap(XFS_I(inode));
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
 	/*
 	 * The swap code (ab-)uses ->bmap to get a block mapping and then
@@ -1593,12 +1592,10 @@ xfs_vm_bmap(
 	 * that on reflinks inodes, so we have to skip out here.  And yes,
 	 * 0 is the magic code for a bmap error..
 	 */
-	if (xfs_is_reflink_inode(ip)) {
-		xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+	if (xfs_is_reflink_inode(ip))
 		return 0;
-	}
+
 	filemap_write_and_wait(mapping);
-	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 	return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 0670a8bd5818..b9abce524c33 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1935,8 +1935,8 @@ xfs_swap_extents(
 	 * page cache safely. Once we have done this we can take the ilocks and
 	 * do the rest of the checks.
 	 */
-	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
-	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
+	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
+	lock_flags = XFS_MMAPLOCK_EXCL;
 	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
 
 	/* Verify that both files have the same format */
@@ -2076,15 +2076,13 @@ xfs_swap_extents(
 	trace_xfs_swap_extent_after(ip, 0);
 	trace_xfs_swap_extent_after(tip, 1);
 
+out_unlock:
 	xfs_iunlock(ip, lock_flags);
 	xfs_iunlock(tip, lock_flags);
+	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
 	return error;
 
 out_trans_cancel:
 	xfs_trans_cancel(tp);
-
-out_unlock:
-	xfs_iunlock(ip, lock_flags);
-	xfs_iunlock(tip, lock_flags);
-	return error;
+	goto out_unlock;
 }
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 29816981b50a..003a99b83bd8 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -677,7 +677,6 @@ xfs_readdir(
 	args.dp = dp;
 	args.geo = dp->i_mount->m_dir_geo;
 
-	xfs_ilock(dp, XFS_IOLOCK_SHARED);
 	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
 		rval = xfs_dir2_sf_getdents(&args, ctx);
 	else if ((rval = xfs_dir2_isblock(&args, &v)))
@@ -686,7 +685,6 @@ xfs_readdir(
 		rval = xfs_dir2_block_getdents(&args, ctx);
 	else
 		rval = xfs_dir2_leaf_getdents(&args, ctx, bufsize);
-	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
 
 	return rval;
 }
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index d818c160451f..d054b73b56fb 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -48,40 +48,6 @@
 static const struct vm_operations_struct xfs_file_vm_ops;
 
 /*
- * Locking primitives for read and write IO paths to ensure we consistently use
- * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
- */
-static inline void
-xfs_rw_ilock(
-	struct xfs_inode	*ip,
-	int			type)
-{
-	if (type & XFS_IOLOCK_EXCL)
-		inode_lock(VFS_I(ip));
-	xfs_ilock(ip, type);
-}
-
-static inline void
-xfs_rw_iunlock(
-	struct xfs_inode	*ip,
-	int			type)
-{
-	xfs_iunlock(ip, type);
-	if (type & XFS_IOLOCK_EXCL)
-		inode_unlock(VFS_I(ip));
-}
-
-static inline void
-xfs_rw_ilock_demote(
-	struct xfs_inode	*ip,
-	int			type)
-{
-	xfs_ilock_demote(ip, type);
-	if (type & XFS_IOLOCK_EXCL)
-		inode_unlock(VFS_I(ip));
-}
-
-/*
  * Clear the specified ranges to zero through either the pagecache or DAX.
  * Holes and unwritten extents will be left as-is as they already are zeroed.
  */
@@ -273,7 +239,7 @@ xfs_file_dio_aio_read(
 
 	file_accessed(iocb->ki_filp);
 
-	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	if (mapping->nrpages) {
 		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
 		if (ret)
@@ -299,7 +265,7 @@ xfs_file_dio_aio_read(
 	}
 
 out_unlock:
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
 
@@ -317,9 +283,9 @@ xfs_file_dax_read(
 	if (!count)
 		return 0; /* skip atime */
 
-	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	file_accessed(iocb->ki_filp);
 	return ret;
@@ -335,9 +301,9 @@ xfs_file_buffered_aio_read(
 
 	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
 
-	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	ret = generic_file_read_iter(iocb, to);
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	return ret;
 }
@@ -418,15 +384,18 @@ restart:
 	if (error <= 0)
 		return error;
 
-	error = xfs_break_layouts(inode, iolock, true);
+	error = xfs_break_layouts(inode, iolock);
 	if (error)
 		return error;
 
-	/* For changing security info in file_remove_privs() we need i_mutex */
+	/*
+	 * For changing security info in file_remove_privs() we need i_rwsem
+	 * exclusively.
+	 */
 	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
-		xfs_rw_iunlock(ip, *iolock);
+		xfs_iunlock(ip, *iolock);
 		*iolock = XFS_IOLOCK_EXCL;
-		xfs_rw_ilock(ip, *iolock);
+		xfs_ilock(ip, *iolock);
 		goto restart;
 	}
 	/*
@@ -451,9 +420,9 @@ restart:
 		spin_unlock(&ip->i_flags_lock);
 		if (!drained_dio) {
 			if (*iolock == XFS_IOLOCK_SHARED) {
-				xfs_rw_iunlock(ip, *iolock);
+				xfs_iunlock(ip, *iolock);
 				*iolock = XFS_IOLOCK_EXCL;
-				xfs_rw_ilock(ip, *iolock);
+				xfs_ilock(ip, *iolock);
 				iov_iter_reexpand(from, count);
 			}
 			/*
@@ -559,7 +528,7 @@ xfs_file_dio_aio_write(
 		iolock = XFS_IOLOCK_SHARED;
 	}
 
-	xfs_rw_ilock(ip, iolock);
+	xfs_ilock(ip, iolock);
 
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
@@ -591,7 +560,7 @@ xfs_file_dio_aio_write(
 	if (unaligned_io)
 		inode_dio_wait(inode);
 	else if (iolock == XFS_IOLOCK_EXCL) {
-		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 		iolock = XFS_IOLOCK_SHARED;
 	}
 
@@ -621,7 +590,7 @@ xfs_file_dio_aio_write(
 		iov_iter_advance(from, ret);
 	}
 out:
-	xfs_rw_iunlock(ip, iolock);
+	xfs_iunlock(ip, iolock);
 
 	/*
 	 * No fallback to buffered IO on errors for XFS, direct IO will either
@@ -643,7 +612,7 @@ xfs_file_dax_write(
 	size_t			count;
 	loff_t			pos;
 
-	xfs_rw_ilock(ip, iolock);
+	xfs_ilock(ip, iolock);
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
 		goto out;
@@ -652,15 +621,13 @@ xfs_file_dax_write(
 	count = iov_iter_count(from);
 
 	trace_xfs_file_dax_write(ip, count, pos);
-
 	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 		i_size_write(inode, iocb->ki_pos);
 		error = xfs_setfilesize(ip, pos, ret);
 	}
-
 out:
-	xfs_rw_iunlock(ip, iolock);
+	xfs_iunlock(ip, iolock);
 	return error ? error : ret;
 }
 
@@ -677,7 +644,7 @@ xfs_file_buffered_aio_write(
 	int			enospc = 0;
 	int			iolock = XFS_IOLOCK_EXCL;
 
-	xfs_rw_ilock(ip, iolock);
+	xfs_ilock(ip, iolock);
 
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
@@ -721,7 +688,7 @@ write_retry:
 
 	current->backing_dev_info = NULL;
 out:
-	xfs_rw_iunlock(ip, iolock);
+	xfs_iunlock(ip, iolock);
 	return ret;
 }
 
@@ -797,7 +764,7 @@ xfs_file_fallocate(
 		return -EOPNOTSUPP;
 
 	xfs_ilock(ip, iolock);
-	error = xfs_break_layouts(inode, &iolock, false);
+	error = xfs_break_layouts(inode, &iolock);
 	if (error)
 		goto out_unlock;
 
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 9c3e5c6ddf20..ff4d6311c7f4 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -70,8 +70,6 @@ xfs_inode_alloc(
 	ASSERT(!xfs_isiflocked(ip));
 	ASSERT(ip->i_ino == 0);
 
-	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
-
 	/* initialise the xfs inode */
 	ip->i_ino = ino;
 	ip->i_mount = mp;
@@ -394,8 +392,8 @@ xfs_iget_cache_hit(
 		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
 		inode->i_state = I_NEW;
 
-		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
-		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
+		init_rwsem(&inode->i_rwsem);
 
 		spin_unlock(&ip->i_flags_lock);
 		spin_unlock(&pag->pag_ici_lock);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 4e560e6a12c1..e9ab42d8965b 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -142,31 +142,31 @@ xfs_ilock_attr_map_shared(
 }
 
 /*
- * The xfs inode contains 3 multi-reader locks: the i_iolock the i_mmap_lock and
- * the i_lock.  This routine allows various combinations of the locks to be
- * obtained.
+ * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
+ * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
+ * various combinations of the locks to be obtained.
  *
  * The 3 locks should always be ordered so that the IO lock is obtained first,
  * the mmap lock second and the ilock last in order to prevent deadlock.
  *
  * Basic locking order:
  *
- * i_iolock -> i_mmap_lock -> page_lock -> i_ilock
+ * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
  *
  * mmap_sem locking order:
  *
- * i_iolock -> page lock -> mmap_sem
+ * i_rwsem -> page lock -> mmap_sem
  * mmap_sem -> i_mmap_lock -> page_lock
  *
  * The difference in mmap_sem locking order mean that we cannot hold the
  * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
  * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
  * in get_user_pages() to map the user pages into the kernel address space for
- * direct IO. Similarly the i_iolock cannot be taken inside a page fault because
+ * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
  * page faults already hold the mmap_sem.
  *
  * Hence to serialise fully against both syscall and mmap based IO, we need to
- * take both the i_iolock and the i_mmap_lock. These locks should *only* be both
+ * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
  * taken in places where we need to invalidate the page cache in a race
  * free manner (e.g. truncate, hole punch and other extent manipulation
  * functions).
@@ -191,10 +191,13 @@ xfs_ilock(
 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 
-	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
+	if (lock_flags & XFS_IOLOCK_EXCL) {
+		down_write_nested(&VFS_I(ip)->i_rwsem,
+				  XFS_IOLOCK_DEP(lock_flags));
+	} else if (lock_flags & XFS_IOLOCK_SHARED) {
+		down_read_nested(&VFS_I(ip)->i_rwsem,
+				 XFS_IOLOCK_DEP(lock_flags));
+	}
 
 	if (lock_flags & XFS_MMAPLOCK_EXCL)
 		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
@@ -240,10 +243,10 @@ xfs_ilock_nowait(
 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 
 	if (lock_flags & XFS_IOLOCK_EXCL) {
-		if (!mrtryupdate(&ip->i_iolock))
+		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
 			goto out;
 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
-		if (!mrtryaccess(&ip->i_iolock))
+		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
 			goto out;
 	}
 
@@ -271,9 +274,9 @@ out_undo_mmaplock:
 		mrunlock_shared(&ip->i_mmaplock);
 out_undo_iolock:
 	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrunlock_excl(&ip->i_iolock);
+		up_write(&VFS_I(ip)->i_rwsem);
 	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mrunlock_shared(&ip->i_iolock);
+		up_read(&VFS_I(ip)->i_rwsem);
 out:
 	return 0;
 }
@@ -310,9 +313,9 @@ xfs_iunlock(
 	ASSERT(lock_flags != 0);
 
 	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrunlock_excl(&ip->i_iolock);
+		up_write(&VFS_I(ip)->i_rwsem);
 	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mrunlock_shared(&ip->i_iolock);
+		up_read(&VFS_I(ip)->i_rwsem);
 
 	if (lock_flags & XFS_MMAPLOCK_EXCL)
 		mrunlock_excl(&ip->i_mmaplock);
@@ -345,7 +348,7 @@ xfs_ilock_demote(
 	if (lock_flags & XFS_MMAPLOCK_EXCL)
 		mrdemote(&ip->i_mmaplock);
 	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrdemote(&ip->i_iolock);
+		downgrade_write(&VFS_I(ip)->i_rwsem);
 
 	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
 }
@@ -370,8 +373,9 @@ xfs_isilocked(
 
 	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
 		if (!(lock_flags & XFS_IOLOCK_SHARED))
-			return !!ip->i_iolock.mr_writer;
-		return rwsem_is_locked(&ip->i_iolock.mr_lock);
+			return !debug_locks ||
+				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
+		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
 	}
 
 	ASSERT(0);
@@ -421,11 +425,7 @@ xfs_lock_inumorder(int lock_mode, int subclass)
 
 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
 		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
-		ASSERT(xfs_lockdep_subclass_ok(subclass +
-						XFS_IOLOCK_PARENT_VAL));
 		class += subclass << XFS_IOLOCK_SHIFT;
-		if (lock_mode & XFS_IOLOCK_PARENT)
-			class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
 	}
 
 	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
@@ -477,8 +477,6 @@ xfs_lock_inodes(
 			    XFS_ILOCK_EXCL));
 	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
 			      XFS_ILOCK_SHARED)));
-	ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
-		inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
 	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
 		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
 	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
@@ -581,10 +579,8 @@ xfs_lock_two_inodes(
 	int			attempts = 0;
 	xfs_log_item_t		*lp;
 
-	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
-		ASSERT(!(lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
-		ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-	} else if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
+	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
+	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
 		ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 
 	ASSERT(ip0->i_ino != ip1->i_ino);
@@ -715,7 +711,6 @@ xfs_lookup(
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return -EIO;
 
-	xfs_ilock(dp, XFS_IOLOCK_SHARED);
 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
 	if (error)
 		goto out_unlock;
@@ -724,14 +719,12 @@ xfs_lookup(
 	if (error)
 		goto out_free_name;
 
-	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
 	return 0;
 
 out_free_name:
 	if (ci_name)
 		kmem_free(ci_name->name);
 out_unlock:
-	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
 	*ipp = NULL;
 	return error;
 }
@@ -1215,8 +1208,7 @@ xfs_create(
 	if (error)
 		goto out_release_inode;
 
-	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
-		      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
+	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
 	unlock_dp_on_error = true;
 
 	xfs_defer_init(&dfops, &first_block);
@@ -1252,7 +1244,7 @@ xfs_create(
 	 * the transaction cancel unlocking dp so don't do it explicitly in the
 	 * error path.
 	 */
-	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
 	unlock_dp_on_error = false;
 
 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
@@ -1325,7 +1317,7 @@ xfs_create(
 	xfs_qm_dqrele(pdqp);
 
 	if (unlock_dp_on_error)
-		xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
 	return error;
 }
 
@@ -1466,11 +1458,10 @@ xfs_link(
 	if (error)
 		goto std_return;
 
-	xfs_ilock(tdp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
 
 	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, tdp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
 
 	/*
 	 * If we are using project inheritance, we only allow hard link
@@ -2579,10 +2570,9 @@ xfs_remove(
 		goto std_return;
 	}
 
-	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
 
-	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
 	/*
@@ -2963,12 +2953,6 @@ xfs_rename(
 	 * whether the target directory is the same as the source
 	 * directory, we can lock from 2 to 4 inodes.
 	 */
-	if (!new_parent)
-		xfs_ilock(src_dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
-	else
-		xfs_lock_two_inodes(src_dp, target_dp,
-				    XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
-
 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
 
 	/*
@@ -2976,9 +2960,9 @@ xfs_rename(
 	 * we can rely on either trans_commit or trans_cancel to unlock
 	 * them.
 	 */
-	xfs_trans_ijoin(tp, src_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
 	if (new_parent)
-		xfs_trans_ijoin(tp, target_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
 	if (target_ip)
 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 71e8a81c91a3..10dcf27b4c85 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -56,7 +56,6 @@ typedef struct xfs_inode {
 	/* Transaction and locking information. */
 	struct xfs_inode_log_item *i_itemp;	/* logging information */
 	mrlock_t		i_lock;		/* inode lock */
-	mrlock_t		i_iolock;	/* inode IO lock */
 	mrlock_t		i_mmaplock;	/* inode mmap IO lock */
 	atomic_t		i_pincount;	/* inode pin count */
 	spinlock_t		i_flags_lock;	/* inode i_flags lock */
@@ -333,7 +332,7 @@ static inline void xfs_ifunlock(struct xfs_inode *ip)
  * IOLOCK values
  *
  * 0-3		subclass value
- * 4-7		PARENT subclass values
+ * 4-7		unused
  *
  * MMAPLOCK values
  *
@@ -348,10 +347,8 @@ static inline void xfs_ifunlock(struct xfs_inode *ip)
  *
  */
 #define XFS_IOLOCK_SHIFT		16
-#define XFS_IOLOCK_PARENT_VAL		4
-#define XFS_IOLOCK_MAX_SUBCLASS		(XFS_IOLOCK_PARENT_VAL - 1)
+#define XFS_IOLOCK_MAX_SUBCLASS		3
 #define XFS_IOLOCK_DEP_MASK		0x000f0000
-#define XFS_IOLOCK_PARENT		(XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)
 
 #define XFS_MMAPLOCK_SHIFT		20
 #define XFS_MMAPLOCK_NUMORDER		0
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index a39197501a7c..fc563b82aea6 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -639,7 +639,7 @@ xfs_ioc_space(
 		return error;
 
 	xfs_ilock(ip, iolock);
-	error = xfs_break_layouts(inode, &iolock, false);
+	error = xfs_break_layouts(inode, &iolock);
 	if (error)
 		goto out_unlock;
 
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 405a65cd9d6b..c962999a87ab 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -983,15 +983,13 @@ xfs_vn_setattr(
 		struct xfs_inode	*ip = XFS_I(d_inode(dentry));
 		uint			iolock = XFS_IOLOCK_EXCL;
 
-		xfs_ilock(ip, iolock);
-		error = xfs_break_layouts(d_inode(dentry), &iolock, true);
-		if (!error) {
-			xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
-			iolock |= XFS_MMAPLOCK_EXCL;
+		error = xfs_break_layouts(d_inode(dentry), &iolock);
+		if (error)
+			return error;
 
-			error = xfs_vn_setattr_size(dentry, iattr);
-		}
-		xfs_iunlock(ip, iolock);
+		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+		error = xfs_setattr_size(ip, iattr);
+		xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 	} else {
 		error = xfs_vn_setattr_nonsize(dentry, iattr);
 	}
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 93a7aafa56d6..2f2dc3c09ad0 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -32,8 +32,7 @@
 int
 xfs_break_layouts(
 	struct inode		*inode,
-	uint			*iolock,
-	bool			with_imutex)
+	uint			*iolock)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 	int			error;
@@ -42,12 +41,8 @@ xfs_break_layouts(
 
 	while ((error = break_layout(inode, false) == -EWOULDBLOCK)) {
 		xfs_iunlock(ip, *iolock);
-		if (with_imutex && (*iolock & XFS_IOLOCK_EXCL))
-			inode_unlock(inode);
 		error = break_layout(inode, true);
 		*iolock = XFS_IOLOCK_EXCL;
-		if (with_imutex)
-			inode_lock(inode);
 		xfs_ilock(ip, *iolock);
 	}
 
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
index e8339f74966b..b587cb99b2b7 100644
--- a/fs/xfs/xfs_pnfs.h
+++ b/fs/xfs/xfs_pnfs.h
@@ -8,10 +8,10 @@ int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
 int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps,
 		struct iattr *iattr);
 
-int xfs_break_layouts(struct inode *inode, uint *iolock, bool with_imutex);
+int xfs_break_layouts(struct inode *inode, uint *iolock);
 #else
 static inline int
-xfs_break_layouts(struct inode *inode, uint *iolock, bool with_imutex)
+xfs_break_layouts(struct inode *inode, uint *iolock)
 {
 	return 0;
 }
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index becf2465dd23..88fd03c66e99 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1251,13 +1251,11 @@ xfs_reflink_remap_range(
 		return -EIO;
 
 	/* Lock both files against IO */
-	if (same_inode) {
-		xfs_ilock(src, XFS_IOLOCK_EXCL);
+	lock_two_nondirectories(inode_in, inode_out);
+	if (same_inode)
 		xfs_ilock(src, XFS_MMAPLOCK_EXCL);
-	} else {
-		xfs_lock_two_inodes(src, dest, XFS_IOLOCK_EXCL);
+	else
 		xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);
-	}
 
 	/* Don't touch certain kinds of inodes */
 	ret = -EPERM;
@@ -1402,11 +1400,9 @@ xfs_reflink_remap_range(
 
 out_unlock:
 	xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
-	xfs_iunlock(src, XFS_IOLOCK_EXCL);
-	if (src->i_ino != dest->i_ino) {
+	if (!same_inode)
 		xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
-		xfs_iunlock(dest, XFS_IOLOCK_EXCL);
-	}
+	unlock_two_nondirectories(inode_in, inode_out);
 	if (ret)
 		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
 	return ret;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index ade4691e3f74..563d1d146b8c 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -943,7 +943,7 @@ xfs_fs_destroy_inode(
 
 	trace_xfs_destroy_inode(ip);
 
-	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 	XFS_STATS_INC(ip->i_mount, vn_rele);
 	XFS_STATS_INC(ip->i_mount, vn_remove);
 
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 58142aeeeea6..f2cb45ed1d54 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -238,8 +238,7 @@ xfs_symlink(
 	if (error)
 		goto out_release_inode;
 
-	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
-		      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
+	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
 	unlock_dp_on_error = true;
 
 	/*
@@ -287,7 +286,7 @@ xfs_symlink(
 	 * the transaction cancel unlocking dp so don't do it explicitly in the
 	 * error path.
 	 */
-	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
 	unlock_dp_on_error = false;
 
 	/*
@@ -412,7 +411,7 @@ out_release_inode:
 	xfs_qm_dqrele(pdqp);
 
 	if (unlock_dp_on_error)
-		xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
 	return error;
 }
 