Diffstat (limited to 'fs/xfs/xfs_iget.c')
-rw-r--r--  fs/xfs/xfs_iget.c | 251 ----------------------------------------
1 file changed, 0 insertions(+), 251 deletions(-)
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 069c5ceb9459..ea9a5fa49a48 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -453,254 +453,3 @@ out_error_or_again:
 	return error;
 }
 
-/*
- * This is a wrapper routine around the xfs_ilock() routine
- * used to centralize some grungy code.  It is used in places
- * that wish to lock the inode solely for reading the extents.
- * The reason these places can't just call xfs_ilock(SHARED)
- * is that the inode lock also guards the bringing in of the
- * extents from disk for a file in b-tree format.  If the inode
- * is in b-tree format, then we need to lock the inode exclusively
- * until the extents are read in.  Locking it exclusively all
- * the time would limit our parallelism unnecessarily, though.
- * What we do instead is check to see if the extents have been
- * read in yet, and only lock the inode exclusively if they
- * have not.
- *
- * The function returns a value which should be given to the
- * corresponding xfs_iunlock_map_shared().  This value is
- * the mode in which the lock was actually taken.
- */
-uint
-xfs_ilock_map_shared(
-	xfs_inode_t	*ip)
-{
-	uint	lock_mode;
-
-	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
-	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
-		lock_mode = XFS_ILOCK_EXCL;
-	} else {
-		lock_mode = XFS_ILOCK_SHARED;
-	}
-
-	xfs_ilock(ip, lock_mode);
-
-	return lock_mode;
-}
-
-/*
- * This is simply the unlock routine to go with xfs_ilock_map_shared().
- * All it does is call xfs_iunlock() with the given lock_mode.
- */
-void
-xfs_iunlock_map_shared(
-	xfs_inode_t	*ip,
-	unsigned int	lock_mode)
-{
-	xfs_iunlock(ip, lock_mode);
-}
-
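For context, the usual caller pattern for this pair (an illustrative sketch, not code from this file) keeps the granted mode around so the unlock matches whichever lock was actually taken:

	/*
	 * Hypothetical caller sketch: lock for extent reads, remembering
	 * the mode that was actually granted so the unlock matches it.
	 */
	uint	lock_mode;

	lock_mode = xfs_ilock_map_shared(ip);	/* EXCL only if extents not read in */
	/* ... read the in-core extent list here ... */
	xfs_iunlock_map_shared(ip, lock_mode);	/* drop the same mode */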
-/*
- * The xfs inode contains 2 locks: a multi-reader lock called the
- * i_iolock and a multi-reader lock called the i_lock.  This routine
- * allows either or both of the locks to be obtained.
- *
- * The 2 locks should always be ordered so that the IO lock is
- * obtained first in order to prevent deadlock.
- *
- * ip -- the inode being locked
- * lock_flags -- this parameter indicates the inode's locks
- *       to be locked.  It can be:
- *		XFS_IOLOCK_SHARED,
- *		XFS_IOLOCK_EXCL,
- *		XFS_ILOCK_SHARED,
- *		XFS_ILOCK_EXCL,
- *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
- *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
- *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
- *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
- */
-void
-xfs_ilock(
-	xfs_inode_t		*ip,
-	uint			lock_flags)
-{
-	/*
-	 * You can't set both SHARED and EXCL for the same lock,
-	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
-	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
-	 */
-	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
-	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
-	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
-	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-
-	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-
-	if (lock_flags & XFS_ILOCK_EXCL)
-		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
-	else if (lock_flags & XFS_ILOCK_SHARED)
-		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
-
-	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
-}
-
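To make the flag combinations above concrete, here is a hypothetical caller (not from this file) taking and dropping both locks in one call; xfs_ilock() itself acquires i_iolock before i_lock, so the documented IO-then-inode ordering holds even for a combined request:

	/*
	 * Illustrative sketch: both locks taken at once, released with
	 * the same flags so xfs_iunlock() knows what to drop.
	 */
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	/* ... update both file data and inode metadata ... */
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);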
-/*
- * This is just like xfs_ilock(), except that the caller
- * is guaranteed not to sleep.  It returns 1 if it gets
- * the requested locks and 0 otherwise.  If the IO lock is
- * obtained but the inode lock cannot be, then the IO lock
- * is dropped before returning.
- *
- * ip -- the inode being locked
- * lock_flags -- this parameter indicates the inode's locks
- *       to be locked.  See the comment for xfs_ilock() for a list
- *       of valid values.
- */
-int
-xfs_ilock_nowait(
-	xfs_inode_t		*ip,
-	uint			lock_flags)
-{
-	/*
-	 * You can't set both SHARED and EXCL for the same lock,
-	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
-	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
-	 */
-	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
-	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
-	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
-	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-
-	if (lock_flags & XFS_IOLOCK_EXCL) {
-		if (!mrtryupdate(&ip->i_iolock))
-			goto out;
-	} else if (lock_flags & XFS_IOLOCK_SHARED) {
-		if (!mrtryaccess(&ip->i_iolock))
-			goto out;
-	}
-	if (lock_flags & XFS_ILOCK_EXCL) {
-		if (!mrtryupdate(&ip->i_lock))
-			goto out_undo_iolock;
-	} else if (lock_flags & XFS_ILOCK_SHARED) {
-		if (!mrtryaccess(&ip->i_lock))
-			goto out_undo_iolock;
-	}
-	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
-	return 1;
-
- out_undo_iolock:
-	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrunlock_excl(&ip->i_iolock);
-	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mrunlock_shared(&ip->i_iolock);
- out:
-	return 0;
-}
-
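A sketch of the usual trylock pattern (hypothetical caller, not from this file): bail out instead of sleeping when the lock is contended, e.g. from a context that already holds other locks:

	/*
	 * Hypothetical non-blocking caller: skip this inode rather than
	 * sleep, and let the caller retry later.
	 */
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;	/* XFS of this era used positive errnos internally */
	/* ... work that needed the inode lock ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);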
-/*
- * xfs_iunlock() is used to drop the inode locks acquired with
- * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
- * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
- * that we know which locks to drop.
- *
- * ip -- the inode being unlocked
- * lock_flags -- this parameter indicates the inode's locks
- *       to be unlocked.  See the comment for xfs_ilock() for a list
- *       of valid values for this parameter.
- */
-void
-xfs_iunlock(
-	xfs_inode_t		*ip,
-	uint			lock_flags)
-{
-	/*
-	 * You can't set both SHARED and EXCL for the same lock,
-	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
-	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
-	 */
-	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
-	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
-	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
-	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-	ASSERT(lock_flags != 0);
-
-	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrunlock_excl(&ip->i_iolock);
-	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mrunlock_shared(&ip->i_iolock);
-
-	if (lock_flags & XFS_ILOCK_EXCL)
-		mrunlock_excl(&ip->i_lock);
-	else if (lock_flags & XFS_ILOCK_SHARED)
-		mrunlock_shared(&ip->i_lock);
-
-	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
-}
-
-/*
- * Give up write locks.  The I/O lock cannot be held nested
- * if it is being demoted.
- */
-void
-xfs_ilock_demote(
-	xfs_inode_t		*ip,
-	uint			lock_flags)
-{
-	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
-
-	if (lock_flags & XFS_ILOCK_EXCL)
-		mrdemote(&ip->i_lock);
-	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrdemote(&ip->i_iolock);
-
-	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
-}
-
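As an illustration (hypothetical caller), demotion lets a writer finish its exclusive update and keep reading without a full unlock/relock cycle:

	/*
	 * Hypothetical sketch: downgrade exclusive to shared once the
	 * modification is done, keeping the lock held throughout.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/* ... change that required exclusive access ... */
	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);	/* now held shared */
	/* ... keep reading under the shared lock ... */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);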
-#ifdef DEBUG
-int
-xfs_isilocked(
-	xfs_inode_t		*ip,
-	uint			lock_flags)
-{
-	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
-		if (!(lock_flags & XFS_ILOCK_SHARED))
-			return !!ip->i_lock.mr_writer;
-		return rwsem_is_locked(&ip->i_lock.mr_lock);
-	}
-
-	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
-		if (!(lock_flags & XFS_IOLOCK_SHARED))
-			return !!ip->i_iolock.mr_writer;
-		return rwsem_is_locked(&ip->i_iolock.mr_lock);
-	}
-
-	ASSERT(0);
-	return 0;
-}
-#endif
-
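xfs_isilocked() exists so DEBUG builds can assert locking preconditions. A typical use at the top of a function that requires the inode lock held exclusively (illustrative; ASSERT() compiles away in non-DEBUG builds):

	/* Illustrative precondition check for a function needing the ilock. */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));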
-void
-__xfs_iflock(
-	struct xfs_inode	*ip)
-{
-	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
-	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
-
-	do {
-		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
-		if (xfs_isiflocked(ip))
-			io_schedule();
-	} while (!xfs_iflock_nowait(ip));
-
-	finish_wait(wq, &wait.wait);
-}
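__xfs_iflock() is only the sleeping slow path of the inode flush lock. Its fast-path wrapper lives in xfs_inode.h and, for kernels of this vintage, looks roughly like the sketch below; treat it as a paraphrase rather than a verbatim quote:

	/* Sketch of the xfs_iflock() fast-path wrapper (from xfs_inode.h). */
	static inline void xfs_iflock(struct xfs_inode *ip)
	{
		if (!xfs_iflock_nowait(ip))	/* fast path: try the flag bit */
			__xfs_iflock(ip);	/* slow path: sleep on the bit */
	}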