author		Joel Becker <Joel.Becker@oracle.com>	2008-12-17 17:23:52 -0500
committer	Mark Fasheh <mfasheh@suse.com>	2009-02-02 17:20:18 -0500
commit		0e0333429a6280e6eb3c98845e4eed90d5f8078a (patch)
tree		5c04f9892c52faedfaa5b879a23f96bf77d02953 /fs/configfs
parent		f8afead7169f0f28a4b421bcbdb510e52a2d094d (diff)
configfs: Silence lockdep on mkdir(), rmdir() and configfs_depend_item()
When attaching default groups (subdirs) of a new group (in mkdir() or in configfs_register()), configfs recursively takes the inodes' mutexes along the path from the parent of the new group to the default subdirs. This is needed to ensure that the VFS will not race with operations on these sub-dirs. This is safe for the following reasons:

- the VFS allows one to lock first an inode and second one of its children (the lock subclasses for this pattern are respectively I_MUTEX_PARENT and I_MUTEX_CHILD);
- from this rule, any inode path can be recursively locked in descending order as long as it stays under a single mountpoint and does not follow symlinks.

Unfortunately lockdep does not know (yet?) how to handle such recursion. I've tried to use Peter Zijlstra's lock_set_subclass() helper to upgrade i_mutexes from I_MUTEX_CHILD to I_MUTEX_PARENT when we know that we might recursively lock some of their descendants, but this usage does not seem to fit the purpose of lock_set_subclass() because it leads to several i_mutexes locked with subclass I_MUTEX_PARENT by the same task.

From inside configfs it is not possible to serialize that recursive locking with a top-level one, because mkdir() and rmdir() are already called with inodes locked by the VFS. So using some mutex_lock_nest_lock() is not an option.

I am proposing two solutions:
1) one that wraps the recursive mutex_lock()s with lockdep_off()/lockdep_on();
2) (as suggested earlier by Peter Zijlstra) one that puts the recursively locked i_mutexes in different classes based on their depth from the top-level config_group created. This induces an arbitrary limit (MAX_LOCK_DEPTH - 2 == 46) on the nesting of configfs default groups whenever lockdep is activated, but this limit looks reasonably high. Unfortunately, this also isolates VFS operations on configfs default groups from the others and thus lowers the chances to detect locking issues.

This patch implements solution 1).

Solution 2) looks better from lockdep's point of view, but fails with configfs_depend_item(). It would require reworking the locking scheme of configfs_depend_item() by removing the variable lock recursion depth, and I think that is doable thanks to the configfs_dirent_lock. For now, let's stick to solution 1).

Signed-off-by: Louis Rilling <louis.rilling@kerlabs.com>
Acked-by: Joel Becker <joel.becker@oracle.com>
Signed-off-by: Mark Fasheh <mfasheh@suse.com>
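For reference, every locking site touched below applies the same pattern: the child inode's i_mutex is still taken for real, but the recursive I_MUTEX_PARENT -> I_MUTEX_CHILD nesting is hidden from lockdep around the acquisition. A minimal sketch of that pattern, using hypothetical helper names (the patch itself open-codes this at each call site):

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

/* Hypothetical helpers; not part of the patch, which open-codes this pattern. */
static inline void configfs_example_lock_child(struct dentry *child)
{
	lockdep_off();		/* suspend lockdep tracking for the current task */
	mutex_lock(&child->d_inode->i_mutex);
	lockdep_on();		/* resume tracking for everything that follows */
}

static inline void configfs_example_unlock_child(struct dentry *child)
{
	lockdep_off();
	mutex_unlock(&child->d_inode->i_mutex);
	lockdep_on();
}

The mutexes are acquired and released normally; lockdep_off()/lockdep_on() only suppress dependency tracking for the current task between the two calls, so the recursive i_mutex chain never reaches lockdep's validator.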
Diffstat (limited to 'fs/configfs')
-rw-r--r--	fs/configfs/dir.c	59
1 files changed, 59 insertions, 0 deletions
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 8e93341f3e82..9c2358391147 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -553,12 +553,24 @@ static void detach_groups(struct config_group *group)
 
 		child = sd->s_dentry;
 
+		/*
+		 * Note: we hide this from lockdep since we have no way
+		 * to teach lockdep about recursive
+		 * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
+		 * in an inode tree, which are valid as soon as
+		 * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
+		 * parent inode to one of its children.
+		 */
+		lockdep_off();
 		mutex_lock(&child->d_inode->i_mutex);
+		lockdep_on();
 
 		configfs_detach_group(sd->s_element);
 		child->d_inode->i_flags |= S_DEAD;
 
+		lockdep_off();
 		mutex_unlock(&child->d_inode->i_mutex);
+		lockdep_on();
 
 		d_delete(child);
 		dput(child);
@@ -748,11 +760,22 @@ static int configfs_attach_item(struct config_item *parent_item,
 		 * We are going to remove an inode and its dentry but
 		 * the VFS may already have hit and used them. Thus,
 		 * we must lock them as rmdir() would.
+		 *
+		 * Note: we hide this from lockdep since we have no way
+		 * to teach lockdep about recursive
+		 * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
+		 * in an inode tree, which are valid as soon as
+		 * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
+		 * parent inode to one of its children.
 		 */
+		lockdep_off();
 		mutex_lock(&dentry->d_inode->i_mutex);
+		lockdep_on();
 		configfs_remove_dir(item);
 		dentry->d_inode->i_flags |= S_DEAD;
+		lockdep_off();
 		mutex_unlock(&dentry->d_inode->i_mutex);
+		lockdep_on();
 		d_delete(dentry);
 	}
 }
@@ -787,14 +810,25 @@ static int configfs_attach_group(struct config_item *parent_item,
 		 *
 		 * We must also lock the inode to remove it safely in case of
 		 * error, as rmdir() would.
+		 *
+		 * Note: we hide this from lockdep since we have no way
+		 * to teach lockdep about recursive
+		 * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
+		 * in an inode tree, which are valid as soon as
+		 * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
+		 * parent inode to one of its children.
 		 */
+		lockdep_off();
 		mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
+		lockdep_on();
 		ret = populate_groups(to_config_group(item));
 		if (ret) {
 			configfs_detach_item(item);
 			dentry->d_inode->i_flags |= S_DEAD;
 		}
+		lockdep_off();
 		mutex_unlock(&dentry->d_inode->i_mutex);
+		lockdep_on();
 		if (ret)
 			d_delete(dentry);
 	}
@@ -956,7 +990,17 @@ static int configfs_depend_prep(struct dentry *origin,
 	BUG_ON(!origin || !sd);
 
 	/* Lock this guy on the way down */
+	/*
+	 * Note: we hide this from lockdep since we have no way
+	 * to teach lockdep about recursive
+	 * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
+	 * in an inode tree, which are valid as soon as
+	 * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
+	 * parent inode to one of its children.
+	 */
+	lockdep_off();
 	mutex_lock(&sd->s_dentry->d_inode->i_mutex);
+	lockdep_on();
 	if (sd->s_element == target)	/* Boo-yah */
 		goto out;
 
@@ -970,7 +1014,9 @@ static int configfs_depend_prep(struct dentry *origin,
 	}
 
 	/* We looped all our children and didn't find target */
+	lockdep_off();
 	mutex_unlock(&sd->s_dentry->d_inode->i_mutex);
+	lockdep_on();
 	ret = -ENOENT;
 
 out:
@@ -990,11 +1036,16 @@ static void configfs_depend_rollback(struct dentry *origin,
 	struct dentry *dentry = item->ci_dentry;
 
 	while (dentry != origin) {
+		/* See comments in configfs_depend_prep() */
+		lockdep_off();
 		mutex_unlock(&dentry->d_inode->i_mutex);
+		lockdep_on();
 		dentry = dentry->d_parent;
 	}
 
+	lockdep_off();
 	mutex_unlock(&origin->d_inode->i_mutex);
+	lockdep_on();
 }
 
 int configfs_depend_item(struct configfs_subsystem *subsys,
@@ -1329,8 +1380,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
 			}
 
 			/* Wait until the racing operation terminates */
+			/*
+			 * Note: we hide this from lockdep since we are locked
+			 * with subclass I_MUTEX_NORMAL from vfs_rmdir() (why
+			 * not I_MUTEX_CHILD?), and I_MUTEX_XATTR or
+			 * I_MUTEX_QUOTA are not relevant for the locked inode.
+			 */
+			lockdep_off();
 			mutex_lock(wait_mutex);
 			mutex_unlock(wait_mutex);
+			lockdep_on();
 		}
 	} while (ret == -EAGAIN);
 