author     Linus Torvalds <torvalds@linux-foundation.org>  2009-06-12 21:21:19 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-12 21:21:19 -0400
commit     f3ad116588151b3371ae4e092290e4f48e62b8bb
tree       d652861a54132e35de2a80631f671cd7be0b26f6
parent     d645727bdc2aed8e2e0e9496248f735481b5049a
parent     420118caa32c8ccdf9fce5a623b9de3f951573c5
Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/configfs
* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/configfs:
configfs: Rework configfs_depend_item() locking and make lockdep happy
configfs: Silence lockdep on mkdir() and rmdir()
-rw-r--r--  fs/configfs/configfs_internal.h |   3
-rw-r--r--  fs/configfs/dir.c               | 196
-rw-r--r--  fs/configfs/inode.c             |  38
3 files changed, 178 insertions(+), 59 deletions(-)
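Both patches below rest on one trick: instead of leaving every default-group i_mutex in a single lock class (which lockdep reports as recursive locking when a parent and its default-group child are locked together), each nesting depth gets its own struct lock_class_key. A minimal, self-contained sketch of that pattern follows; it is not part of the patch, the demo_* names are invented for illustration, and only struct lock_class_key, lockdep_set_class() and the mutex API are real kernel interfaces.

/*
 * Sketch only: one lock class per nesting depth, so "lock parent, then
 * lock child of the same kind" is not flagged as a self-deadlock.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>

#define DEMO_MAX_DEPTH	8

static struct lock_class_key demo_class[DEMO_MAX_DEPTH];	/* one key per depth */
static struct mutex demo_lock[DEMO_MAX_DEPTH];

static int __init demo_init(void)
{
	int i;

	for (i = 0; i < DEMO_MAX_DEPTH; i++) {
		mutex_init(&demo_lock[i]);
		/* Same idea as configfs_set_inode_lock_class(): class chosen by depth. */
		lockdep_set_class(&demo_lock[i], &demo_class[i]);
	}

	/* Nested parent-then-child locking is now acceptable to lockdep. */
	for (i = 0; i < DEMO_MAX_DEPTH; i++)
		mutex_lock(&demo_lock[i]);
	for (i = DEMO_MAX_DEPTH - 1; i >= 0; i--)
		mutex_unlock(&demo_lock[i]);

	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The inode.c hunk below applies the same technique to inode->i_mutex, indexing the class array by configfs_dirent->s_depth.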
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 762d287123ca..da6061a6df40 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -39,6 +39,9 @@ struct configfs_dirent {
 	umode_t s_mode;
 	struct dentry * s_dentry;
 	struct iattr * s_iattr;
+#ifdef CONFIG_LOCKDEP
+	int s_depth;
+#endif
 };
 
 #define CONFIGFS_ROOT 0x0001
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 05373db21a4e..8e48b52205aa 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -78,11 +78,97 @@ static const struct dentry_operations configfs_dentry_ops = {
 	.d_delete = configfs_d_delete,
 };
 
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Helpers to make lockdep happy with our recursive locking of default groups'
+ * inodes (see configfs_attach_group() and configfs_detach_group()).
+ * We put default groups i_mutexes in separate classes according to their depth
+ * from the youngest non-default group ancestor.
+ *
+ * For a non-default group A having default groups A/B, A/C, and A/C/D, default
+ * groups A/B and A/C will have their inode's mutex in class
+ * default_group_class[0], and default group A/C/D will be in
+ * default_group_class[1].
+ *
+ * The lock classes are declared and assigned in inode.c, according to the
+ * s_depth value.
+ * The s_depth value is initialized to -1, adjusted to >= 0 when attaching
+ * default groups, and reset to -1 when all default groups are attached. During
+ * attachment, if configfs_create() sees s_depth > 0, the lock class of the new
+ * inode's mutex is set to default_group_class[s_depth - 1].
+ */
+
+static void configfs_init_dirent_depth(struct configfs_dirent *sd)
+{
+	sd->s_depth = -1;
+}
+
+static void configfs_set_dir_dirent_depth(struct configfs_dirent *parent_sd,
+					  struct configfs_dirent *sd)
+{
+	int parent_depth = parent_sd->s_depth;
+
+	if (parent_depth >= 0)
+		sd->s_depth = parent_depth + 1;
+}
+
+static void
+configfs_adjust_dir_dirent_depth_before_populate(struct configfs_dirent *sd)
+{
+	/*
+	 * item's i_mutex class is already setup, so s_depth is now only
+	 * used to set new sub-directories s_depth, which is always done
+	 * with item's i_mutex locked.
+	 */
+	/*
+	 * sd->s_depth == -1 iff we are a non default group.
+	 * else (we are a default group) sd->s_depth > 0 (see
+	 * create_dir()).
+	 */
+	if (sd->s_depth == -1)
+		/*
+		 * We are a non default group and we are going to create
+		 * default groups.
+		 */
+		sd->s_depth = 0;
+}
+
+static void
+configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
+{
+	/* We will not create default groups anymore. */
+	sd->s_depth = -1;
+}
+
+#else /* CONFIG_LOCKDEP */
+
+static void configfs_init_dirent_depth(struct configfs_dirent *sd)
+{
+}
+
+static void configfs_set_dir_dirent_depth(struct configfs_dirent *parent_sd,
+					  struct configfs_dirent *sd)
+{
+}
+
+static void
+configfs_adjust_dir_dirent_depth_before_populate(struct configfs_dirent *sd)
+{
+}
+
+static void
+configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
+{
+}
+
+#endif /* CONFIG_LOCKDEP */
+
 /*
  * Allocates a new configfs_dirent and links it to the parent configfs_dirent
  */
-static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent * parent_sd,
-						void * element)
+static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
+						   void *element, int type)
 {
 	struct configfs_dirent * sd;
 
@@ -94,6 +180,8 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent * pare
 	INIT_LIST_HEAD(&sd->s_links);
 	INIT_LIST_HEAD(&sd->s_children);
 	sd->s_element = element;
+	sd->s_type = type;
+	configfs_init_dirent_depth(sd);
 	spin_lock(&configfs_dirent_lock);
 	if (parent_sd->s_type & CONFIGFS_USET_DROPPING) {
 		spin_unlock(&configfs_dirent_lock);
@@ -138,12 +226,11 @@ int configfs_make_dirent(struct configfs_dirent * parent_sd,
 {
 	struct configfs_dirent * sd;
 
-	sd = configfs_new_dirent(parent_sd, element);
+	sd = configfs_new_dirent(parent_sd, element, type);
 	if (IS_ERR(sd))
 		return PTR_ERR(sd);
 
 	sd->s_mode = mode;
-	sd->s_type = type;
 	sd->s_dentry = dentry;
 	if (dentry) {
 		dentry->d_fsdata = configfs_get(sd);
@@ -187,6 +274,7 @@ static int create_dir(struct config_item * k, struct dentry * p,
 	error = configfs_make_dirent(p->d_fsdata, d, k, mode,
 				     CONFIGFS_DIR | CONFIGFS_USET_CREATING);
 	if (!error) {
+		configfs_set_dir_dirent_depth(p->d_fsdata, d->d_fsdata);
 		error = configfs_create(d, mode, init_dir);
 		if (!error) {
 			inc_nlink(p->d_inode);
@@ -789,11 +877,13 @@ static int configfs_attach_group(struct config_item *parent_item,
 	 * error, as rmdir() would.
 	 */
 	mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
+	configfs_adjust_dir_dirent_depth_before_populate(sd);
 	ret = populate_groups(to_config_group(item));
 	if (ret) {
 		configfs_detach_item(item);
 		dentry->d_inode->i_flags |= S_DEAD;
 	}
+	configfs_adjust_dir_dirent_depth_after_populate(sd);
 	mutex_unlock(&dentry->d_inode->i_mutex);
 	if (ret)
 		d_delete(dentry);
@@ -916,11 +1006,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
  * Note, btw, that this can be called at *any* time, even when a configfs
  * subsystem isn't registered, or when configfs is loading or unloading.
  * Just like configfs_register_subsystem(). So we take the same
- * precautions. We pin the filesystem. We lock each i_mutex _in_order_
- * on our way down the tree. If we can find the target item in the
+ * precautions. We pin the filesystem. We lock configfs_dirent_lock.
+ * If we can find the target item in the
  * configfs tree, it must be part of the subsystem tree as well, so we
- * do not need the subsystem semaphore. Holding the i_mutex chain locks
- * out mkdir() and rmdir(), who might be racing us.
+ * do not need the subsystem semaphore. Holding configfs_dirent_lock helps
+ * locking out mkdir() and rmdir(), who might be racing us.
  */
 
 /*
@@ -933,17 +1023,21 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
  * do that so we can unlock it if we find nothing.
  *
  * Here we do a depth-first search of the dentry hierarchy looking for
- * our object. We take i_mutex on each step of the way down. IT IS
- * ESSENTIAL THAT i_mutex LOCKING IS ORDERED. If we come back up a branch,
- * we'll drop the i_mutex.
+ * our object.
+ * We deliberately ignore items tagged as dropping since they are virtually
+ * dead, as well as items in the middle of attachment since they virtually
+ * do not exist yet. This completes the locking out of racing mkdir() and
+ * rmdir().
+ * Note: subdirectories in the middle of attachment start with s_type =
+ * CONFIGFS_DIR|CONFIGFS_USET_CREATING set by create_dir(). When
+ * CONFIGFS_USET_CREATING is set, we ignore the item. The actual set of
+ * s_type is in configfs_new_dirent(), which has configfs_dirent_lock.
  *
- * If the target is not found, -ENOENT is bubbled up and we have released
- * all locks. If the target was found, the locks will be cleared by
- * configfs_depend_rollback().
+ * If the target is not found, -ENOENT is bubbled up.
  *
 * This adds a requirement that all config_items be unique!
 *
- * This is recursive because the locking traversal is tricky. There isn't
+ * This is recursive. There isn't
 * much on the stack, though, so folks that need this function - be careful
 * about your stack! Patches will be accepted to make it iterative.
 */
@@ -955,13 +1049,13 @@ static int configfs_depend_prep(struct dentry *origin,
 
 	BUG_ON(!origin || !sd);
 
-	/* Lock this guy on the way down */
-	mutex_lock(&sd->s_dentry->d_inode->i_mutex);
 	if (sd->s_element == target)	/* Boo-yah */
 		goto out;
 
 	list_for_each_entry(child_sd, &sd->s_children, s_sibling) {
-		if (child_sd->s_type & CONFIGFS_DIR) {
+		if ((child_sd->s_type & CONFIGFS_DIR) &&
+		    !(child_sd->s_type & CONFIGFS_USET_DROPPING) &&
+		    !(child_sd->s_type & CONFIGFS_USET_CREATING)) {
 			ret = configfs_depend_prep(child_sd->s_dentry,
 						   target);
 			if (!ret)
@@ -970,33 +1064,12 @@ static int configfs_depend_prep(struct dentry *origin,
 	}
 
 	/* We looped all our children and didn't find target */
-	mutex_unlock(&sd->s_dentry->d_inode->i_mutex);
 	ret = -ENOENT;
 
 out:
 	return ret;
 }
 
-/*
- * This is ONLY called if configfs_depend_prep() did its job. So we can
- * trust the entire path from item back up to origin.
- *
- * We walk backwards from item, unlocking each i_mutex. We finish by
- * unlocking origin.
- */
-static void configfs_depend_rollback(struct dentry *origin,
-				     struct config_item *item)
-{
-	struct dentry *dentry = item->ci_dentry;
-
-	while (dentry != origin) {
-		mutex_unlock(&dentry->d_inode->i_mutex);
-		dentry = dentry->d_parent;
-	}
-
-	mutex_unlock(&origin->d_inode->i_mutex);
-}
-
 int configfs_depend_item(struct configfs_subsystem *subsys,
 			 struct config_item *target)
 {
@@ -1037,17 +1110,21 @@ int configfs_depend_item(struct configfs_subsystem *subsys,
 
 	/* Ok, now we can trust subsys/s_item */
 
-	/* Scan the tree, locking i_mutex recursively, return 0 if found */
+	spin_lock(&configfs_dirent_lock);
+	/* Scan the tree, return 0 if found */
 	ret = configfs_depend_prep(subsys_sd->s_dentry, target);
 	if (ret)
-		goto out_unlock_fs;
+		goto out_unlock_dirent_lock;
 
-	/* We hold all i_mutexes from the subsystem down to the target */
+	/*
+	 * We are sure that the item is not about to be removed by rmdir(), and
+	 * not in the middle of attachment by mkdir().
+	 */
 	p = target->ci_dentry->d_fsdata;
 	p->s_dependent_count += 1;
 
-	configfs_depend_rollback(subsys_sd->s_dentry, target);
-
+out_unlock_dirent_lock:
+	spin_unlock(&configfs_dirent_lock);
 out_unlock_fs:
 	mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex);
 
@@ -1072,10 +1149,10 @@ void configfs_undepend_item(struct configfs_subsystem *subsys,
 	struct configfs_dirent *sd;
 
 	/*
-	 * Since we can trust everything is pinned, we just need i_mutex
-	 * on the item.
+	 * Since we can trust everything is pinned, we just need
+	 * configfs_dirent_lock.
 	 */
-	mutex_lock(&target->ci_dentry->d_inode->i_mutex);
+	spin_lock(&configfs_dirent_lock);
 
 	sd = target->ci_dentry->d_fsdata;
 	BUG_ON(sd->s_dependent_count < 1);
@@ -1086,7 +1163,7 @@ void configfs_undepend_item(struct configfs_subsystem *subsys,
 	 * After this unlock, we cannot trust the item to stay alive!
 	 * DO NOT REFERENCE item after this unlock.
 	 */
-	mutex_unlock(&target->ci_dentry->d_inode->i_mutex);
+	spin_unlock(&configfs_dirent_lock);
 }
 EXPORT_SYMBOL(configfs_undepend_item);
 
@@ -1286,13 +1363,6 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
 	if (sd->s_type & CONFIGFS_USET_DEFAULT)
 		return -EPERM;
 
-	/*
-	 * Here's where we check for dependents. We're protected by
-	 * i_mutex.
-	 */
-	if (sd->s_dependent_count)
-		return -EBUSY;
-
 	/* Get a working ref until we have the child */
 	parent_item = configfs_get_config_item(dentry->d_parent);
 	subsys = to_config_group(parent_item)->cg_subsys;
@@ -1316,9 +1386,17 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
 
 	mutex_lock(&configfs_symlink_mutex);
 	spin_lock(&configfs_dirent_lock);
-	ret = configfs_detach_prep(dentry, &wait_mutex);
-	if (ret)
-		configfs_detach_rollback(dentry);
+	/*
+	 * Here's where we check for dependents. We're protected by
+	 * configfs_dirent_lock.
+	 * If no dependent, atomically tag the item as dropping.
+	 */
+	ret = sd->s_dependent_count ? -EBUSY : 0;
+	if (!ret) {
+		ret = configfs_detach_prep(dentry, &wait_mutex);
+		if (ret)
+			configfs_detach_rollback(dentry);
+	}
 	spin_unlock(&configfs_dirent_lock);
 	mutex_unlock(&configfs_symlink_mutex);
 
@@ -1429,7 +1507,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
 	 */
 	err = -ENOENT;
 	if (configfs_dirent_is_ready(parent_sd)) {
-		file->private_data = configfs_new_dirent(parent_sd, NULL);
+		file->private_data = configfs_new_dirent(parent_sd, NULL, 0);
 		if (IS_ERR(file->private_data))
 			err = PTR_ERR(file->private_data);
 		else
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 5d349d38e056..4921e7426d95 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -33,10 +33,15 @@
 #include <linux/backing-dev.h>
 #include <linux/capability.h>
 #include <linux/sched.h>
+#include <linux/lockdep.h>
 
 #include <linux/configfs.h>
 #include "configfs_internal.h"
 
+#ifdef CONFIG_LOCKDEP
+static struct lock_class_key default_group_class[MAX_LOCK_DEPTH];
+#endif
+
 extern struct super_block * configfs_sb;
 
 static const struct address_space_operations configfs_aops = {
@@ -150,6 +155,38 @@ struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent * sd)
 	return inode;
 }
 
+#ifdef CONFIG_LOCKDEP
+
+static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
+					  struct inode *inode)
+{
+	int depth = sd->s_depth;
+
+	if (depth > 0) {
+		if (depth <= ARRAY_SIZE(default_group_class)) {
+			lockdep_set_class(&inode->i_mutex,
+					  &default_group_class[depth - 1]);
+		} else {
+			/*
+			 * In practice the maximum level of locking depth is
+			 * already reached. Just inform about possible reasons.
+			 */
+			printk(KERN_INFO "configfs: Too many levels of inodes"
+			       " for the locking correctness validator.\n");
+			printk(KERN_INFO "Spurious warnings may appear.\n");
+		}
+	}
+}
+
+#else /* CONFIG_LOCKDEP */
+
+static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
+					  struct inode *inode)
+{
+}
+
+#endif /* CONFIG_LOCKDEP */
+
 int configfs_create(struct dentry * dentry, int mode, int (*init)(struct inode *))
 {
 	int error = 0;
@@ -162,6 +199,7 @@ int configfs_create(struct dentry * dentry, int mode, int (*init)(struct inode *
 			struct inode *p_inode = dentry->d_parent->d_inode;
 			p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
 		}
+		configfs_set_inode_lock_class(sd, inode);
 		goto Proceed;
 	}
 	else
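For reference, a caller-side sketch of the interface whose locking the first patch reworks. The signatures match the diff above (both functions take the subsystem and the item at this point in the tree's history); demo_pin_item() and its arguments are placeholders, not code from the patch.

/*
 * Sketch only: pin a config_item so that rmdir() on it returns -EBUSY,
 * then drop the pin when done with it.
 */
#include <linux/configfs.h>

static int demo_pin_item(struct configfs_subsystem *my_subsys,
			 struct config_item *my_item)
{
	int ret;

	ret = configfs_depend_item(my_subsys, my_item);	/* may sleep */
	if (ret)
		return ret;	/* e.g. -ENOENT if the item is already gone */

	/* ... use the item; userspace cannot rmdir it while pinned ... */

	configfs_undepend_item(my_subsys, my_item);
	return 0;
}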