path: root/fs/super.c
author		Al Viro <viro@zeniv.linux.org.uk>	2013-10-01 15:09:58 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-10-24 23:35:00 -0400
commit		7eb5e8826911f2792179f99e77e75fbb7ef53a4a (patch)
tree		e46c2665e53bed578d2c6e813ad80e00a4d5d22f /fs/super.c
parent		966c1f75f8e1e8e44d8277f7cc3598f6abbdda2f (diff)

uninline destroy_super(), consolidate alloc_super()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
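The point of the consolidation: instead of one error label per allocation step, alloc_super() now funnels every failure to a single fail: label that calls destroy_super(), and destroy_super() is written to cope with a superblock in any stage of construction. Because the superblock comes from kzalloc(), every field it never got around to initializing is zero, and each teardown helper treats that zeroed state as "nothing to undo" (kfree(NULL) and free_percpu(NULL) are no-ops). A minimal user-space sketch of the same pattern, assuming only libc; make_foo()/unmake_foo() are illustrative names, not kernel API:

#include <stdlib.h>

/* Hypothetical two-resource object; names are illustrative only. */
struct foo {
	void *a;	/* NULL until allocated */
	void *b;	/* NULL until allocated */
};

/*
 * The destructor tolerates any partially-constructed state:
 * free(NULL) is a no-op, so calling it is correct after every
 * failure point in make_foo().
 */
static void unmake_foo(struct foo *f)
{
	free(f->b);
	free(f->a);
	free(f);
}

static struct foo *make_foo(void)
{
	struct foo *f = calloc(1, sizeof(*f));	/* zeroed, like kzalloc() */

	if (!f)
		return NULL;
	f->a = malloc(64);
	if (!f->a)
		goto fail;	/* one label, no per-step unwinding */
	f->b = malloc(64);
	if (!f->b)
		goto fail;
	return f;
fail:
	unmake_foo(f);	/* the full destructor handles partial state */
	return NULL;
}

The invariant to maintain is that the destructor is a no-op on every field that was never set up; the uninlined destroy_super() below is built the same way.
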
Diffstat (limited to 'fs/super.c')
-rw-r--r--	fs/super.c	206
1 file changed, 86 insertions(+), 120 deletions(-)
diff --git a/fs/super.c b/fs/super.c
index 0225c20f8770..efa6e488a95c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -129,33 +129,27 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 	return total_objects;
 }
 
-static int init_sb_writers(struct super_block *s, struct file_system_type *type)
-{
-	int err;
-	int i;
-
-	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
-		err = percpu_counter_init(&s->s_writers.counter[i], 0);
-		if (err < 0)
-			goto err_out;
-		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
-				 &type->s_writers_key[i], 0);
-	}
-	init_waitqueue_head(&s->s_writers.wait);
-	init_waitqueue_head(&s->s_writers.wait_unfrozen);
-	return 0;
-err_out:
-	while (--i >= 0)
-		percpu_counter_destroy(&s->s_writers.counter[i]);
-	return err;
-}
-
-static void destroy_sb_writers(struct super_block *s)
+/**
+ * destroy_super - frees a superblock
+ * @s: superblock to free
+ *
+ * Frees a superblock.
+ */
+static void destroy_super(struct super_block *s)
 {
 	int i;
-
+	list_lru_destroy(&s->s_dentry_lru);
+	list_lru_destroy(&s->s_inode_lru);
+#ifdef CONFIG_SMP
+	free_percpu(s->s_files);
+#endif
 	for (i = 0; i < SB_FREEZE_LEVELS; i++)
 		percpu_counter_destroy(&s->s_writers.counter[i]);
+	security_sb_free(s);
+	WARN_ON(!list_empty(&s->s_mounts));
+	kfree(s->s_subtype);
+	kfree(s->s_options);
+	kfree(s);
 }
 
 /**
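Note the change of shape in the unwinding: the deleted init_sb_writers() rolled back only the counters it had actually initialized (while (--i >= 0)), whereas destroy_super() now sweeps all SB_FREEZE_LEVELS slots unconditionally. That is safe only because the superblock is zero-filled by kzalloc() and percpu_counter_destroy() returns early for a counter whose per-CPU storage was never allocated. A small sketch of that invariant; slot_init()/slot_destroy() are hypothetical stand-ins for the percpu counter helpers:

#include <stdlib.h>
#include <string.h>

#define NSLOTS 3	/* stand-in for SB_FREEZE_LEVELS */

struct slot {
	int *storage;	/* NULL (zeroed) means "never initialized" */
};

static int slot_init(struct slot *s)
{
	s->storage = malloc(sizeof(*s->storage));
	return s->storage ? 0 : -1;
}

/*
 * Safe on initialized and zeroed slots alike: free(NULL) is a
 * no-op, mirroring percpu_counter_destroy()'s early return when
 * the per-CPU storage was never set up.
 */
static void slot_destroy(struct slot *s)
{
	free(s->storage);
	s->storage = NULL;
}

static void destroy_all(struct slot *v)
{
	int i;

	for (i = 0; i < NSLOTS; i++)	/* full sweep, no progress record */
		slot_destroy(&v[i]);
}

static int init_all(struct slot *v)
{
	int i;

	memset(v, 0, NSLOTS * sizeof(*v));	/* the kzalloc() step */
	for (i = 0; i < NSLOTS; i++) {
		if (slot_init(&v[i]) < 0) {
			destroy_all(v);	/* correct mid-way, thanks to zeroing */
			return -1;
		}
	}
	return 0;
}
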
@@ -170,111 +164,83 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 {
 	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
 	static const struct super_operations default_op;
+	int i;
+
+	if (!s)
+		return NULL;
 
-	if (s) {
-		if (security_sb_alloc(s))
-			goto out_free_sb;
+	if (security_sb_alloc(s))
+		goto fail;
 
 #ifdef CONFIG_SMP
-		s->s_files = alloc_percpu(struct list_head);
-		if (!s->s_files)
-			goto err_out;
-		else {
-			int i;
-
-			for_each_possible_cpu(i)
-				INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
-		}
+	s->s_files = alloc_percpu(struct list_head);
+	if (!s->s_files)
+		goto fail;
+	for_each_possible_cpu(i)
+		INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
 #else
-		INIT_LIST_HEAD(&s->s_files);
+	INIT_LIST_HEAD(&s->s_files);
 #endif
-		if (init_sb_writers(s, type))
-			goto err_out;
-		s->s_flags = flags;
-		s->s_bdi = &default_backing_dev_info;
-		INIT_HLIST_NODE(&s->s_instances);
-		INIT_HLIST_BL_HEAD(&s->s_anon);
-		INIT_LIST_HEAD(&s->s_inodes);
-
-		if (list_lru_init(&s->s_dentry_lru))
-			goto err_out;
-		if (list_lru_init(&s->s_inode_lru))
-			goto err_out_dentry_lru;
-
-		INIT_LIST_HEAD(&s->s_mounts);
-		init_rwsem(&s->s_umount);
-		lockdep_set_class(&s->s_umount, &type->s_umount_key);
-		/*
-		 * sget() can have s_umount recursion.
-		 *
-		 * When it cannot find a suitable sb, it allocates a new
-		 * one (this one), and tries again to find a suitable old
-		 * one.
-		 *
-		 * In case that succeeds, it will acquire the s_umount
-		 * lock of the old one. Since these are clearly distinct
-		 * locks, and this object isn't exposed yet, there's no
-		 * risk of deadlocks.
-		 *
-		 * Annotate this by putting this lock in a different
-		 * subclass.
-		 */
-		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
-		s->s_count = 1;
-		atomic_set(&s->s_active, 1);
-		mutex_init(&s->s_vfs_rename_mutex);
-		lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
-		mutex_init(&s->s_dquot.dqio_mutex);
-		mutex_init(&s->s_dquot.dqonoff_mutex);
-		init_rwsem(&s->s_dquot.dqptr_sem);
-		s->s_maxbytes = MAX_NON_LFS;
-		s->s_op = &default_op;
-		s->s_time_gran = 1000000000;
-		s->cleancache_poolid = -1;
-
-		s->s_shrink.seeks = DEFAULT_SEEKS;
-		s->s_shrink.scan_objects = super_cache_scan;
-		s->s_shrink.count_objects = super_cache_count;
-		s->s_shrink.batch = 1024;
-		s->s_shrink.flags = SHRINKER_NUMA_AWARE;
+	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
+		if (percpu_counter_init(&s->s_writers.counter[i], 0) < 0)
+			goto fail;
+		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
+				 &type->s_writers_key[i], 0);
 	}
-out:
+	init_waitqueue_head(&s->s_writers.wait);
+	init_waitqueue_head(&s->s_writers.wait_unfrozen);
+	s->s_flags = flags;
+	s->s_bdi = &default_backing_dev_info;
+	INIT_HLIST_NODE(&s->s_instances);
+	INIT_HLIST_BL_HEAD(&s->s_anon);
+	INIT_LIST_HEAD(&s->s_inodes);
+
+	if (list_lru_init(&s->s_dentry_lru))
+		goto fail;
+	if (list_lru_init(&s->s_inode_lru))
+		goto fail;
+
+	INIT_LIST_HEAD(&s->s_mounts);
+	init_rwsem(&s->s_umount);
+	lockdep_set_class(&s->s_umount, &type->s_umount_key);
+	/*
+	 * sget() can have s_umount recursion.
+	 *
+	 * When it cannot find a suitable sb, it allocates a new
+	 * one (this one), and tries again to find a suitable old
+	 * one.
+	 *
+	 * In case that succeeds, it will acquire the s_umount
+	 * lock of the old one. Since these are clearly distinct
+	 * locks, and this object isn't exposed yet, there's no
+	 * risk of deadlocks.
+	 *
+	 * Annotate this by putting this lock in a different
+	 * subclass.
+	 */
+	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
+	s->s_count = 1;
+	atomic_set(&s->s_active, 1);
+	mutex_init(&s->s_vfs_rename_mutex);
+	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
+	mutex_init(&s->s_dquot.dqio_mutex);
+	mutex_init(&s->s_dquot.dqonoff_mutex);
+	init_rwsem(&s->s_dquot.dqptr_sem);
+	s->s_maxbytes = MAX_NON_LFS;
+	s->s_op = &default_op;
+	s->s_time_gran = 1000000000;
+	s->cleancache_poolid = -1;
+
+	s->s_shrink.seeks = DEFAULT_SEEKS;
+	s->s_shrink.scan_objects = super_cache_scan;
+	s->s_shrink.count_objects = super_cache_count;
+	s->s_shrink.batch = 1024;
+	s->s_shrink.flags = SHRINKER_NUMA_AWARE;
 	return s;
 
-err_out_dentry_lru:
-	list_lru_destroy(&s->s_dentry_lru);
-err_out:
-	security_sb_free(s);
-#ifdef CONFIG_SMP
-	if (s->s_files)
-		free_percpu(s->s_files);
-#endif
-	destroy_sb_writers(s);
-out_free_sb:
-	kfree(s);
-	s = NULL;
-	goto out;
-}
-
-/**
- * destroy_super - frees a superblock
- * @s: superblock to free
- *
- * Frees a superblock.
- */
-static inline void destroy_super(struct super_block *s)
-{
-	list_lru_destroy(&s->s_dentry_lru);
-	list_lru_destroy(&s->s_inode_lru);
-#ifdef CONFIG_SMP
-	free_percpu(s->s_files);
-#endif
-	destroy_sb_writers(s);
-	security_sb_free(s);
-	WARN_ON(!list_empty(&s->s_mounts));
-	kfree(s->s_subtype);
-	kfree(s->s_options);
-	kfree(s);
+fail:
+	destroy_super(s);
+	return NULL;
 }
 
 /* Superblock refcounting */
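For context on the s_umount comment carried through above: sget() searches for an existing superblock, allocates a fresh one via alloc_super() when the search misses (allocation can sleep, so it happens without the list lock held), and then searches again; if another thread published a matching superblock in the meantime, the fresh, never-exposed one is simply destroyed. A rough user-space sketch of that shape, assuming pthreads; sb_list, find_sb() and sget_like() are hypothetical names for illustration:

#include <pthread.h>
#include <stdlib.h>

struct sb {
	struct sb *next;
	int key;
};

static struct sb *sb_list;	/* illustrative global list */
static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold sb_lock */
static struct sb *find_sb(int key)
{
	struct sb *s;

	for (s = sb_list; s; s = s->next)
		if (s->key == key)
			return s;
	return NULL;
}

/*
 * sget()-like shape: look up, allocate on a miss (outside the
 * lock, since allocation may sleep), then look up again; the
 * loser of the race frees its fresh, never-exposed copy.
 */
static struct sb *sget_like(int key)
{
	struct sb *old, *s = NULL;

	for (;;) {
		pthread_mutex_lock(&sb_lock);
		old = find_sb(key);
		if (old) {
			pthread_mutex_unlock(&sb_lock);
			free(s);	/* lost the race (or s == NULL) */
			return old;
		}
		if (s) {		/* second pass: publish our copy */
			s->next = sb_list;
			sb_list = s;
			pthread_mutex_unlock(&sb_lock);
			return s;
		}
		pthread_mutex_unlock(&sb_lock);
		s = calloc(1, sizeof(*s));	/* zeroed, like kzalloc() */
		if (!s)
			return NULL;
		s->key = key;
	}
}

Because the losing copy was never visible to any other thread, it can be torn down without ceremony; in the kernel, that never-exposed property is also what makes the down_write_nested(..., SINGLE_DEPTH_NESTING) annotation on the new superblock's s_umount safe.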