Diffstat (limited to 'fs')
 -rw-r--r--  fs/nilfs2/the_nilfs.c | 524
 1 file changed, 524 insertions, 0 deletions

diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
new file mode 100644
index 000000000000..852e0bf3a3c5
--- /dev/null
+++ b/fs/nilfs2/the_nilfs.c
@@ -0,0 +1,524 @@
/*
 * the_nilfs.c - the_nilfs shared structure.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include "nilfs.h"
#include "segment.h"
#include "alloc.h"
#include "cpfile.h"
#include "sufile.h"
#include "dat.h"
#include "seglist.h"
#include "segbuf.h"

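/*
 * nilfs_set_last_segment - record the position of the latest segment
 * @nilfs: the_nilfs structure
 * @start_blocknr: start block number of the latest partial segment
 * @seq: sequence number of the latest partial segment
 * @cno: checkpoint number written with that segment
 *
 * The three values are updated together under ns_last_segment_lock so
 * that readers always observe a consistent triple.
 */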
void nilfs_set_last_segment(struct the_nilfs *nilfs,
                            sector_t start_blocknr, u64 seq, __u64 cno)
{
        spin_lock(&nilfs->ns_last_segment_lock);
        nilfs->ns_last_pseg = start_blocknr;
        nilfs->ns_last_seq = seq;
        nilfs->ns_last_cno = cno;
        spin_unlock(&nilfs->ns_last_segment_lock);
}

/**
 * alloc_nilfs - allocate the_nilfs structure
 * @bdev: block device to which the_nilfs is related
 *
 * alloc_nilfs() allocates memory for the_nilfs and
 * initializes its reference count and locks.
 *
 * Return Value: On success, pointer to the_nilfs is returned.
 * On error, NULL is returned.
 */
struct the_nilfs *alloc_nilfs(struct block_device *bdev)
{
        struct the_nilfs *nilfs;

        nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL);
        if (!nilfs)
                return NULL;

        nilfs->ns_bdev = bdev;
        atomic_set(&nilfs->ns_count, 1);
        atomic_set(&nilfs->ns_writer_refcount, -1);
        atomic_set(&nilfs->ns_ndirtyblks, 0);
        init_rwsem(&nilfs->ns_sem);
        mutex_init(&nilfs->ns_writer_mutex);
        INIT_LIST_HEAD(&nilfs->ns_supers);
        spin_lock_init(&nilfs->ns_last_segment_lock);
        nilfs->ns_gc_inodes_h = NULL;
        INIT_LIST_HEAD(&nilfs->ns_used_segments);
        init_rwsem(&nilfs->ns_segctor_sem);
        init_waitqueue_head(&nilfs->ns_cleanerd_wq);

        return nilfs;
}

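/*
 * Illustrative lifetime sketch (an assumption about the caller, not code
 * from this file): a mount path allocates the structure, uses it, and
 * finally drops the initial reference, roughly
 *
 *      nilfs = alloc_nilfs(bdev);
 *      if (!nilfs)
 *              return -ENOMEM;
 *      ... additional references may be taken with get_nilfs(),
 *          which is defined outside this file ...
 *      put_nilfs(nilfs);
 */
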
/**
 * put_nilfs - release a reference to the_nilfs
 * @nilfs: the_nilfs structure to be released
 *
 * put_nilfs() decrements the reference counter of the_nilfs.
 * If the reference count reaches zero, the_nilfs is freed.
 */
void put_nilfs(struct the_nilfs *nilfs)
{
        if (!atomic_dec_and_test(&nilfs->ns_count))
                return;
        /*
         * Increment of ns_count never occurs below because the caller
         * of get_nilfs() holds at least one reference to the_nilfs.
         * Thus its exclusion control is not required here.
         */
        might_sleep();
        if (nilfs_loaded(nilfs)) {
                nilfs_dispose_used_segments(nilfs);
                nilfs_mdt_clear(nilfs->ns_sufile);
                nilfs_mdt_destroy(nilfs->ns_sufile);
                nilfs_mdt_clear(nilfs->ns_cpfile);
                nilfs_mdt_destroy(nilfs->ns_cpfile);
                nilfs_mdt_clear(nilfs->ns_dat);
                nilfs_mdt_destroy(nilfs->ns_dat);
                /* XXX: how and when to clear nilfs->ns_gc_dat? */
                nilfs_mdt_destroy(nilfs->ns_gc_dat);
        }
        if (nilfs_init(nilfs)) {
                nilfs_destroy_gccache(nilfs);
                brelse(nilfs->ns_sbh);
        }
        kfree(nilfs);
}

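/*
 * nilfs_load_super_root - load metadata files from a super root block
 * @nilfs: the_nilfs structure
 * @sbi: nilfs_sb_info
 * @sr_block: disk block number of the super root
 *
 * Reads the super root block, creates the DAT, shadow DAT (for GC),
 * cpfile, and sufile metadata files, and initializes their inodes from
 * the on-disk images stored in the super root at offsets derived from
 * the inode size.
 */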
static int nilfs_load_super_root(struct the_nilfs *nilfs,
                                 struct nilfs_sb_info *sbi, sector_t sr_block)
{
        struct buffer_head *bh_sr;
        struct nilfs_super_root *raw_sr;
        unsigned dat_entry_size, segment_usage_size, checkpoint_size;
        unsigned inode_size;
        int err;

        err = nilfs_read_super_root_block(sbi->s_super, sr_block, &bh_sr, 1);
        if (unlikely(err))
                return err;

        down_read(&nilfs->ns_sem);
        dat_entry_size = le16_to_cpu(nilfs->ns_sbp->s_dat_entry_size);
        checkpoint_size = le16_to_cpu(nilfs->ns_sbp->s_checkpoint_size);
        segment_usage_size = le16_to_cpu(nilfs->ns_sbp->s_segment_usage_size);
        up_read(&nilfs->ns_sem);

        inode_size = nilfs->ns_inode_size;

        err = -ENOMEM;
        nilfs->ns_dat = nilfs_mdt_new(
                nilfs, NULL, NILFS_DAT_INO, NILFS_DAT_GFP);
        if (unlikely(!nilfs->ns_dat))
                goto failed;

        nilfs->ns_gc_dat = nilfs_mdt_new(
                nilfs, NULL, NILFS_DAT_INO, NILFS_DAT_GFP);
        if (unlikely(!nilfs->ns_gc_dat))
                goto failed_dat;

        nilfs->ns_cpfile = nilfs_mdt_new(
                nilfs, NULL, NILFS_CPFILE_INO, NILFS_CPFILE_GFP);
        if (unlikely(!nilfs->ns_cpfile))
                goto failed_gc_dat;

        nilfs->ns_sufile = nilfs_mdt_new(
                nilfs, NULL, NILFS_SUFILE_INO, NILFS_SUFILE_GFP);
        if (unlikely(!nilfs->ns_sufile))
                goto failed_cpfile;

        err = nilfs_palloc_init_blockgroup(nilfs->ns_dat, dat_entry_size);
        if (unlikely(err))
                goto failed_sufile;

        err = nilfs_palloc_init_blockgroup(nilfs->ns_gc_dat, dat_entry_size);
        if (unlikely(err))
                goto failed_sufile;

        nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
        nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size,
                                 sizeof(struct nilfs_cpfile_header));
        nilfs_mdt_set_entry_size(nilfs->ns_sufile, segment_usage_size,
                                 sizeof(struct nilfs_sufile_header));

        err = nilfs_mdt_read_inode_direct(
                nilfs->ns_dat, bh_sr, NILFS_SR_DAT_OFFSET(inode_size));
        if (unlikely(err))
                goto failed_sufile;

        err = nilfs_mdt_read_inode_direct(
                nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(inode_size));
        if (unlikely(err))
                goto failed_sufile;

        err = nilfs_mdt_read_inode_direct(
                nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(inode_size));
        if (unlikely(err))
                goto failed_sufile;

        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
        nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime);

 failed:
        brelse(bh_sr);
        return err;

 failed_sufile:
        nilfs_mdt_destroy(nilfs->ns_sufile);

 failed_cpfile:
        nilfs_mdt_destroy(nilfs->ns_cpfile);

 failed_gc_dat:
        nilfs_mdt_destroy(nilfs->ns_gc_dat);

 failed_dat:
        nilfs_mdt_destroy(nilfs->ns_dat);
        goto failed;
}

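/*
 * nilfs_init_recovery_info() zeroes a nilfs_recovery_info and prepares
 * its list of segments used during recovery; nilfs_clear_recovery_info()
 * releases whatever has accumulated on that list.
 */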
static void nilfs_init_recovery_info(struct nilfs_recovery_info *ri)
{
        memset(ri, 0, sizeof(*ri));
        INIT_LIST_HEAD(&ri->ri_used_segments);
}

static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
{
        nilfs_dispose_segment_list(&ri->ri_used_segments);
}

/**
 * load_nilfs - load and recover the nilfs
 * @nilfs: the_nilfs structure to be loaded
 * @sbi: nilfs_sb_info used to recover past segments
 *
 * load_nilfs() searches for and loads the latest super root,
 * attaches the last segment, and does recovery if needed.
 * The caller must ensure that simultaneous mounts do not invoke
 * this function concurrently.
 */
int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
{
        struct nilfs_recovery_info ri;
        unsigned int s_flags = sbi->s_super->s_flags;
        int really_read_only = bdev_read_only(nilfs->ns_bdev);
        unsigned valid_fs;
        int err = 0;

        nilfs_init_recovery_info(&ri);

        down_write(&nilfs->ns_sem);
        valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
        up_write(&nilfs->ns_sem);

        if (!valid_fs && (s_flags & MS_RDONLY)) {
                printk(KERN_INFO "NILFS: INFO: recovery "
                       "required for readonly filesystem.\n");
                if (really_read_only) {
                        printk(KERN_ERR "NILFS: write access "
                               "unavailable, cannot proceed.\n");
                        err = -EROFS;
                        goto failed;
                }
                printk(KERN_INFO "NILFS: write access will "
                       "be enabled during recovery.\n");
                sbi->s_super->s_flags &= ~MS_RDONLY;
        }

        err = nilfs_search_super_root(nilfs, sbi, &ri);
        if (unlikely(err)) {
                printk(KERN_ERR "NILFS: error searching super root.\n");
                goto failed;
        }

        err = nilfs_load_super_root(nilfs, sbi, ri.ri_super_root);
        if (unlikely(err)) {
                printk(KERN_ERR "NILFS: error loading super root.\n");
                goto failed;
        }

        if (!valid_fs) {
                err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
                if (unlikely(err)) {
                        nilfs_mdt_destroy(nilfs->ns_cpfile);
                        nilfs_mdt_destroy(nilfs->ns_sufile);
                        nilfs_mdt_destroy(nilfs->ns_dat);
                        goto failed;
                }
                if (ri.ri_need_recovery == NILFS_RECOVERY_SR_UPDATED) {
                        down_write(&nilfs->ns_sem);
                        nilfs_update_last_segment(sbi, 0);
                        up_write(&nilfs->ns_sem);
                }
        }

        set_nilfs_loaded(nilfs);

 failed:
        nilfs_clear_recovery_info(&ri);
        sbi->s_super->s_flags = s_flags;
        return err;
}

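/*
 * nilfs_max_size() caps the maximum file size at the smaller of the page
 * cache limit (MAX_LFS_FILESIZE) and the bmap key range.  As a hedged,
 * hypothetical example: if blkbits + NILFS_BMAP_KEY_BIT came to 40, the
 * limit would be min(MAX_LFS_FILESIZE, (1ULL << 40) - 1); when the sum
 * is 64 or more, MAX_LFS_FILESIZE applies unchanged.
 */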
static unsigned long long nilfs_max_size(unsigned int blkbits)
{
        unsigned int max_bits;
        unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */

        max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */
        if (max_bits < 64)
                res = min_t(unsigned long long, res, (1ULL << max_bits) - 1);
        return res;
}

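/*
 * nilfs_store_disk_layout() copies the on-disk layout parameters out of
 * the super block after checking the revision, and derives the number of
 * reserved segments from the reservation percentage.  As a hypothetical
 * example, 10000 segments with s_r_segments_percentage = 5 gives
 * DIV_ROUND_UP(10000 * 5, 100) = 500 reserved segments, unless that falls
 * below NILFS_MIN_NRSVSEGS, in which case the minimum is used instead.
 */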
static int
nilfs_store_disk_layout(struct the_nilfs *nilfs, struct super_block *sb,
                        struct nilfs_super_block *sbp)
{
        if (le32_to_cpu(sbp->s_rev_level) != NILFS_CURRENT_REV) {
                printk(KERN_ERR "NILFS: revision mismatch "
                       "(superblock rev.=%d.%d, current rev.=%d.%d). "
                       "Please check the version of mkfs.nilfs.\n",
                       le32_to_cpu(sbp->s_rev_level),
                       le16_to_cpu(sbp->s_minor_rev_level),
                       NILFS_CURRENT_REV, NILFS_MINOR_REV);
                return -EINVAL;
        }
        nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
        nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);

        nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
        if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
                printk(KERN_ERR "NILFS: too short segment.\n");
                return -EINVAL;
        }

        nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
        nilfs->ns_nsegments = le64_to_cpu(sbp->s_nsegments);
        nilfs->ns_r_segments_percentage =
                le32_to_cpu(sbp->s_r_segments_percentage);
        nilfs->ns_nrsvsegs =
                max_t(unsigned long, NILFS_MIN_NRSVSEGS,
                      DIV_ROUND_UP(nilfs->ns_nsegments *
                                   nilfs->ns_r_segments_percentage, 100));
        nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
        return 0;
}

/**
 * init_nilfs - initialize a NILFS instance.
 * @nilfs: the_nilfs structure
 * @sbi: nilfs_sb_info
 * @data: mount options
 *
 * init_nilfs() performs common initialization per block device (e.g.
 * reading the super block, getting disk layout information, initializing
 * shared fields in the_nilfs). It takes over a portion of the jobs
 * typically done by a fill_super() routine; this division exists because
 * multiple NILFS instances may be mounted on the same device at once.
 * For multiple mounts on the same device, only the first mount
 * performs these tasks.
 *
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned.
 */
int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
{
        struct super_block *sb = sbi->s_super;
        struct buffer_head *sbh;
        struct nilfs_super_block *sbp;
        struct backing_dev_info *bdi;
        int blocksize;
        int err = 0;

        down_write(&nilfs->ns_sem);
        if (nilfs_init(nilfs)) {
                /* Load values from existing the_nilfs */
                sbp = nilfs->ns_sbp;
                err = nilfs_store_magic_and_option(sb, sbp, data);
                if (err)
                        goto out;

                blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
                if (sb->s_blocksize != blocksize &&
                    !sb_set_blocksize(sb, blocksize)) {
                        printk(KERN_ERR "NILFS: blocksize %d unfit for device\n",
                               blocksize);
                        err = -EINVAL;
                }
                sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);
                goto out;
        }

        sbp = nilfs_load_super_block(sb, &sbh);
        if (!sbp) {
                err = -EINVAL;
                goto out;
        }
        err = nilfs_store_magic_and_option(sb, sbp, data);
        if (err)
                goto failed_sbh;

        blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
        if (sb->s_blocksize != blocksize) {
                sbp = nilfs_reload_super_block(sb, &sbh, blocksize);
                if (!sbp) {
                        err = -EINVAL;
                        goto out;
                        /* not failed_sbh; sbh is released automatically
                           when reloading fails. */
                }
        }
        nilfs->ns_blocksize_bits = sb->s_blocksize_bits;

        err = nilfs_store_disk_layout(nilfs, sb, sbp);
        if (err)
                goto failed_sbh;

        sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);

        nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);
        nilfs->ns_sbh = sbh;
        nilfs->ns_sbp = sbp;

        bdi = nilfs->ns_bdev->bd_inode_backing_dev_info;
        if (!bdi)
                bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
        nilfs->ns_bdi = bdi ? : &default_backing_dev_info;

        /* Finding last segment */
        nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg);
        nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno);
        nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq);

        nilfs->ns_seg_seq = nilfs->ns_last_seq;
        nilfs->ns_segnum =
                nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
        nilfs->ns_cno = nilfs->ns_last_cno + 1;
        if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
                printk(KERN_ERR "NILFS: invalid last segment number.\n");
                err = -EINVAL;
                goto failed_sbh;
        }
        /* Dummy values */
        nilfs->ns_free_segments_count =
                nilfs->ns_nsegments - (nilfs->ns_segnum + 1);

        /* Initialize gcinode cache */
        err = nilfs_init_gccache(nilfs);
        if (err)
                goto failed_sbh;

        set_nilfs_init(nilfs);
        err = 0;
 out:
        up_write(&nilfs->ns_sem);
        return err;

 failed_sbh:
        brelse(sbh);
        goto out;
}

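/*
 * nilfs_count_free_blocks() converts the clean segment count reported by
 * the sufile into a block count: *nblocks = ncleansegs multiplied by the
 * number of blocks per segment.  The lookup is done under the DAT
 * metadata file's mi_sem.
 */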
int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
        struct inode *dat = nilfs_dat_inode(nilfs);
        unsigned long ncleansegs;
        int err;

        down_read(&NILFS_MDT(dat)->mi_sem);     /* XXX */
        err = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile, &ncleansegs);
        up_read(&NILFS_MDT(dat)->mi_sem);       /* XXX */
        if (likely(!err))
                *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
        return err;
}

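/*
 * nilfs_dispose_used_segments() drains the ns_used_segments list: each
 * entry has its volatile-active flag cleared in the segment usage, is
 * closed against the sufile, and is then freed.
 */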
void nilfs_dispose_used_segments(struct the_nilfs *nilfs)
{
        struct nilfs_segment_entry *ent, *n;

        /* nilfs->ns_sem must be locked by the caller. */
        if (!nilfs_loaded(nilfs))
                return;

        list_for_each_entry_safe(ent, n, &nilfs->ns_used_segments, list) {
                list_del_init(&ent->list);
                nilfs_segment_usage_clear_volatile_active(ent->raw_su);
                nilfs_close_segment_entry(ent, nilfs->ns_sufile);
                nilfs_free_segment_entry(ent);
        }
}

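/*
 * nilfs_near_disk_full() estimates how many segments a flush of the
 * currently dirty blocks would consume (ndirtyblks divided by blocks per
 * segment, plus one) and reports "near full" when the clean segments
 * would not outnumber the reserved segments plus that estimate.  As a
 * hypothetical example, with 2048 blocks per segment and 5000 dirty
 * blocks, nincsegs = 5000 / 2048 + 1 = 3.
 */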
int nilfs_near_disk_full(struct the_nilfs *nilfs)
{
        struct inode *sufile = nilfs->ns_sufile;
        unsigned long ncleansegs, nincsegs;
        int ret;

        ret = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs);
        if (likely(!ret)) {
                nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
                        nilfs->ns_blocks_per_segment + 1;
                if (ncleansegs <= nilfs->ns_nrsvsegs + nincsegs)
                        ret++;
        }
        return ret;
}

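/*
 * nilfs_checkpoint_is_mounted() returns nonzero when checkpoint @cno is
 * either attached to one of the supers on ns_supers (restricted to
 * snapshot mounts when @snapshot_mount is set) or is recent, i.e. at or
 * beyond the last checkpoint, and therefore needs protection.
 */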
int nilfs_checkpoint_is_mounted(struct the_nilfs *nilfs, __u64 cno,
                                int snapshot_mount)
{
        struct nilfs_sb_info *sbi;
        int ret = 0;

        down_read(&nilfs->ns_sem);
        if (cno == 0 || cno > nilfs->ns_cno)
                goto out_unlock;

        list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
                if (sbi->s_snapshot_cno == cno &&
                    (!snapshot_mount || nilfs_test_opt(sbi, SNAPSHOT))) {
                        /* exclude read-only mounts */
                        ret++;
                        break;
                }
        }
        /* for protecting recent checkpoints */
        if (cno >= nilfs_last_cno(nilfs))
                ret++;

 out_unlock:
        up_read(&nilfs->ns_sem);
        return ret;
}