Diffstat (limited to 'fs/nilfs2/sufile.c')
-rw-r--r--  fs/nilfs2/sufile.c | 290
1 file changed, 104 insertions, 186 deletions
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index c774cf397e2f..98e68677f045 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -93,6 +93,52 @@ nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
                                    create, NULL, bhp);
 }
 
+static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
+                                     u64 ncleanadd, u64 ndirtyadd)
+{
+        struct nilfs_sufile_header *header;
+        void *kaddr;
+
+        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+        header = kaddr + bh_offset(header_bh);
+        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
+        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
+        kunmap_atomic(kaddr, KM_USER0);
+
+        nilfs_mdt_mark_buffer_dirty(header_bh);
+}
+
+int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
+                        void (*dofunc)(struct inode *, __u64,
+                                       struct buffer_head *,
+                                       struct buffer_head *))
+{
+        struct buffer_head *header_bh, *bh;
+        int ret;
+
+        if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
+                printk(KERN_WARNING "%s: invalid segment number: %llu\n",
+                       __func__, (unsigned long long)segnum);
+                return -EINVAL;
+        }
+        down_write(&NILFS_MDT(sufile)->mi_sem);
+
+        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
+        if (ret < 0)
+                goto out_sem;
+
+        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
+        if (!ret) {
+                dofunc(sufile, segnum, header_bh, bh);
+                brelse(bh);
+        }
+        brelse(header_bh);
+
+ out_sem:
+        up_write(&NILFS_MDT(sufile)->mi_sem);
+        return ret;
+}
+
 /**
  * nilfs_sufile_alloc - allocate a segment
  * @sufile: inode of segment usage file
@@ -113,7 +159,6 @@ nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
 int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 {
         struct buffer_head *header_bh, *su_bh;
-        struct the_nilfs *nilfs;
         struct nilfs_sufile_header *header;
         struct nilfs_segment_usage *su;
         size_t susz = NILFS_MDT(sufile)->mi_entry_size;
@@ -124,8 +169,6 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 
         down_write(&NILFS_MDT(sufile)->mi_sem);
 
-        nilfs = NILFS_MDT(sufile)->mi_nilfs;
-
         ret = nilfs_sufile_get_header_block(sufile, &header_bh);
         if (ret < 0)
                 goto out_sem;
@@ -192,165 +235,84 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
         return ret;
 }
 
-/**
- * nilfs_sufile_cancel_free -
- * @sufile: inode of segment usage file
- * @segnum: segment number
- *
- * Description:
- *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- */
-int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum)
+void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
+                                 struct buffer_head *header_bh,
+                                 struct buffer_head *su_bh)
 {
-        struct buffer_head *header_bh, *su_bh;
-        struct the_nilfs *nilfs;
-        struct nilfs_sufile_header *header;
         struct nilfs_segment_usage *su;
         void *kaddr;
-        int ret;
-
-        down_write(&NILFS_MDT(sufile)->mi_sem);
-
-        nilfs = NILFS_MDT(sufile)->mi_nilfs;
-
-        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
-        if (ret < 0)
-                goto out_sem;
-
-        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh);
-        if (ret < 0)
-                goto out_header;
 
         kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
-        su = nilfs_sufile_block_get_segment_usage(
-                sufile, segnum, su_bh, kaddr);
+        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
         if (unlikely(!nilfs_segment_usage_clean(su))) {
                 printk(KERN_WARNING "%s: segment %llu must be clean\n",
                        __func__, (unsigned long long)segnum);
                 kunmap_atomic(kaddr, KM_USER0);
-                goto out_su_bh;
+                return;
         }
         nilfs_segment_usage_set_dirty(su);
         kunmap_atomic(kaddr, KM_USER0);
 
-        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-        header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
-        le64_add_cpu(&header->sh_ncleansegs, -1);
-        le64_add_cpu(&header->sh_ndirtysegs, 1);
-        kunmap_atomic(kaddr, KM_USER0);
-
-        nilfs_mdt_mark_buffer_dirty(header_bh);
+        nilfs_sufile_mod_counter(header_bh, -1, 1);
         nilfs_mdt_mark_buffer_dirty(su_bh);
         nilfs_mdt_mark_dirty(sufile);
-
- out_su_bh:
-        brelse(su_bh);
- out_header:
-        brelse(header_bh);
- out_sem:
-        up_write(&NILFS_MDT(sufile)->mi_sem);
-        return ret;
 }
 
-/**
- * nilfs_sufile_freev - free segments
- * @sufile: inode of segment usage file
- * @segnum: array of segment numbers
- * @nsegs: number of segments
- *
- * Description: nilfs_sufile_freev() frees segments specified by @segnum and
- * @nsegs, which must have been returned by a previous call to
- * nilfs_sufile_alloc().
- *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- */
-#define NILFS_SUFILE_FREEV_PREALLOC 16
-int nilfs_sufile_freev(struct inode *sufile, __u64 *segnum, size_t nsegs)
+void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
+                           struct buffer_head *header_bh,
+                           struct buffer_head *su_bh)
 {
-        struct buffer_head *header_bh, **su_bh,
-                *su_bh_prealloc[NILFS_SUFILE_FREEV_PREALLOC];
-        struct the_nilfs *nilfs;
-        struct nilfs_sufile_header *header;
         struct nilfs_segment_usage *su;
         void *kaddr;
-        int ret, i;
+        int clean, dirty;
 
-        down_write(&NILFS_MDT(sufile)->mi_sem);
-
-        nilfs = NILFS_MDT(sufile)->mi_nilfs;
-
-        /* prepare resources */
-        if (nsegs <= NILFS_SUFILE_FREEV_PREALLOC)
-                su_bh = su_bh_prealloc;
-        else {
-                su_bh = kmalloc(sizeof(*su_bh) * nsegs, GFP_NOFS);
-                if (su_bh == NULL) {
-                        ret = -ENOMEM;
-                        goto out_sem;
-                }
-        }
-
-        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
-        if (ret < 0)
-                goto out_su_bh;
-        for (i = 0; i < nsegs; i++) {
-                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum[i],
-                                                           0, &su_bh[i]);
-                if (ret < 0)
-                        goto out_bh;
-        }
-
-        /* free segments */
-        for (i = 0; i < nsegs; i++) {
-                kaddr = kmap_atomic(su_bh[i]->b_page, KM_USER0);
-                su = nilfs_sufile_block_get_segment_usage(
-                        sufile, segnum[i], su_bh[i], kaddr);
-                WARN_ON(nilfs_segment_usage_error(su));
-                nilfs_segment_usage_set_clean(su);
+        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
+        if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
+            su->su_nblocks == cpu_to_le32(0)) {
                 kunmap_atomic(kaddr, KM_USER0);
-                nilfs_mdt_mark_buffer_dirty(su_bh[i]);
+                return;
         }
-        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-        header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
-        le64_add_cpu(&header->sh_ncleansegs, nsegs);
-        le64_add_cpu(&header->sh_ndirtysegs, -(u64)nsegs);
+        clean = nilfs_segment_usage_clean(su);
+        dirty = nilfs_segment_usage_dirty(su);
+
+        /* make the segment garbage */
+        su->su_lastmod = cpu_to_le64(0);
+        su->su_nblocks = cpu_to_le32(0);
+        su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
         kunmap_atomic(kaddr, KM_USER0);
-        nilfs_mdt_mark_buffer_dirty(header_bh);
+
+        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
+        nilfs_mdt_mark_buffer_dirty(su_bh);
         nilfs_mdt_mark_dirty(sufile);
+}
 
- out_bh:
-        for (i--; i >= 0; i--)
-                brelse(su_bh[i]);
-        brelse(header_bh);
+void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
+                          struct buffer_head *header_bh,
+                          struct buffer_head *su_bh)
+{
+        struct nilfs_segment_usage *su;
+        void *kaddr;
+        int sudirty;
 
- out_su_bh:
-        if (su_bh != su_bh_prealloc)
-                kfree(su_bh);
+        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
+        if (nilfs_segment_usage_clean(su)) {
+                printk(KERN_WARNING "%s: segment %llu is already clean\n",
+                       __func__, (unsigned long long)segnum);
+                kunmap_atomic(kaddr, KM_USER0);
+                return;
+        }
+        WARN_ON(nilfs_segment_usage_error(su));
+        WARN_ON(!nilfs_segment_usage_dirty(su));
 
- out_sem:
-        up_write(&NILFS_MDT(sufile)->mi_sem);
-        return ret;
-}
+        sudirty = nilfs_segment_usage_dirty(su);
+        nilfs_segment_usage_set_clean(su);
+        kunmap_atomic(kaddr, KM_USER0);
+        nilfs_mdt_mark_buffer_dirty(su_bh);
 
-/**
- * nilfs_sufile_free -
- * @sufile:
- * @segnum:
- */
-int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
-{
-        return nilfs_sufile_freev(sufile, &segnum, 1);
+        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
+        nilfs_mdt_mark_dirty(sufile);
 }
 
 /**
@@ -500,72 +462,28 @@ int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
         return ret;
 }
 
-/**
- * nilfs_sufile_set_error - mark a segment as erroneous
- * @sufile: inode of segment usage file
- * @segnum: segment number
- *
- * Description: nilfs_sufile_set_error() marks the segment specified by
- * @segnum as erroneous. The error segment will never be used again.
- *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid segment usage number.
- */
-int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
+void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
+                               struct buffer_head *header_bh,
+                               struct buffer_head *su_bh)
 {
-        struct buffer_head *header_bh, *su_bh;
         struct nilfs_segment_usage *su;
-        struct nilfs_sufile_header *header;
         void *kaddr;
-        int ret;
-
-        if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
-                printk(KERN_WARNING "%s: invalid segment number: %llu\n",
-                       __func__, (unsigned long long)segnum);
-                return -EINVAL;
-        }
-        down_write(&NILFS_MDT(sufile)->mi_sem);
-
-        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
-        if (ret < 0)
-                goto out_sem;
-        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh);
-        if (ret < 0)
-                goto out_header;
+        int suclean;
 
         kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
         su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
         if (nilfs_segment_usage_error(su)) {
                 kunmap_atomic(kaddr, KM_USER0);
-                brelse(su_bh);
-                goto out_header;
+                return;
         }
-
+        suclean = nilfs_segment_usage_clean(su);
         nilfs_segment_usage_set_error(su);
         kunmap_atomic(kaddr, KM_USER0);
-        brelse(su_bh);
 
-        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-        header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
-        le64_add_cpu(&header->sh_ndirtysegs, -1);
-        kunmap_atomic(kaddr, KM_USER0);
-        nilfs_mdt_mark_buffer_dirty(header_bh);
+        if (suclean)
+                nilfs_sufile_mod_counter(header_bh, -1, 0);
         nilfs_mdt_mark_buffer_dirty(su_bh);
         nilfs_mdt_mark_dirty(sufile);
-        brelse(su_bh);
-
- out_header:
-        brelse(header_bh);
-
- out_sem:
-        up_write(&NILFS_MDT(sufile)->mi_sem);
-        return ret;
 }
 
 /**
@@ -625,7 +543,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum,
                         si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks);
                         si[i + j].sui_flags = le32_to_cpu(su->su_flags) &
                                 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
-                        if (nilfs_segment_is_active(nilfs, segnum + i + j))
+                        if (nilfs_segment_is_active(nilfs, segnum + j))
                                 si[i + j].sui_flags |=
                                         (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                 }
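
The shape of the refactoring above is worth spelling out: nilfs_sufile_update() now owns the boilerplate that nilfs_sufile_cancel_free(), nilfs_sufile_freev() and nilfs_sufile_set_error() each duplicated (the segnum range check, mi_sem locking, header and segment-usage block lookup, and brelse() cleanup), while each operation shrinks to a void do_* callback that runs with both buffers held. Since this diff is limited to sufile.c, the matching call sites are not shown; the following is only a sketch, assuming thin wrappers elsewhere bind each callback to nilfs_sufile_update() (the wrapper names and create flags are illustrative, not confirmed by this diff):

/* Hypothetical wrappers -- not part of the diff shown above. */
static inline int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum)
{
        return nilfs_sufile_update(sufile, segnum, 0,
                                   nilfs_sufile_do_cancel_free);
}

static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum)
{
        /* create = 1: allocate the segment-usage block if it is absent */
        return nilfs_sufile_update(sufile, segnum, 1, nilfs_sufile_do_scrap);
}

static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
{
        return nilfs_sufile_update(sufile, segnum, 0, nilfs_sufile_do_free);
}

static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
{
        return nilfs_sufile_update(sufile, segnum, 0,
                                   nilfs_sufile_do_set_error);
}

Under that reading, error handling stays centralized: nilfs_sufile_update() still returns -EINVAL for an out-of-range segment number and propagates block-lookup failures, so the callbacks themselves can remain void.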