Diffstat (limited to 'fs/nilfs2/segbuf.c')
-rw-r--r--  fs/nilfs2/segbuf.c | 185
1 file changed, 131 insertions(+), 54 deletions(-)
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index e6d9e37fa241..645c78656aa0 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -24,10 +24,22 @@
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/crc32.h>
+#include <linux/backing-dev.h>
 #include "page.h"
 #include "segbuf.h"
 
 
+struct nilfs_write_info {
+	struct the_nilfs *nilfs;
+	struct bio *bio;
+	int start, end; /* The region to be submitted */
+	int rest_blocks;
+	int max_pages;
+	int nr_vecs;
+	sector_t blocknr;
+};
+
+
 static struct kmem_cache *nilfs_segbuf_cachep;
 
 static void nilfs_segbuf_init_once(void *obj)
@@ -63,6 +75,11 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
 	INIT_LIST_HEAD(&segbuf->sb_list);
 	INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
 	INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
+
+	init_completion(&segbuf->sb_bio_event);
+	atomic_set(&segbuf->sb_err, 0);
+	segbuf->sb_nbio = 0;
+
 	return segbuf;
 }
 
@@ -83,6 +100,22 @@ void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
 		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
 }
 
+/**
+ * nilfs_segbuf_map_cont - map a new log behind a given log
+ * @segbuf: new segment buffer
+ * @prev: segment buffer containing a log to be continued
+ */
+void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
+			   struct nilfs_segment_buffer *prev)
+{
+	segbuf->sb_segnum = prev->sb_segnum;
+	segbuf->sb_fseg_start = prev->sb_fseg_start;
+	segbuf->sb_fseg_end = prev->sb_fseg_end;
+	segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
+	segbuf->sb_rest_blocks =
+		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
+}
+
 void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
 				  __u64 nextnum, struct the_nilfs *nilfs)
 {
@@ -132,8 +165,6 @@ int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
 	segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
 	segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
 	segbuf->sb_sum.ctime = ctime;
-
-	segbuf->sb_io_error = 0;
 	return 0;
 }
 
@@ -219,7 +250,7 @@ void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
 	raw_sum->ss_datasum = cpu_to_le32(crc);
 }
 
-void nilfs_release_buffers(struct list_head *list)
+static void nilfs_release_buffers(struct list_head *list)
 {
 	struct buffer_head *bh, *n;
 
@@ -241,13 +272,56 @@ void nilfs_release_buffers(struct list_head *list)
 	}
 }
 
+static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
+{
+	nilfs_release_buffers(&segbuf->sb_segsum_buffers);
+	nilfs_release_buffers(&segbuf->sb_payload_buffers);
+}
+
+/*
+ * Iterators for segment buffers
+ */
+void nilfs_clear_logs(struct list_head *logs)
+{
+	struct nilfs_segment_buffer *segbuf;
+
+	list_for_each_entry(segbuf, logs, sb_list)
+		nilfs_segbuf_clear(segbuf);
+}
+
+void nilfs_truncate_logs(struct list_head *logs,
+			 struct nilfs_segment_buffer *last)
+{
+	struct nilfs_segment_buffer *n, *segbuf;
+
+	segbuf = list_prepare_entry(last, logs, sb_list);
+	list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
+		list_del_init(&segbuf->sb_list);
+		nilfs_segbuf_clear(segbuf);
+		nilfs_segbuf_free(segbuf);
+	}
+}
+
+int nilfs_wait_on_logs(struct list_head *logs)
+{
+	struct nilfs_segment_buffer *segbuf;
+	int err;
+
+	list_for_each_entry(segbuf, logs, sb_list) {
+		err = nilfs_segbuf_wait(segbuf);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 /*
  * BIO operations
  */
 static void nilfs_end_bio_write(struct bio *bio, int err)
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct nilfs_write_info *wi = bio->bi_private;
+	struct nilfs_segment_buffer *segbuf = bio->bi_private;
 
 	if (err == -EOPNOTSUPP) {
 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
@@ -256,21 +330,22 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
 	}
 
 	if (!uptodate)
-		atomic_inc(&wi->err);
+		atomic_inc(&segbuf->sb_err);
 
 	bio_put(bio);
-	complete(&wi->bio_event);
+	complete(&segbuf->sb_bio_event);
 }
 
-static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
+static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
+				   struct nilfs_write_info *wi, int mode)
 {
 	struct bio *bio = wi->bio;
 	int err;
 
-	if (wi->nbio > 0 && bdi_write_congested(wi->bdi)) {
-		wait_for_completion(&wi->bio_event);
-		wi->nbio--;
-		if (unlikely(atomic_read(&wi->err))) {
+	if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->nilfs->ns_bdi)) {
+		wait_for_completion(&segbuf->sb_bio_event);
+		segbuf->sb_nbio--;
+		if (unlikely(atomic_read(&segbuf->sb_err))) {
 			bio_put(bio);
 			err = -EIO;
 			goto failed;
@@ -278,7 +353,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
 	}
 
 	bio->bi_end_io = nilfs_end_bio_write;
-	bio->bi_private = wi;
+	bio->bi_private = segbuf;
 	bio_get(bio);
 	submit_bio(mode, bio);
 	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
@@ -286,7 +361,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
 		err = -EOPNOTSUPP;
 		goto failed;
 	}
-	wi->nbio++;
+	segbuf->sb_nbio++;
 	bio_put(bio);
 
 	wi->bio = NULL;
@@ -301,17 +376,15 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
 }
 
 /**
- * nilfs_alloc_seg_bio - allocate a bio for writing segment.
- * @sb: super block
- * @start: beginning disk block number of this BIO.
+ * nilfs_alloc_seg_bio - allocate a new bio for writing log
+ * @nilfs: nilfs object
+ * @start: start block number of the bio
  * @nr_vecs: request size of page vector.
  *
- * alloc_seg_bio() allocates a new BIO structure and initialize it.
- *
  * Return Value: On success, pointer to the struct bio is returned.
  * On error, NULL is returned.
  */
-static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
+static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
 				       int nr_vecs)
 {
 	struct bio *bio;
@@ -322,36 +395,33 @@ static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
 		bio = bio_alloc(GFP_NOIO, nr_vecs);
 	}
 	if (likely(bio)) {
-		bio->bi_bdev = sb->s_bdev;
-		bio->bi_sector = (sector_t)start << (sb->s_blocksize_bits - 9);
+		bio->bi_bdev = nilfs->ns_bdev;
+		bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
 	}
 	return bio;
 }
 
-void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
-				struct nilfs_write_info *wi)
+static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
+					struct nilfs_write_info *wi)
 {
 	wi->bio = NULL;
 	wi->rest_blocks = segbuf->sb_sum.nblocks;
-	wi->max_pages = bio_get_nr_vecs(wi->sb->s_bdev);
+	wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
 	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
 	wi->start = wi->end = 0;
-	wi->nbio = 0;
 	wi->blocknr = segbuf->sb_pseg_start;
-
-	atomic_set(&wi->err, 0);
-	init_completion(&wi->bio_event);
 }
 
-static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
-			   int mode)
+static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
+				  struct nilfs_write_info *wi,
+				  struct buffer_head *bh, int mode)
 {
 	int len, err;
 
 	BUG_ON(wi->nr_vecs <= 0);
  repeat:
 	if (!wi->bio) {
-		wi->bio = nilfs_alloc_seg_bio(wi->sb, wi->blocknr + wi->end,
+		wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
 					      wi->nr_vecs);
 		if (unlikely(!wi->bio))
 			return -ENOMEM;
@@ -363,76 +433,83 @@ static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
 		return 0;
 	}
 	/* bio is FULL */
-	err = nilfs_submit_seg_bio(wi, mode);
+	err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
 	/* never submit current bh */
 	if (likely(!err))
 		goto repeat;
 	return err;
 }
 
+/**
+ * nilfs_segbuf_write - submit write requests of a log
+ * @segbuf: buffer storing a log to be written
+ * @nilfs: nilfs object
+ *
+ * Return Value: On Success, 0 is returned. On Error, one of the following
+ * negative error code is returned.
+ *
+ * %-EIO - I/O error
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
 int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
-		       struct nilfs_write_info *wi)
+		       struct the_nilfs *nilfs)
 {
+	struct nilfs_write_info wi;
 	struct buffer_head *bh;
-	int res, rw = WRITE;
+	int res = 0, rw = WRITE;
+
+	wi.nilfs = nilfs;
+	nilfs_segbuf_prepare_write(segbuf, &wi);
 
 	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
-		res = nilfs_submit_bh(wi, bh, rw);
+		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
 		if (unlikely(res))
 			goto failed_bio;
 	}
 
 	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
-		res = nilfs_submit_bh(wi, bh, rw);
+		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
 		if (unlikely(res))
 			goto failed_bio;
 	}
 
-	if (wi->bio) {
+	if (wi.bio) {
 		/*
 		 * Last BIO is always sent through the following
 		 * submission.
 		 */
 		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
-		res = nilfs_submit_seg_bio(wi, rw);
-		if (unlikely(res))
-			goto failed_bio;
+		res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
 	}
 
-	res = 0;
- out:
-	return res;
-
  failed_bio:
-	atomic_inc(&wi->err);
-	goto out;
+	return res;
 }
 
 /**
  * nilfs_segbuf_wait - wait for completion of requested BIOs
- * @wi: nilfs_write_info
+ * @segbuf: segment buffer
  *
  * Return Value: On Success, 0 is returned. On Error, one of the following
  * negative error code is returned.
  *
  * %-EIO - I/O error
  */
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf,
-		      struct nilfs_write_info *wi)
+int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
 {
 	int err = 0;
 
-	if (!wi->nbio)
+	if (!segbuf->sb_nbio)
 		return 0;
 
 	do {
-		wait_for_completion(&wi->bio_event);
-	} while (--wi->nbio > 0);
+		wait_for_completion(&segbuf->sb_bio_event);
+	} while (--segbuf->sb_nbio > 0);
 
-	if (unlikely(atomic_read(&wi->err) > 0)) {
+	if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
 		printk(KERN_ERR "NILFS: IO error writing segment\n");
 		err = -EIO;
-		segbuf->sb_io_error = 1;
 	}
 	return err;
 }
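
The patch moves per-I/O state (sb_nbio, sb_err, sb_bio_event) out of the caller-owned nilfs_write_info and into the segment buffer, so a caller can submit a log with only a the_nilfs pointer and wait for completion later, per buffer or per list. The sketch below is illustrative only and not part of the patch: the helper name example_write_logs() is hypothetical (the real submission loop lives in fs/nilfs2/segment.c), and it assumes the declarations exported by "segbuf.h".

/*
 * Illustrative sketch (not part of the patch): submit every log on a
 * list with the reworked nilfs_segbuf_write() signature, then wait for
 * all submitted BIOs via nilfs_wait_on_logs().
 */
#include <linux/list.h>
#include "segbuf.h"

static int example_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf;
	int err = 0, ret;

	/* Completion state now lives in each segment buffer, so no
	 * caller-owned nilfs_write_info is needed at this level. */
	list_for_each_entry(segbuf, logs, sb_list) {
		err = nilfs_segbuf_write(segbuf, nilfs);
		if (unlikely(err))
			break;
	}

	/* BIOs already in flight must complete even after a submission
	 * failure; nilfs_wait_on_logs() waits on each buffer in turn. */
	ret = nilfs_wait_on_logs(logs);
	return err ? err : ret;
}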