Diffstat (limited to 'fs/nilfs2/segment.c')
-rw-r--r--  fs/nilfs2/segment.c | 146
1 file changed, 77 insertions(+), 69 deletions(-)
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 17584c524486..6a7dbd8451db 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -32,6 +32,7 @@
 #include <linux/kthread.h>
 #include <linux/crc32.h>
 #include <linux/pagevec.h>
+#include <linux/slab.h>
 #include "nilfs.h"
 #include "btnode.h"
 #include "page.h"
@@ -141,7 +142,7 @@ int nilfs_init_transaction_cache(void)
 }
 
 /**
- * nilfs_detroy_transaction_cache - destroy the cache for transaction info
+ * nilfs_destroy_transaction_cache - destroy the cache for transaction info
  *
  * nilfs_destroy_transaction_cache() frees the slab cache for the struct
  * nilfs_transaction_info.
@@ -201,7 +202,7 @@ static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
  * This function allocates a nilfs_transaction_info struct to keep context
  * information on it. It is initialized and hooked onto the current task in
  * the outermost call. If a pre-allocated struct is given to @ti, it is used
- * instead; othewise a new struct is assigned from a slab.
+ * instead; otherwise a new struct is assigned from a slab.
  *
  * When @vacancy_check flag is set, this function will check the amount of
  * free space, and will wait for the GC to reclaim disk space if low capacity.
@@ -1510,6 +1511,12 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
 		if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
 			break;
 
+		nilfs_clear_logs(&sci->sc_segbufs);
+
+		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+		if (unlikely(err))
+			return err;
+
 		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
 			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
 							sci->sc_freesegs,
@@ -1517,12 +1524,6 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
 							NULL);
 			WARN_ON(err); /* do not happen */
 		}
-		nilfs_clear_logs(&sci->sc_segbufs);
-
-		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
-		if (unlikely(err))
-			return err;
-
 		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
 		sci->sc_stage = prev_stage;
 	}
@@ -1764,14 +1765,9 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
 			       struct the_nilfs *nilfs)
 {
-	struct nilfs_segment_buffer *segbuf;
-	int ret = 0;
+	int ret;
 
-	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
-		ret = nilfs_segbuf_write(segbuf, nilfs);
-		if (ret)
-			break;
-	}
+	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
 	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
 	return ret;
 }
@@ -1902,8 +1898,7 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
 
 	list_splice_tail_init(&sci->sc_write_logs, &logs);
 	ret = nilfs_wait_on_logs(&logs);
-	if (ret)
-		nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret);
+	nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret ? : err);
 
 	list_splice_tail_init(&sci->sc_segbufs, &logs);
 	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
@@ -1937,8 +1932,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 {
 	struct nilfs_segment_buffer *segbuf;
 	struct page *bd_page = NULL, *fs_page = NULL;
-	struct nilfs_sb_info *sbi = sci->sc_sbi;
-	struct the_nilfs *nilfs = sbi->s_nilfs;
+	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
 	int update_sr = (sci->sc_super_root != NULL);
 
 	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
@@ -2020,7 +2014,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 	if (update_sr) {
 		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
 				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
-		sbi->s_super->s_dirt = 1;
+		set_nilfs_sb_dirty(nilfs);
 
 		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
 		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
@@ -2220,7 +2214,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 }
 
 /**
- * nilfs_secgtor_start_timer - set timer of background write
+ * nilfs_segctor_start_timer - set timer of background write
  * @sci: nilfs_sc_info
  *
  * If the timer has already been set, it ignores the new request.
@@ -2425,43 +2419,43 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
 	return err;
 }
 
-struct nilfs_segctor_req {
-	int mode;
-	__u32 seq_accepted;
-	int sc_err;	/* construction failure */
-	int sb_err;	/* super block writeback failure */
-};
-
 #define FLUSH_FILE_BIT	(0x1) /* data file only */
 #define FLUSH_DAT_BIT	(1 << NILFS_DAT_INO) /* DAT only */
 
-static void nilfs_segctor_accept(struct nilfs_sc_info *sci,
-				 struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_accept - record accepted sequence count of log-write requests
+ * @sci: segment constructor object
+ */
+static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
 {
-	req->sc_err = req->sb_err = 0;
 	spin_lock(&sci->sc_state_lock);
-	req->seq_accepted = sci->sc_seq_request;
+	sci->sc_seq_accepted = sci->sc_seq_request;
 	spin_unlock(&sci->sc_state_lock);
 
 	if (sci->sc_timer)
 		del_timer_sync(sci->sc_timer);
 }
 
-static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
-				 struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_notify - notify the result of request to caller threads
+ * @sci: segment constructor object
+ * @mode: mode of log forming
+ * @err: error code to be notified
+ */
+static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
 {
 	/* Clear requests (even when the construction failed) */
 	spin_lock(&sci->sc_state_lock);
 
-	if (req->mode == SC_LSEG_SR) {
+	if (mode == SC_LSEG_SR) {
 		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
-		sci->sc_seq_done = req->seq_accepted;
-		nilfs_segctor_wakeup(sci, req->sc_err ? : req->sb_err);
+		sci->sc_seq_done = sci->sc_seq_accepted;
+		nilfs_segctor_wakeup(sci, err);
 		sci->sc_flush_request = 0;
 	} else {
-		if (req->mode == SC_FLUSH_FILE)
+		if (mode == SC_FLUSH_FILE)
 			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
-		else if (req->mode == SC_FLUSH_DAT)
+		else if (mode == SC_FLUSH_DAT)
 			sci->sc_flush_request &= ~FLUSH_DAT_BIT;
 
 		/* re-enable timer if checkpoint creation was not done */
@@ -2472,30 +2466,37 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
 	spin_unlock(&sci->sc_state_lock);
 }
 
-static int nilfs_segctor_construct(struct nilfs_sc_info *sci,
-				   struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_construct - form logs and write them to disk
+ * @sci: segment constructor object
+ * @mode: mode of log forming
+ */
+static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
 {
 	struct nilfs_sb_info *sbi = sci->sc_sbi;
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 	int err = 0;
 
+	nilfs_segctor_accept(sci);
+
 	if (nilfs_discontinued(nilfs))
-		req->mode = SC_LSEG_SR;
-	if (!nilfs_segctor_confirm(sci)) {
-		err = nilfs_segctor_do_construct(sci, req->mode);
-		req->sc_err = err;
-	}
+		mode = SC_LSEG_SR;
+	if (!nilfs_segctor_confirm(sci))
+		err = nilfs_segctor_do_construct(sci, mode);
+
 	if (likely(!err)) {
-		if (req->mode != SC_FLUSH_DAT)
+		if (mode != SC_FLUSH_DAT)
 			atomic_set(&nilfs->ns_ndirtyblks, 0);
 		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
 		    nilfs_discontinued(nilfs)) {
 			down_write(&nilfs->ns_sem);
-			req->sb_err = nilfs_commit_super(sbi,
-					nilfs_altsb_need_update(nilfs));
+			err = nilfs_commit_super(
+				sbi, nilfs_altsb_need_update(nilfs));
 			up_write(&nilfs->ns_sem);
 		}
 	}
+
+	nilfs_segctor_notify(sci, mode, err);
 	return err;
 }
 
@@ -2526,7 +2527,6 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
 	struct nilfs_sc_info *sci = NILFS_SC(sbi);
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 	struct nilfs_transaction_info ti;
-	struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
 	int err;
 
 	if (unlikely(!sci))
@@ -2547,10 +2547,8 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
 	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
 
 	for (;;) {
-		nilfs_segctor_accept(sci, &req);
-		err = nilfs_segctor_construct(sci, &req);
+		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
 		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
-		nilfs_segctor_notify(sci, &req);
 
 		if (likely(!err))
 			break;
@@ -2560,6 +2558,16 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(sci->sc_interval);
 	}
+	if (nilfs_test_opt(sbi, DISCARD)) {
+		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
+						 sci->sc_nfreesegs);
+		if (ret) {
+			printk(KERN_WARNING
+			       "NILFS warning: error %d on discard request, "
+			       "turning discards off for the device\n", ret);
+			nilfs_clear_opt(sbi, DISCARD);
+		}
+	}
 
  out_unlock:
 	sci->sc_freesegs = NULL;
@@ -2573,13 +2581,9 @@ static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
 {
 	struct nilfs_sb_info *sbi = sci->sc_sbi;
 	struct nilfs_transaction_info ti;
-	struct nilfs_segctor_req req = { .mode = mode };
 
 	nilfs_transaction_lock(sbi, &ti, 0);
-
-	nilfs_segctor_accept(sci, &req);
-	nilfs_segctor_construct(sci, &req);
-	nilfs_segctor_notify(sci, &req);
+	nilfs_segctor_construct(sci, mode);
 
 	/*
 	 * Unclosed segment should be retried. We do this using sc_timer.
@@ -2635,6 +2639,7 @@ static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
 static int nilfs_segctor_thread(void *arg)
 {
 	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
+	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
 	struct timer_list timer;
 	int timeout = 0;
 
@@ -2680,7 +2685,6 @@ static int nilfs_segctor_thread(void *arg)
 	} else {
 		DEFINE_WAIT(wait);
 		int should_sleep = 1;
-		struct the_nilfs *nilfs;
 
 		prepare_to_wait(&sci->sc_wait_daemon, &wait,
 				TASK_INTERRUPTIBLE);
@@ -2701,8 +2705,8 @@ static int nilfs_segctor_thread(void *arg)
 		finish_wait(&sci->sc_wait_daemon, &wait);
 		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
 			   time_after_eq(jiffies, sci->sc_timer->expires));
-		nilfs = sci->sc_sbi->s_nilfs;
-		if (sci->sc_super->s_dirt && nilfs_sb_need_update(nilfs))
+
+		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
 			set_nilfs_discontinued(nilfs);
 	}
 	goto loop;
@@ -2797,12 +2801,9 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
 	do {
 		struct nilfs_sb_info *sbi = sci->sc_sbi;
 		struct nilfs_transaction_info ti;
-		struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
 
 		nilfs_transaction_lock(sbi, &ti, 0);
-		nilfs_segctor_accept(sci, &req);
-		ret = nilfs_segctor_construct(sci, &req);
-		nilfs_segctor_notify(sci, &req);
+		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
 		nilfs_transaction_unlock(sbi);
 
 	} while (ret && retrycount-- > 0);
@@ -2829,7 +2830,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
 	       || sci->sc_seq_request != sci->sc_seq_done);
 	spin_unlock(&sci->sc_state_lock);
 
-	if (flag || nilfs_segctor_confirm(sci))
+	if (flag || !nilfs_segctor_confirm(sci))
 		nilfs_segctor_write_out(sci);
 
 	WARN_ON(!list_empty(&sci->sc_copied_buffers));
@@ -2853,7 +2854,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
  * @sbi: nilfs_sb_info
  *
  * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
- * initilizes it, and starts the segment constructor.
+ * initializes it, and starts the segment constructor.
  *
  * Return Value: On success, 0 is returned. On error, one of the following
  * negative error code is returned.
@@ -2865,8 +2866,15 @@ int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi)
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 	int err;
 
-	/* Each field of nilfs_segctor is cleared through the initialization
-	   of super-block info */
+	if (NILFS_SC(sbi)) {
+		/*
+		 * This happens if the filesystem was remounted
+		 * read/write after nilfs_error degenerated it into a
+		 * read-only mount.
+		 */
+		nilfs_detach_segment_constructor(sbi);
+	}
+
 	sbi->s_sc_info = nilfs_segctor_new(sbi);
 	if (!sbi->s_sc_info)
 		return -ENOMEM;
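
Note on the refactoring shown above: the on-stack struct nilfs_segctor_req is removed and the accept/notify steps are folded into nilfs_segctor_construct(), which now takes the mode directly. A minimal before/after sketch of the caller pattern, assembled from the hunks above for illustration only (it is not part of the patch text itself):

	/* before: the caller drove a request object through three calls */
	struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };

	nilfs_segctor_accept(sci, &req);
	err = nilfs_segctor_construct(sci, &req);
	nilfs_segctor_notify(sci, &req);

	/* after: accept and notify happen inside the single call */
	err = nilfs_segctor_construct(sci, SC_LSEG_SR);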