Diffstat (limited to 'fs/logfs')
-rw-r--r--  fs/logfs/dev_bdev.c   | 10
-rw-r--r--  fs/logfs/dir.c        |  6
-rw-r--r--  fs/logfs/gc.c         |  1
-rw-r--r--  fs/logfs/inode.c      |  1
-rw-r--r--  fs/logfs/journal.c    |  8
-rw-r--r--  fs/logfs/logfs.h      |  1
-rw-r--r--  fs/logfs/readwrite.c  | 14
-rw-r--r--  fs/logfs/segment.c    | 55
-rw-r--r--  fs/logfs/super.c      | 16
9 files changed, 75 insertions, 37 deletions
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 9718c22f186d..243c00071f76 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -9,6 +9,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
+#include <linux/gfp.h>
 
 #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
 
@@ -80,6 +81,7 @@ static void writeseg_end_io(struct bio *bio, int err)
 			prefetchw(&bvec->bv_page->flags);
 
 		end_page_writeback(page);
+		page_cache_release(page);
 	} while (bvec >= bio->bi_io_vec);
 	bio_put(bio);
 	if (atomic_dec_and_test(&super->s_pending_writes))
@@ -97,8 +99,10 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
 	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
 	int i;
 
+	if (max_pages > BIO_MAX_PAGES)
+		max_pages = BIO_MAX_PAGES;
 	bio = bio_alloc(GFP_NOFS, max_pages);
-	BUG_ON(!bio); /* FIXME: handle this */
+	BUG_ON(!bio);
 
 	for (i = 0; i < nr_pages; i++) {
 		if (i >= max_pages) {
@@ -191,8 +195,10 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
 	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
 	int i;
 
+	if (max_pages > BIO_MAX_PAGES)
+		max_pages = BIO_MAX_PAGES;
 	bio = bio_alloc(GFP_NOFS, max_pages);
-	BUG_ON(!bio); /* FIXME: handle this */
+	BUG_ON(!bio);
 
 	for (i = 0; i < nr_pages; i++) {
 		if (i >= max_pages) {
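
Note: the clamp added in both hunks above guards against asking bio_alloc() for more iovecs than a single bio can carry. A minimal sketch of the same pattern, with a hypothetical helper name, assuming the pre-blk-mq interfaces this code uses (queue_max_hw_sectors(), BIO_MAX_PAGES, the two-argument bio_alloc()):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper (not part of logfs): size a bio to the queue limit,
 * but never larger than BIO_MAX_PAGES, since bio_alloc() cannot provide
 * more iovecs than that. */
static struct bio *alloc_seg_bio(struct request_queue *q)
{
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	return bio_alloc(GFP_NOFS, max_pages);
}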
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 56a8bfbb0120..2396a85c0f55 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -6,7 +6,7 @@
  * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
  */
 #include "logfs.h"
-
+#include <linux/slab.h>
 
 /*
  * Atomic dir operations
@@ -303,12 +303,12 @@ static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
 				(filler_t *)logfs_readpage, NULL);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
-		dd = kmap_atomic(page, KM_USER0);
+		dd = kmap(page);
 		BUG_ON(dd->namelen == 0);
 
 		full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
 				pos, be64_to_cpu(dd->ino), dd->type);
-		kunmap_atomic(dd, KM_USER0);
+		kunmap(page);
 		page_cache_release(page);
 		if (full)
 			break;
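
Note: the kmap_atomic() to kmap() switch above matters because filldir() copies the directory entry to userspace and may fault and sleep, which is not permitted while an atomic kmap is held. A minimal sketch of the non-atomic mapping pattern, with hypothetical names, assuming a highmem page whose contents must stay mapped across a call that can sleep:

#include <linux/highmem.h>
#include <linux/mm_types.h>

/* Hypothetical example (not the logfs code): kmap() keeps the page mapped
 * across process_record(), which is allowed to sleep; kmap_atomic() would
 * forbid that. */
static int handle_record(struct page *page, int (*process_record)(void *))
{
	void *addr = kmap(page);
	int err;

	err = process_record(addr);	/* may fault or sleep */
	kunmap(page);
	return err;
}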
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c
index 92949f95a901..84e36f52fe95 100644
--- a/fs/logfs/gc.c
+++ b/fs/logfs/gc.c
@@ -7,6 +7,7 @@
  */
 #include "logfs.h"
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 /*
  * Wear leveling needs to kick in when the difference between low erase
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index 33ec1aeaeec4..14ed27274da2 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -6,6 +6,7 @@
  * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
  */
 #include "logfs.h"
+#include <linux/slab.h>
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 6ad30a4c9052..33bd260b8309 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -6,6 +6,7 @@
  * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
  */
 #include "logfs.h"
+#include <linux/slab.h>
 
 static void logfs_calc_free(struct super_block *sb)
 {
@@ -800,6 +801,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
 {
 	struct logfs_super *super = logfs_super(sb);
 	struct logfs_area *area = super->s_journal_area;
+	struct btree_head32 *head = &super->s_reserved_segments;
 	u32 segno, ec;
 	int i, err;
 
@@ -807,6 +809,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
 	/* Drop old segments */
 	journal_for_each(i)
 		if (super->s_journal_seg[i]) {
+			btree_remove32(head, super->s_journal_seg[i]);
 			logfs_set_segment_unreserved(sb,
 					super->s_journal_seg[i],
 					super->s_journal_ec[i]);
@@ -819,8 +822,13 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
 		super->s_journal_seg[i] = segno;
 		super->s_journal_ec[i] = ec;
 		logfs_set_segment_reserved(sb, segno);
+		err = btree_insert32(head, segno, (void *)1, GFP_KERNEL);
+		BUG_ON(err); /* mempool should prevent this */
+		err = logfs_erase_segment(sb, segno, 1);
+		BUG_ON(err); /* FIXME: remount-ro would be nicer */
 	}
 	/* Manually move journal_area */
+	freeseg(sb, area->a_segno);
 	area->a_segno = super->s_journal_seg[0];
 	area->a_is_open = 0;
 	area->a_used_bytes = 0;
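
Note: the journal pass above records in-use journal segments in super->s_reserved_segments, a btree keyed by segment number, inserting a segment when it is reserved and removing it when the old segment is dropped. A minimal sketch of that bookkeeping, assuming lib/btree's 32-bit wrappers and hypothetical helper names:

#include <linux/btree.h>
#include <linux/gfp.h>

/* Hypothetical helpers mirroring the reservation bookkeeping above: the
 * stored value is only a presence marker, so (void *)1 is sufficient. */
static int reserve_journal_segment(struct btree_head32 *head, u32 segno)
{
	return btree_insert32(head, segno, (void *)1, GFP_KERNEL);
}

static void drop_journal_segment(struct btree_head32 *head, u32 segno)
{
	btree_remove32(head, segno);
}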
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 129779431373..b84b0eec6024 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -587,6 +587,7 @@ void move_page_to_btree(struct page *page);
 int logfs_init_mapping(struct super_block *sb);
 void logfs_sync_area(struct logfs_area *area);
 void logfs_sync_segments(struct super_block *sb);
+void freeseg(struct super_block *sb, u32 segno);
 
 /* area handling */
 int logfs_init_areas(struct super_block *sb);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 7a23b3e7c0a7..bff40253dfb2 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -18,6 +18,7 @@
  */
 #include "logfs.h"
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 static u64 adjust_bix(u64 bix, level_t level)
 {
@@ -1594,7 +1595,6 @@ int logfs_delete(struct inode *inode, pgoff_t index,
 	return ret;
 }
 
-/* Rewrite cannot mark the inode dirty but has to write it immediatly. */
 int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
 		gc_level_t gc_level, long flags)
 {
@@ -1611,6 +1611,18 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
 		if (level != 0)
 			alloc_indirect_block(inode, page, 0);
 		err = logfs_write_buf(inode, page, flags);
+		if (!err && shrink_level(gc_level) == 0) {
+			/* Rewrite cannot mark the inode dirty but has to
+			 * write it immediatly.
+			 * Q: Can't we just create an alias for the inode
+			 * instead?  And if not, why not?
+			 */
+			if (inode->i_ino == LOGFS_INO_MASTER)
+				logfs_write_anchor(inode->i_sb);
+			else {
+				err = __logfs_write_inode(inode, flags);
+			}
+		}
 	}
 	logfs_put_write_page(page);
 	return err;
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 1a14f9910d55..801a3a141625 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -10,6 +10,7 @@
  * three kinds of objects: inodes, dentries and blocks, both data and indirect.
  */
 #include "logfs.h"
+#include <linux/slab.h>
 
 static int logfs_mark_segment_bad(struct super_block *sb, u32 segno)
 {
@@ -93,50 +94,58 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
 	} while (len);
 }
 
-/*
- * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
- */
-static void pad_wbuf(struct logfs_area *area, int final)
+static void pad_partial_page(struct logfs_area *area)
 {
 	struct super_block *sb = area->a_sb;
-	struct logfs_super *super = logfs_super(sb);
 	struct page *page;
 	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
 	pgoff_t index = ofs >> PAGE_SHIFT;
 	long offset = ofs & (PAGE_SIZE-1);
 	u32 len = PAGE_SIZE - offset;
 
-	if (len == PAGE_SIZE) {
-		/* The math in this function can surely use some love */
-		len = 0;
-	}
-	if (len) {
-		BUG_ON(area->a_used_bytes >= super->s_segsize);
-
-		page = get_mapping_page(area->a_sb, index, 0);
+	if (len % PAGE_SIZE) {
+		page = get_mapping_page(sb, index, 0);
 		BUG_ON(!page); /* FIXME: reserve a pool */
 		memset(page_address(page) + offset, 0xff, len);
 		SetPagePrivate(page);
 		page_cache_release(page);
 	}
+}
 
-	if (!final)
-		return;
+static void pad_full_pages(struct logfs_area *area)
+{
+	struct super_block *sb = area->a_sb;
+	struct logfs_super *super = logfs_super(sb);
+	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
+	u32 len = super->s_segsize - area->a_used_bytes;
+	pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
+	pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+	struct page *page;
 
-	area->a_used_bytes += len;
-	for ( ; area->a_used_bytes < super->s_segsize;
-			area->a_used_bytes += PAGE_SIZE) {
-		/* Memset another page */
-		index++;
-		page = get_mapping_page(area->a_sb, index, 0);
+	while (no_indizes) {
+		page = get_mapping_page(sb, index, 0);
 		BUG_ON(!page); /* FIXME: reserve a pool */
-		memset(page_address(page), 0xff, PAGE_SIZE);
+		SetPageUptodate(page);
+		memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
 		SetPagePrivate(page);
 		page_cache_release(page);
+		index++;
+		no_indizes--;
 	}
 }
 
 /*
+ * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
+ * Also make sure we allocate (and memset) all pages for final writeout.
+ */
+static void pad_wbuf(struct logfs_area *area, int final)
+{
+	pad_partial_page(area);
+	if (final)
+		pad_full_pages(area);
+}
+
+/*
  * We have to be careful with the alias tree. Since lookup is done by bix,
  * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
  * indirect blocks. So always use it through accessor functions.
@@ -683,7 +692,7 @@ int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow)
 	return 0;
 }
 
-static void freeseg(struct super_block *sb, u32 segno)
+void freeseg(struct super_block *sb, u32 segno)
 {
 	struct logfs_super *super = logfs_super(sb);
 	struct address_space *mapping = super->s_mapping_inode->i_mapping;
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index c66beab78dee..b60bfac3263c 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -11,6 +11,7 @@
  */
 #include "logfs.h"
 #include <linux/bio.h>
+#include <linux/slab.h>
 #include <linux/mtd/mtd.h>
 #include <linux/statfs.h>
 #include <linux/buffer_head.h>
@@ -277,7 +278,7 @@ static int logfs_recover_sb(struct super_block *sb)
 	}
 	if (valid0 && valid1 && ds_cmp(ds0, ds1)) {
 		printk(KERN_INFO"Superblocks don't match - fixing.\n");
-		return write_one_sb(sb, super->s_devops->find_last_sb);
+		return logfs_write_sb(sb);
 	}
 	/* If neither is valid now, something's wrong.  Didn't we properly
 	 * check them before?!? */
@@ -289,6 +290,10 @@ static int logfs_make_writeable(struct super_block *sb)
 {
 	int err;
 
+	err = logfs_open_segfile(sb);
+	if (err)
+		return err;
+
 	/* Repair any broken superblock copies */
 	err = logfs_recover_sb(sb);
 	if (err)
@@ -299,10 +304,6 @@ static int logfs_make_writeable(struct super_block *sb)
 	if (err)
 		return err;
 
-	err = logfs_open_segfile(sb);
-	if (err)
-		return err;
-
 	/* Do one GC pass before any data gets dirtied */
 	logfs_gc_pass(sb);
 
@@ -328,7 +329,7 @@ static int logfs_get_sb_final(struct super_block *sb, struct vfsmount *mnt)
 
 	sb->s_root = d_alloc_root(rootdir);
 	if (!sb->s_root)
-		goto fail;
+		goto fail2;
 
 	super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
 	if (!super->s_erase_page)
@@ -572,8 +573,7 @@ int logfs_get_sb_device(struct file_system_type *type, int flags,
 	return 0;
 
 err1:
-	up_write(&sb->s_umount);
-	deactivate_super(sb);
+	deactivate_locked_super(sb);
 	return err;
 err0:
 	kfree(super);
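
Note: the error-path change above replaces the open-coded up_write(&sb->s_umount); deactivate_super(sb); pair with deactivate_locked_super(), which drops the active superblock reference and releases s_umount in one call. A minimal sketch of the idiom, with a hypothetical function name, assuming the caller still holds s_umount for write as sget() left it:

#include <linux/fs.h>

/* Hypothetical mount-failure path: sb was obtained via sget() and is still
 * locked, so deactivate_locked_super() both unlocks and tears it down. */
static int fail_mount(struct super_block *sb, int err)
{
	deactivate_locked_super(sb);
	return err;
}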