author     Joern Engel <joern@logfs.org>   2010-03-28 06:47:09 -0400
committer  Joern Engel <joern@logfs.org>   2010-03-28 07:00:08 -0400
commit     81def6b9862764924a99ac1b680e73ac8c80ac64
tree       8f7a408a355172bfb53bf2e69981a154dde3b01a
parent     193219172691e29813821dc8433317768c6ed1a3
Simplify and fix pad_wbuf

A comment in the old code read:
        /* The math in this function can surely use some love */
And indeed it did.  In the case that area->a_used_bytes is exactly
4096 bytes below segment size it fell apart.  pad_wbuf is now split
into two helpers that are significantly less complicated.

Signed-off-by: Joern Engel <joern@logfs.org>
-rw-r--r--   fs/logfs/segment.c   52
1 file changed, 30 insertions(+), 22 deletions(-)
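For orientation, a minimal standalone sketch (not part of the patch) of the boundary
arithmetic the message describes.  The 4 KiB page size and 256 KiB segment size are
illustrative assumptions, not values taken from logfs.  With a_used_bytes exactly one
page short of the segment end, the old tail length degenerated to zero, and since the
removed final-writeout loop incremented index before memsetting, the segment's last
page apparently received no padding at all; the rewritten code handles the aligned
tail in pad_full_pages instead.

        #include <stdio.h>

        #define PAGE_SIZE 4096u

        int main(void)
        {
                /* Illustrative numbers only: 256 KiB segment, write position
                 * exactly one page below the segment end (page aligned). */
                unsigned int segsize    = 256u * 1024u;
                unsigned int used_bytes = segsize - PAGE_SIZE;

                unsigned int offset = used_bytes & (PAGE_SIZE - 1);     /* 0    */
                unsigned int len    = PAGE_SIZE - offset;               /* 4096 */

                /* Old pad_wbuf: a full-page tail was treated as no tail at all,
                 * and the removed loop bumped index before memsetting, so the
                 * segment's last page was never padded in this case. */
                unsigned int old_pad = (len == PAGE_SIZE) ? 0 : len;
                printf("old tail memset: %u bytes\n", old_pad);         /* 0 */

                /* New pad_partial_page: nothing to do when the tail is aligned... */
                if (len % PAGE_SIZE)
                        printf("pad_partial_page memsets %u bytes\n", len);

                /* ...pad_full_pages then memsets every remaining whole page. */
                unsigned int full_pages = (segsize - used_bytes) / PAGE_SIZE;
                printf("pad_full_pages memsets %u page(s)\n", full_pages); /* 1 */
                return 0;
        }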
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 1a14f9910d55..ff9d7f3b4d36 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -93,50 +93,58 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
         } while (len);
 }
 
-/*
- * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
- */
-static void pad_wbuf(struct logfs_area *area, int final)
+static void pad_partial_page(struct logfs_area *area)
 {
         struct super_block *sb = area->a_sb;
-        struct logfs_super *super = logfs_super(sb);
         struct page *page;
         u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
         pgoff_t index = ofs >> PAGE_SHIFT;
         long offset = ofs & (PAGE_SIZE-1);
         u32 len = PAGE_SIZE - offset;
 
-        if (len == PAGE_SIZE) {
-                /* The math in this function can surely use some love */
-                len = 0;
-        }
-        if (len) {
-                BUG_ON(area->a_used_bytes >= super->s_segsize);
-
-                page = get_mapping_page(area->a_sb, index, 0);
+        if (len % PAGE_SIZE) {
+                page = get_mapping_page(sb, index, 0);
                 BUG_ON(!page); /* FIXME: reserve a pool */
                 memset(page_address(page) + offset, 0xff, len);
                 SetPagePrivate(page);
                 page_cache_release(page);
         }
+}
 
-        if (!final)
-                return;
+static void pad_full_pages(struct logfs_area *area)
+{
+        struct super_block *sb = area->a_sb;
+        struct logfs_super *super = logfs_super(sb);
+        u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
+        u32 len = super->s_segsize - area->a_used_bytes;
+        pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
+        pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+        struct page *page;
 
-        area->a_used_bytes += len;
-        for ( ; area->a_used_bytes < super->s_segsize;
-                        area->a_used_bytes += PAGE_SIZE) {
-                /* Memset another page */
-                index++;
-                page = get_mapping_page(area->a_sb, index, 0);
+        while (no_indizes) {
+                page = get_mapping_page(sb, index, 0);
                 BUG_ON(!page); /* FIXME: reserve a pool */
-                memset(page_address(page), 0xff, PAGE_SIZE);
+                SetPageUptodate(page);
+                memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
                 SetPagePrivate(page);
                 page_cache_release(page);
+                index++;
+                no_indizes--;
         }
+}
 
 /*
+ * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
+ * Also make sure we allocate (and memset) all pages for final writeout.
+ */
+static void pad_wbuf(struct logfs_area *area, int final)
+{
+        pad_partial_page(area);
+        if (final)
+                pad_full_pages(area);
+}
+
+/*
  * We have to be careful with the alias tree. Since lookup is done by bix,
  * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
  * indirect blocks. So always use it through accessor functions.