Diffstat (limited to 'fs/logfs/journal.c')
-rw-r--r-- | fs/logfs/journal.c | 879
1 file changed, 879 insertions, 0 deletions
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
new file mode 100644
index 000000000000..7a023dbba9f8
--- /dev/null
+++ b/fs/logfs/journal.c
@@ -0,0 +1,879 @@
1 | /* | ||
2 | * fs/logfs/journal.c - journal handling code | ||
3 | * | ||
4 | * As should be obvious for Linux kernel code, license is GPLv2 | ||
5 | * | ||
6 | * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org> | ||
7 | */ | ||
8 | #include "logfs.h" | ||
9 | |||
10 | static void logfs_calc_free(struct super_block *sb) | ||
11 | { | ||
12 | struct logfs_super *super = logfs_super(sb); | ||
13 | u64 reserve, no_segs = super->s_no_segs; | ||
14 | s64 free; | ||
15 | int i; | ||
16 | |||
17 | /* superblock segments */ | ||
18 | no_segs -= 2; | ||
19 | super->s_no_journal_segs = 0; | ||
20 | /* journal */ | ||
21 | journal_for_each(i) | ||
22 | if (super->s_journal_seg[i]) { | ||
23 | no_segs--; | ||
24 | super->s_no_journal_segs++; | ||
25 | } | ||
26 | |||
27 | /* open segments plus one extra per level for GC */ | ||
28 | no_segs -= 2 * super->s_total_levels; | ||
29 | |||
30 | free = no_segs * (super->s_segsize - LOGFS_SEGMENT_RESERVE); | ||
31 | free -= super->s_used_bytes; | ||
32 | /* just a bit extra */ | ||
33 | free -= super->s_total_levels * 4096; | ||
34 | |||
35 | /* Bad blocks are 'paid' for with speed reserve - the filesystem | ||
36 | * simply gets slower as bad blocks accumulate. Once the bad blocks | ||
37 | * exceed the speed reserve, the filesystem gets smaller. | ||
38 | */ | ||
39 | reserve = super->s_bad_segments + super->s_bad_seg_reserve; | ||
40 | reserve *= super->s_segsize - LOGFS_SEGMENT_RESERVE; | ||
41 | reserve = max(reserve, super->s_speed_reserve); | ||
42 | free -= reserve; | ||
43 | if (free < 0) | ||
44 | free = 0; | ||
45 | |||
46 | super->s_free_bytes = free; | ||
47 | } | ||
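/* A rough worked example with hypothetical numbers: on a device with
 * s_no_segs = 1024, a 128 KiB segment size, 4 journal segments and 10 total
 * levels, the usable segment count is 1024 - 2 - 4 - 2*10 = 998.  Free space
 * then starts from 998 * (s_segsize - LOGFS_SEGMENT_RESERVE), minus
 * s_used_bytes, minus 10 * 4096 bytes of slack, minus the larger of the
 * bad-segment reserve and s_speed_reserve, and is clamped at zero.
 */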
48 | |||
49 | static void reserve_sb_and_journal(struct super_block *sb) | ||
50 | { | ||
51 | struct logfs_super *super = logfs_super(sb); | ||
52 | struct btree_head32 *head = &super->s_reserved_segments; | ||
53 | int i, err; | ||
54 | |||
55 | err = btree_insert32(head, seg_no(sb, super->s_sb_ofs[0]), (void *)1, | ||
56 | GFP_KERNEL); | ||
57 | BUG_ON(err); | ||
58 | |||
59 | err = btree_insert32(head, seg_no(sb, super->s_sb_ofs[1]), (void *)1, | ||
60 | GFP_KERNEL); | ||
61 | BUG_ON(err); | ||
62 | |||
63 | journal_for_each(i) { | ||
64 | if (!super->s_journal_seg[i]) | ||
65 | continue; | ||
66 | err = btree_insert32(head, super->s_journal_seg[i], (void *)1, | ||
67 | GFP_KERNEL); | ||
68 | BUG_ON(err); | ||
69 | } | ||
70 | } | ||
71 | |||
72 | static void read_dynsb(struct super_block *sb, | ||
73 | struct logfs_je_dynsb *dynsb) | ||
74 | { | ||
75 | struct logfs_super *super = logfs_super(sb); | ||
76 | |||
77 | super->s_gec = be64_to_cpu(dynsb->ds_gec); | ||
78 | super->s_sweeper = be64_to_cpu(dynsb->ds_sweeper); | ||
79 | super->s_victim_ino = be64_to_cpu(dynsb->ds_victim_ino); | ||
80 | super->s_rename_dir = be64_to_cpu(dynsb->ds_rename_dir); | ||
81 | super->s_rename_pos = be64_to_cpu(dynsb->ds_rename_pos); | ||
82 | super->s_used_bytes = be64_to_cpu(dynsb->ds_used_bytes); | ||
83 | super->s_generation = be32_to_cpu(dynsb->ds_generation); | ||
84 | } | ||
85 | |||
86 | static void read_anchor(struct super_block *sb, | ||
87 | struct logfs_je_anchor *da) | ||
88 | { | ||
89 | struct logfs_super *super = logfs_super(sb); | ||
90 | struct inode *inode = super->s_master_inode; | ||
91 | struct logfs_inode *li = logfs_inode(inode); | ||
92 | int i; | ||
93 | |||
94 | super->s_last_ino = be64_to_cpu(da->da_last_ino); | ||
95 | li->li_flags = 0; | ||
96 | li->li_height = da->da_height; | ||
97 | i_size_write(inode, be64_to_cpu(da->da_size)); | ||
98 | li->li_used_bytes = be64_to_cpu(da->da_used_bytes); | ||
99 | |||
100 | for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++) | ||
101 | li->li_data[i] = be64_to_cpu(da->da_data[i]); | ||
102 | } | ||
103 | |||
104 | static void read_erasecount(struct super_block *sb, | ||
105 | struct logfs_je_journal_ec *ec) | ||
106 | { | ||
107 | struct logfs_super *super = logfs_super(sb); | ||
108 | int i; | ||
109 | |||
110 | journal_for_each(i) | ||
111 | super->s_journal_ec[i] = be32_to_cpu(ec->ec[i]); | ||
112 | } | ||
113 | |||
114 | static int read_area(struct super_block *sb, struct logfs_je_area *a) | ||
115 | { | ||
116 | struct logfs_super *super = logfs_super(sb); | ||
117 | struct logfs_area *area; | ||
118 | u64 ofs; | ||
119 | u32 writemask = ~(super->s_writesize - 1); | ||
120 | |||
121 | if (a->gc_level >= LOGFS_NO_AREAS) | ||
122 | return -EIO; | ||
123 | if (a->vim != VIM_DEFAULT) | ||
124 | return -EIO; /* TODO: close area and continue */ | ||
125 | area = super->s_area[a->gc_level]; | ||
126 | area->a_used_bytes = be32_to_cpu(a->used_bytes); | ||
127 | area->a_written_bytes = area->a_used_bytes & writemask; | ||
128 | area->a_segno = be32_to_cpu(a->segno); | ||
129 | if (area->a_segno) | ||
130 | area->a_is_open = 1; | ||
131 | |||
132 | ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes); | ||
133 | if (super->s_writesize > 1) | ||
134 | logfs_buf_recover(area, ofs, a + 1, super->s_writesize); | ||
135 | else | ||
136 | logfs_buf_recover(area, ofs, NULL, 0); | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static void *unpack(void *from, void *to) | ||
141 | { | ||
142 | struct logfs_journal_header *jh = from; | ||
143 | void *data = from + sizeof(struct logfs_journal_header); | ||
144 | int err; | ||
145 | size_t inlen, outlen; | ||
146 | |||
147 | inlen = be16_to_cpu(jh->h_len); | ||
148 | outlen = be16_to_cpu(jh->h_datalen); | ||
149 | |||
150 | if (jh->h_compr == COMPR_NONE) | ||
151 | memcpy(to, data, inlen); | ||
152 | else { | ||
153 | err = logfs_uncompress(data, to, inlen, outlen); | ||
154 | BUG_ON(err); | ||
155 | } | ||
156 | return to; | ||
157 | } | ||
158 | |||
159 | static int __read_je_header(struct super_block *sb, u64 ofs, | ||
160 | struct logfs_journal_header *jh) | ||
161 | { | ||
162 | struct logfs_super *super = logfs_super(sb); | ||
163 | size_t bufsize = max_t(size_t, sb->s_blocksize, super->s_writesize) | ||
164 | + MAX_JOURNAL_HEADER; | ||
165 | u16 type, len, datalen; | ||
166 | int err; | ||
167 | |||
168 | /* read header only */ | ||
169 | err = wbuf_read(sb, ofs, sizeof(*jh), jh); | ||
170 | if (err) | ||
171 | return err; | ||
172 | type = be16_to_cpu(jh->h_type); | ||
173 | len = be16_to_cpu(jh->h_len); | ||
174 | datalen = be16_to_cpu(jh->h_datalen); | ||
175 | if (len > sb->s_blocksize) | ||
176 | return -EIO; | ||
177 | if ((type < JE_FIRST) || (type > JE_LAST)) | ||
178 | return -EIO; | ||
179 | if (datalen > bufsize) | ||
180 | return -EIO; | ||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static int __read_je_payload(struct super_block *sb, u64 ofs, | ||
185 | struct logfs_journal_header *jh) | ||
186 | { | ||
187 | u16 len; | ||
188 | int err; | ||
189 | |||
190 | len = be16_to_cpu(jh->h_len); | ||
191 | err = wbuf_read(sb, ofs + sizeof(*jh), len, jh + 1); | ||
192 | if (err) | ||
193 | return err; | ||
194 | if (jh->h_crc != logfs_crc32(jh, len + sizeof(*jh), 4)) { | ||
195 | /* Old code was confused. It forgot about the header length | ||
196 | * and stopped calculating the crc 16 bytes before the end | ||
197 | * of data - ick! | ||
198 | * FIXME: Remove this hack once the old code is fixed. | ||
199 | */ | ||
200 | if (jh->h_crc == logfs_crc32(jh, len, 4)) | ||
201 | WARN_ON_ONCE(1); | ||
202 | else | ||
203 | return -EIO; | ||
204 | } | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * jh needs to be large enough to hold the complete entry, not just the header | ||
210 | */ | ||
211 | static int __read_je(struct super_block *sb, u64 ofs, | ||
212 | struct logfs_journal_header *jh) | ||
213 | { | ||
214 | int err; | ||
215 | |||
216 | err = __read_je_header(sb, ofs, jh); | ||
217 | if (err) | ||
218 | return err; | ||
219 | return __read_je_payload(sb, ofs, jh); | ||
220 | } | ||
221 | |||
222 | static int read_je(struct super_block *sb, u64 ofs) | ||
223 | { | ||
224 | struct logfs_super *super = logfs_super(sb); | ||
225 | struct logfs_journal_header *jh = super->s_compressed_je; | ||
226 | void *scratch = super->s_je; | ||
227 | u16 type, datalen; | ||
228 | int err; | ||
229 | |||
230 | err = __read_je(sb, ofs, jh); | ||
231 | if (err) | ||
232 | return err; | ||
233 | type = be16_to_cpu(jh->h_type); | ||
234 | datalen = be16_to_cpu(jh->h_datalen); | ||
235 | |||
236 | switch (type) { | ||
237 | case JE_DYNSB: | ||
238 | read_dynsb(sb, unpack(jh, scratch)); | ||
239 | break; | ||
240 | case JE_ANCHOR: | ||
241 | read_anchor(sb, unpack(jh, scratch)); | ||
242 | break; | ||
243 | case JE_ERASECOUNT: | ||
244 | read_erasecount(sb, unpack(jh, scratch)); | ||
245 | break; | ||
246 | case JE_AREA: | ||
247 | read_area(sb, unpack(jh, scratch)); | ||
248 | break; | ||
249 | case JE_OBJ_ALIAS: | ||
250 | err = logfs_load_object_aliases(sb, unpack(jh, scratch), | ||
251 | datalen); | ||
252 | break; | ||
253 | default: | ||
254 | WARN_ON_ONCE(1); | ||
255 | return -EIO; | ||
256 | } | ||
257 | return err; | ||
258 | } | ||
259 | |||
260 | static int logfs_read_segment(struct super_block *sb, u32 segno) | ||
261 | { | ||
262 | struct logfs_super *super = logfs_super(sb); | ||
263 | struct logfs_journal_header *jh = super->s_compressed_je; | ||
264 | u64 ofs, seg_ofs = dev_ofs(sb, segno, 0); | ||
265 | u32 h_ofs, last_ofs = 0; | ||
266 | u16 len, datalen, last_len; | ||
267 | int i, err; | ||
268 | |||
269 | /* search for most recent commit */ | ||
270 | for (h_ofs = 0; h_ofs < super->s_segsize; h_ofs += sizeof(*jh)) { | ||
271 | ofs = seg_ofs + h_ofs; | ||
272 | err = __read_je_header(sb, ofs, jh); | ||
273 | if (err) | ||
274 | continue; | ||
275 | if (jh->h_type != cpu_to_be16(JE_COMMIT)) | ||
276 | continue; | ||
277 | err = __read_je_payload(sb, ofs, jh); | ||
278 | if (err) | ||
279 | continue; | ||
280 | len = be16_to_cpu(jh->h_len); | ||
281 | datalen = be16_to_cpu(jh->h_datalen); | ||
282 | if ((datalen > sizeof(super->s_je_array)) || | ||
283 | (datalen % sizeof(__be64))) | ||
284 | continue; | ||
285 | last_ofs = h_ofs; | ||
286 | last_len = datalen; | ||
287 | h_ofs += ALIGN(len, sizeof(*jh)) - sizeof(*jh); | ||
288 | } | ||
289 | /* read commit */ | ||
290 | if (last_ofs == 0) | ||
291 | return -ENOENT; | ||
292 | ofs = seg_ofs + last_ofs; | ||
293 | log_journal("Read commit from %llx\n", ofs); | ||
294 | err = __read_je(sb, ofs, jh); | ||
295 | BUG_ON(err); /* We should have caught it in the scan loop already */ | ||
296 | if (err) | ||
297 | return err; | ||
298 | /* uncompress */ | ||
299 | unpack(jh, super->s_je_array); | ||
300 | super->s_no_je = last_len / sizeof(__be64); | ||
301 | /* iterate over array */ | ||
302 | for (i = 0; i < super->s_no_je; i++) { | ||
303 | err = read_je(sb, be64_to_cpu(super->s_je_array[i])); | ||
304 | if (err) | ||
305 | return err; | ||
306 | } | ||
307 | super->s_journal_area->a_segno = segno; | ||
308 | return 0; | ||
309 | } | ||
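/* Scan sketch for logfs_read_segment(): the loop probes a header every
 * sizeof(*jh) bytes; each valid JE_COMMIT overwrites last_ofs/last_len and
 * its payload is skipped, so when the loop ends last_ofs points at the newest
 * commit in the segment.  With (hypothetical) commits at offsets 0x20 and
 * 0x1400, last_ofs ends up as 0x1400 and that commit's array of journal-entry
 * offsets is the one replayed.
 */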
310 | |||
311 | static u64 read_gec(struct super_block *sb, u32 segno) | ||
312 | { | ||
313 | struct logfs_segment_header sh; | ||
314 | __be32 crc; | ||
315 | int err; | ||
316 | |||
317 | if (!segno) | ||
318 | return 0; | ||
319 | err = wbuf_read(sb, dev_ofs(sb, segno, 0), sizeof(sh), &sh); | ||
320 | if (err) | ||
321 | return 0; | ||
322 | crc = logfs_crc32(&sh, sizeof(sh), 4); | ||
323 | if (crc != sh.crc) { | ||
324 | WARN_ON(sh.gec != cpu_to_be64(0xffffffffffffffffull)); | ||
325 | /* Most likely it was just erased */ | ||
326 | return 0; | ||
327 | } | ||
328 | return be64_to_cpu(sh.gec); | ||
329 | } | ||
330 | |||
331 | static int logfs_read_journal(struct super_block *sb) | ||
332 | { | ||
333 | struct logfs_super *super = logfs_super(sb); | ||
334 | u64 gec[LOGFS_JOURNAL_SEGS], max; | ||
335 | u32 segno; | ||
336 | int i, max_i; | ||
337 | |||
338 | max = 0; | ||
339 | max_i = -1; | ||
340 | journal_for_each(i) { | ||
341 | segno = super->s_journal_seg[i]; | ||
342 | gec[i] = read_gec(sb, super->s_journal_seg[i]); | ||
343 | if (gec[i] > max) { | ||
344 | max = gec[i]; | ||
345 | max_i = i; | ||
346 | } | ||
347 | } | ||
348 | if (max_i == -1) | ||
349 | return -EIO; | ||
350 | /* FIXME: Try older segments in case of error */ | ||
351 | return logfs_read_segment(sb, super->s_journal_seg[max_i]); | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * First find the current segment in the array (outer loop), then pick the | ||
356 | * next non-zero entry as the new journal segment (inner loop). | ||
357 | */ | ||
358 | static void journal_get_free_segment(struct logfs_area *area) | ||
359 | { | ||
360 | struct logfs_super *super = logfs_super(area->a_sb); | ||
361 | int i; | ||
362 | |||
363 | journal_for_each(i) { | ||
364 | if (area->a_segno != super->s_journal_seg[i]) | ||
365 | continue; | ||
366 | |||
367 | do { | ||
368 | i++; | ||
369 | if (i == LOGFS_JOURNAL_SEGS) | ||
370 | i = 0; | ||
371 | } while (!super->s_journal_seg[i]); | ||
372 | |||
373 | area->a_segno = super->s_journal_seg[i]; | ||
374 | area->a_erase_count = ++(super->s_journal_ec[i]); | ||
375 | log_journal("Journal now at %x (ec %x)\n", area->a_segno, | ||
376 | area->a_erase_count); | ||
377 | return; | ||
378 | } | ||
379 | BUG(); | ||
380 | } | ||
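/* Rotation example with hypothetical values: if s_journal_seg = {12, 0, 17, 0}
 * and area->a_segno == 12, the outer loop matches at i == 0, the inner loop
 * skips the empty slot at i == 1 and stops at i == 2, so the journal advances
 * to segment 17 and that slot's erase count is incremented.
 */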
381 | |||
382 | static void journal_get_erase_count(struct logfs_area *area) | ||
383 | { | ||
384 | /* erase count is stored globally and incremented in | ||
385 | * journal_get_free_segment() - nothing to do here */ | ||
386 | } | ||
387 | |||
388 | static int journal_erase_segment(struct logfs_area *area) | ||
389 | { | ||
390 | struct super_block *sb = area->a_sb; | ||
391 | struct logfs_segment_header sh; | ||
392 | u64 ofs; | ||
393 | int err; | ||
394 | |||
395 | err = logfs_erase_segment(sb, area->a_segno); | ||
396 | if (err) | ||
397 | return err; | ||
398 | |||
399 | sh.pad = 0; | ||
400 | sh.type = SEG_JOURNAL; | ||
401 | sh.level = 0; | ||
402 | sh.segno = cpu_to_be32(area->a_segno); | ||
403 | sh.ec = cpu_to_be32(area->a_erase_count); | ||
404 | sh.gec = cpu_to_be64(logfs_super(sb)->s_gec); | ||
405 | sh.crc = logfs_crc32(&sh, sizeof(sh), 4); | ||
406 | |||
407 | /* Calling this here still triggers a bug in segment.c, so it stays disabled for now. */ | ||
408 | //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0); | ||
409 | |||
410 | ofs = dev_ofs(sb, area->a_segno, 0); | ||
411 | area->a_used_bytes = ALIGN(sizeof(sh), 16); | ||
412 | logfs_buf_write(area, ofs, &sh, sizeof(sh)); | ||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static size_t __logfs_write_header(struct logfs_super *super, | ||
417 | struct logfs_journal_header *jh, size_t len, size_t datalen, | ||
418 | u16 type, u8 compr) | ||
419 | { | ||
420 | jh->h_len = cpu_to_be16(len); | ||
421 | jh->h_type = cpu_to_be16(type); | ||
422 | jh->h_version = cpu_to_be16(++super->s_last_version); | ||
423 | jh->h_datalen = cpu_to_be16(datalen); | ||
424 | jh->h_compr = compr; | ||
425 | jh->h_pad[0] = 'H'; | ||
426 | jh->h_pad[1] = 'A'; | ||
427 | jh->h_pad[2] = 'T'; | ||
428 | jh->h_crc = logfs_crc32(jh, len + sizeof(*jh), 4); | ||
429 | return ALIGN(len, 16) + sizeof(*jh); | ||
430 | } | ||
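/* Resulting on-media layout (a sketch): each journal entry is a header
 * followed by its payload, zero-padded up to the next 16-byte boundary; the
 * returned length is what logfs_write_je_buf() reserves for the entry.
 * h_crc covers header plus payload, apparently skipping the first 4 bytes
 * (the crc field itself) - see the matching check in __read_je_payload().
 */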
431 | |||
432 | static size_t logfs_write_header(struct logfs_super *super, | ||
433 | struct logfs_journal_header *jh, size_t datalen, u16 type) | ||
434 | { | ||
435 | size_t len = datalen; | ||
436 | |||
437 | return __logfs_write_header(super, jh, len, datalen, type, COMPR_NONE); | ||
438 | } | ||
439 | |||
440 | static inline size_t logfs_journal_erasecount_size(struct logfs_super *super) | ||
441 | { | ||
442 | return LOGFS_JOURNAL_SEGS * sizeof(__be32); | ||
443 | } | ||
444 | |||
445 | static void *logfs_write_erasecount(struct super_block *sb, void *_ec, | ||
446 | u16 *type, size_t *len) | ||
447 | { | ||
448 | struct logfs_super *super = logfs_super(sb); | ||
449 | struct logfs_je_journal_ec *ec = _ec; | ||
450 | int i; | ||
451 | |||
452 | journal_for_each(i) | ||
453 | ec->ec[i] = cpu_to_be32(super->s_journal_ec[i]); | ||
454 | *type = JE_ERASECOUNT; | ||
455 | *len = logfs_journal_erasecount_size(super); | ||
456 | return ec; | ||
457 | } | ||
458 | |||
459 | static void account_shadow(void *_shadow, unsigned long _sb, u64 ignore, | ||
460 | size_t ignore2) | ||
461 | { | ||
462 | struct logfs_shadow *shadow = _shadow; | ||
463 | struct super_block *sb = (void *)_sb; | ||
464 | struct logfs_super *super = logfs_super(sb); | ||
465 | |||
466 | /* consume new space */ | ||
467 | super->s_free_bytes -= shadow->new_len; | ||
468 | super->s_used_bytes += shadow->new_len; | ||
469 | super->s_dirty_used_bytes -= shadow->new_len; | ||
470 | |||
471 | /* free up old space */ | ||
472 | super->s_free_bytes += shadow->old_len; | ||
473 | super->s_used_bytes -= shadow->old_len; | ||
474 | super->s_dirty_free_bytes -= shadow->old_len; | ||
475 | |||
476 | logfs_set_segment_used(sb, shadow->old_ofs, -shadow->old_len); | ||
477 | logfs_set_segment_used(sb, shadow->new_ofs, shadow->new_len); | ||
478 | |||
479 | log_journal("account_shadow(%llx, %llx, %x) %llx->%llx %x->%x\n", | ||
480 | shadow->ino, shadow->bix, shadow->gc_level, | ||
481 | shadow->old_ofs, shadow->new_ofs, | ||
482 | shadow->old_len, shadow->new_len); | ||
483 | mempool_free(shadow, super->s_shadow_pool); | ||
484 | } | ||
485 | |||
486 | static void account_shadows(struct super_block *sb) | ||
487 | { | ||
488 | struct logfs_super *super = logfs_super(sb); | ||
489 | struct inode *inode = super->s_master_inode; | ||
490 | struct logfs_inode *li = logfs_inode(inode); | ||
491 | struct shadow_tree *tree = &super->s_shadow_tree; | ||
492 | |||
493 | btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow); | ||
494 | btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow); | ||
495 | |||
496 | if (li->li_block) { | ||
497 | /* | ||
498 | * We never actually use the structure when it is attached to the | ||
499 | * master inode. But it is easier to always free it here than | ||
500 | * to add checks in the several other places where it gets | ||
501 | * allocated. | ||
502 | */ | ||
503 | li->li_block->ops->free_block(sb, li->li_block); | ||
504 | } | ||
505 | BUG_ON((s64)li->li_used_bytes < 0); | ||
506 | } | ||
507 | |||
508 | static void *__logfs_write_anchor(struct super_block *sb, void *_da, | ||
509 | u16 *type, size_t *len) | ||
510 | { | ||
511 | struct logfs_super *super = logfs_super(sb); | ||
512 | struct logfs_je_anchor *da = _da; | ||
513 | struct inode *inode = super->s_master_inode; | ||
514 | struct logfs_inode *li = logfs_inode(inode); | ||
515 | int i; | ||
516 | |||
517 | da->da_height = li->li_height; | ||
518 | da->da_last_ino = cpu_to_be64(super->s_last_ino); | ||
519 | da->da_size = cpu_to_be64(i_size_read(inode)); | ||
520 | da->da_used_bytes = cpu_to_be64(li->li_used_bytes); | ||
521 | for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++) | ||
522 | da->da_data[i] = cpu_to_be64(li->li_data[i]); | ||
523 | *type = JE_ANCHOR; | ||
524 | *len = sizeof(*da); | ||
525 | return da; | ||
526 | } | ||
527 | |||
528 | static void *logfs_write_dynsb(struct super_block *sb, void *_dynsb, | ||
529 | u16 *type, size_t *len) | ||
530 | { | ||
531 | struct logfs_super *super = logfs_super(sb); | ||
532 | struct logfs_je_dynsb *dynsb = _dynsb; | ||
533 | |||
534 | dynsb->ds_gec = cpu_to_be64(super->s_gec); | ||
535 | dynsb->ds_sweeper = cpu_to_be64(super->s_sweeper); | ||
536 | dynsb->ds_victim_ino = cpu_to_be64(super->s_victim_ino); | ||
537 | dynsb->ds_rename_dir = cpu_to_be64(super->s_rename_dir); | ||
538 | dynsb->ds_rename_pos = cpu_to_be64(super->s_rename_pos); | ||
539 | dynsb->ds_used_bytes = cpu_to_be64(super->s_used_bytes); | ||
540 | dynsb->ds_generation = cpu_to_be32(super->s_generation); | ||
541 | *type = JE_DYNSB; | ||
542 | *len = sizeof(*dynsb); | ||
543 | return dynsb; | ||
544 | } | ||
545 | |||
546 | static void write_wbuf(struct super_block *sb, struct logfs_area *area, | ||
547 | void *wbuf) | ||
548 | { | ||
549 | struct logfs_super *super = logfs_super(sb); | ||
550 | struct address_space *mapping = super->s_mapping_inode->i_mapping; | ||
551 | u64 ofs; | ||
552 | pgoff_t index; | ||
553 | int page_ofs; | ||
554 | struct page *page; | ||
555 | |||
556 | ofs = dev_ofs(sb, area->a_segno, | ||
557 | area->a_used_bytes & ~(super->s_writesize - 1)); | ||
558 | index = ofs >> PAGE_SHIFT; | ||
559 | page_ofs = ofs & (PAGE_SIZE - 1); | ||
560 | |||
561 | page = find_lock_page(mapping, index); | ||
562 | BUG_ON(!page); | ||
563 | memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize); | ||
564 | unlock_page(page); | ||
565 | } | ||
566 | |||
567 | static void *logfs_write_area(struct super_block *sb, void *_a, | ||
568 | u16 *type, size_t *len) | ||
569 | { | ||
570 | struct logfs_super *super = logfs_super(sb); | ||
571 | struct logfs_area *area = super->s_area[super->s_sum_index]; | ||
572 | struct logfs_je_area *a = _a; | ||
573 | |||
574 | a->vim = VIM_DEFAULT; | ||
575 | a->gc_level = super->s_sum_index; | ||
576 | a->used_bytes = cpu_to_be32(area->a_used_bytes); | ||
577 | a->segno = cpu_to_be32(area->a_segno); | ||
578 | if (super->s_writesize > 1) | ||
579 | write_wbuf(sb, area, a + 1); | ||
580 | |||
581 | *type = JE_AREA; | ||
582 | *len = sizeof(*a) + super->s_writesize; | ||
583 | return a; | ||
584 | } | ||
585 | |||
586 | static void *logfs_write_commit(struct super_block *sb, void *h, | ||
587 | u16 *type, size_t *len) | ||
588 | { | ||
589 | struct logfs_super *super = logfs_super(sb); | ||
590 | |||
591 | *type = JE_COMMIT; | ||
592 | *len = super->s_no_je * sizeof(__be64); | ||
593 | return super->s_je_array; | ||
594 | } | ||
595 | |||
596 | static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type, | ||
597 | size_t len) | ||
598 | { | ||
599 | struct logfs_super *super = logfs_super(sb); | ||
600 | void *header = super->s_compressed_je; | ||
601 | void *data = header + sizeof(struct logfs_journal_header); | ||
602 | ssize_t compr_len, pad_len; | ||
603 | u8 compr = COMPR_ZLIB; | ||
604 | |||
605 | if (len == 0) | ||
606 | return logfs_write_header(super, header, 0, type); | ||
607 | |||
608 | compr_len = logfs_compress(buf, data, len, sb->s_blocksize); | ||
609 | if (compr_len < 0 || type == JE_ANCHOR) { | ||
610 | BUG_ON(len > sb->s_blocksize); | ||
611 | memcpy(data, buf, len); | ||
612 | compr_len = len; | ||
613 | compr = COMPR_NONE; | ||
614 | } | ||
615 | |||
616 | pad_len = ALIGN(compr_len, 16); | ||
617 | memset(data + compr_len, 0, pad_len - compr_len); | ||
618 | |||
619 | return __logfs_write_header(super, header, compr_len, len, type, compr); | ||
620 | } | ||
621 | |||
622 | static s64 logfs_get_free_bytes(struct logfs_area *area, size_t *bytes, | ||
623 | int must_pad) | ||
624 | { | ||
625 | u32 writesize = logfs_super(area->a_sb)->s_writesize; | ||
626 | s32 ofs; | ||
627 | int ret; | ||
628 | |||
629 | ret = logfs_open_area(area, *bytes); | ||
630 | if (ret) | ||
631 | return -EAGAIN; | ||
632 | |||
633 | ofs = area->a_used_bytes; | ||
634 | area->a_used_bytes += *bytes; | ||
635 | |||
636 | if (must_pad) { | ||
637 | area->a_used_bytes = ALIGN(area->a_used_bytes, writesize); | ||
638 | *bytes = area->a_used_bytes - ofs; | ||
639 | } | ||
640 | |||
641 | return dev_ofs(area->a_sb, area->a_segno, ofs); | ||
642 | } | ||
643 | |||
644 | static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type, | ||
645 | size_t buf_len) | ||
646 | { | ||
647 | struct logfs_super *super = logfs_super(sb); | ||
648 | struct logfs_area *area = super->s_journal_area; | ||
649 | struct logfs_journal_header *jh = super->s_compressed_je; | ||
650 | size_t len; | ||
651 | int must_pad = 0; | ||
652 | s64 ofs; | ||
653 | |||
654 | len = __logfs_write_je(sb, buf, type, buf_len); | ||
655 | if (jh->h_type == cpu_to_be16(JE_COMMIT)) | ||
656 | must_pad = 1; | ||
657 | |||
658 | ofs = logfs_get_free_bytes(area, &len, must_pad); | ||
659 | if (ofs < 0) | ||
660 | return ofs; | ||
661 | logfs_buf_write(area, ofs, super->s_compressed_je, len); | ||
662 | super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs); | ||
663 | return 0; | ||
664 | } | ||
665 | |||
666 | static int logfs_write_je(struct super_block *sb, | ||
667 | void* (*write)(struct super_block *sb, void *scratch, | ||
668 | u16 *type, size_t *len)) | ||
669 | { | ||
670 | void *buf; | ||
671 | size_t len; | ||
672 | u16 type; | ||
673 | |||
674 | buf = write(sb, logfs_super(sb)->s_je, &type, &len); | ||
675 | return logfs_write_je_buf(sb, buf, type, len); | ||
676 | } | ||
677 | |||
678 | int write_alias_journal(struct super_block *sb, u64 ino, u64 bix, | ||
679 | level_t level, int child_no, __be64 val) | ||
680 | { | ||
681 | struct logfs_super *super = logfs_super(sb); | ||
682 | struct logfs_obj_alias *oa = super->s_je; | ||
683 | int err = 0, fill = super->s_je_fill; | ||
684 | |||
685 | log_aliases("logfs_write_obj_aliases #%x(%llx, %llx, %x, %x) %llx\n", | ||
686 | fill, ino, bix, level, child_no, be64_to_cpu(val)); | ||
687 | oa[fill].ino = cpu_to_be64(ino); | ||
688 | oa[fill].bix = cpu_to_be64(bix); | ||
689 | oa[fill].val = val; | ||
690 | oa[fill].level = (__force u8)level; | ||
691 | oa[fill].child_no = cpu_to_be16(child_no); | ||
692 | fill++; | ||
693 | if (fill >= sb->s_blocksize / sizeof(*oa)) { | ||
694 | err = logfs_write_je_buf(sb, oa, JE_OBJ_ALIAS, sb->s_blocksize); | ||
695 | fill = 0; | ||
696 | } | ||
697 | |||
698 | super->s_je_fill = fill; | ||
699 | return err; | ||
700 | } | ||
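/* Batching note: aliases accumulate in super->s_je until a full block's worth
 * (sb->s_blocksize / sizeof(*oa) entries) has been collected, at which point
 * they are flushed as one JE_OBJ_ALIAS entry; logfs_write_obj_aliases() below
 * flushes whatever partial batch is left over via s_je_fill.
 */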
701 | |||
702 | static int logfs_write_obj_aliases(struct super_block *sb) | ||
703 | { | ||
704 | struct logfs_super *super = logfs_super(sb); | ||
705 | int err; | ||
706 | |||
707 | log_journal("logfs_write_obj_aliases: %d aliases to write\n", | ||
708 | super->s_no_object_aliases); | ||
709 | super->s_je_fill = 0; | ||
710 | err = logfs_write_obj_aliases_pagecache(sb); | ||
711 | if (err) | ||
712 | return err; | ||
713 | |||
714 | if (super->s_je_fill) | ||
715 | err = logfs_write_je_buf(sb, super->s_je, JE_OBJ_ALIAS, | ||
716 | super->s_je_fill | ||
717 | * sizeof(struct logfs_obj_alias)); | ||
718 | return err; | ||
719 | } | ||
720 | |||
721 | /* | ||
722 | * Write all journal entries. The goto logic ensures that all journal entries | ||
723 | * are written whenever a new segment is used. It is ugly and potentially a | ||
724 | * bit wasteful, but robustness is more important. With this we can *always* | ||
725 | * erase all journal segments except the one containing the most recent commit. | ||
726 | */ | ||
727 | void logfs_write_anchor(struct inode *inode) | ||
728 | { | ||
729 | struct super_block *sb = inode->i_sb; | ||
730 | struct logfs_super *super = logfs_super(sb); | ||
731 | struct logfs_area *area = super->s_journal_area; | ||
732 | int i, err; | ||
733 | |||
734 | BUG_ON(logfs_super(sb)->s_flags & LOGFS_SB_FLAG_SHUTDOWN); | ||
735 | mutex_lock(&super->s_journal_mutex); | ||
736 | |||
737 | /* Do this first or suffer corruption */ | ||
738 | logfs_sync_segments(sb); | ||
739 | account_shadows(sb); | ||
740 | |||
741 | again: | ||
742 | super->s_no_je = 0; | ||
743 | for_each_area(i) { | ||
744 | if (!super->s_area[i]->a_is_open) | ||
745 | continue; | ||
746 | super->s_sum_index = i; | ||
747 | err = logfs_write_je(sb, logfs_write_area); | ||
748 | if (err) | ||
749 | goto again; | ||
750 | } | ||
751 | err = logfs_write_obj_aliases(sb); | ||
752 | if (err) | ||
753 | goto again; | ||
754 | err = logfs_write_je(sb, logfs_write_erasecount); | ||
755 | if (err) | ||
756 | goto again; | ||
757 | err = logfs_write_je(sb, __logfs_write_anchor); | ||
758 | if (err) | ||
759 | goto again; | ||
760 | err = logfs_write_je(sb, logfs_write_dynsb); | ||
761 | if (err) | ||
762 | goto again; | ||
763 | /* | ||
764 | * Order is imperative. First we sync all writes, including the | ||
765 | * non-committed journal writes. Then we write the final commit and | ||
766 | * sync the current journal segment. | ||
767 | * There is a theoretical bug here. Syncing the journal segment will | ||
768 | * write a number of journal entries and the final commit. All these | ||
769 | * are written in a single operation. If the device layer writes the | ||
770 | * data back-to-front, the commit will precede the other journal | ||
771 | * entries, leaving a race window. | ||
772 | * Two fixes are possible. Preferred is to fix the device layer to | ||
773 | * ensure writes happen front-to-back. Alternatively we can insert | ||
774 | * another logfs_sync_area() super->s_devops->sync() combo before | ||
775 | * writing the commit. | ||
776 | */ | ||
777 | /* | ||
778 | * On another subject, super->s_devops->sync is usually not necessary. | ||
779 | * Unless called from sys_sync or friends, a barrier would suffice. | ||
780 | */ | ||
781 | super->s_devops->sync(sb); | ||
782 | err = logfs_write_je(sb, logfs_write_commit); | ||
783 | if (err) | ||
784 | goto again; | ||
785 | log_journal("Write commit to %llx\n", | ||
786 | be64_to_cpu(super->s_je_array[super->s_no_je - 1])); | ||
787 | logfs_sync_area(area); | ||
788 | BUG_ON(area->a_used_bytes != area->a_written_bytes); | ||
789 | super->s_devops->sync(sb); | ||
790 | |||
791 | mutex_unlock(&super->s_journal_mutex); | ||
792 | return; | ||
793 | } | ||
794 | |||
795 | void do_logfs_journal_wl_pass(struct super_block *sb) | ||
796 | { | ||
797 | struct logfs_super *super = logfs_super(sb); | ||
798 | struct logfs_area *area = super->s_journal_area; | ||
799 | u32 segno, ec; | ||
800 | int i, err; | ||
801 | |||
802 | log_journal("Journal requires wear-leveling.\n"); | ||
803 | /* Drop old segments */ | ||
804 | journal_for_each(i) | ||
805 | if (super->s_journal_seg[i]) { | ||
806 | logfs_set_segment_unreserved(sb, | ||
807 | super->s_journal_seg[i], | ||
808 | super->s_journal_ec[i]); | ||
809 | super->s_journal_seg[i] = 0; | ||
810 | super->s_journal_ec[i] = 0; | ||
811 | } | ||
812 | /* Get new segments */ | ||
813 | for (i = 0; i < super->s_no_journal_segs; i++) { | ||
814 | segno = get_best_cand(sb, &super->s_reserve_list, &ec); | ||
815 | super->s_journal_seg[i] = segno; | ||
816 | super->s_journal_ec[i] = ec; | ||
817 | logfs_set_segment_reserved(sb, segno); | ||
818 | } | ||
819 | /* Manually move journal_area */ | ||
820 | area->a_segno = super->s_journal_seg[0]; | ||
821 | area->a_is_open = 0; | ||
822 | area->a_used_bytes = 0; | ||
823 | /* Write journal */ | ||
824 | logfs_write_anchor(super->s_master_inode); | ||
825 | /* Write superblocks */ | ||
826 | err = logfs_write_sb(sb); | ||
827 | BUG_ON(err); | ||
828 | } | ||
829 | |||
830 | static const struct logfs_area_ops journal_area_ops = { | ||
831 | .get_free_segment = journal_get_free_segment, | ||
832 | .get_erase_count = journal_get_erase_count, | ||
833 | .erase_segment = journal_erase_segment, | ||
834 | }; | ||
835 | |||
836 | int logfs_init_journal(struct super_block *sb) | ||
837 | { | ||
838 | struct logfs_super *super = logfs_super(sb); | ||
839 | size_t bufsize = max_t(size_t, sb->s_blocksize, super->s_writesize) | ||
840 | + MAX_JOURNAL_HEADER; | ||
841 | int ret = -ENOMEM; | ||
842 | |||
843 | mutex_init(&super->s_journal_mutex); | ||
844 | btree_init_mempool32(&super->s_reserved_segments, super->s_btree_pool); | ||
845 | |||
846 | super->s_je = kzalloc(bufsize, GFP_KERNEL); | ||
847 | if (!super->s_je) | ||
848 | return ret; | ||
849 | |||
850 | super->s_compressed_je = kzalloc(bufsize, GFP_KERNEL); | ||
851 | if (!super->s_compressed_je) | ||
852 | return ret; | ||
853 | |||
854 | super->s_master_inode = logfs_new_meta_inode(sb, LOGFS_INO_MASTER); | ||
855 | if (IS_ERR(super->s_master_inode)) | ||
856 | return PTR_ERR(super->s_master_inode); | ||
857 | |||
858 | ret = logfs_read_journal(sb); | ||
859 | if (ret) | ||
860 | return -EIO; | ||
861 | |||
862 | reserve_sb_and_journal(sb); | ||
863 | logfs_calc_free(sb); | ||
864 | |||
865 | super->s_journal_area->a_ops = &journal_area_ops; | ||
866 | return 0; | ||
867 | } | ||
868 | |||
869 | void logfs_cleanup_journal(struct super_block *sb) | ||
870 | { | ||
871 | struct logfs_super *super = logfs_super(sb); | ||
872 | |||
873 | btree_grim_visitor32(&super->s_reserved_segments, 0, NULL); | ||
874 | destroy_meta_inode(super->s_master_inode); | ||
875 | super->s_master_inode = NULL; | ||
876 | |||
877 | kfree(super->s_compressed_je); | ||
878 | kfree(super->s_je); | ||
879 | } | ||