| author | David Sterba <dsterba@suse.cz> | 2011-10-24 08:47:57 -0400 |
|---|---|---|
| committer | David Sterba <dsterba@suse.cz> | 2011-10-24 08:47:57 -0400 |
| commit | afd582ac8f10382002a72b4d17d9c2db328ed8b8 (patch) | |
| tree | 91246c1296c06cc0d5add8d10452e7fb110ed920 /fs/btrfs/free-space-cache.c | |
| parent | c3b92c8787367a8bb53d57d9789b558f1295cc96 (diff) | |
| parent | 016fc6a63e465d5b94e4028f6d05d9703e195428 (diff) | |
Merge remote-tracking branch 'remotes/josef/for-chris' into btrfs-next-stable
Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r-- | fs/btrfs/free-space-cache.c | 902 |
1 file changed, 545 insertions, 357 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 41ac927401d0..de205d59b74b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -20,6 +20,7 @@
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/math64.h> | 22 | #include <linux/math64.h> |
23 | #include <linux/ratelimit.h> | ||
23 | #include "ctree.h" | 24 | #include "ctree.h" |
24 | #include "free-space-cache.h" | 25 | #include "free-space-cache.h" |
25 | #include "transaction.h" | 26 | #include "transaction.h" |
@@ -84,6 +85,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
84 | *block_group, struct btrfs_path *path) | 85 | *block_group, struct btrfs_path *path) |
85 | { | 86 | { |
86 | struct inode *inode = NULL; | 87 | struct inode *inode = NULL; |
88 | u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; | ||
87 | 89 | ||
88 | spin_lock(&block_group->lock); | 90 | spin_lock(&block_group->lock); |
89 | if (block_group->inode) | 91 | if (block_group->inode) |
@@ -98,13 +100,14 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
98 | return inode; | 100 | return inode; |
99 | 101 | ||
100 | spin_lock(&block_group->lock); | 102 | spin_lock(&block_group->lock); |
101 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) { | 103 | if (!((BTRFS_I(inode)->flags & flags) == flags)) { |
102 | printk(KERN_INFO "Old style space inode found, converting.\n"); | 104 | printk(KERN_INFO "Old style space inode found, converting.\n"); |
103 | BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM; | 105 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | |
106 | BTRFS_INODE_NODATACOW; | ||
104 | block_group->disk_cache_state = BTRFS_DC_CLEAR; | 107 | block_group->disk_cache_state = BTRFS_DC_CLEAR; |
105 | } | 108 | } |
106 | 109 | ||
107 | if (!btrfs_fs_closing(root->fs_info)) { | 110 | if (!block_group->iref) { |
108 | block_group->inode = igrab(inode); | 111 | block_group->inode = igrab(inode); |
109 | block_group->iref = 1; | 112 | block_group->iref = 1; |
110 | } | 113 | } |
@@ -122,12 +125,17 @@ int __create_free_space_inode(struct btrfs_root *root,
122 | struct btrfs_free_space_header *header; | 125 | struct btrfs_free_space_header *header; |
123 | struct btrfs_inode_item *inode_item; | 126 | struct btrfs_inode_item *inode_item; |
124 | struct extent_buffer *leaf; | 127 | struct extent_buffer *leaf; |
128 | u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC; | ||
125 | int ret; | 129 | int ret; |
126 | 130 | ||
127 | ret = btrfs_insert_empty_inode(trans, root, path, ino); | 131 | ret = btrfs_insert_empty_inode(trans, root, path, ino); |
128 | if (ret) | 132 | if (ret) |
129 | return ret; | 133 | return ret; |
130 | 134 | ||
135 | /* We inline crc's for the free disk space cache */ | ||
136 | if (ino != BTRFS_FREE_INO_OBJECTID) | ||
137 | flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; | ||
138 | |||
131 | leaf = path->nodes[0]; | 139 | leaf = path->nodes[0]; |
132 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | 140 | inode_item = btrfs_item_ptr(leaf, path->slots[0], |
133 | struct btrfs_inode_item); | 141 | struct btrfs_inode_item); |
@@ -140,8 +148,7 @@ int __create_free_space_inode(struct btrfs_root *root,
140 | btrfs_set_inode_uid(leaf, inode_item, 0); | 148 | btrfs_set_inode_uid(leaf, inode_item, 0); |
141 | btrfs_set_inode_gid(leaf, inode_item, 0); | 149 | btrfs_set_inode_gid(leaf, inode_item, 0); |
142 | btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600); | 150 | btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600); |
143 | btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS | | 151 | btrfs_set_inode_flags(leaf, inode_item, flags); |
144 | BTRFS_INODE_PREALLOC); | ||
145 | btrfs_set_inode_nlink(leaf, inode_item, 1); | 152 | btrfs_set_inode_nlink(leaf, inode_item, 1); |
146 | btrfs_set_inode_transid(leaf, inode_item, trans->transid); | 153 | btrfs_set_inode_transid(leaf, inode_item, trans->transid); |
147 | btrfs_set_inode_block_group(leaf, inode_item, offset); | 154 | btrfs_set_inode_block_group(leaf, inode_item, offset); |
@@ -196,9 +203,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
196 | 203 | ||
197 | rsv = trans->block_rsv; | 204 | rsv = trans->block_rsv; |
198 | trans->block_rsv = root->orphan_block_rsv; | 205 | trans->block_rsv = root->orphan_block_rsv; |
199 | ret = btrfs_block_rsv_check(trans, root, | 206 | ret = btrfs_block_rsv_check(root, root->orphan_block_rsv, 5); |
200 | root->orphan_block_rsv, | ||
201 | 0, 5); | ||
202 | if (ret) | 207 | if (ret) |
203 | return ret; | 208 | return ret; |
204 | 209 | ||
@@ -242,26 +247,342 @@ static int readahead_cache(struct inode *inode)
242 | return 0; | 247 | return 0; |
243 | } | 248 | } |
244 | 249 | ||
250 | struct io_ctl { | ||
251 | void *cur, *orig; | ||
252 | struct page *page; | ||
253 | struct page **pages; | ||
254 | struct btrfs_root *root; | ||
255 | unsigned long size; | ||
256 | int index; | ||
257 | int num_pages; | ||
258 | unsigned check_crcs:1; | ||
259 | }; | ||
260 | |||
261 | static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode, | ||
262 | struct btrfs_root *root) | ||
263 | { | ||
264 | memset(io_ctl, 0, sizeof(struct io_ctl)); | ||
265 | io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> | ||
266 | PAGE_CACHE_SHIFT; | ||
267 | io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages, | ||
268 | GFP_NOFS); | ||
269 | if (!io_ctl->pages) | ||
270 | return -ENOMEM; | ||
271 | io_ctl->root = root; | ||
272 | if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID) | ||
273 | io_ctl->check_crcs = 1; | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | static void io_ctl_free(struct io_ctl *io_ctl) | ||
278 | { | ||
279 | kfree(io_ctl->pages); | ||
280 | } | ||
281 | |||
282 | static void io_ctl_unmap_page(struct io_ctl *io_ctl) | ||
283 | { | ||
284 | if (io_ctl->cur) { | ||
285 | kunmap(io_ctl->page); | ||
286 | io_ctl->cur = NULL; | ||
287 | io_ctl->orig = NULL; | ||
288 | } | ||
289 | } | ||
290 | |||
291 | static void io_ctl_map_page(struct io_ctl *io_ctl, int clear) | ||
292 | { | ||
293 | WARN_ON(io_ctl->cur); | ||
294 | BUG_ON(io_ctl->index >= io_ctl->num_pages); | ||
295 | io_ctl->page = io_ctl->pages[io_ctl->index++]; | ||
296 | io_ctl->cur = kmap(io_ctl->page); | ||
297 | io_ctl->orig = io_ctl->cur; | ||
298 | io_ctl->size = PAGE_CACHE_SIZE; | ||
299 | if (clear) | ||
300 | memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); | ||
301 | } | ||
302 | |||
303 | static void io_ctl_drop_pages(struct io_ctl *io_ctl) | ||
304 | { | ||
305 | int i; | ||
306 | |||
307 | io_ctl_unmap_page(io_ctl); | ||
308 | |||
309 | for (i = 0; i < io_ctl->num_pages; i++) { | ||
310 | ClearPageChecked(io_ctl->pages[i]); | ||
311 | unlock_page(io_ctl->pages[i]); | ||
312 | page_cache_release(io_ctl->pages[i]); | ||
313 | } | ||
314 | } | ||
315 | |||
316 | static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode, | ||
317 | int uptodate) | ||
318 | { | ||
319 | struct page *page; | ||
320 | gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); | ||
321 | int i; | ||
322 | |||
323 | for (i = 0; i < io_ctl->num_pages; i++) { | ||
324 | page = find_or_create_page(inode->i_mapping, i, mask); | ||
325 | if (!page) { | ||
326 | io_ctl_drop_pages(io_ctl); | ||
327 | return -ENOMEM; | ||
328 | } | ||
329 | io_ctl->pages[i] = page; | ||
330 | if (uptodate && !PageUptodate(page)) { | ||
331 | btrfs_readpage(NULL, page); | ||
332 | lock_page(page); | ||
333 | if (!PageUptodate(page)) { | ||
334 | printk(KERN_ERR "btrfs: error reading free " | ||
335 | "space cache\n"); | ||
336 | io_ctl_drop_pages(io_ctl); | ||
337 | return -EIO; | ||
338 | } | ||
339 | } | ||
340 | } | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation) | ||
346 | { | ||
347 | u64 *val; | ||
348 | |||
349 | io_ctl_map_page(io_ctl, 1); | ||
350 | |||
351 | /* | ||
352 | * Skip the csum areas. If we don't check crcs then we just have a | ||
353 | * 64bit chunk at the front of the first page. | ||
354 | */ | ||
355 | if (io_ctl->check_crcs) { | ||
356 | io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); | ||
357 | io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); | ||
358 | } else { | ||
359 | io_ctl->cur += sizeof(u64); | ||
360 | io_ctl->size -= sizeof(u64) * 2; | ||
361 | } | ||
362 | |||
363 | val = io_ctl->cur; | ||
364 | *val = cpu_to_le64(generation); | ||
365 | io_ctl->cur += sizeof(u64); | ||
366 | } | ||
367 | |||
368 | static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation) | ||
369 | { | ||
370 | u64 *gen; | ||
371 | |||
372 | /* | ||
373 | * Skip the crc area. If we don't check crcs then we just have a 64bit | ||
374 | * chunk at the front of the first page. | ||
375 | */ | ||
376 | if (io_ctl->check_crcs) { | ||
377 | io_ctl->cur += sizeof(u32) * io_ctl->num_pages; | ||
378 | io_ctl->size -= sizeof(u64) + | ||
379 | (sizeof(u32) * io_ctl->num_pages); | ||
380 | } else { | ||
381 | io_ctl->cur += sizeof(u64); | ||
382 | io_ctl->size -= sizeof(u64) * 2; | ||
383 | } | ||
384 | |||
385 | gen = io_ctl->cur; | ||
386 | if (le64_to_cpu(*gen) != generation) { | ||
387 | printk_ratelimited(KERN_ERR "btrfs: space cache generation " | ||
388 | "(%Lu) does not match inode (%Lu)\n", *gen, | ||
389 | generation); | ||
390 | io_ctl_unmap_page(io_ctl); | ||
391 | return -EIO; | ||
392 | } | ||
393 | io_ctl->cur += sizeof(u64); | ||
394 | return 0; | ||
395 | } | ||
396 | |||
397 | static void io_ctl_set_crc(struct io_ctl *io_ctl, int index) | ||
398 | { | ||
399 | u32 *tmp; | ||
400 | u32 crc = ~(u32)0; | ||
401 | unsigned offset = 0; | ||
402 | |||
403 | if (!io_ctl->check_crcs) { | ||
404 | io_ctl_unmap_page(io_ctl); | ||
405 | return; | ||
406 | } | ||
407 | |||
408 | if (index == 0) | ||
409 | offset = sizeof(u32) * io_ctl->num_pages; | ||
410 | |||
411 | crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc, | ||
412 | PAGE_CACHE_SIZE - offset); | ||
413 | btrfs_csum_final(crc, (char *)&crc); | ||
414 | io_ctl_unmap_page(io_ctl); | ||
415 | tmp = kmap(io_ctl->pages[0]); | ||
416 | tmp += index; | ||
417 | *tmp = crc; | ||
418 | kunmap(io_ctl->pages[0]); | ||
419 | } | ||
420 | |||
421 | static int io_ctl_check_crc(struct io_ctl *io_ctl, int index) | ||
422 | { | ||
423 | u32 *tmp, val; | ||
424 | u32 crc = ~(u32)0; | ||
425 | unsigned offset = 0; | ||
426 | |||
427 | if (!io_ctl->check_crcs) { | ||
428 | io_ctl_map_page(io_ctl, 0); | ||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | if (index == 0) | ||
433 | offset = sizeof(u32) * io_ctl->num_pages; | ||
434 | |||
435 | tmp = kmap(io_ctl->pages[0]); | ||
436 | tmp += index; | ||
437 | val = *tmp; | ||
438 | kunmap(io_ctl->pages[0]); | ||
439 | |||
440 | io_ctl_map_page(io_ctl, 0); | ||
441 | crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc, | ||
442 | PAGE_CACHE_SIZE - offset); | ||
443 | btrfs_csum_final(crc, (char *)&crc); | ||
444 | if (val != crc) { | ||
445 | printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free " | ||
446 | "space cache\n"); | ||
447 | io_ctl_unmap_page(io_ctl); | ||
448 | return -EIO; | ||
449 | } | ||
450 | |||
451 | return 0; | ||
452 | } | ||
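
Each cache page gets its own checksum, and all of them live in the array at the head of page 0; page 0 itself is checksummed from just past that array (so the generation is covered), every later page in full. A rough userspace model follows, assuming some crc32c() implementation is linked in (btrfs uses btrfs_csum_data() followed by btrfs_csum_final(), whose finalization step is folded into the assumed helper here):

```c
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_CACHE_SIZE 4096u

/* Assumed helper: any crc32c implementation taking (seed, buf, len);
 * the finalization btrfs_csum_final() performs is treated as part of it. */
uint32_t crc32c(uint32_t seed, const void *buf, size_t len);

/* Model of io_ctl_check_crc() for one page; 'stored' is crc[index]
 * read from the head of page 0. */
static int check_page_crc(const uint8_t *page, unsigned index,
			  unsigned num_pages, uint32_t stored)
{
	unsigned skip = index ? 0 : sizeof(uint32_t) * num_pages;
	uint32_t crc = crc32c(~(uint32_t)0, page + skip,
			      PAGE_CACHE_SIZE - skip);

	return crc == stored ? 0 : -EIO;	/* mismatch drops the cache */
}
```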
453 | |||
454 | static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes, | ||
455 | void *bitmap) | ||
456 | { | ||
457 | struct btrfs_free_space_entry *entry; | ||
458 | |||
459 | if (!io_ctl->cur) | ||
460 | return -ENOSPC; | ||
461 | |||
462 | entry = io_ctl->cur; | ||
463 | entry->offset = cpu_to_le64(offset); | ||
464 | entry->bytes = cpu_to_le64(bytes); | ||
465 | entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : | ||
466 | BTRFS_FREE_SPACE_EXTENT; | ||
467 | io_ctl->cur += sizeof(struct btrfs_free_space_entry); | ||
468 | io_ctl->size -= sizeof(struct btrfs_free_space_entry); | ||
469 | |||
470 | if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) | ||
471 | return 0; | ||
472 | |||
473 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); | ||
474 | |||
475 | /* No more pages to map */ | ||
476 | if (io_ctl->index >= io_ctl->num_pages) | ||
477 | return 0; | ||
478 | |||
479 | /* map the next page */ | ||
480 | io_ctl_map_page(io_ctl, 1); | ||
481 | return 0; | ||
482 | } | ||
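
Entries are packed back to back and match struct btrfs_free_space_entry from ctree.h: a little-endian offset and length plus one type byte, 17 bytes per record, which is why io_ctl_add_entry() hops to the next page once fewer than 17 bytes remain. A standalone sketch of the record (the struct name here is ours, not btrfs's):

```c
#include <stdint.h>
#include <stdio.h>

/* On-disk record written by io_ctl_add_entry(); mirrors struct
 * btrfs_free_space_entry (all fields little-endian on disk). */
struct free_space_entry_disk {
	uint64_t offset;	/* start of the free extent, or bitmap offset */
	uint64_t bytes;		/* length of the free extent */
	uint8_t  type;		/* BTRFS_FREE_SPACE_EXTENT or _BITMAP */
} __attribute__((packed));

int main(void)
{
	printf("record size: %zu bytes\n",
	       sizeof(struct free_space_entry_disk));	/* prints 17 */
	return 0;
}
```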
483 | |||
484 | static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap) | ||
485 | { | ||
486 | if (!io_ctl->cur) | ||
487 | return -ENOSPC; | ||
488 | |||
489 | /* | ||
490 | * If we aren't at the start of the current page, unmap this one and | ||
491 | * map the next one if there is any left. | ||
492 | */ | ||
493 | if (io_ctl->cur != io_ctl->orig) { | ||
494 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); | ||
495 | if (io_ctl->index >= io_ctl->num_pages) | ||
496 | return -ENOSPC; | ||
497 | io_ctl_map_page(io_ctl, 0); | ||
498 | } | ||
499 | |||
500 | memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); | ||
501 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); | ||
502 | if (io_ctl->index < io_ctl->num_pages) | ||
503 | io_ctl_map_page(io_ctl, 0); | ||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl) | ||
508 | { | ||
509 | /* | ||
510 | * If we're not on the boundary we know we've modified the page and we | ||
511 | * need to crc the page. | ||
512 | */ | ||
513 | if (io_ctl->cur != io_ctl->orig) | ||
514 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); | ||
515 | else | ||
516 | io_ctl_unmap_page(io_ctl); | ||
517 | |||
518 | while (io_ctl->index < io_ctl->num_pages) { | ||
519 | io_ctl_map_page(io_ctl, 1); | ||
520 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); | ||
521 | } | ||
522 | } | ||
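
Taken together, these helpers give the write path a simple lifecycle. The outline below (kernel context, error handling elided, not compilable on its own) only uses calls introduced by this patch:

```c
/* Write-path outline; see __btrfs_write_out_cache() below for the
 * real sequence with locking and error handling. */
struct io_ctl io_ctl;

io_ctl_init(&io_ctl, inode, root);	  /* size pages[] from i_size    */
io_ctl_prepare_pages(&io_ctl, inode, 0);  /* find_or_create + lock pages */
io_ctl_set_generation(&io_ctl, trans->transid);
/* io_ctl_add_entry() / io_ctl_add_bitmap() for each free-space record */
io_ctl_zero_remaining_pages(&io_ctl);	  /* crc and zero the tail       */
io_ctl_drop_pages(&io_ctl);		  /* unlock + release the pages  */
io_ctl_free(&io_ctl);			  /* free the pages[] array      */
```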
523 | |||
524 | static int io_ctl_read_entry(struct io_ctl *io_ctl, | ||
525 | struct btrfs_free_space *entry, u8 *type) | ||
526 | { | ||
527 | struct btrfs_free_space_entry *e; | ||
528 | |||
529 | e = io_ctl->cur; | ||
530 | entry->offset = le64_to_cpu(e->offset); | ||
531 | entry->bytes = le64_to_cpu(e->bytes); | ||
532 | *type = e->type; | ||
533 | io_ctl->cur += sizeof(struct btrfs_free_space_entry); | ||
534 | io_ctl->size -= sizeof(struct btrfs_free_space_entry); | ||
535 | |||
536 | if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) | ||
537 | return 0; | ||
538 | |||
539 | io_ctl_unmap_page(io_ctl); | ||
540 | |||
541 | if (io_ctl->index >= io_ctl->num_pages) | ||
542 | return 0; | ||
543 | |||
544 | return io_ctl_check_crc(io_ctl, io_ctl->index); | ||
545 | } | ||
546 | |||
547 | static int io_ctl_read_bitmap(struct io_ctl *io_ctl, | ||
548 | struct btrfs_free_space *entry) | ||
549 | { | ||
550 | int ret; | ||
551 | |||
552 | if (io_ctl->cur && io_ctl->cur != io_ctl->orig) | ||
553 | io_ctl_unmap_page(io_ctl); | ||
554 | |||
555 | ret = io_ctl_check_crc(io_ctl, io_ctl->index); | ||
556 | if (ret) | ||
557 | return ret; | ||
558 | |||
559 | memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); | ||
560 | io_ctl_unmap_page(io_ctl); | ||
561 | |||
562 | return 0; | ||
563 | } | ||
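
The read side mirrors it, with crcs verified lazily, one page at a time, as the cursor advances (again an outline, not compilable on its own):

```c
/* Read-path outline; see __load_free_space_cache() below. */
io_ctl_init(&io_ctl, inode, root);
io_ctl_prepare_pages(&io_ctl, inode, 1);      /* read in + lock pages   */
io_ctl_check_crc(&io_ctl, 0);                 /* maps page 0, checks it */
io_ctl_check_generation(&io_ctl, generation);
/* io_ctl_read_entry() per entry, io_ctl_read_bitmap() per bitmap */
io_ctl_drop_pages(&io_ctl);
io_ctl_free(&io_ctl);
```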
564 | |||
245 | int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, | 565 | int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, |
246 | struct btrfs_free_space_ctl *ctl, | 566 | struct btrfs_free_space_ctl *ctl, |
247 | struct btrfs_path *path, u64 offset) | 567 | struct btrfs_path *path, u64 offset) |
248 | { | 568 | { |
249 | struct btrfs_free_space_header *header; | 569 | struct btrfs_free_space_header *header; |
250 | struct extent_buffer *leaf; | 570 | struct extent_buffer *leaf; |
251 | struct page *page; | 571 | struct io_ctl io_ctl; |
252 | struct btrfs_key key; | 572 | struct btrfs_key key; |
573 | struct btrfs_free_space *e, *n; | ||
253 | struct list_head bitmaps; | 574 | struct list_head bitmaps; |
254 | u64 num_entries; | 575 | u64 num_entries; |
255 | u64 num_bitmaps; | 576 | u64 num_bitmaps; |
256 | u64 generation; | 577 | u64 generation; |
257 | pgoff_t index = 0; | 578 | u8 type; |
258 | int ret = 0; | 579 | int ret = 0; |
259 | 580 | ||
260 | INIT_LIST_HEAD(&bitmaps); | 581 | INIT_LIST_HEAD(&bitmaps); |
261 | 582 | ||
262 | /* Nothing in the space cache, goodbye */ | 583 | /* Nothing in the space cache, goodbye */ |
263 | if (!i_size_read(inode)) | 584 | if (!i_size_read(inode)) |
264 | goto out; | 585 | return 0; |
265 | 586 | ||
266 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | 587 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; |
267 | key.offset = offset; | 588 | key.offset = offset; |
@@ -269,11 +590,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
269 | 590 | ||
270 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 591 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
271 | if (ret < 0) | 592 | if (ret < 0) |
272 | goto out; | 593 | return 0; |
273 | else if (ret > 0) { | 594 | else if (ret > 0) { |
274 | btrfs_release_path(path); | 595 | btrfs_release_path(path); |
275 | ret = 0; | 596 | return 0; |
276 | goto out; | ||
277 | } | 597 | } |
278 | 598 | ||
279 | ret = -1; | 599 | ret = -1; |
@@ -291,169 +611,100 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
291 | " not match free space cache generation (%llu)\n", | 611 | " not match free space cache generation (%llu)\n", |
292 | (unsigned long long)BTRFS_I(inode)->generation, | 612 | (unsigned long long)BTRFS_I(inode)->generation, |
293 | (unsigned long long)generation); | 613 | (unsigned long long)generation); |
294 | goto out; | 614 | return 0; |
295 | } | 615 | } |
296 | 616 | ||
297 | if (!num_entries) | 617 | if (!num_entries) |
298 | goto out; | 618 | return 0; |
299 | 619 | ||
620 | io_ctl_init(&io_ctl, inode, root); | ||
300 | ret = readahead_cache(inode); | 621 | ret = readahead_cache(inode); |
301 | if (ret) | 622 | if (ret) |
302 | goto out; | 623 | goto out; |
303 | 624 | ||
304 | while (1) { | 625 | ret = io_ctl_prepare_pages(&io_ctl, inode, 1); |
305 | struct btrfs_free_space_entry *entry; | 626 | if (ret) |
306 | struct btrfs_free_space *e; | 627 | goto out; |
307 | void *addr; | ||
308 | unsigned long offset = 0; | ||
309 | int need_loop = 0; | ||
310 | 628 | ||
311 | if (!num_entries && !num_bitmaps) | 629 | ret = io_ctl_check_crc(&io_ctl, 0); |
312 | break; | 630 | if (ret) |
631 | goto free_cache; | ||
313 | 632 | ||
314 | page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); | 633 | ret = io_ctl_check_generation(&io_ctl, generation); |
315 | if (!page) | 634 | if (ret) |
635 | goto free_cache; | ||
636 | |||
637 | while (num_entries) { | ||
638 | e = kmem_cache_zalloc(btrfs_free_space_cachep, | ||
639 | GFP_NOFS); | ||
640 | if (!e) | ||
316 | goto free_cache; | 641 | goto free_cache; |
317 | 642 | ||
318 | if (!PageUptodate(page)) { | 643 | ret = io_ctl_read_entry(&io_ctl, e, &type); |
319 | btrfs_readpage(NULL, page); | 644 | if (ret) { |
320 | lock_page(page); | 645 | kmem_cache_free(btrfs_free_space_cachep, e); |
321 | if (!PageUptodate(page)) { | 646 | goto free_cache; |
322 | unlock_page(page); | ||
323 | page_cache_release(page); | ||
324 | printk(KERN_ERR "btrfs: error reading free " | ||
325 | "space cache\n"); | ||
326 | goto free_cache; | ||
327 | } | ||
328 | } | 647 | } |
329 | addr = kmap(page); | ||
330 | 648 | ||
331 | if (index == 0) { | 649 | if (!e->bytes) { |
332 | u64 *gen; | 650 | kmem_cache_free(btrfs_free_space_cachep, e); |
651 | goto free_cache; | ||
652 | } | ||
333 | 653 | ||
334 | /* | 654 | if (type == BTRFS_FREE_SPACE_EXTENT) { |
335 | * We put a bogus crc in the front of the first page in | 655 | spin_lock(&ctl->tree_lock); |
336 | * case old kernels try to mount a fs with the new | 656 | ret = link_free_space(ctl, e); |
337 | * format to make sure they discard the cache. | 657 | spin_unlock(&ctl->tree_lock); |
338 | */ | 658 | if (ret) { |
339 | addr += sizeof(u64); | 659 | printk(KERN_ERR "Duplicate entries in " |
340 | offset += sizeof(u64); | 660 | "free space cache, dumping\n"); |
341 | 661 | kmem_cache_free(btrfs_free_space_cachep, e); | |
342 | gen = addr; | ||
343 | if (*gen != BTRFS_I(inode)->generation) { | ||
344 | printk(KERN_ERR "btrfs: space cache generation" | ||
345 | " (%llu) does not match inode (%llu)\n", | ||
346 | (unsigned long long)*gen, | ||
347 | (unsigned long long) | ||
348 | BTRFS_I(inode)->generation); | ||
349 | kunmap(page); | ||
350 | unlock_page(page); | ||
351 | page_cache_release(page); | ||
352 | goto free_cache; | 662 | goto free_cache; |
353 | } | 663 | } |
354 | addr += sizeof(u64); | 664 | } else { |
355 | offset += sizeof(u64); | 665 | BUG_ON(!num_bitmaps); |
356 | } | 666 | num_bitmaps--; |
357 | entry = addr; | 667 | e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); |
358 | 668 | if (!e->bitmap) { | |
359 | while (1) { | 669 | kmem_cache_free( |
360 | if (!num_entries) | 670 | btrfs_free_space_cachep, e); |
361 | break; | ||
362 | |||
363 | need_loop = 1; | ||
364 | e = kmem_cache_zalloc(btrfs_free_space_cachep, | ||
365 | GFP_NOFS); | ||
366 | if (!e) { | ||
367 | kunmap(page); | ||
368 | unlock_page(page); | ||
369 | page_cache_release(page); | ||
370 | goto free_cache; | 671 | goto free_cache; |
371 | } | 672 | } |
372 | 673 | spin_lock(&ctl->tree_lock); | |
373 | e->offset = le64_to_cpu(entry->offset); | 674 | ret = link_free_space(ctl, e); |
374 | e->bytes = le64_to_cpu(entry->bytes); | 675 | ctl->total_bitmaps++; |
375 | if (!e->bytes) { | 676 | ctl->op->recalc_thresholds(ctl); |
376 | kunmap(page); | 677 | spin_unlock(&ctl->tree_lock); |
678 | if (ret) { | ||
679 | printk(KERN_ERR "Duplicate entries in " | ||
680 | "free space cache, dumping\n"); | ||
377 | kmem_cache_free(btrfs_free_space_cachep, e); | 681 | kmem_cache_free(btrfs_free_space_cachep, e); |
378 | unlock_page(page); | ||
379 | page_cache_release(page); | ||
380 | goto free_cache; | 682 | goto free_cache; |
381 | } | 683 | } |
382 | 684 | list_add_tail(&e->list, &bitmaps); | |
383 | if (entry->type == BTRFS_FREE_SPACE_EXTENT) { | ||
384 | spin_lock(&ctl->tree_lock); | ||
385 | ret = link_free_space(ctl, e); | ||
386 | spin_unlock(&ctl->tree_lock); | ||
387 | if (ret) { | ||
388 | printk(KERN_ERR "Duplicate entries in " | ||
389 | "free space cache, dumping\n"); | ||
390 | kunmap(page); | ||
391 | unlock_page(page); | ||
392 | page_cache_release(page); | ||
393 | goto free_cache; | ||
394 | } | ||
395 | } else { | ||
396 | e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | ||
397 | if (!e->bitmap) { | ||
398 | kunmap(page); | ||
399 | kmem_cache_free( | ||
400 | btrfs_free_space_cachep, e); | ||
401 | unlock_page(page); | ||
402 | page_cache_release(page); | ||
403 | goto free_cache; | ||
404 | } | ||
405 | spin_lock(&ctl->tree_lock); | ||
406 | ret = link_free_space(ctl, e); | ||
407 | ctl->total_bitmaps++; | ||
408 | ctl->op->recalc_thresholds(ctl); | ||
409 | spin_unlock(&ctl->tree_lock); | ||
410 | if (ret) { | ||
411 | printk(KERN_ERR "Duplicate entries in " | ||
412 | "free space cache, dumping\n"); | ||
413 | kunmap(page); | ||
414 | unlock_page(page); | ||
415 | page_cache_release(page); | ||
416 | goto free_cache; | ||
417 | } | ||
418 | list_add_tail(&e->list, &bitmaps); | ||
419 | } | ||
420 | |||
421 | num_entries--; | ||
422 | offset += sizeof(struct btrfs_free_space_entry); | ||
423 | if (offset + sizeof(struct btrfs_free_space_entry) >= | ||
424 | PAGE_CACHE_SIZE) | ||
425 | break; | ||
426 | entry++; | ||
427 | } | 685 | } |
428 | 686 | ||
429 | /* | 687 | num_entries--; |
430 | * We read an entry out of this page, we need to move on to the | 688 | } |
431 | * next page. | ||
432 | */ | ||
433 | if (need_loop) { | ||
434 | kunmap(page); | ||
435 | goto next; | ||
436 | } | ||
437 | 689 | ||
438 | /* | 690 | /* |
439 | * We add the bitmaps at the end of the entries in order that | 691 | * We add the bitmaps at the end of the entries in order that |
440 | * the bitmap entries are added to the cache. | 692 | * the bitmap entries are added to the cache. |
441 | */ | 693 | */ |
442 | e = list_entry(bitmaps.next, struct btrfs_free_space, list); | 694 | list_for_each_entry_safe(e, n, &bitmaps, list) { |
443 | list_del_init(&e->list); | 695 | list_del_init(&e->list); |
444 | memcpy(e->bitmap, addr, PAGE_CACHE_SIZE); | 696 | ret = io_ctl_read_bitmap(&io_ctl, e); |
445 | kunmap(page); | 697 | if (ret) |
446 | num_bitmaps--; | 698 | goto free_cache; |
447 | next: | ||
448 | unlock_page(page); | ||
449 | page_cache_release(page); | ||
450 | index++; | ||
451 | } | 699 | } |
452 | 700 | ||
701 | io_ctl_drop_pages(&io_ctl); | ||
453 | ret = 1; | 702 | ret = 1; |
454 | out: | 703 | out: |
704 | io_ctl_free(&io_ctl); | ||
455 | return ret; | 705 | return ret; |
456 | free_cache: | 706 | free_cache: |
707 | io_ctl_drop_pages(&io_ctl); | ||
457 | __btrfs_remove_free_space_cache(ctl); | 708 | __btrfs_remove_free_space_cache(ctl); |
458 | goto out; | 709 | goto out; |
459 | } | 710 | } |
@@ -465,7 +716,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
465 | struct btrfs_root *root = fs_info->tree_root; | 716 | struct btrfs_root *root = fs_info->tree_root; |
466 | struct inode *inode; | 717 | struct inode *inode; |
467 | struct btrfs_path *path; | 718 | struct btrfs_path *path; |
468 | int ret; | 719 | int ret = 0; |
469 | bool matched; | 720 | bool matched; |
470 | u64 used = btrfs_block_group_used(&block_group->item); | 721 | u64 used = btrfs_block_group_used(&block_group->item); |
471 | 722 | ||
@@ -497,6 +748,14 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
497 | return 0; | 748 | return 0; |
498 | } | 749 | } |
499 | 750 | ||
751 | /* We may have converted the inode and made the cache invalid. */ | ||
752 | spin_lock(&block_group->lock); | ||
753 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { | ||
754 | spin_unlock(&block_group->lock); | ||
755 | goto out; | ||
756 | } | ||
757 | spin_unlock(&block_group->lock); | ||
758 | |||
500 | ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, | 759 | ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, |
501 | path, block_group->key.objectid); | 760 | path, block_group->key.objectid); |
502 | btrfs_free_path(path); | 761 | btrfs_free_path(path); |
@@ -530,6 +789,19 @@ out:
530 | return ret; | 789 | return ret; |
531 | } | 790 | } |
532 | 791 | ||
792 | /** | ||
793 | * __btrfs_write_out_cache - write out cached info to an inode | ||
794 | * @root - the root the inode belongs to | ||
795 | * @ctl - the free space cache we are going to write out | ||
796 | * @block_group - the block_group for this cache if it belongs to a block_group | ||
797 | * @trans - the trans handle | ||
798 | * @path - the path to use | ||
799 | * @offset - the offset for the key we'll insert | ||
800 | * | ||
801 | * This function writes out a free space cache struct to disk for quick recovery | ||
802 | * on mount. This will return 0 if it was successful in writing the cache out, | ||
803 | * and -1 if it was not. | ||
804 | */ | ||
533 | int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | 805 | int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, |
534 | struct btrfs_free_space_ctl *ctl, | 806 | struct btrfs_free_space_ctl *ctl, |
535 | struct btrfs_block_group_cache *block_group, | 807 | struct btrfs_block_group_cache *block_group, |
@@ -540,42 +812,24 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
540 | struct extent_buffer *leaf; | 812 | struct extent_buffer *leaf; |
541 | struct rb_node *node; | 813 | struct rb_node *node; |
542 | struct list_head *pos, *n; | 814 | struct list_head *pos, *n; |
543 | struct page **pages; | ||
544 | struct page *page; | ||
545 | struct extent_state *cached_state = NULL; | 815 | struct extent_state *cached_state = NULL; |
546 | struct btrfs_free_cluster *cluster = NULL; | 816 | struct btrfs_free_cluster *cluster = NULL; |
547 | struct extent_io_tree *unpin = NULL; | 817 | struct extent_io_tree *unpin = NULL; |
818 | struct io_ctl io_ctl; | ||
548 | struct list_head bitmap_list; | 819 | struct list_head bitmap_list; |
549 | struct btrfs_key key; | 820 | struct btrfs_key key; |
550 | u64 start, end, len; | 821 | u64 start, end, len; |
551 | u64 bytes = 0; | ||
552 | u32 crc = ~(u32)0; | ||
553 | int index = 0, num_pages = 0; | ||
554 | int entries = 0; | 822 | int entries = 0; |
555 | int bitmaps = 0; | 823 | int bitmaps = 0; |
556 | int ret = -1; | 824 | int ret; |
557 | bool next_page = false; | 825 | int err = -1; |
558 | bool out_of_space = false; | ||
559 | 826 | ||
560 | INIT_LIST_HEAD(&bitmap_list); | 827 | INIT_LIST_HEAD(&bitmap_list); |
561 | 828 | ||
562 | node = rb_first(&ctl->free_space_offset); | ||
563 | if (!node) | ||
564 | return 0; | ||
565 | |||
566 | if (!i_size_read(inode)) | 829 | if (!i_size_read(inode)) |
567 | return -1; | 830 | return -1; |
568 | 831 | ||
569 | num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> | 832 | io_ctl_init(&io_ctl, inode, root); |
570 | PAGE_CACHE_SHIFT; | ||
571 | |||
572 | filemap_write_and_wait(inode->i_mapping); | ||
573 | btrfs_wait_ordered_range(inode, inode->i_size & | ||
574 | ~(root->sectorsize - 1), (u64)-1); | ||
575 | |||
576 | pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); | ||
577 | if (!pages) | ||
578 | return -1; | ||
579 | 833 | ||
580 | /* Get the cluster for this block_group if it exists */ | 834 | /* Get the cluster for this block_group if it exists */ |
581 | if (block_group && !list_empty(&block_group->cluster_list)) | 835 | if (block_group && !list_empty(&block_group->cluster_list)) |
@@ -589,30 +843,9 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
589 | */ | 843 | */ |
590 | unpin = root->fs_info->pinned_extents; | 844 | unpin = root->fs_info->pinned_extents; |
591 | 845 | ||
592 | /* | 846 | /* Lock all pages first so we can lock the extent safely. */ |
593 | * Lock all pages first so we can lock the extent safely. | 847 | io_ctl_prepare_pages(&io_ctl, inode, 0); |
594 | * | ||
595 | * NOTE: Because we hold the ref the entire time we're going to write to | ||
596 | * the page find_get_page should never fail, so we don't do a check | ||
597 | * after find_get_page at this point. Just putting this here so people | ||
598 | * know and don't freak out. | ||
599 | */ | ||
600 | while (index < num_pages) { | ||
601 | page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); | ||
602 | if (!page) { | ||
603 | int i; | ||
604 | |||
605 | for (i = 0; i < num_pages; i++) { | ||
606 | unlock_page(pages[i]); | ||
607 | page_cache_release(pages[i]); | ||
608 | } | ||
609 | goto out; | ||
610 | } | ||
611 | pages[index] = page; | ||
612 | index++; | ||
613 | } | ||
614 | 848 | ||
615 | index = 0; | ||
616 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, | 849 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, |
617 | 0, &cached_state, GFP_NOFS); | 850 | 0, &cached_state, GFP_NOFS); |
618 | 851 | ||
@@ -623,189 +856,111 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
623 | if (block_group) | 856 | if (block_group) |
624 | start = block_group->key.objectid; | 857 | start = block_group->key.objectid; |
625 | 858 | ||
626 | /* Write out the extent entries */ | 859 | node = rb_first(&ctl->free_space_offset); |
627 | do { | 860 | if (!node && cluster) { |
628 | struct btrfs_free_space_entry *entry; | 861 | node = rb_first(&cluster->root); |
629 | void *addr, *orig; | 862 | cluster = NULL; |
630 | unsigned long offset = 0; | 863 | } |
631 | 864 | ||
632 | next_page = false; | 865 | /* Make sure we can fit our crcs into the first page */ |
866 | if (io_ctl.check_crcs && | ||
867 | (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) { | ||
868 | WARN_ON(1); | ||
869 | goto out_nospc; | ||
870 | } | ||
633 | 871 | ||
634 | if (index >= num_pages) { | 872 | io_ctl_set_generation(&io_ctl, trans->transid); |
635 | out_of_space = true; | ||
636 | break; | ||
637 | } | ||
638 | 873 | ||
639 | page = pages[index]; | 874 | /* Write out the extent entries */ |
875 | while (node) { | ||
876 | struct btrfs_free_space *e; | ||
640 | 877 | ||
641 | orig = addr = kmap(page); | 878 | e = rb_entry(node, struct btrfs_free_space, offset_index); |
642 | if (index == 0) { | 879 | entries++; |
643 | u64 *gen; | ||
644 | 880 | ||
645 | /* | 881 | ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes, |
646 | * We're going to put in a bogus crc for this page to | 882 | e->bitmap); |
647 | * make sure that old kernels who aren't aware of this | 883 | if (ret) |
648 | * format will be sure to discard the cache. | 884 | goto out_nospc; |
649 | */ | ||
650 | addr += sizeof(u64); | ||
651 | offset += sizeof(u64); | ||
652 | 885 | ||
653 | gen = addr; | 886 | if (e->bitmap) { |
654 | *gen = trans->transid; | 887 | list_add_tail(&e->list, &bitmap_list); |
655 | addr += sizeof(u64); | 888 | bitmaps++; |
656 | offset += sizeof(u64); | ||
657 | } | 889 | } |
658 | entry = addr; | 890 | node = rb_next(node); |
659 | 891 | if (!node && cluster) { | |
660 | memset(addr, 0, PAGE_CACHE_SIZE - offset); | 892 | node = rb_first(&cluster->root); |
661 | while (node && !next_page) { | 893 | cluster = NULL; |
662 | struct btrfs_free_space *e; | ||
663 | |||
664 | e = rb_entry(node, struct btrfs_free_space, offset_index); | ||
665 | entries++; | ||
666 | |||
667 | entry->offset = cpu_to_le64(e->offset); | ||
668 | entry->bytes = cpu_to_le64(e->bytes); | ||
669 | if (e->bitmap) { | ||
670 | entry->type = BTRFS_FREE_SPACE_BITMAP; | ||
671 | list_add_tail(&e->list, &bitmap_list); | ||
672 | bitmaps++; | ||
673 | } else { | ||
674 | entry->type = BTRFS_FREE_SPACE_EXTENT; | ||
675 | } | ||
676 | node = rb_next(node); | ||
677 | if (!node && cluster) { | ||
678 | node = rb_first(&cluster->root); | ||
679 | cluster = NULL; | ||
680 | } | ||
681 | offset += sizeof(struct btrfs_free_space_entry); | ||
682 | if (offset + sizeof(struct btrfs_free_space_entry) >= | ||
683 | PAGE_CACHE_SIZE) | ||
684 | next_page = true; | ||
685 | entry++; | ||
686 | } | 894 | } |
895 | } | ||
687 | 896 | ||
688 | /* | 897 | /* |
689 | * We want to add any pinned extents to our free space cache | 898 | * We want to add any pinned extents to our free space cache |
690 | * so we don't leak the space | 899 | * so we don't leak the space |
691 | */ | 900 | */ |
692 | while (block_group && !next_page && | 901 | while (block_group && (start < block_group->key.objectid + |
693 | (start < block_group->key.objectid + | 902 | block_group->key.offset)) { |
694 | block_group->key.offset)) { | 903 | ret = find_first_extent_bit(unpin, start, &start, &end, |
695 | ret = find_first_extent_bit(unpin, start, &start, &end, | 904 | EXTENT_DIRTY); |
696 | EXTENT_DIRTY); | 905 | if (ret) { |
697 | if (ret) { | 906 | ret = 0; |
698 | ret = 0; | 907 | break; |
699 | break; | ||
700 | } | ||
701 | |||
702 | /* This pinned extent is out of our range */ | ||
703 | if (start >= block_group->key.objectid + | ||
704 | block_group->key.offset) | ||
705 | break; | ||
706 | |||
707 | len = block_group->key.objectid + | ||
708 | block_group->key.offset - start; | ||
709 | len = min(len, end + 1 - start); | ||
710 | |||
711 | entries++; | ||
712 | entry->offset = cpu_to_le64(start); | ||
713 | entry->bytes = cpu_to_le64(len); | ||
714 | entry->type = BTRFS_FREE_SPACE_EXTENT; | ||
715 | |||
716 | start = end + 1; | ||
717 | offset += sizeof(struct btrfs_free_space_entry); | ||
718 | if (offset + sizeof(struct btrfs_free_space_entry) >= | ||
719 | PAGE_CACHE_SIZE) | ||
720 | next_page = true; | ||
721 | entry++; | ||
722 | } | 908 | } |
723 | 909 | ||
724 | /* Generate bogus crc value */ | 910 | /* This pinned extent is out of our range */ |
725 | if (index == 0) { | 911 | if (start >= block_group->key.objectid + |
726 | u32 *tmp; | 912 | block_group->key.offset) |
727 | crc = btrfs_csum_data(root, orig + sizeof(u64), crc, | 913 | break; |
728 | PAGE_CACHE_SIZE - sizeof(u64)); | ||
729 | btrfs_csum_final(crc, (char *)&crc); | ||
730 | crc++; | ||
731 | tmp = orig; | ||
732 | *tmp = crc; | ||
733 | } | ||
734 | 914 | ||
735 | kunmap(page); | 915 | len = block_group->key.objectid + |
916 | block_group->key.offset - start; | ||
917 | len = min(len, end + 1 - start); | ||
736 | 918 | ||
737 | bytes += PAGE_CACHE_SIZE; | 919 | entries++; |
920 | ret = io_ctl_add_entry(&io_ctl, start, len, NULL); | ||
921 | if (ret) | ||
922 | goto out_nospc; | ||
738 | 923 | ||
739 | index++; | 924 | start = end + 1; |
740 | } while (node || next_page); | 925 | } |
741 | 926 | ||
742 | /* Write out the bitmaps */ | 927 | /* Write out the bitmaps */ |
743 | list_for_each_safe(pos, n, &bitmap_list) { | 928 | list_for_each_safe(pos, n, &bitmap_list) { |
744 | void *addr; | ||
745 | struct btrfs_free_space *entry = | 929 | struct btrfs_free_space *entry = |
746 | list_entry(pos, struct btrfs_free_space, list); | 930 | list_entry(pos, struct btrfs_free_space, list); |
747 | 931 | ||
748 | if (index >= num_pages) { | 932 | ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap); |
749 | out_of_space = true; | 933 | if (ret) |
750 | break; | 934 | goto out_nospc; |
751 | } | ||
752 | page = pages[index]; | ||
753 | |||
754 | addr = kmap(page); | ||
755 | memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); | ||
756 | kunmap(page); | ||
757 | bytes += PAGE_CACHE_SIZE; | ||
758 | |||
759 | list_del_init(&entry->list); | 935 | list_del_init(&entry->list); |
760 | index++; | ||
761 | } | ||
762 | |||
763 | if (out_of_space) { | ||
764 | btrfs_drop_pages(pages, num_pages); | ||
765 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | ||
766 | i_size_read(inode) - 1, &cached_state, | ||
767 | GFP_NOFS); | ||
768 | ret = 0; | ||
769 | goto out; | ||
770 | } | 936 | } |
771 | 937 | ||
772 | /* Zero out the rest of the pages just to make sure */ | 938 | /* Zero out the rest of the pages just to make sure */ |
773 | while (index < num_pages) { | 939 | io_ctl_zero_remaining_pages(&io_ctl); |
774 | void *addr; | ||
775 | 940 | ||
776 | page = pages[index]; | 941 | ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages, |
777 | addr = kmap(page); | 942 | 0, i_size_read(inode), &cached_state); |
778 | memset(addr, 0, PAGE_CACHE_SIZE); | 943 | io_ctl_drop_pages(&io_ctl); |
779 | kunmap(page); | ||
780 | bytes += PAGE_CACHE_SIZE; | ||
781 | index++; | ||
782 | } | ||
783 | |||
784 | ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0, | ||
785 | bytes, &cached_state); | ||
786 | btrfs_drop_pages(pages, num_pages); | ||
787 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | 944 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, |
788 | i_size_read(inode) - 1, &cached_state, GFP_NOFS); | 945 | i_size_read(inode) - 1, &cached_state, GFP_NOFS); |
789 | 946 | ||
790 | if (ret) { | 947 | if (ret) |
791 | ret = 0; | ||
792 | goto out; | 948 | goto out; |
793 | } | ||
794 | 949 | ||
795 | BTRFS_I(inode)->generation = trans->transid; | ||
796 | 950 | ||
797 | filemap_write_and_wait(inode->i_mapping); | 951 | ret = filemap_write_and_wait(inode->i_mapping); |
952 | if (ret) | ||
953 | goto out; | ||
798 | 954 | ||
799 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | 955 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; |
800 | key.offset = offset; | 956 | key.offset = offset; |
801 | key.type = 0; | 957 | key.type = 0; |
802 | 958 | ||
803 | ret = btrfs_search_slot(trans, root, &key, path, 1, 1); | 959 | ret = btrfs_search_slot(trans, root, &key, path, 0, 1); |
804 | if (ret < 0) { | 960 | if (ret < 0) { |
805 | ret = -1; | 961 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, |
806 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, | 962 | EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL, |
807 | EXTENT_DIRTY | EXTENT_DELALLOC | | 963 | GFP_NOFS); |
808 | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); | ||
809 | goto out; | 964 | goto out; |
810 | } | 965 | } |
811 | leaf = path->nodes[0]; | 966 | leaf = path->nodes[0]; |
@@ -816,15 +971,16 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
816 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | 971 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
817 | if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || | 972 | if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || |
818 | found_key.offset != offset) { | 973 | found_key.offset != offset) { |
819 | ret = -1; | 974 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, |
820 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, | 975 | inode->i_size - 1, |
821 | EXTENT_DIRTY | EXTENT_DELALLOC | | 976 | EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, |
822 | EXTENT_DO_ACCOUNTING, 0, 0, NULL, | 977 | NULL, GFP_NOFS); |
823 | GFP_NOFS); | ||
824 | btrfs_release_path(path); | 978 | btrfs_release_path(path); |
825 | goto out; | 979 | goto out; |
826 | } | 980 | } |
827 | } | 981 | } |
982 | |||
983 | BTRFS_I(inode)->generation = trans->transid; | ||
828 | header = btrfs_item_ptr(leaf, path->slots[0], | 984 | header = btrfs_item_ptr(leaf, path->slots[0], |
829 | struct btrfs_free_space_header); | 985 | struct btrfs_free_space_header); |
830 | btrfs_set_free_space_entries(leaf, header, entries); | 986 | btrfs_set_free_space_entries(leaf, header, entries); |
@@ -833,16 +989,26 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
833 | btrfs_mark_buffer_dirty(leaf); | 989 | btrfs_mark_buffer_dirty(leaf); |
834 | btrfs_release_path(path); | 990 | btrfs_release_path(path); |
835 | 991 | ||
836 | ret = 1; | 992 | err = 0; |
837 | |||
838 | out: | 993 | out: |
839 | kfree(pages); | 994 | io_ctl_free(&io_ctl); |
840 | if (ret != 1) { | 995 | if (err) { |
841 | invalidate_inode_pages2_range(inode->i_mapping, 0, index); | 996 | invalidate_inode_pages2(inode->i_mapping); |
842 | BTRFS_I(inode)->generation = 0; | 997 | BTRFS_I(inode)->generation = 0; |
843 | } | 998 | } |
844 | btrfs_update_inode(trans, root, inode); | 999 | btrfs_update_inode(trans, root, inode); |
845 | return ret; | 1000 | return err; |
1001 | |||
1002 | out_nospc: | ||
1003 | list_for_each_safe(pos, n, &bitmap_list) { | ||
1004 | struct btrfs_free_space *entry = | ||
1005 | list_entry(pos, struct btrfs_free_space, list); | ||
1006 | list_del_init(&entry->list); | ||
1007 | } | ||
1008 | io_ctl_drop_pages(&io_ctl); | ||
1009 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | ||
1010 | i_size_read(inode) - 1, &cached_state, GFP_NOFS); | ||
1011 | goto out; | ||
846 | } | 1012 | } |
847 | 1013 | ||
848 | int btrfs_write_out_cache(struct btrfs_root *root, | 1014 | int btrfs_write_out_cache(struct btrfs_root *root, |
@@ -869,14 +1035,15 @@ int btrfs_write_out_cache(struct btrfs_root *root,
869 | 1035 | ||
870 | ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, | 1036 | ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, |
871 | path, block_group->key.objectid); | 1037 | path, block_group->key.objectid); |
872 | if (ret < 0) { | 1038 | if (ret) { |
873 | spin_lock(&block_group->lock); | 1039 | spin_lock(&block_group->lock); |
874 | block_group->disk_cache_state = BTRFS_DC_ERROR; | 1040 | block_group->disk_cache_state = BTRFS_DC_ERROR; |
875 | spin_unlock(&block_group->lock); | 1041 | spin_unlock(&block_group->lock); |
876 | ret = 0; | 1042 | ret = 0; |
877 | 1043 | #ifdef DEBUG | |
878 | printk(KERN_ERR "btrfs: failed to write free space cache " | 1044 | printk(KERN_ERR "btrfs: failed to write free space cache " |
879 | "for block group %llu\n", block_group->key.objectid); | 1045 | "for block group %llu\n", block_group->key.objectid); |
1046 | #endif | ||
880 | } | 1047 | } |
881 | 1048 | ||
882 | iput(inode); | 1049 | iput(inode); |
@@ -2472,9 +2639,19 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2472 | spin_unlock(&ctl->tree_lock); | 2639 | spin_unlock(&ctl->tree_lock); |
2473 | 2640 | ||
2474 | if (bytes >= minlen) { | 2641 | if (bytes >= minlen) { |
2475 | int update_ret; | 2642 | struct btrfs_space_info *space_info; |
2476 | update_ret = btrfs_update_reserved_bytes(block_group, | 2643 | int update = 0; |
2477 | bytes, 1, 1); | 2644 | |
2645 | space_info = block_group->space_info; | ||
2646 | spin_lock(&space_info->lock); | ||
2647 | spin_lock(&block_group->lock); | ||
2648 | if (!block_group->ro) { | ||
2649 | block_group->reserved += bytes; | ||
2650 | space_info->bytes_reserved += bytes; | ||
2651 | update = 1; | ||
2652 | } | ||
2653 | spin_unlock(&block_group->lock); | ||
2654 | spin_unlock(&space_info->lock); | ||
2478 | 2655 | ||
2479 | ret = btrfs_error_discard_extent(fs_info->extent_root, | 2656 | ret = btrfs_error_discard_extent(fs_info->extent_root, |
2480 | start, | 2657 | start, |
@@ -2482,9 +2659,16 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2482 | &actually_trimmed); | 2659 | &actually_trimmed); |
2483 | 2660 | ||
2484 | btrfs_add_free_space(block_group, start, bytes); | 2661 | btrfs_add_free_space(block_group, start, bytes); |
2485 | if (!update_ret) | 2662 | if (update) { |
2486 | btrfs_update_reserved_bytes(block_group, | 2663 | spin_lock(&space_info->lock); |
2487 | bytes, 0, 1); | 2664 | spin_lock(&block_group->lock); |
2665 | if (block_group->ro) | ||
2666 | space_info->bytes_readonly += bytes; | ||
2667 | block_group->reserved -= bytes; | ||
2668 | space_info->bytes_reserved -= bytes; | ||
2669 | spin_unlock(&space_info->lock); | ||
2670 | spin_unlock(&block_group->lock); | ||
2671 | } | ||
2488 | 2672 | ||
2489 | if (ret) | 2673 | if (ret) |
2490 | break; | 2674 | break; |
@@ -2643,9 +2827,13 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
2643 | return 0; | 2827 | return 0; |
2644 | 2828 | ||
2645 | ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); | 2829 | ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); |
2646 | if (ret < 0) | 2830 | if (ret) { |
2831 | btrfs_delalloc_release_metadata(inode, inode->i_size); | ||
2832 | #ifdef DEBUG | ||
2647 | printk(KERN_ERR "btrfs: failed to write free ino cache " | 2833 | printk(KERN_ERR "btrfs: failed to write free ino cache " |
2648 | "for root %llu\n", root->root_key.objectid); | 2834 | "for root %llu\n", root->root_key.objectid); |
2835 | #endif | ||
2836 | } | ||
2649 | 2837 | ||
2650 | iput(inode); | 2838 | iput(inode); |
2651 | return ret; | 2839 | return ret; |