aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/free-space-cache.c
diff options
context:
space:
mode:
authorJosef Bacik <josef@redhat.com>2011-10-06 08:58:24 -0400
committerJosef Bacik <josef@redhat.com>2011-10-19 15:12:54 -0400
commit5b0e95bf607ddd59b39f52d3d55e6581c817b530 (patch)
treeda248f5492908ce8b9402beee68c6ee98aa3caed /fs/btrfs/free-space-cache.c
parent9a82ca659d8bfd99afc0e89bbde2202322df5755 (diff)
Btrfs: inline checksums into the disk free space cache
Yeah yeah I know this is how we used to do it and then I changed it, but damnit I'm changing it back. The fact is that writing out checksums will modify metadata, which could cause us to dirty a block group we've already written out, so we have to truncate it and all of its checksums and re-write it which will write new checksums which could dirty a block group that has already been written and you see where I'm going with this? This can cause unmount or really anything that depends on a transaction to commit to take its sweet damned time to happen. So go back to the way it was, only this time we're specifically setting NODATACOW because we can't go through the COW pathway anyway and we're doing our own built-in cow'ing by truncating the free space cache. The other new thing is once we truncate the old cache and preallocate the new space, we don't need to do that song and dance at all for the rest of the transaction, we can just overwrite the existing space with the new cache if the block group changes for whatever reason, and the NODATACOW will let us do this fine. So keep track of which transaction we last cleared our cache in and if we cleared it in this transaction just say we're all set up and carry on. This survives xfstests and stress.sh. The inode cache will continue to use the normal csum infrastructure since it only gets written once and there will be no more modifications to the fs tree in a transaction commit. Signed-off-by: Josef Bacik <josef@redhat.com>
Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--fs/btrfs/free-space-cache.c211
1 file changed, 154 insertions, 57 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index abc924c9467c..5d40c1ed8225 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -85,6 +85,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
85 *block_group, struct btrfs_path *path) 85 *block_group, struct btrfs_path *path)
86{ 86{
87 struct inode *inode = NULL; 87 struct inode *inode = NULL;
88 u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
88 89
89 spin_lock(&block_group->lock); 90 spin_lock(&block_group->lock);
90 if (block_group->inode) 91 if (block_group->inode)
@@ -99,9 +100,10 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
99 return inode; 100 return inode;
100 101
101 spin_lock(&block_group->lock); 102 spin_lock(&block_group->lock);
102 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) { 103 if (!((BTRFS_I(inode)->flags & flags) == flags)) {
103 printk(KERN_INFO "Old style space inode found, converting.\n"); 104 printk(KERN_INFO "Old style space inode found, converting.\n");
104 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM; 105 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
106 BTRFS_INODE_NODATACOW;
105 block_group->disk_cache_state = BTRFS_DC_CLEAR; 107 block_group->disk_cache_state = BTRFS_DC_CLEAR;
106 } 108 }
107 109
@@ -123,12 +125,17 @@ int __create_free_space_inode(struct btrfs_root *root,
123 struct btrfs_free_space_header *header; 125 struct btrfs_free_space_header *header;
124 struct btrfs_inode_item *inode_item; 126 struct btrfs_inode_item *inode_item;
125 struct extent_buffer *leaf; 127 struct extent_buffer *leaf;
128 u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
126 int ret; 129 int ret;
127 130
128 ret = btrfs_insert_empty_inode(trans, root, path, ino); 131 ret = btrfs_insert_empty_inode(trans, root, path, ino);
129 if (ret) 132 if (ret)
130 return ret; 133 return ret;
131 134
135 /* We inline crc's for the free disk space cache */
136 if (ino != BTRFS_FREE_INO_OBJECTID)
137 flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
138
132 leaf = path->nodes[0]; 139 leaf = path->nodes[0];
133 inode_item = btrfs_item_ptr(leaf, path->slots[0], 140 inode_item = btrfs_item_ptr(leaf, path->slots[0],
134 struct btrfs_inode_item); 141 struct btrfs_inode_item);
@@ -141,8 +148,7 @@ int __create_free_space_inode(struct btrfs_root *root,
141 btrfs_set_inode_uid(leaf, inode_item, 0); 148 btrfs_set_inode_uid(leaf, inode_item, 0);
142 btrfs_set_inode_gid(leaf, inode_item, 0); 149 btrfs_set_inode_gid(leaf, inode_item, 0);
143 btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600); 150 btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
144 btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS | 151 btrfs_set_inode_flags(leaf, inode_item, flags);
145 BTRFS_INODE_PREALLOC);
146 btrfs_set_inode_nlink(leaf, inode_item, 1); 152 btrfs_set_inode_nlink(leaf, inode_item, 1);
147 btrfs_set_inode_transid(leaf, inode_item, trans->transid); 153 btrfs_set_inode_transid(leaf, inode_item, trans->transid);
148 btrfs_set_inode_block_group(leaf, inode_item, offset); 154 btrfs_set_inode_block_group(leaf, inode_item, offset);
@@ -249,6 +255,7 @@ struct io_ctl {
249 unsigned long size; 255 unsigned long size;
250 int index; 256 int index;
251 int num_pages; 257 int num_pages;
258 unsigned check_crcs:1;
252}; 259};
253 260
254static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode, 261static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
@@ -262,6 +269,8 @@ static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
262 if (!io_ctl->pages) 269 if (!io_ctl->pages)
263 return -ENOMEM; 270 return -ENOMEM;
264 io_ctl->root = root; 271 io_ctl->root = root;
272 if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
273 io_ctl->check_crcs = 1;
265 return 0; 274 return 0;
266} 275}
267 276
@@ -340,25 +349,39 @@ static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
340 io_ctl_map_page(io_ctl, 1); 349 io_ctl_map_page(io_ctl, 1);
341 350
342 /* 351 /*
343 * Skip the first 64bits to make sure theres a bogus crc for old 352 * Skip the csum areas. If we don't check crcs then we just have a
344 * kernels 353 * 64bit chunk at the front of the first page.
345 */ 354 */
346 io_ctl->cur += sizeof(u64); 355 if (io_ctl->check_crcs) {
356 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
357 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
358 } else {
359 io_ctl->cur += sizeof(u64);
360 io_ctl->size -= sizeof(u64) * 2;
361 }
347 362
348 val = io_ctl->cur; 363 val = io_ctl->cur;
349 *val = cpu_to_le64(generation); 364 *val = cpu_to_le64(generation);
350 io_ctl->cur += sizeof(u64); 365 io_ctl->cur += sizeof(u64);
351 io_ctl->size -= sizeof(u64) * 2;
352} 366}
353 367
354static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation) 368static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
355{ 369{
356 u64 *gen; 370 u64 *gen;
357 371
358 io_ctl_map_page(io_ctl, 0); 372 /*
373 * Skip the crc area. If we don't check crcs then we just have a 64bit
374 * chunk at the front of the first page.
375 */
376 if (io_ctl->check_crcs) {
377 io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
378 io_ctl->size -= sizeof(u64) +
379 (sizeof(u32) * io_ctl->num_pages);
380 } else {
381 io_ctl->cur += sizeof(u64);
382 io_ctl->size -= sizeof(u64) * 2;
383 }
359 384
360 /* Skip the bogus crc area */
361 io_ctl->cur += sizeof(u64);
362 gen = io_ctl->cur; 385 gen = io_ctl->cur;
363 if (le64_to_cpu(*gen) != generation) { 386 if (le64_to_cpu(*gen) != generation) {
364 printk_ratelimited(KERN_ERR "btrfs: space cache generation " 387 printk_ratelimited(KERN_ERR "btrfs: space cache generation "
@@ -368,7 +391,63 @@ static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
368 return -EIO; 391 return -EIO;
369 } 392 }
370 io_ctl->cur += sizeof(u64); 393 io_ctl->cur += sizeof(u64);
371 io_ctl->size -= sizeof(u64) * 2; 394 return 0;
395}
396
397static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
398{
399 u32 *tmp;
400 u32 crc = ~(u32)0;
401 unsigned offset = 0;
402
403 if (!io_ctl->check_crcs) {
404 io_ctl_unmap_page(io_ctl);
405 return;
406 }
407
408 if (index == 0)
 409 		offset = sizeof(u32) * io_ctl->num_pages;
410
411 crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
412 PAGE_CACHE_SIZE - offset);
413 btrfs_csum_final(crc, (char *)&crc);
414 io_ctl_unmap_page(io_ctl);
415 tmp = kmap(io_ctl->pages[0]);
416 tmp += index;
417 *tmp = crc;
418 kunmap(io_ctl->pages[0]);
419}
420
421static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
422{
423 u32 *tmp, val;
424 u32 crc = ~(u32)0;
425 unsigned offset = 0;
426
427 if (!io_ctl->check_crcs) {
428 io_ctl_map_page(io_ctl, 0);
429 return 0;
430 }
431
432 if (index == 0)
433 offset = sizeof(u32) * io_ctl->num_pages;
434
435 tmp = kmap(io_ctl->pages[0]);
436 tmp += index;
437 val = *tmp;
438 kunmap(io_ctl->pages[0]);
439
440 io_ctl_map_page(io_ctl, 0);
441 crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
442 PAGE_CACHE_SIZE - offset);
443 btrfs_csum_final(crc, (char *)&crc);
444 if (val != crc) {
445 printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
446 "space cache\n");
447 io_ctl_unmap_page(io_ctl);
448 return -EIO;
449 }
450
372 return 0; 451 return 0;
373} 452}
374 453
@@ -391,22 +470,7 @@ static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
391 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) 470 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
392 return 0; 471 return 0;
393 472
394 /* 473 io_ctl_set_crc(io_ctl, io_ctl->index - 1);
395 * index == 1 means the current page is 0, we need to generate a bogus
396 * crc for older kernels.
397 */
398 if (io_ctl->index == 1) {
399 u32 *tmp;
400 u32 crc = ~(u32)0;
401
402 crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + sizeof(u64),
403 crc, PAGE_CACHE_SIZE - sizeof(u64));
404 btrfs_csum_final(crc, (char *)&crc);
405 crc++;
406 tmp = io_ctl->orig;
407 *tmp = crc;
408 }
409 io_ctl_unmap_page(io_ctl);
410 474
411 /* No more pages to map */ 475 /* No more pages to map */
412 if (io_ctl->index >= io_ctl->num_pages) 476 if (io_ctl->index >= io_ctl->num_pages)
@@ -427,14 +491,14 @@ static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
427 * map the next one if there is any left. 491 * map the next one if there is any left.
428 */ 492 */
429 if (io_ctl->cur != io_ctl->orig) { 493 if (io_ctl->cur != io_ctl->orig) {
430 io_ctl_unmap_page(io_ctl); 494 io_ctl_set_crc(io_ctl, io_ctl->index - 1);
431 if (io_ctl->index >= io_ctl->num_pages) 495 if (io_ctl->index >= io_ctl->num_pages)
432 return -ENOSPC; 496 return -ENOSPC;
433 io_ctl_map_page(io_ctl, 0); 497 io_ctl_map_page(io_ctl, 0);
434 } 498 }
435 499
436 memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); 500 memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
437 io_ctl_unmap_page(io_ctl); 501 io_ctl_set_crc(io_ctl, io_ctl->index - 1);
438 if (io_ctl->index < io_ctl->num_pages) 502 if (io_ctl->index < io_ctl->num_pages)
439 io_ctl_map_page(io_ctl, 0); 503 io_ctl_map_page(io_ctl, 0);
440 return 0; 504 return 0;
@@ -442,51 +506,60 @@ static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
442 506
443static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl) 507static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
444{ 508{
445 io_ctl_unmap_page(io_ctl); 509 /*
510 * If we're not on the boundary we know we've modified the page and we
511 * need to crc the page.
512 */
513 if (io_ctl->cur != io_ctl->orig)
514 io_ctl_set_crc(io_ctl, io_ctl->index - 1);
515 else
516 io_ctl_unmap_page(io_ctl);
446 517
447 while (io_ctl->index < io_ctl->num_pages) { 518 while (io_ctl->index < io_ctl->num_pages) {
448 io_ctl_map_page(io_ctl, 1); 519 io_ctl_map_page(io_ctl, 1);
449 io_ctl_unmap_page(io_ctl); 520 io_ctl_set_crc(io_ctl, io_ctl->index - 1);
450 } 521 }
451} 522}
452 523
453static u8 io_ctl_read_entry(struct io_ctl *io_ctl, 524static int io_ctl_read_entry(struct io_ctl *io_ctl,
454 struct btrfs_free_space *entry) 525 struct btrfs_free_space *entry, u8 *type)
455{ 526{
456 struct btrfs_free_space_entry *e; 527 struct btrfs_free_space_entry *e;
457 u8 type;
458 528
459 e = io_ctl->cur; 529 e = io_ctl->cur;
460 entry->offset = le64_to_cpu(e->offset); 530 entry->offset = le64_to_cpu(e->offset);
461 entry->bytes = le64_to_cpu(e->bytes); 531 entry->bytes = le64_to_cpu(e->bytes);
462 type = e->type; 532 *type = e->type;
463 io_ctl->cur += sizeof(struct btrfs_free_space_entry); 533 io_ctl->cur += sizeof(struct btrfs_free_space_entry);
464 io_ctl->size -= sizeof(struct btrfs_free_space_entry); 534 io_ctl->size -= sizeof(struct btrfs_free_space_entry);
465 535
466 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) 536 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
467 return type; 537 return 0;
468 538
469 io_ctl_unmap_page(io_ctl); 539 io_ctl_unmap_page(io_ctl);
470 540
471 if (io_ctl->index >= io_ctl->num_pages) 541 if (io_ctl->index >= io_ctl->num_pages)
472 return type; 542 return 0;
473 543
474 io_ctl_map_page(io_ctl, 0); 544 return io_ctl_check_crc(io_ctl, io_ctl->index);
475 return type;
476} 545}
477 546
478static void io_ctl_read_bitmap(struct io_ctl *io_ctl, 547static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
479 struct btrfs_free_space *entry) 548 struct btrfs_free_space *entry)
480{ 549{
481 BUG_ON(!io_ctl->cur); 550 int ret;
482 if (io_ctl->cur != io_ctl->orig) { 551
552 if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
483 io_ctl_unmap_page(io_ctl); 553 io_ctl_unmap_page(io_ctl);
484 io_ctl_map_page(io_ctl, 0); 554
485 } 555 ret = io_ctl_check_crc(io_ctl, io_ctl->index);
556 if (ret)
557 return ret;
558
486 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); 559 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
487 io_ctl_unmap_page(io_ctl); 560 io_ctl_unmap_page(io_ctl);
488 if (io_ctl->index < io_ctl->num_pages) 561
489 io_ctl_map_page(io_ctl, 0); 562 return 0;
490} 563}
491 564
492int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, 565int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
@@ -553,6 +626,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
553 if (ret) 626 if (ret)
554 goto out; 627 goto out;
555 628
629 ret = io_ctl_check_crc(&io_ctl, 0);
630 if (ret)
631 goto free_cache;
632
556 ret = io_ctl_check_generation(&io_ctl, generation); 633 ret = io_ctl_check_generation(&io_ctl, generation);
557 if (ret) 634 if (ret)
558 goto free_cache; 635 goto free_cache;
@@ -563,7 +640,12 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
563 if (!e) 640 if (!e)
564 goto free_cache; 641 goto free_cache;
565 642
566 type = io_ctl_read_entry(&io_ctl, e); 643 ret = io_ctl_read_entry(&io_ctl, e, &type);
644 if (ret) {
645 kmem_cache_free(btrfs_free_space_cachep, e);
646 goto free_cache;
647 }
648
567 if (!e->bytes) { 649 if (!e->bytes) {
568 kmem_cache_free(btrfs_free_space_cachep, e); 650 kmem_cache_free(btrfs_free_space_cachep, e);
569 goto free_cache; 651 goto free_cache;
@@ -611,7 +693,9 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
611 */ 693 */
612 list_for_each_entry_safe(e, n, &bitmaps, list) { 694 list_for_each_entry_safe(e, n, &bitmaps, list) {
613 list_del_init(&e->list); 695 list_del_init(&e->list);
614 io_ctl_read_bitmap(&io_ctl, e); 696 ret = io_ctl_read_bitmap(&io_ctl, e);
697 if (ret)
698 goto free_cache;
615 } 699 }
616 700
617 io_ctl_drop_pages(&io_ctl); 701 io_ctl_drop_pages(&io_ctl);
@@ -632,7 +716,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
632 struct btrfs_root *root = fs_info->tree_root; 716 struct btrfs_root *root = fs_info->tree_root;
633 struct inode *inode; 717 struct inode *inode;
634 struct btrfs_path *path; 718 struct btrfs_path *path;
635 int ret; 719 int ret = 0;
636 bool matched; 720 bool matched;
637 u64 used = btrfs_block_group_used(&block_group->item); 721 u64 used = btrfs_block_group_used(&block_group->item);
638 722
@@ -664,6 +748,14 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
664 return 0; 748 return 0;
665 } 749 }
666 750
751 /* We may have converted the inode and made the cache invalid. */
752 spin_lock(&block_group->lock);
753 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
754 spin_unlock(&block_group->lock);
755 goto out;
756 }
757 spin_unlock(&block_group->lock);
758
667 ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, 759 ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
668 path, block_group->key.objectid); 760 path, block_group->key.objectid);
669 btrfs_free_path(path); 761 btrfs_free_path(path);
@@ -774,6 +866,13 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
774 cluster = NULL; 866 cluster = NULL;
775 } 867 }
776 868
869 /* Make sure we can fit our crcs into the first page */
870 if (io_ctl.check_crcs &&
871 (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
872 WARN_ON(1);
873 goto out_nospc;
874 }
875
777 io_ctl_set_generation(&io_ctl, trans->transid); 876 io_ctl_set_generation(&io_ctl, trans->transid);
778 877
779 /* Write out the extent entries */ 878 /* Write out the extent entries */
@@ -864,8 +963,8 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
864 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 963 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
865 if (ret < 0) { 964 if (ret < 0) {
866 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, 965 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
867 EXTENT_DIRTY | EXTENT_DELALLOC | 966 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
868 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); 967 GFP_NOFS);
869 goto out; 968 goto out;
870 } 969 }
871 leaf = path->nodes[0]; 970 leaf = path->nodes[0];
@@ -878,9 +977,8 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
878 found_key.offset != offset) { 977 found_key.offset != offset) {
879 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, 978 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
880 inode->i_size - 1, 979 inode->i_size - 1,
881 EXTENT_DIRTY | EXTENT_DELALLOC | 980 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
882 EXTENT_DO_ACCOUNTING, 0, 0, NULL, 981 NULL, GFP_NOFS);
883 GFP_NOFS);
884 btrfs_release_path(path); 982 btrfs_release_path(path);
885 goto out; 983 goto out;
886 } 984 }
@@ -942,7 +1040,6 @@ int btrfs_write_out_cache(struct btrfs_root *root,
942 ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, 1040 ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
943 path, block_group->key.objectid); 1041 path, block_group->key.objectid);
944 if (ret) { 1042 if (ret) {
945 btrfs_delalloc_release_metadata(inode, inode->i_size);
946 spin_lock(&block_group->lock); 1043 spin_lock(&block_group->lock);
947 block_group->disk_cache_state = BTRFS_DC_ERROR; 1044 block_group->disk_cache_state = BTRFS_DC_ERROR;
948 spin_unlock(&block_group->lock); 1045 spin_unlock(&block_group->lock);