Diffstat (limited to 'fs/btrfs/ctree.h')
 -rw-r--r--   fs/btrfs/ctree.h | 282
 1 file changed, 94 insertions(+), 188 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0a61dff27f57..299e11e6c554 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -19,6 +19,7 @@
 #include <linux/kobject.h>
 #include <trace/events/btrfs.h>
 #include <asm/kmap_types.h>
+#include <asm/unaligned.h>
 #include <linux/pagemap.h>
 #include <linux/btrfs.h>
 #include <linux/btrfs_tree.h>
@@ -31,11 +32,13 @@
 #include "extent_io.h"
 #include "extent_map.h"
 #include "async-thread.h"
+#include "block-rsv.h"
 
 struct btrfs_trans_handle;
 struct btrfs_transaction;
 struct btrfs_pending_snapshot;
 struct btrfs_delayed_ref_root;
+struct btrfs_space_info;
 extern struct kmem_cache *btrfs_trans_handle_cachep;
 extern struct kmem_cache *btrfs_bit_radix_cachep;
 extern struct kmem_cache *btrfs_path_cachep;
@@ -45,7 +48,16 @@ struct btrfs_ref;
 
 #define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */
 
-#define BTRFS_MAX_MIRRORS 3
+/*
+ * Maximum number of mirrors that can be available for all profiles counting
+ * the target device of dev-replace as one. During an active device replace
+ * procedure, the target device of the copy operation is a mirror for the
+ * filesystem data as well that can be used to read data in order to repair
+ * read errors on other disks.
+ *
+ * Current value is derived from RAID1 with 2 copies.
+ */
+#define BTRFS_MAX_MIRRORS (2 + 1)
 
 #define BTRFS_MAX_LEVEL 8
 
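Note (not part of the patch): the new definition only spells out the arithmetic behind the old literal 3, i.e. two RAID1 copies plus the dev-replace target device. A hypothetical compile-time check of that identity would be:

	BUILD_BUG_ON(BTRFS_MAX_MIRRORS != 2 + 1);	/* RAID1 copies + replace target */
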
@@ -72,6 +84,7 @@ struct btrfs_ref;
 
 /* four bytes for CRC32 */
 static const int btrfs_csum_sizes[] = { 4 };
+static const char *btrfs_csum_names[] = { "crc32c" };
 
 #define BTRFS_EMPTY_DIR_SIZE 0
 
@@ -99,10 +112,6 @@ static inline u32 count_max_extents(u64 size)
 	return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
 }
 
-struct btrfs_mapping_tree {
-	struct extent_map_tree map_tree;
-};
-
 static inline unsigned long btrfs_chunk_item_size(int num_stripes)
 {
 	BUG_ON(num_stripes == 0);
@@ -395,115 +404,6 @@ struct raid_kobject {
 	struct list_head list;
 };
 
-struct btrfs_space_info {
-	spinlock_t lock;
-
-	u64 total_bytes;	/* total bytes in the space,
-				   this doesn't take mirrors into account */
-	u64 bytes_used;		/* total bytes used,
-				   this doesn't take mirrors into account */
-	u64 bytes_pinned;	/* total bytes pinned, will be freed when the
-				   transaction finishes */
-	u64 bytes_reserved;	/* total bytes the allocator has reserved for
-				   current allocations */
-	u64 bytes_may_use;	/* number of bytes that may be used for
-				   delalloc/allocations */
-	u64 bytes_readonly;	/* total bytes that are read only */
-
-	u64 max_extent_size;	/* This will hold the maximum extent size of
-				   the space info if we had an ENOSPC in the
-				   allocator. */
-
-	unsigned int full:1;	/* indicates that we cannot allocate any more
-				   chunks for this space */
-	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */
-
-	unsigned int flush:1;		/* set if we are trying to make space */
-
-	unsigned int force_alloc;	/* set if we need to force a chunk
-					   alloc for this space */
-
-	u64 disk_used;		/* total bytes used on disk */
-	u64 disk_total;		/* total bytes on disk, takes mirrors into
-				   account */
-
-	u64 flags;
-
-	/*
-	 * bytes_pinned is kept in line with what is actually pinned, as in
-	 * we've called update_block_group and dropped the bytes_used counter
-	 * and increased the bytes_pinned counter. However this means that
-	 * bytes_pinned does not reflect the bytes that will be pinned once the
-	 * delayed refs are flushed, so this counter is inc'ed every time we
-	 * call btrfs_free_extent so it is a realtime count of what will be
-	 * freed once the transaction is committed. It will be zeroed every
-	 * time the transaction commits.
-	 */
-	struct percpu_counter total_bytes_pinned;
-
-	struct list_head list;
-	/* Protected by the spinlock 'lock'. */
-	struct list_head ro_bgs;
-	struct list_head priority_tickets;
-	struct list_head tickets;
-	/*
-	 * tickets_id just indicates the next ticket will be handled, so note
-	 * it's not stored per ticket.
-	 */
-	u64 tickets_id;
-
-	struct rw_semaphore groups_sem;
-	/* for block groups in our same type */
-	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
-	wait_queue_head_t wait;
-
-	struct kobject kobj;
-	struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
-};
-
-/*
- * Types of block reserves
- */
-enum {
-	BTRFS_BLOCK_RSV_GLOBAL,
-	BTRFS_BLOCK_RSV_DELALLOC,
-	BTRFS_BLOCK_RSV_TRANS,
-	BTRFS_BLOCK_RSV_CHUNK,
-	BTRFS_BLOCK_RSV_DELOPS,
-	BTRFS_BLOCK_RSV_DELREFS,
-	BTRFS_BLOCK_RSV_EMPTY,
-	BTRFS_BLOCK_RSV_TEMP,
-};
-
-struct btrfs_block_rsv {
-	u64 size;
-	u64 reserved;
-	struct btrfs_space_info *space_info;
-	spinlock_t lock;
-	unsigned short full;
-	unsigned short type;
-	unsigned short failfast;
-
-	/*
-	 * Qgroup equivalent for @size @reserved
-	 *
-	 * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care
-	 * about things like csum size nor how many tree blocks it will need to
-	 * reserve.
-	 *
-	 * Qgroup cares more about net change of the extent usage.
-	 *
-	 * So for one newly inserted file extent, in worst case it will cause
-	 * leaf split and level increase, nodesize for each file extent is
-	 * already too much.
-	 *
-	 * In short, qgroup_size/reserved is the upper limit of possible needed
-	 * qgroup metadata reservation.
-	 */
-	u64 qgroup_rsv_size;
-	u64 qgroup_rsv_reserved;
-};
-
 /*
  * free clusters are used to claim free space in relatively large chunks,
  * allowing us to do less seeky writes. They are used for all metadata
@@ -786,11 +686,18 @@ enum {
 	/*
 	 * Indicate that balance has been set up from the ioctl and is in the
 	 * main phase. The fs_info::balance_ctl is initialized.
+	 * Set and cleared while holding fs_info::balance_mutex.
 	 */
 	BTRFS_FS_BALANCE_RUNNING,
 
 	/* Indicate that the cleaner thread is awake and doing something. */
 	BTRFS_FS_CLEANER_RUNNING,
+
+	/*
+	 * The checksumming has an optimized version and is considered fast,
+	 * so we don't need to offload checksums to workqueues.
+	 */
+	BTRFS_FS_CSUM_IMPL_FAST,
 };
 
 struct btrfs_fs_info {
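Note (illustrative, not part of the patch): BTRFS_FS_CSUM_IMPL_FAST is a runtime bit in fs_info::flags like the other BTRFS_FS_* flags, so a submit path would query it with the usual test_bit() pattern; a minimal sketch:

	/* Offload checksumming to a workqueue only when the csum implementation is slow */
	bool async_csum = !test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
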
@@ -824,7 +731,7 @@ struct btrfs_fs_info {
 	struct extent_io_tree *pinned_extents;
 
 	/* logical->physical extent mapping */
-	struct btrfs_mapping_tree mapping_tree;
+	struct extent_map_tree mapping_tree;
 
 	/*
 	 * block reservation for extent, checksum, root tree and
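Note (sketch, not from the patch): with the btrfs_mapping_tree wrapper removed, callers use fs_info->mapping_tree as an extent_map_tree directly instead of going through the old map_tree member; a chunk lookup would now look roughly like this (logical/length are an arbitrary range):

	struct extent_map *em;

	read_lock(&fs_info->mapping_tree.lock);
	em = lookup_extent_mapping(&fs_info->mapping_tree, logical, length);
	read_unlock(&fs_info->mapping_tree.lock);
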
@@ -1160,6 +1067,14 @@ struct btrfs_fs_info {
 	spinlock_t swapfile_pins_lock;
 	struct rb_root swapfile_pins;
 
+	struct crypto_shash *csum_shash;
+
+	/*
+	 * Number of send operations in progress.
+	 * Updated while holding fs_info::balance_mutex.
+	 */
+	int send_in_progress;
+
 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
 	spinlock_t ref_verify_lock;
 	struct rb_root block_tree;
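Note (illustrative sketch): fs_info::csum_shash is a crypto shash transform for checksumming; mount-time setup would presumably allocate it by algorithm name, along these lines (csum_type is assumed to hold the already-validated superblock checksum type):

	fs_info->csum_shash = crypto_alloc_shash(btrfs_super_csum_name(csum_type), 0, 0);
	if (IS_ERR(fs_info->csum_shash))
		return PTR_ERR(fs_info->csum_shash);
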
@@ -2451,6 +2366,11 @@ static inline int btrfs_super_csum_size(const struct btrfs_super_block *s)
 	return btrfs_csum_sizes[t];
 }
 
+static inline const char *btrfs_super_csum_name(u16 csum_type)
+{
+	/* csum type is validated at mount time */
+	return btrfs_csum_names[csum_type];
+}
 
 /*
  * The leaf data grows from end-to-front in the node.
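Note (usage sketch, not part of the patch): btrfs_super_csum_name() indexes the btrfs_csum_names[] array added above, parallel to btrfs_csum_sizes[]; a caller holding a validated superblock could report the algorithm like this (disk_super/fs_info assumed available):

	u16 csum_type = btrfs_super_csum_type(disk_super);

	btrfs_info(fs_info, "using checksum %s (type %u, %d bytes)",
		   btrfs_super_csum_name(csum_type), csum_type,
		   btrfs_super_csum_size(disk_super));
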
@@ -2642,6 +2562,16 @@ BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right,
 	((unsigned long)(BTRFS_LEAF_DATA_OFFSET + \
 	btrfs_item_offset_nr(leaf, slot)))
 
+static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
+{
+	return crc32c(crc, address, length);
+}
+
+static inline void btrfs_crc32c_final(u32 crc, u8 *result)
+{
+	put_unaligned_le32(~crc, result);
+}
+
 static inline u64 btrfs_name_hash(const char *name, int len)
 {
 	return crc32c((u32)~1, name, len);
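Note (minimal sketch, assuming the classic btrfs crc32c convention of a ~0 seed): the two new helpers pair up as compute + finalize, with the final step storing the bit-inverted CRC in little-endian on-disk form; data/len stand for an arbitrary buffer:

	u8 csum[4];		/* crc32c result, 4 bytes per btrfs_csum_sizes[] */
	u32 crc = ~(u32)0;	/* seed */

	crc = btrfs_crc32c(crc, data, len);
	btrfs_crc32c_final(crc, csum);	/* stores ~crc, little-endian */
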
@@ -2656,12 +2586,6 @@ static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
 	return (u64) crc32c(parent_objectid, name, len);
 }
 
-static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
-{
-	return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
-		(space_info->flags & BTRFS_BLOCK_GROUP_DATA));
-}
-
 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
 {
 	return mapping_gfp_constraint(mapping, ~__GFP_FS);
@@ -2698,8 +2622,6 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
 	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
 }
 
-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
-bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
 					const u64 start);
 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
@@ -2814,17 +2736,28 @@ enum btrfs_flush_state {
 	COMMIT_TRANS = 9,
 };
 
-int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes);
-int btrfs_check_data_free_space(struct inode *inode,
-			struct extent_changeset **reserved, u64 start, u64 len);
-void btrfs_free_reserved_data_space(struct inode *inode,
-			struct extent_changeset *reserved, u64 start, u64 len);
-void btrfs_delalloc_release_space(struct inode *inode,
-				  struct extent_changeset *reserved,
-				  u64 start, u64 len, bool qgroup_free);
-void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
-					    u64 len);
-void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
+/*
+ * control flags for do_chunk_alloc's force field
+ * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
+ * if we really need one.
+ *
+ * CHUNK_ALLOC_LIMITED means to only try and allocate one
+ * if we have very few chunks already allocated. This is
+ * used as part of the clustering code to help make sure
+ * we have a good pool of storage to cluster in, without
+ * filling the FS with empty chunks
+ *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ */
+enum btrfs_chunk_alloc_enum {
+	CHUNK_ALLOC_NO_FORCE,
+	CHUNK_ALLOC_LIMITED,
+	CHUNK_ALLOC_FORCE,
+};
+
+int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+		      enum btrfs_chunk_alloc_enum force);
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 				     struct btrfs_block_rsv *rsv,
 				     int nitems, bool use_global_rsv);
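Note (illustrative call, not from the patch): btrfs_chunk_alloc() is driven by the self-documenting enum rather than a raw int; forcing a metadata chunk from transaction context (trans is an open transaction handle) would look roughly like:

	int ret;

	ret = btrfs_chunk_alloc(trans, BTRFS_BLOCK_GROUP_METADATA,
				CHUNK_ALLOC_FORCE);
	if (ret < 0 && ret != -ENOSPC)
		btrfs_abort_transaction(trans, ret);
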
@@ -2834,41 +2767,6 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
 				     bool qgroup_free);
 
 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
-void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
-				     bool qgroup_free);
-int btrfs_delalloc_reserve_space(struct inode *inode,
-			struct extent_changeset **reserved, u64 start, u64 len);
-void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
-struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
-					      unsigned short type);
-void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
-				   struct btrfs_block_rsv *rsv,
-				   unsigned short type);
-void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
-			  struct btrfs_block_rsv *rsv);
-int btrfs_block_rsv_add(struct btrfs_root *root,
-			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
-			enum btrfs_reserve_flush_enum flush);
-int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor);
-int btrfs_block_rsv_refill(struct btrfs_root *root,
-			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
-			   enum btrfs_reserve_flush_enum flush);
-int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
-			    struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
-			    bool update_size);
-int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
-			     struct btrfs_block_rsv *dest, u64 num_bytes,
-			     int min_factor);
-void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
-			     struct btrfs_block_rsv *block_rsv,
-			     u64 num_bytes);
-void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
-void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
-int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
-				  enum btrfs_reserve_flush_enum flush);
-void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
-				       struct btrfs_block_rsv *src,
-				       u64 num_bytes);
 int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
 void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
@@ -3186,7 +3084,8 @@ int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot,
 struct btrfs_dio_private;
 int btrfs_del_csums(struct btrfs_trans_handle *trans,
 		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
-blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
+blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
+				   u8 *dst);
 blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
 				       u64 logical_offset);
 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
@@ -3514,8 +3413,7 @@ __cold
 static inline void assfail(const char *expr, const char *file, int line)
 {
 	if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
-		pr_err("assertion failed: %s, file: %s, line: %d\n",
-		       expr, file, line);
+		pr_err("assertion failed: %s, in %s:%d\n", expr, file, line);
 		BUG();
 	}
 }
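Note (example only): this is the message the ASSERT() wrapper ends up printing, so a check such as the one below would now report "assertion failed: ret == 0, in <file>:<line>" before calling BUG(), instead of the older "file: ..., line: ..." wording:

	ASSERT(ret == 0);
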
@@ -3599,10 +3497,11 @@ do { \
 /* compatibility and incompatibility defines */
 
 #define btrfs_set_fs_incompat(__fs_info, opt) \
-	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
+	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, \
+				#opt)
 
 static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
-					   u64 flag)
+					   u64 flag, const char* name)
 {
 	struct btrfs_super_block *disk_super;
 	u64 features;
@@ -3615,18 +3514,20 @@ static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
 		if (!(features & flag)) {
 			features |= flag;
 			btrfs_set_super_incompat_flags(disk_super, features);
-			btrfs_info(fs_info, "setting %llu feature flag",
-				   flag);
+			btrfs_info(fs_info,
+				   "setting incompat feature flag for %s (0x%llx)",
+				   name, flag);
 		}
 		spin_unlock(&fs_info->super_lock);
 	}
 }
 
 #define btrfs_clear_fs_incompat(__fs_info, opt) \
-	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
+	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, \
+				  #opt)
 
 static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
-					     u64 flag)
+					     u64 flag, const char* name)
 {
 	struct btrfs_super_block *disk_super;
 	u64 features;
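Note (usage sketch): because the wrappers now pass #opt through as a string, enabling a feature logs its name as well as the raw flag; e.g., assuming RAID56 is the feature being turned on:

	btrfs_set_fs_incompat(fs_info, RAID56);
	/* logs "setting incompat feature flag for RAID56" together with the hex flag value */
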
@@ -3639,8 +3540,9 @@ static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
 		if (features & flag) {
 			features &= ~flag;
 			btrfs_set_super_incompat_flags(disk_super, features);
-			btrfs_info(fs_info, "clearing %llu feature flag",
-				   flag);
+			btrfs_info(fs_info,
+				   "clearing incompat feature flag for %s (0x%llx)",
+				   name, flag);
 		}
 		spin_unlock(&fs_info->super_lock);
 	}
@@ -3657,10 +3559,11 @@ static inline bool __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
 }
 
 #define btrfs_set_fs_compat_ro(__fs_info, opt) \
-	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
+	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, \
+				 #opt)
 
 static inline void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info,
-					    u64 flag)
+					    u64 flag, const char *name)
 {
 	struct btrfs_super_block *disk_super;
 	u64 features;
@@ -3673,18 +3576,20 @@ static inline void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info,
 		if (!(features & flag)) {
 			features |= flag;
 			btrfs_set_super_compat_ro_flags(disk_super, features);
-			btrfs_info(fs_info, "setting %llu ro feature flag",
-				   flag);
+			btrfs_info(fs_info,
+				   "setting compat-ro feature flag for %s (0x%llx)",
+				   name, flag);
 		}
 		spin_unlock(&fs_info->super_lock);
 	}
 }
 
 #define btrfs_clear_fs_compat_ro(__fs_info, opt) \
-	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
+	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, \
+				   #opt)
 
 static inline void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info,
-					      u64 flag)
+					      u64 flag, const char *name)
 {
 	struct btrfs_super_block *disk_super;
 	u64 features;
@@ -3697,8 +3602,9 @@ static inline void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info,
 		if (features & flag) {
 			features &= ~flag;
 			btrfs_set_super_compat_ro_flags(disk_super, features);
-			btrfs_info(fs_info, "clearing %llu ro feature flag",
-				   flag);
+			btrfs_info(fs_info,
+				   "clearing compat-ro feature flag for %s (0x%llx)",
+				   name, flag);
 		}
 		spin_unlock(&fs_info->super_lock);
 	}