author		Tahsin Erdogan <tahsin@google.com>	2017-06-22 10:29:53 -0400
committer	Theodore Ts'o <tytso@mit.edu>		2017-06-22 10:29:53 -0400
commit		c07dfcb45877fbc6798fa042bab3c4b85378efd4
tree		a626ee2c0841dd7370097b8a1e874941f34de4eb
parent		b6d9029df083c0a9ce1d4eda1480105e635e0d61
mbcache: make mbcache naming more generic
Make names more generic so that mbcache usage is not limited to block
sharing. In a subsequent patch in the series ("ext4: xattr inode
deduplication"), we start using the mbcache code for sharing xattr
inodes. With that patch, the old mb_cache_entry.e_block field can hold
either a block number or an inode number.

Signed-off-by: Tahsin Erdogan <tahsin@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
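After the rename, mbcache stores an opaque 64-bit value instead of a
sector_t block number. A minimal sketch of the reworked creation call,
assuming a hypothetical caller that has already computed a content hash
(example_remember() and the GFP_NOFS choice are illustrative, not part
of this patch):

#include <linux/mbcache.h>

/* Hypothetical caller: remember that contents hashing to @hash live at
 * @location. After this series @location may be a block number or an
 * xattr inode number; mbcache no longer cares which. */
static int example_remember(struct mb_cache *cache, u32 hash, u64 location)
{
	/* Returns -EBUSY if this exact (key, value) pair is already cached. */
	return mb_cache_entry_create(cache, GFP_NOFS, hash, location, true);
}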
 fs/ext2/xattr.c          | 18
 fs/ext4/xattr.c          | 10
 fs/mbcache.c             | 43
 include/linux/mbcache.h  | 11
 4 files changed, 40 insertions(+), 42 deletions(-)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index fbdb8f171893..1e5f76070580 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -493,8 +493,8 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
 			 * This must happen under buffer lock for
 			 * ext2_xattr_set2() to reliably detect modified block
 			 */
-			mb_cache_entry_delete_block(EXT2_SB(sb)->s_mb_cache,
-						    hash, bh->b_blocknr);
+			mb_cache_entry_delete(EXT2_SB(sb)->s_mb_cache, hash,
+					      bh->b_blocknr);
 
 			/* keep the buffer locked while modifying it. */
 		} else {
@@ -721,8 +721,8 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 			 * This must happen under buffer lock for
 			 * ext2_xattr_set2() to reliably detect freed block
 			 */
-			mb_cache_entry_delete_block(ext2_mb_cache,
-						    hash, old_bh->b_blocknr);
+			mb_cache_entry_delete(ext2_mb_cache, hash,
+					      old_bh->b_blocknr);
 			/* Free the old block. */
 			ea_bdebug(old_bh, "freeing");
 			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
@@ -795,8 +795,8 @@ ext2_xattr_delete_inode(struct inode *inode)
 	 * This must happen under buffer lock for ext2_xattr_set2() to
 	 * reliably detect freed block
 	 */
-	mb_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache,
-				    hash, bh->b_blocknr);
+	mb_cache_entry_delete(EXT2_SB(inode->i_sb)->s_mb_cache, hash,
+			      bh->b_blocknr);
 	ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
 	get_bh(bh);
 	bforget(bh);
@@ -907,11 +907,11 @@ again:
 	while (ce) {
 		struct buffer_head *bh;
 
-		bh = sb_bread(inode->i_sb, ce->e_block);
+		bh = sb_bread(inode->i_sb, ce->e_value);
 		if (!bh) {
 			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
 				   "inode %ld: block %ld read error",
-				   inode->i_ino, (unsigned long) ce->e_block);
+				   inode->i_ino, (unsigned long) ce->e_value);
 		} else {
 			lock_buffer(bh);
 			/*
@@ -931,7 +931,7 @@ again:
 		} else if (le32_to_cpu(HDR(bh)->h_refcount) >
 			   EXT2_XATTR_REFCOUNT_MAX) {
 			ea_idebug(inode, "block %ld refcount %d>%d",
-				  (unsigned long) ce->e_block,
+				  (unsigned long) ce->e_value,
 				  le32_to_cpu(HDR(bh)->h_refcount),
 				  EXT2_XATTR_REFCOUNT_MAX);
 		} else if (!ext2_xattr_cmp(header, HDR(bh))) {
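All three ext2 hunks above share one pattern: the stale cache entry is
removed while the buffer is locked, so that a concurrent
ext2_xattr_set2() reliably sees the block as modified or freed. A
condensed sketch of that pattern under the new name (error handling
omitted; ext2_mb_cache, hash and bh as in the hunks above):

	lock_buffer(bh);
	/* Key is the hash of the block contents, value the block number. */
	mb_cache_entry_delete(ext2_mb_cache, hash, bh->b_blocknr);
	/* ... modify or free the block while it is still locked ... */
	unlock_buffer(bh);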
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fed54001c9e6..85da7792afd0 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -678,7 +678,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 		 * This must happen under buffer lock for
 		 * ext4_xattr_block_set() to reliably detect freed block
 		 */
-		mb_cache_entry_delete_block(ext4_mb_cache, hash, bh->b_blocknr);
+		mb_cache_entry_delete(ext4_mb_cache, hash, bh->b_blocknr);
 		get_bh(bh);
 		unlock_buffer(bh);
 		ext4_free_blocks(handle, inode, bh, 0, 1,
@@ -1113,8 +1113,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			 * ext4_xattr_block_set() to reliably detect modified
 			 * block
 			 */
-			mb_cache_entry_delete_block(ext4_mb_cache, hash,
-						    bs->bh->b_blocknr);
+			mb_cache_entry_delete(ext4_mb_cache, hash,
+					      bs->bh->b_blocknr);
 			ea_bdebug(bs->bh, "modifying in-place");
 			error = ext4_xattr_set_entry(i, s, handle, inode);
 			if (!error) {
@@ -2236,10 +2236,10 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
 	while (ce) {
 		struct buffer_head *bh;
 
-		bh = sb_bread(inode->i_sb, ce->e_block);
+		bh = sb_bread(inode->i_sb, ce->e_value);
 		if (!bh) {
 			EXT4_ERROR_INODE(inode, "block %lu read error",
-					 (unsigned long) ce->e_block);
+					 (unsigned long)ce->e_value);
 		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
 			*pce = ce;
 			return bh;
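Both ext2_xattr_cache_find() and ext4_xattr_cache_find() walk all
entries sharing a content hash and reread each candidate block, whose
number now comes out of e_value. A hedged sketch of that lookup loop
(headers_match() stands in for ext4_xattr_cmp(); the real code also
hands the matching entry back to the caller):

	struct mb_cache_entry *ce;

	ce = mb_cache_entry_find_first(cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(sb, ce->e_value);	/* formerly ce->e_block */
		if (bh && headers_match(bh))
			return bh;	/* caller keeps the buffer reference */
		brelse(bh);
		ce = mb_cache_entry_find_next(cache, ce);
	}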
diff --git a/fs/mbcache.c b/fs/mbcache.c
index b19be429d655..45a8d52dc991 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -10,7 +10,7 @@
 /*
  * Mbcache is a simple key-value store. Keys need not be unique, however
  * key-value pairs are expected to be unique (we use this fact in
- * mb_cache_entry_delete_block()).
+ * mb_cache_entry_delete()).
  *
  * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
  * They use hash of a block contents as a key and block number as a value.
@@ -62,15 +62,15 @@ static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
  * @cache - cache where the entry should be created
  * @mask - gfp mask with which the entry should be allocated
  * @key - key of the entry
- * @block - block that contains data
- * @reusable - is the block reusable by other inodes?
+ * @value - value of the entry
+ * @reusable - is the entry reusable by others?
  *
- * Creates entry in @cache with key @key and records that data is stored in
- * block @block. The function returns -EBUSY if entry with the same key
- * and for the same block already exists in cache. Otherwise 0 is returned.
+ * Creates entry in @cache with key @key and value @value. The function returns
+ * -EBUSY if entry with the same key and value already exists in cache.
+ * Otherwise 0 is returned.
  */
 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
-			  sector_t block, bool reusable)
+			  u64 value, bool reusable)
 {
 	struct mb_cache_entry *entry, *dup;
 	struct hlist_bl_node *dup_node;
@@ -91,12 +91,12 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 	/* One ref for hash, one ref returned */
 	atomic_set(&entry->e_refcnt, 1);
 	entry->e_key = key;
-	entry->e_block = block;
+	entry->e_value = value;
 	entry->e_reusable = reusable;
 	head = mb_cache_entry_head(cache, key);
 	hlist_bl_lock(head);
 	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
-		if (dup->e_key == key && dup->e_block == block) {
+		if (dup->e_key == key && dup->e_value == value) {
 			hlist_bl_unlock(head);
 			kmem_cache_free(mb_entry_cache, entry);
 			return -EBUSY;
@@ -187,13 +187,13 @@ struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
 EXPORT_SYMBOL(mb_cache_entry_find_next);
 
 /*
- * mb_cache_entry_get - get a cache entry by block number (and key)
+ * mb_cache_entry_get - get a cache entry by value (and key)
  * @cache - cache we work with
- * @key - key of block number @block
- * @block - block number
+ * @key - key
+ * @value - value
  */
 struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
-					  sector_t block)
+					  u64 value)
 {
 	struct hlist_bl_node *node;
 	struct hlist_bl_head *head;
@@ -202,7 +202,7 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
 	head = mb_cache_entry_head(cache, key);
 	hlist_bl_lock(head);
 	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
-		if (entry->e_key == key && entry->e_block == block) {
+		if (entry->e_key == key && entry->e_value == value) {
 			atomic_inc(&entry->e_refcnt);
 			goto out;
 		}
@@ -214,15 +214,14 @@ out:
 }
 EXPORT_SYMBOL(mb_cache_entry_get);
 
-/* mb_cache_entry_delete_block - remove information about block from cache
+/* mb_cache_entry_delete - remove a cache entry
  * @cache - cache we work with
- * @key - key of block @block
- * @block - block number
+ * @key - key
+ * @value - value
  *
- * Remove entry from cache @cache with key @key with data stored in @block.
+ * Remove entry from cache @cache with key @key and value @value.
  */
-void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
-				 sector_t block)
+void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
 {
 	struct hlist_bl_node *node;
 	struct hlist_bl_head *head;
@@ -231,7 +230,7 @@ void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
 	head = mb_cache_entry_head(cache, key);
 	hlist_bl_lock(head);
 	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
-		if (entry->e_key == key && entry->e_block == block) {
+		if (entry->e_key == key && entry->e_value == value) {
 			/* We keep hash list reference to keep entry alive */
 			hlist_bl_del_init(&entry->e_hash_list);
 			hlist_bl_unlock(head);
@@ -248,7 +247,7 @@ void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
 	}
 	hlist_bl_unlock(head);
 }
-EXPORT_SYMBOL(mb_cache_entry_delete_block);
+EXPORT_SYMBOL(mb_cache_entry_delete);
 
 /* mb_cache_entry_touch - cache entry got used
  * @cache - cache the entry belongs to
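Taken together, the renamed fs/mbcache.c interface is a small multimap
from a u32 key to u64 values. A minimal lifecycle sketch, assuming a
fresh cache (the key 0xabcd and value 42 are made up):

	struct mb_cache *cache = mb_cache_create(10);	/* 2^10 hash buckets */
	struct mb_cache_entry *entry;

	mb_cache_entry_create(cache, GFP_KERNEL, 0xabcd, 42, true);

	entry = mb_cache_entry_get(cache, 0xabcd, 42);	/* takes a reference */
	if (entry) {
		mb_cache_entry_touch(cache, entry);	/* mark recently used */
		mb_cache_entry_put(cache, entry);	/* drop the reference */
	}

	mb_cache_entry_delete(cache, 0xabcd, 42);	/* unhash the pair */
	mb_cache_destroy(cache);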
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 86c9a8b480c5..e1bc73414983 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -19,15 +19,15 @@ struct mb_cache_entry {
 	u32 e_key;
 	u32 e_referenced:1;
 	u32 e_reusable:1;
-	/* Block number of hashed block - stable during lifetime of the entry */
-	sector_t e_block;
+	/* User provided value - stable during lifetime of the entry */
+	u64 e_value;
 };
 
 struct mb_cache *mb_cache_create(int bucket_bits);
 void mb_cache_destroy(struct mb_cache *cache);
 
 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
-			  sector_t block, bool reusable);
+			  u64 value, bool reusable);
 void __mb_cache_entry_free(struct mb_cache_entry *entry);
 static inline int mb_cache_entry_put(struct mb_cache *cache,
 				     struct mb_cache_entry *entry)
@@ -38,10 +38,9 @@ static inline int mb_cache_entry_put(struct mb_cache *cache,
 	return 1;
 }
 
-void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
-				 sector_t block);
+void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
 struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
-					  sector_t block);
+					  u64 value);
 struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
 						 u32 key);
 struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,