author     Tahsin Erdogan <tahsin@google.com>      2017-06-22 11:53:15 -0400
committer  Theodore Ts'o <tytso@mit.edu>           2017-06-22 11:53:15 -0400
commit     b9fc761ea2d82e910e92f83d01bbbbe1f5e99bfc
tree       f902c1b80c61b08a08493f86414542bafe517899
parent     daf8328172dffabb4a6b5e1970d6e9628669f51c
ext4: strong binding of xattr inode references
To verify that an xattr entry is not pointing to the wrong xattr inode,
we currently check that the target inode has the EXT4_EA_INODE_FL flag
set and that the entry size matches the target inode size.

For stronger validation, also incorporate the crc32c hash of the value
into the e_hash field. This is done regardless of whether the entry
lives in the inode body or in an external attribute block.
Signed-off-by: Tahsin Erdogan <tahsin@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
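
For illustration only, the binding described above can be modelled outside
the kernel. The sketch below is a userspace approximation, not the patched
code: the shift constants mirror NAME_HASH_SHIFT and VALUE_HASH_SHIFT from
fs/ext4/xattr.c, but the crc32c seed and the little-endian conversions are
simplified (the kernel derives the value hash from the filesystem checksum
seed via ext4_xattr_inode_hash()), and xattr_entry_hash(),
ea_inode_entry_hash() and the sample names are invented for the example.

/*
 * Userspace sketch of the new e_hash scheme (illustrative, not kernel code).
 * NAME_HASH_SHIFT/VALUE_HASH_SHIFT match fs/ext4/xattr.c; the crc32c seed
 * and endianness handling used by the kernel are simplified away.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NAME_HASH_SHIFT   5
#define VALUE_HASH_SHIFT 16

/* Plain bitwise CRC32C (Castagnoli). The kernel seeds its crc32c with the
 * per-filesystem checksum seed, which is ignored here. */
static uint32_t crc32c(uint32_t crc, const void *data, size_t len)
{
	const unsigned char *p = data;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1));
	}
	return crc;
}

/* Same folding as the reworked ext4_xattr_hash_entry(): name bytes first,
 * then 32-bit value words. */
static uint32_t xattr_entry_hash(const char *name, size_t name_len,
				 const uint32_t *value, size_t value_count)
{
	uint32_t hash = 0;

	while (name_len--)
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (32 - NAME_HASH_SHIFT)) ^
		       (unsigned char)*name++;
	while (value_count--)
		hash = (hash << VALUE_HASH_SHIFT) ^
		       (hash >> (32 - VALUE_HASH_SHIFT)) ^
		       *value++;
	return hash;
}

/* For a value stored in an EA inode, the entry hash now covers the crc32c
 * of the value (a single 32-bit word) rather than the value itself. */
static uint32_t ea_inode_entry_hash(const char *name, size_t name_len,
				    const void *value, size_t value_len)
{
	uint32_t value_hash = crc32c(~0U, value, value_len);

	return xattr_entry_hash(name, name_len, &value_hash, 1);
}

int main(void)
{
	static const char name[] = "user.demo";		/* hypothetical xattr */
	static const char value[] = "hypothetical value";

	printf("e_hash = %#x\n",
	       (unsigned)ea_inode_entry_hash(name, sizeof(name) - 1,
					     value, sizeof(value) - 1));
	return 0;
}

Feeding the crc32c of the value instead of the value itself keeps the entry
hash bound to the exact value contents without walking a potentially large
value buffer a second time, which is the trade-off the patch comments call
out in ext4_xattr_set_entry().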
-rw-r--r--   fs/ext4/xattr.c | 104
1 file changed, 65 insertions(+), 39 deletions(-)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 354c55c3f70c..a5ad0ccdd1cb 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -77,8 +77,8 @@ static void ext4_xattr_block_cache_insert(struct mb_cache *,
 static struct buffer_head *
 ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
			    struct mb_cache_entry **);
-static void ext4_xattr_hash_entry(struct ext4_xattr_entry *entry,
-				  void *value_base);
+static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
+				    size_t value_count);
 static void ext4_xattr_rehash(struct ext4_xattr_header *);
 
 static const struct xattr_handler * const ext4_xattr_handler_map[] = {
@@ -380,7 +380,9 @@ error:
 }
 
 static int
-ext4_xattr_inode_verify_hash(struct inode *ea_inode, void *buffer, size_t size)
+ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
+			       struct ext4_xattr_entry *entry, void *buffer,
+			       size_t size)
 {
 	u32 hash;
 
@@ -388,23 +390,35 @@ ext4_xattr_inode_verify_hash(struct inode *ea_inode, void *buffer, size_t size)
 	hash = ext4_xattr_inode_hash(EXT4_SB(ea_inode->i_sb), buffer, size);
 	if (hash != ext4_xattr_inode_get_hash(ea_inode))
 		return -EFSCORRUPTED;
+
+	if (entry) {
+		__le32 e_hash, tmp_data;
+
+		/* Verify entry hash. */
+		tmp_data = cpu_to_le32(hash);
+		e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
+					       &tmp_data, 1);
+		if (e_hash != entry->e_hash)
+			return -EFSCORRUPTED;
+	}
 	return 0;
 }
 
 #define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)
 
 /*
- * Read the value from the EA inode.
+ * Read xattr value from the EA inode.
  */
 static int
-ext4_xattr_inode_get(struct inode *inode, unsigned long ea_ino, void *buffer,
-		     size_t size)
+ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry,
+		     void *buffer, size_t size)
 {
 	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
 	struct inode *ea_inode;
 	int err;
 
-	err = ext4_xattr_inode_iget(inode, ea_ino, &ea_inode);
+	err = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum),
+				    &ea_inode);
 	if (err) {
 		ea_inode = NULL;
 		goto out;
@@ -422,7 +436,7 @@ ext4_xattr_inode_get(struct inode *inode, unsigned long ea_ino, void *buffer,
 	if (err)
 		goto out;
 
-	err = ext4_xattr_inode_verify_hash(ea_inode, buffer, size);
+	err = ext4_xattr_inode_verify_hashes(ea_inode, entry, buffer, size);
 	/*
 	 * Compatibility check for old Lustre ea_inode implementation. Old
 	 * version does not have hash validation, but it has a backpointer
@@ -489,9 +503,8 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
 		if (size > buffer_size)
 			goto cleanup;
 		if (entry->e_value_inum) {
-			error = ext4_xattr_inode_get(inode,
-					le32_to_cpu(entry->e_value_inum),
-					buffer, size);
+			error = ext4_xattr_inode_get(inode, entry, buffer,
+						     size);
 			if (error)
 				goto cleanup;
 		} else {
@@ -539,9 +552,8 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
 		if (size > buffer_size)
 			goto cleanup;
 		if (entry->e_value_inum) {
-			error = ext4_xattr_inode_get(inode,
-					le32_to_cpu(entry->e_value_inum),
-					buffer, size);
+			error = ext4_xattr_inode_get(inode, entry, buffer,
+						     size);
 			if (error)
 				goto cleanup;
 		} else {
@@ -1400,8 +1412,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
 		    (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
 		    i_size_read(ea_inode) == value_len &&
 		    !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
-		    !ext4_xattr_inode_verify_hash(ea_inode, ea_data,
-						  value_len) &&
+		    !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
+						    value_len) &&
 		    !memcmp(value, ea_data, value_len)) {
 			mb_cache_entry_touch(ea_inode_cache, ce);
 			mb_cache_entry_put(ea_inode_cache, ce);
@@ -1665,12 +1677,36 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 		here->e_value_size = cpu_to_le32(i->value_len);
 	}
 
-	if (is_block) {
-		if (i->value)
-			ext4_xattr_hash_entry(here, s->base);
-		ext4_xattr_rehash((struct ext4_xattr_header *)s->base);
+	if (i->value) {
+		__le32 hash = 0;
+
+		/* Entry hash calculation. */
+		if (in_inode) {
+			__le32 crc32c_hash;
+
+			/*
+			 * Feed crc32c hash instead of the raw value for entry
+			 * hash calculation. This is to avoid walking
+			 * potentially long value buffer again.
+			 */
+			crc32c_hash = cpu_to_le32(
+				ext4_xattr_inode_get_hash(new_ea_inode));
+			hash = ext4_xattr_hash_entry(here->e_name,
+						     here->e_name_len,
+						     &crc32c_hash, 1);
+		} else if (is_block) {
+			__le32 *value = s->base + min_offs - new_size;
+
+			hash = ext4_xattr_hash_entry(here->e_name,
+						     here->e_name_len, value,
+						     new_size >> 2);
+		}
+		here->e_hash = hash;
 	}
 
+	if (is_block)
+		ext4_xattr_rehash((struct ext4_xattr_header *)s->base);
+
 	ret = 0;
 out:
 	iput(old_ea_inode);
@@ -2452,9 +2488,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
 
 	/* Save the entry name and the entry value */
 	if (entry->e_value_inum) {
-		error = ext4_xattr_inode_get(inode,
-					     le32_to_cpu(entry->e_value_inum),
-					     buffer, value_size);
+		error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
 		if (error)
 			goto out;
 	} else {
@@ -2944,30 +2978,22 @@ ext4_xattr_block_cache_find(struct inode *inode,
  *
  * Compute the hash of an extended attribute.
  */
-static void ext4_xattr_hash_entry(struct ext4_xattr_entry *entry,
-				  void *value_base)
+static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
+				    size_t value_count)
 {
 	__u32 hash = 0;
-	char *name = entry->e_name;
-	int n;
 
-	for (n = 0; n < entry->e_name_len; n++) {
+	while (name_len--) {
 		hash = (hash << NAME_HASH_SHIFT) ^
 		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
 		       *name++;
 	}
-
-	if (!entry->e_value_inum && entry->e_value_size) {
-		__le32 *value = (__le32 *)((char *)value_base +
-			le16_to_cpu(entry->e_value_offs));
-		for (n = (le32_to_cpu(entry->e_value_size) +
-			 EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
-			hash = (hash << VALUE_HASH_SHIFT) ^
-			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
-			       le32_to_cpu(*value++);
-		}
+	while (value_count--) {
+		hash = (hash << VALUE_HASH_SHIFT) ^
+		       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
+		       le32_to_cpu(*value++);
 	}
-	entry->e_hash = cpu_to_le32(hash);
+	return cpu_to_le32(hash);
 }
 
 #undef NAME_HASH_SHIFT
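
On the read path, the net effect of the hunks above is that a value fetched
from an EA inode is only trusted after two checks: the crc32c stored in the
EA inode must match the value, and the entry's e_hash must match a hash of
the name plus that crc32c. The function below is a hypothetical userspace
rendering of ext4_xattr_inode_verify_hashes(); it reuses the crc32c() and
xattr_entry_hash() helpers from the earlier sketch (paste it before main()
there to compile), and its parameters stand in for the on-disk fields.

/*
 * Model of the two-step verification: both checks must pass before a
 * value read back from an EA inode is trusted. The kernel returns
 * -EFSCORRUPTED where this sketch returns -1; ea_inode_hash and e_hash
 * stand in for the hash stored in the EA inode and the entry's e_hash.
 */
static int verify_ea_inode_value(const char *name, size_t name_len,
				 const void *value, size_t value_len,
				 uint32_t ea_inode_hash, uint32_t e_hash)
{
	uint32_t value_hash = crc32c(~0U, value, value_len);

	/* 1) The value must match the checksum recorded in the EA inode. */
	if (value_hash != ea_inode_hash)
		return -1;

	/* 2) The entry must be bound to this exact value through e_hash. */
	if (xattr_entry_hash(name, name_len, &value_hash, 1) != e_hash)
		return -1;

	return 0;
}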