author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-14 15:59:42 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-14 15:59:42 -0500
commit     ae9a8c4bdc91202b4236372eed53c54d2297c71b (patch)
tree       4596680ee808334d246ad2f93bdd743d76c3741a
parent     32190f0afbf4f1c0a9142e5a886a078ee0b794fd (diff)
parent     232530680290ba94ca37852ab10d9556ea28badf (diff)
Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
Pull ext4 updates from Ted Ts'o:
- Add support for online resizing of file systems with bigalloc
- Fix two data corruption bugs involving DAX, as well as a corruption
  bug after a crash during a racing fallocate and delayed allocation.
- Finally, a number of cleanups and optimizations.
* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
ext4: improve smp scalability for inode generation
ext4: add support for online resizing with bigalloc
ext4: mention noload when recovering on read-only device
Documentation: fix little inconsistencies
ext4: convert timers to use timer_setup()
jbd2: convert timers to use timer_setup()
ext4: remove duplicate extended attributes defs
ext4: add ext4_should_use_dax()
ext4: add sanity check for encryption + DAX
ext4: prevent data corruption with journaling + DAX
ext4: prevent data corruption with inline data + DAX
ext4: fix interaction between i_size, fallocate, and delalloc after a crash
ext4: retry allocations conservatively
ext4: Switch to iomap for SEEK_HOLE / SEEK_DATA
ext4: Add iomap support for inline data
iomap: Add IOMAP_F_DATA_INLINE flag
iomap: Switch from blkno to disk offset
-rw-r--r--  fs/buffer.c            |   4
-rw-r--r--  fs/dax.c               |   2
-rw-r--r--  fs/ext2/inode.c        |   4
-rw-r--r--  fs/ext4/Kconfig        |   1
-rw-r--r--  fs/ext4/balloc.c       |  15
-rw-r--r--  fs/ext4/ext4.h         |  50
-rw-r--r--  fs/ext4/extents.c      |   6
-rw-r--r--  fs/ext4/file.c         | 263
-rw-r--r--  fs/ext4/ialloc.c       |   4
-rw-r--r--  fs/ext4/inline.c       |  43
-rw-r--r--  fs/ext4/inode.c        | 153
-rw-r--r--  fs/ext4/ioctl.c        |  30
-rw-r--r--  fs/ext4/mballoc.c      |  28
-rw-r--r--  fs/ext4/resize.c       | 104
-rw-r--r--  fs/ext4/super.c        |  27
-rw-r--r--  fs/iomap.c             |  13
-rw-r--r--  fs/jbd2/journal.c      |   9
-rw-r--r--  fs/nfsd/blocklayout.c  |   4
-rw-r--r--  fs/xfs/xfs_iomap.c     |   6
-rw-r--r--  include/linux/iomap.h  |  15
20 files changed, 274 insertions, 507 deletions
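
The common thread in the fs/buffer.c, fs/dax.c, fs/ext2 and fs/ext4 hunks below is the iomap change at the bottom of the tag list above ("iomap: Switch from blkno to disk offset"): a mapping now carries a byte address (iomap->addr, with IOMAP_NULL_ADDR for holes) instead of a 512-byte sector number (iomap->blkno / IOMAP_NULL_BLOCK). A minimal sketch of the two conversions a block-based filesystem performs when filling the mapping; the helper names are illustrative and not taken from the patch:

    /* 'bno' is a filesystem block number, 'blkbits' the block size in
     * bits (12 for a 4 KiB block size). */

    /* Old interface: iomap->blkno was a 512-byte sector number. */
    static inline unsigned long long fsblock_to_blkno(unsigned long long bno,
                                                      unsigned int blkbits)
    {
            return bno << (blkbits - 9);
    }

    /* New interface: iomap->addr is a byte offset on the block device;
     * consumers such as the fs/buffer.c and fs/dax.c hunks below recover
     * a block number or sector with a single shift. */
    static inline unsigned long long fsblock_to_addr(unsigned long long bno,
                                                     unsigned int blkbits)
    {
            return bno << blkbits;
    }
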
diff --git a/fs/buffer.c b/fs/buffer.c
index 32ce01f0f95f..49b7e9bdcd1d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1979,8 +1979,8 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, | |||
1979 | case IOMAP_MAPPED: | 1979 | case IOMAP_MAPPED: |
1980 | if (offset >= i_size_read(inode)) | 1980 | if (offset >= i_size_read(inode)) |
1981 | set_buffer_new(bh); | 1981 | set_buffer_new(bh); |
1982 | bh->b_blocknr = (iomap->blkno >> (inode->i_blkbits - 9)) + | 1982 | bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> |
1983 | ((offset - iomap->offset) >> inode->i_blkbits); | 1983 | inode->i_blkbits; |
1984 | set_buffer_mapped(bh); | 1984 | set_buffer_mapped(bh); |
1985 | break; | 1985 | break; |
1986 | } | 1986 | } |
diff --git a/fs/dax.c b/fs/dax.c
@@ -938,7 +938,7 @@ EXPORT_SYMBOL_GPL(__dax_zero_page_range);
938 | 938 | ||
939 | static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) | 939 | static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) |
940 | { | 940 | { |
941 | return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); | 941 | return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9; |
942 | } | 942 | } |
943 | 943 | ||
944 | static loff_t | 944 | static loff_t |
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 1442a4c734c8..9b2ac55ac34f 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -821,11 +821,11 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length, | |||
821 | 821 | ||
822 | if (ret == 0) { | 822 | if (ret == 0) { |
823 | iomap->type = IOMAP_HOLE; | 823 | iomap->type = IOMAP_HOLE; |
824 | iomap->blkno = IOMAP_NULL_BLOCK; | 824 | iomap->addr = IOMAP_NULL_ADDR; |
825 | iomap->length = 1 << blkbits; | 825 | iomap->length = 1 << blkbits; |
826 | } else { | 826 | } else { |
827 | iomap->type = IOMAP_MAPPED; | 827 | iomap->type = IOMAP_MAPPED; |
828 | iomap->blkno = (sector_t)bno << (blkbits - 9); | 828 | iomap->addr = (u64)bno << blkbits; |
829 | iomap->length = (u64)ret << blkbits; | 829 | iomap->length = (u64)ret << blkbits; |
830 | iomap->flags |= IOMAP_F_MERGED; | 830 | iomap->flags |= IOMAP_F_MERGED; |
831 | } | 831 | } |
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e38039fd96ff..73b850f5659c 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -37,6 +37,7 @@ config EXT4_FS | |||
37 | select CRC16 | 37 | select CRC16 |
38 | select CRYPTO | 38 | select CRYPTO |
39 | select CRYPTO_CRC32C | 39 | select CRYPTO_CRC32C |
40 | select FS_IOMAP | ||
40 | help | 41 | help |
41 | This is the next generation of the ext3 filesystem. | 42 | This is the next generation of the ext3 filesystem. |
42 | 43 | ||
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index d5ddfb96c83c..a943e568292e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -601,22 +601,21 @@ int ext4_claim_free_clusters(struct ext4_sb_info *sbi, | |||
601 | * ext4_should_retry_alloc() is called when ENOSPC is returned, and if | 601 | * ext4_should_retry_alloc() is called when ENOSPC is returned, and if |
602 | * it is profitable to retry the operation, this function will wait | 602 | * it is profitable to retry the operation, this function will wait |
603 | * for the current or committing transaction to complete, and then | 603 | * for the current or committing transaction to complete, and then |
604 | * return TRUE. | 604 | * return TRUE. We will only retry once. |
605 | * | ||
606 | * if the total number of retries exceed three times, return FALSE. | ||
607 | */ | 605 | */ |
608 | int ext4_should_retry_alloc(struct super_block *sb, int *retries) | 606 | int ext4_should_retry_alloc(struct super_block *sb, int *retries) |
609 | { | 607 | { |
610 | if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) || | 608 | if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) || |
611 | (*retries)++ > 3 || | 609 | (*retries)++ > 1 || |
612 | !EXT4_SB(sb)->s_journal) | 610 | !EXT4_SB(sb)->s_journal) |
613 | return 0; | 611 | return 0; |
614 | 612 | ||
615 | jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); | ||
616 | |||
617 | smp_mb(); | 613 | smp_mb(); |
618 | if (EXT4_SB(sb)->s_mb_free_pending) | 614 | if (EXT4_SB(sb)->s_mb_free_pending == 0) |
619 | jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal); | 615 | return 0; |
616 | |||
617 | jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); | ||
618 | jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal); | ||
620 | return 1; | 619 | return 1; |
621 | } | 620 | } |
622 | 621 | ||
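
With "ext4: retry allocations conservatively", ext4_should_retry_alloc() above gives up sooner and only forces a journal commit when freed blocks are actually pending. Its callers are unchanged; for context, the usual calling pattern in ext4 looks roughly like the sketch below, where do_some_ext4_allocation() is a hypothetical stand-in:

    /* Illustrative caller pattern: retry an operation that failed with
     * ENOSPC while ext4_should_retry_alloc() believes a committing
     * transaction may still release space. */
    static int alloc_with_retry(struct inode *inode)
    {
            int retries = 0;
            int err;

    retry:
            err = do_some_ext4_allocation(inode);   /* hypothetical helper */
            if (err == -ENOSPC &&
                ext4_should_retry_alloc(inode->i_sb, &retries))
                    goto retry;
            return err;
    }
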
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 27f38bb5046d..4e091eae38b1 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -544,8 +544,8 @@ struct ext4_new_group_data { | |||
544 | __u64 inode_table; | 544 | __u64 inode_table; |
545 | __u32 blocks_count; | 545 | __u32 blocks_count; |
546 | __u16 reserved_blocks; | 546 | __u16 reserved_blocks; |
547 | __u16 unused; | 547 | __u16 mdata_blocks; |
548 | __u32 free_blocks_count; | 548 | __u32 free_clusters_count; |
549 | }; | 549 | }; |
550 | 550 | ||
551 | /* Indexes used to index group tables in ext4_new_group_data */ | 551 | /* Indexes used to index group tables in ext4_new_group_data */ |
@@ -643,43 +643,6 @@ enum { | |||
643 | #define EXT4_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT | 643 | #define EXT4_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT |
644 | #define EXT4_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY | 644 | #define EXT4_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY |
645 | 645 | ||
646 | #ifndef FS_IOC_FSGETXATTR | ||
647 | /* Until the uapi changes get merged for project quota... */ | ||
648 | |||
649 | #define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr) | ||
650 | #define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr) | ||
651 | |||
652 | /* | ||
653 | * Structure for FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR. | ||
654 | */ | ||
655 | struct fsxattr { | ||
656 | __u32 fsx_xflags; /* xflags field value (get/set) */ | ||
657 | __u32 fsx_extsize; /* extsize field value (get/set)*/ | ||
658 | __u32 fsx_nextents; /* nextents field value (get) */ | ||
659 | __u32 fsx_projid; /* project identifier (get/set) */ | ||
660 | unsigned char fsx_pad[12]; | ||
661 | }; | ||
662 | |||
663 | /* | ||
664 | * Flags for the fsx_xflags field | ||
665 | */ | ||
666 | #define FS_XFLAG_REALTIME 0x00000001 /* data in realtime volume */ | ||
667 | #define FS_XFLAG_PREALLOC 0x00000002 /* preallocated file extents */ | ||
668 | #define FS_XFLAG_IMMUTABLE 0x00000008 /* file cannot be modified */ | ||
669 | #define FS_XFLAG_APPEND 0x00000010 /* all writes append */ | ||
670 | #define FS_XFLAG_SYNC 0x00000020 /* all writes synchronous */ | ||
671 | #define FS_XFLAG_NOATIME 0x00000040 /* do not update access time */ | ||
672 | #define FS_XFLAG_NODUMP 0x00000080 /* do not include in backups */ | ||
673 | #define FS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */ | ||
674 | #define FS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */ | ||
675 | #define FS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */ | ||
676 | #define FS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */ | ||
677 | #define FS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */ | ||
678 | #define FS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */ | ||
679 | #define FS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */ | ||
680 | #define FS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ | ||
681 | #endif /* !defined(FS_IOC_FSGETXATTR) */ | ||
682 | |||
683 | #define EXT4_IOC_FSGETXATTR FS_IOC_FSGETXATTR | 646 | #define EXT4_IOC_FSGETXATTR FS_IOC_FSGETXATTR |
684 | #define EXT4_IOC_FSSETXATTR FS_IOC_FSSETXATTR | 647 | #define EXT4_IOC_FSSETXATTR FS_IOC_FSSETXATTR |
685 | 648 | ||
@@ -1391,8 +1354,6 @@ struct ext4_sb_info { | |||
1391 | int s_first_ino; | 1354 | int s_first_ino; |
1392 | unsigned int s_inode_readahead_blks; | 1355 | unsigned int s_inode_readahead_blks; |
1393 | unsigned int s_inode_goal; | 1356 | unsigned int s_inode_goal; |
1394 | spinlock_t s_next_gen_lock; | ||
1395 | u32 s_next_generation; | ||
1396 | u32 s_hash_seed[4]; | 1357 | u32 s_hash_seed[4]; |
1397 | int s_def_hash_version; | 1358 | int s_def_hash_version; |
1398 | int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */ | 1359 | int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */ |
@@ -2514,9 +2475,6 @@ extern void ext4_da_update_reserve_space(struct inode *inode, | |||
2514 | int used, int quota_claim); | 2475 | int used, int quota_claim); |
2515 | extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, | 2476 | extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, |
2516 | ext4_fsblk_t pblk, ext4_lblk_t len); | 2477 | ext4_fsblk_t pblk, ext4_lblk_t len); |
2517 | extern int ext4_get_next_extent(struct inode *inode, ext4_lblk_t lblk, | ||
2518 | unsigned int map_len, | ||
2519 | struct extent_status *result); | ||
2520 | 2478 | ||
2521 | /* indirect.c */ | 2479 | /* indirect.c */ |
2522 | extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, | 2480 | extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, |
@@ -3047,6 +3005,10 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode, | |||
3047 | extern int ext4_inline_data_fiemap(struct inode *inode, | 3005 | extern int ext4_inline_data_fiemap(struct inode *inode, |
3048 | struct fiemap_extent_info *fieinfo, | 3006 | struct fiemap_extent_info *fieinfo, |
3049 | int *has_inline, __u64 start, __u64 len); | 3007 | int *has_inline, __u64 start, __u64 len); |
3008 | |||
3009 | struct iomap; | ||
3010 | extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap); | ||
3011 | |||
3050 | extern int ext4_try_to_evict_inline_data(handle_t *handle, | 3012 | extern int ext4_try_to_evict_inline_data(handle_t *handle, |
3051 | struct inode *inode, | 3013 | struct inode *inode, |
3052 | int needed); | 3014 | int needed); |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 97f0fd06728d..07bca11749d4 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4794,7 +4794,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, | |||
4794 | } | 4794 | } |
4795 | 4795 | ||
4796 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | 4796 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
4797 | offset + len > i_size_read(inode)) { | 4797 | (offset + len > i_size_read(inode) || |
4798 | offset + len > EXT4_I(inode)->i_disksize)) { | ||
4798 | new_size = offset + len; | 4799 | new_size = offset + len; |
4799 | ret = inode_newsize_ok(inode, new_size); | 4800 | ret = inode_newsize_ok(inode, new_size); |
4800 | if (ret) | 4801 | if (ret) |
@@ -4965,7 +4966,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) | |||
4965 | } | 4966 | } |
4966 | 4967 | ||
4967 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | 4968 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
4968 | offset + len > i_size_read(inode)) { | 4969 | (offset + len > i_size_read(inode) || |
4970 | offset + len > EXT4_I(inode)->i_disksize)) { | ||
4969 | new_size = offset + len; | 4971 | new_size = offset + len; |
4970 | ret = inode_newsize_ok(inode, new_size); | 4972 | ret = inode_newsize_ok(inode, new_size); |
4971 | if (ret) | 4973 | if (ret) |
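
Both extents.c hunks belong to "ext4: fix interaction between i_size, fallocate, and delalloc after a crash": the decision to grow the recorded size now also compares against the on-disk size i_disksize, which can lag behind i_size while delayed allocation is pending, so fallocate and zero-range update it when needed. Condensed to a predicate with an illustrative helper name, not from the patch:

    /* Extend the recorded sizes whenever the fallocated range ends beyond
     * either the in-memory size or the (possibly smaller) on-disk size. */
    static bool fallocate_must_update_size(loff_t offset, loff_t len,
                                           loff_t i_size, loff_t i_disksize)
    {
            return offset + len > i_size || offset + len > i_disksize;
    }
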
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index b937078bcff3..ad204d2724ac 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <linux/time.h> | 22 | #include <linux/time.h> |
23 | #include <linux/fs.h> | 23 | #include <linux/fs.h> |
24 | #include <linux/iomap.h> | ||
24 | #include <linux/mount.h> | 25 | #include <linux/mount.h> |
25 | #include <linux/path.h> | 26 | #include <linux/path.h> |
26 | #include <linux/dax.h> | 27 | #include <linux/dax.h> |
@@ -424,248 +425,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp) | |||
424 | } | 425 | } |
425 | 426 | ||
426 | /* | 427 | /* |
427 | * Here we use ext4_map_blocks() to get a block mapping for a extent-based | ||
428 | * file rather than ext4_ext_walk_space() because we can introduce | ||
429 | * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped file at the same | ||
430 | * function. When extent status tree has been fully implemented, it will | ||
431 | * track all extent status for a file and we can directly use it to | ||
432 | * retrieve the offset for SEEK_DATA/SEEK_HOLE. | ||
433 | */ | ||
434 | |||
435 | /* | ||
436 | * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we would need to | ||
437 | * lookup page cache to check whether or not there has some data between | ||
438 | * [startoff, endoff] because, if this range contains an unwritten extent, | ||
439 | * we determine this extent as a data or a hole according to whether the | ||
440 | * page cache has data or not. | ||
441 | */ | ||
442 | static int ext4_find_unwritten_pgoff(struct inode *inode, | ||
443 | int whence, | ||
444 | ext4_lblk_t end_blk, | ||
445 | loff_t *offset) | ||
446 | { | ||
447 | struct pagevec pvec; | ||
448 | unsigned int blkbits; | ||
449 | pgoff_t index; | ||
450 | pgoff_t end; | ||
451 | loff_t endoff; | ||
452 | loff_t startoff; | ||
453 | loff_t lastoff; | ||
454 | int found = 0; | ||
455 | |||
456 | blkbits = inode->i_sb->s_blocksize_bits; | ||
457 | startoff = *offset; | ||
458 | lastoff = startoff; | ||
459 | endoff = (loff_t)end_blk << blkbits; | ||
460 | |||
461 | index = startoff >> PAGE_SHIFT; | ||
462 | end = (endoff - 1) >> PAGE_SHIFT; | ||
463 | |||
464 | pagevec_init(&pvec, 0); | ||
465 | do { | ||
466 | int i; | ||
467 | unsigned long nr_pages; | ||
468 | |||
469 | nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, | ||
470 | &index, end); | ||
471 | if (nr_pages == 0) | ||
472 | break; | ||
473 | |||
474 | for (i = 0; i < nr_pages; i++) { | ||
475 | struct page *page = pvec.pages[i]; | ||
476 | struct buffer_head *bh, *head; | ||
477 | |||
478 | /* | ||
479 | * If current offset is smaller than the page offset, | ||
480 | * there is a hole at this offset. | ||
481 | */ | ||
482 | if (whence == SEEK_HOLE && lastoff < endoff && | ||
483 | lastoff < page_offset(pvec.pages[i])) { | ||
484 | found = 1; | ||
485 | *offset = lastoff; | ||
486 | goto out; | ||
487 | } | ||
488 | |||
489 | lock_page(page); | ||
490 | |||
491 | if (unlikely(page->mapping != inode->i_mapping)) { | ||
492 | unlock_page(page); | ||
493 | continue; | ||
494 | } | ||
495 | |||
496 | if (!page_has_buffers(page)) { | ||
497 | unlock_page(page); | ||
498 | continue; | ||
499 | } | ||
500 | |||
501 | if (page_has_buffers(page)) { | ||
502 | lastoff = page_offset(page); | ||
503 | bh = head = page_buffers(page); | ||
504 | do { | ||
505 | if (lastoff + bh->b_size <= startoff) | ||
506 | goto next; | ||
507 | if (buffer_uptodate(bh) || | ||
508 | buffer_unwritten(bh)) { | ||
509 | if (whence == SEEK_DATA) | ||
510 | found = 1; | ||
511 | } else { | ||
512 | if (whence == SEEK_HOLE) | ||
513 | found = 1; | ||
514 | } | ||
515 | if (found) { | ||
516 | *offset = max_t(loff_t, | ||
517 | startoff, lastoff); | ||
518 | unlock_page(page); | ||
519 | goto out; | ||
520 | } | ||
521 | next: | ||
522 | lastoff += bh->b_size; | ||
523 | bh = bh->b_this_page; | ||
524 | } while (bh != head); | ||
525 | } | ||
526 | |||
527 | lastoff = page_offset(page) + PAGE_SIZE; | ||
528 | unlock_page(page); | ||
529 | } | ||
530 | |||
531 | pagevec_release(&pvec); | ||
532 | } while (index <= end); | ||
533 | |||
534 | /* There are no pages upto endoff - that would be a hole in there. */ | ||
535 | if (whence == SEEK_HOLE && lastoff < endoff) { | ||
536 | found = 1; | ||
537 | *offset = lastoff; | ||
538 | } | ||
539 | out: | ||
540 | pagevec_release(&pvec); | ||
541 | return found; | ||
542 | } | ||
543 | |||
544 | /* | ||
545 | * ext4_seek_data() retrieves the offset for SEEK_DATA. | ||
546 | */ | ||
547 | static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) | ||
548 | { | ||
549 | struct inode *inode = file->f_mapping->host; | ||
550 | struct extent_status es; | ||
551 | ext4_lblk_t start, last, end; | ||
552 | loff_t dataoff, isize; | ||
553 | int blkbits; | ||
554 | int ret; | ||
555 | |||
556 | inode_lock(inode); | ||
557 | |||
558 | isize = i_size_read(inode); | ||
559 | if (offset < 0 || offset >= isize) { | ||
560 | inode_unlock(inode); | ||
561 | return -ENXIO; | ||
562 | } | ||
563 | |||
564 | blkbits = inode->i_sb->s_blocksize_bits; | ||
565 | start = offset >> blkbits; | ||
566 | last = start; | ||
567 | end = isize >> blkbits; | ||
568 | dataoff = offset; | ||
569 | |||
570 | do { | ||
571 | ret = ext4_get_next_extent(inode, last, end - last + 1, &es); | ||
572 | if (ret <= 0) { | ||
573 | /* No extent found -> no data */ | ||
574 | if (ret == 0) | ||
575 | ret = -ENXIO; | ||
576 | inode_unlock(inode); | ||
577 | return ret; | ||
578 | } | ||
579 | |||
580 | last = es.es_lblk; | ||
581 | if (last != start) | ||
582 | dataoff = (loff_t)last << blkbits; | ||
583 | if (!ext4_es_is_unwritten(&es)) | ||
584 | break; | ||
585 | |||
586 | /* | ||
587 | * If there is a unwritten extent at this offset, | ||
588 | * it will be as a data or a hole according to page | ||
589 | * cache that has data or not. | ||
590 | */ | ||
591 | if (ext4_find_unwritten_pgoff(inode, SEEK_DATA, | ||
592 | es.es_lblk + es.es_len, &dataoff)) | ||
593 | break; | ||
594 | last += es.es_len; | ||
595 | dataoff = (loff_t)last << blkbits; | ||
596 | cond_resched(); | ||
597 | } while (last <= end); | ||
598 | |||
599 | inode_unlock(inode); | ||
600 | |||
601 | if (dataoff > isize) | ||
602 | return -ENXIO; | ||
603 | |||
604 | return vfs_setpos(file, dataoff, maxsize); | ||
605 | } | ||
606 | |||
607 | /* | ||
608 | * ext4_seek_hole() retrieves the offset for SEEK_HOLE. | ||
609 | */ | ||
610 | static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) | ||
611 | { | ||
612 | struct inode *inode = file->f_mapping->host; | ||
613 | struct extent_status es; | ||
614 | ext4_lblk_t start, last, end; | ||
615 | loff_t holeoff, isize; | ||
616 | int blkbits; | ||
617 | int ret; | ||
618 | |||
619 | inode_lock(inode); | ||
620 | |||
621 | isize = i_size_read(inode); | ||
622 | if (offset < 0 || offset >= isize) { | ||
623 | inode_unlock(inode); | ||
624 | return -ENXIO; | ||
625 | } | ||
626 | |||
627 | blkbits = inode->i_sb->s_blocksize_bits; | ||
628 | start = offset >> blkbits; | ||
629 | last = start; | ||
630 | end = isize >> blkbits; | ||
631 | holeoff = offset; | ||
632 | |||
633 | do { | ||
634 | ret = ext4_get_next_extent(inode, last, end - last + 1, &es); | ||
635 | if (ret < 0) { | ||
636 | inode_unlock(inode); | ||
637 | return ret; | ||
638 | } | ||
639 | /* Found a hole? */ | ||
640 | if (ret == 0 || es.es_lblk > last) { | ||
641 | if (last != start) | ||
642 | holeoff = (loff_t)last << blkbits; | ||
643 | break; | ||
644 | } | ||
645 | /* | ||
646 | * If there is a unwritten extent at this offset, | ||
647 | * it will be as a data or a hole according to page | ||
648 | * cache that has data or not. | ||
649 | */ | ||
650 | if (ext4_es_is_unwritten(&es) && | ||
651 | ext4_find_unwritten_pgoff(inode, SEEK_HOLE, | ||
652 | last + es.es_len, &holeoff)) | ||
653 | break; | ||
654 | |||
655 | last += es.es_len; | ||
656 | holeoff = (loff_t)last << blkbits; | ||
657 | cond_resched(); | ||
658 | } while (last <= end); | ||
659 | |||
660 | inode_unlock(inode); | ||
661 | |||
662 | if (holeoff > isize) | ||
663 | holeoff = isize; | ||
664 | |||
665 | return vfs_setpos(file, holeoff, maxsize); | ||
666 | } | ||
667 | |||
668 | /* | ||
669 | * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values | 428 | * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values |
670 | * by calling generic_file_llseek_size() with the appropriate maxbytes | 429 | * by calling generic_file_llseek_size() with the appropriate maxbytes |
671 | * value for each. | 430 | * value for each. |
@@ -681,18 +440,24 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence) | |||
681 | maxbytes = inode->i_sb->s_maxbytes; | 440 | maxbytes = inode->i_sb->s_maxbytes; |
682 | 441 | ||
683 | switch (whence) { | 442 | switch (whence) { |
684 | case SEEK_SET: | 443 | default: |
685 | case SEEK_CUR: | ||
686 | case SEEK_END: | ||
687 | return generic_file_llseek_size(file, offset, whence, | 444 | return generic_file_llseek_size(file, offset, whence, |
688 | maxbytes, i_size_read(inode)); | 445 | maxbytes, i_size_read(inode)); |
689 | case SEEK_DATA: | ||
690 | return ext4_seek_data(file, offset, maxbytes); | ||
691 | case SEEK_HOLE: | 446 | case SEEK_HOLE: |
692 | return ext4_seek_hole(file, offset, maxbytes); | 447 | inode_lock_shared(inode); |
448 | offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops); | ||
449 | inode_unlock_shared(inode); | ||
450 | break; | ||
451 | case SEEK_DATA: | ||
452 | inode_lock_shared(inode); | ||
453 | offset = iomap_seek_data(inode, offset, &ext4_iomap_ops); | ||
454 | inode_unlock_shared(inode); | ||
455 | break; | ||
693 | } | 456 | } |
694 | 457 | ||
695 | return -EINVAL; | 458 | if (offset < 0) |
459 | return offset; | ||
460 | return vfs_setpos(file, offset, maxbytes); | ||
696 | } | 461 | } |
697 | 462 | ||
698 | const struct file_operations ext4_file_operations = { | 463 | const struct file_operations ext4_file_operations = { |
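
With the old ext4_seek_data()/ext4_seek_hole() implementation and its page-cache walker removed, SEEK_HOLE and SEEK_DATA are served by the generic iomap_seek_hole()/iomap_seek_data() helpers under a shared inode lock. The user-visible semantics stay the same; a small userspace illustration, not part of the patch:

    /* SEEK_DATA returns the next offset that contains data, SEEK_HOLE the
     * next hole (end of file counts as a hole). */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            int fd;
            off_t data, hole;

            if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                    return 1;
            data = lseek(fd, 0, SEEK_DATA);         /* -1/ENXIO if no data */
            hole = lseek(fd, data < 0 ? 0 : data, SEEK_HOLE);
            printf("first data at %lld, next hole at %lld\n",
                   (long long)data, (long long)hole);
            close(fd);
            return 0;
    }
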
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index c5f697a3fad4..b4267d72f249 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1139,9 +1139,7 @@ got: | |||
1139 | inode->i_ino); | 1139 | inode->i_ino); |
1140 | goto out; | 1140 | goto out; |
1141 | } | 1141 | } |
1142 | spin_lock(&sbi->s_next_gen_lock); | 1142 | inode->i_generation = prandom_u32(); |
1143 | inode->i_generation = sbi->s_next_generation++; | ||
1144 | spin_unlock(&sbi->s_next_gen_lock); | ||
1145 | 1143 | ||
1146 | /* Precompute checksum seed for inode metadata */ | 1144 | /* Precompute checksum seed for inode metadata */ |
1147 | if (ext4_has_metadata_csum(sb)) { | 1145 | if (ext4_has_metadata_csum(sb)) { |
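
The global, spinlock-protected generation counter is gone ("ext4: improve smp scalability for inode generation"); each new inode simply gets a random i_generation from prandom_u32(). That is sufficient because i_generation only needs to make reuse of an inode number detectable, as in NFS-style file handles; an illustrative sketch of that consumer-side check, not from the patch:

    /* A file handle embeds (ino, generation); if the inode number was
     * reused after a delete, the stored generation no longer matches and
     * the handle is treated as stale.  Any value unlikely to repeat is
     * good enough, so a per-allocation random number can replace the
     * locked global counter. */
    struct handle { unsigned int ino; unsigned int gen; };

    static int handle_is_stale(const struct handle *h,
                               unsigned int cur_ino, unsigned int cur_gen)
    {
            return h->ino != cur_ino || h->gen != cur_gen;
    }
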
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 28c5c3abddb3..1367553c43bb 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -12,6 +12,7 @@ | |||
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/iomap.h> | ||
15 | #include <linux/fiemap.h> | 16 | #include <linux/fiemap.h> |
16 | 17 | ||
17 | #include "ext4_jbd2.h" | 18 | #include "ext4_jbd2.h" |
@@ -302,11 +303,6 @@ static int ext4_create_inline_data(handle_t *handle, | |||
302 | EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE; | 303 | EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE; |
303 | ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); | 304 | ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); |
304 | ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA); | 305 | ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA); |
305 | /* | ||
306 | * Propagate changes to inode->i_flags as well - e.g. S_DAX may | ||
307 | * get cleared | ||
308 | */ | ||
309 | ext4_set_inode_flags(inode); | ||
310 | get_bh(is.iloc.bh); | 306 | get_bh(is.iloc.bh); |
311 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); | 307 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); |
312 | 308 | ||
@@ -451,11 +447,6 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle, | |||
451 | } | 447 | } |
452 | } | 448 | } |
453 | ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA); | 449 | ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA); |
454 | /* | ||
455 | * Propagate changes to inode->i_flags as well - e.g. S_DAX may | ||
456 | * get set. | ||
457 | */ | ||
458 | ext4_set_inode_flags(inode); | ||
459 | 450 | ||
460 | get_bh(is.iloc.bh); | 451 | get_bh(is.iloc.bh); |
461 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); | 452 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); |
@@ -1827,6 +1818,38 @@ int ext4_destroy_inline_data(handle_t *handle, struct inode *inode) | |||
1827 | return ret; | 1818 | return ret; |
1828 | } | 1819 | } |
1829 | 1820 | ||
1821 | int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap) | ||
1822 | { | ||
1823 | __u64 addr; | ||
1824 | int error = -EAGAIN; | ||
1825 | struct ext4_iloc iloc; | ||
1826 | |||
1827 | down_read(&EXT4_I(inode)->xattr_sem); | ||
1828 | if (!ext4_has_inline_data(inode)) | ||
1829 | goto out; | ||
1830 | |||
1831 | error = ext4_get_inode_loc(inode, &iloc); | ||
1832 | if (error) | ||
1833 | goto out; | ||
1834 | |||
1835 | addr = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits; | ||
1836 | addr += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data; | ||
1837 | addr += offsetof(struct ext4_inode, i_block); | ||
1838 | |||
1839 | brelse(iloc.bh); | ||
1840 | |||
1841 | iomap->addr = addr; | ||
1842 | iomap->offset = 0; | ||
1843 | iomap->length = min_t(loff_t, ext4_get_inline_size(inode), | ||
1844 | i_size_read(inode)); | ||
1845 | iomap->type = 0; | ||
1846 | iomap->flags = IOMAP_F_DATA_INLINE; | ||
1847 | |||
1848 | out: | ||
1849 | up_read(&EXT4_I(inode)->xattr_sem); | ||
1850 | return error; | ||
1851 | } | ||
1852 | |||
1830 | int ext4_inline_data_fiemap(struct inode *inode, | 1853 | int ext4_inline_data_fiemap(struct inode *inode, |
1831 | struct fiemap_extent_info *fieinfo, | 1854 | struct fiemap_extent_info *fieinfo, |
1832 | int *has_inline, __u64 start, __u64 len) | 1855 | int *has_inline, __u64 start, __u64 len) |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 168a1b499cdf..2633150e41b9 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3394,7 +3394,6 @@ static int ext4_releasepage(struct page *page, gfp_t wait) | |||
3394 | return try_to_free_buffers(page); | 3394 | return try_to_free_buffers(page); |
3395 | } | 3395 | } |
3396 | 3396 | ||
3397 | #ifdef CONFIG_FS_DAX | ||
3398 | static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, | 3397 | static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, |
3399 | unsigned flags, struct iomap *iomap) | 3398 | unsigned flags, struct iomap *iomap) |
3400 | { | 3399 | { |
@@ -3403,17 +3402,54 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, | |||
3403 | unsigned long first_block = offset >> blkbits; | 3402 | unsigned long first_block = offset >> blkbits; |
3404 | unsigned long last_block = (offset + length - 1) >> blkbits; | 3403 | unsigned long last_block = (offset + length - 1) >> blkbits; |
3405 | struct ext4_map_blocks map; | 3404 | struct ext4_map_blocks map; |
3405 | bool delalloc = false; | ||
3406 | int ret; | 3406 | int ret; |
3407 | 3407 | ||
3408 | if (WARN_ON_ONCE(ext4_has_inline_data(inode))) | 3408 | |
3409 | return -ERANGE; | 3409 | if (flags & IOMAP_REPORT) { |
3410 | if (ext4_has_inline_data(inode)) { | ||
3411 | ret = ext4_inline_data_iomap(inode, iomap); | ||
3412 | if (ret != -EAGAIN) { | ||
3413 | if (ret == 0 && offset >= iomap->length) | ||
3414 | ret = -ENOENT; | ||
3415 | return ret; | ||
3416 | } | ||
3417 | } | ||
3418 | } else { | ||
3419 | if (WARN_ON_ONCE(ext4_has_inline_data(inode))) | ||
3420 | return -ERANGE; | ||
3421 | } | ||
3410 | 3422 | ||
3411 | map.m_lblk = first_block; | 3423 | map.m_lblk = first_block; |
3412 | map.m_len = last_block - first_block + 1; | 3424 | map.m_len = last_block - first_block + 1; |
3413 | 3425 | ||
3414 | if (!(flags & IOMAP_WRITE)) { | 3426 | if (flags & IOMAP_REPORT) { |
3415 | ret = ext4_map_blocks(NULL, inode, &map, 0); | 3427 | ret = ext4_map_blocks(NULL, inode, &map, 0); |
3416 | } else { | 3428 | if (ret < 0) |
3429 | return ret; | ||
3430 | |||
3431 | if (ret == 0) { | ||
3432 | ext4_lblk_t end = map.m_lblk + map.m_len - 1; | ||
3433 | struct extent_status es; | ||
3434 | |||
3435 | ext4_es_find_delayed_extent_range(inode, map.m_lblk, end, &es); | ||
3436 | |||
3437 | if (!es.es_len || es.es_lblk > end) { | ||
3438 | /* entire range is a hole */ | ||
3439 | } else if (es.es_lblk > map.m_lblk) { | ||
3440 | /* range starts with a hole */ | ||
3441 | map.m_len = es.es_lblk - map.m_lblk; | ||
3442 | } else { | ||
3443 | ext4_lblk_t offs = 0; | ||
3444 | |||
3445 | if (es.es_lblk < map.m_lblk) | ||
3446 | offs = map.m_lblk - es.es_lblk; | ||
3447 | map.m_lblk = es.es_lblk + offs; | ||
3448 | map.m_len = es.es_len - offs; | ||
3449 | delalloc = true; | ||
3450 | } | ||
3451 | } | ||
3452 | } else if (flags & IOMAP_WRITE) { | ||
3417 | int dio_credits; | 3453 | int dio_credits; |
3418 | handle_t *handle; | 3454 | handle_t *handle; |
3419 | int retries = 0; | 3455 | int retries = 0; |
@@ -3464,17 +3500,21 @@ retry: | |||
3464 | } | 3500 | } |
3465 | } | 3501 | } |
3466 | ext4_journal_stop(handle); | 3502 | ext4_journal_stop(handle); |
3503 | } else { | ||
3504 | ret = ext4_map_blocks(NULL, inode, &map, 0); | ||
3505 | if (ret < 0) | ||
3506 | return ret; | ||
3467 | } | 3507 | } |
3468 | 3508 | ||
3469 | iomap->flags = 0; | 3509 | iomap->flags = 0; |
3470 | iomap->bdev = inode->i_sb->s_bdev; | 3510 | iomap->bdev = inode->i_sb->s_bdev; |
3471 | iomap->dax_dev = sbi->s_daxdev; | 3511 | iomap->dax_dev = sbi->s_daxdev; |
3472 | iomap->offset = first_block << blkbits; | 3512 | iomap->offset = first_block << blkbits; |
3513 | iomap->length = (u64)map.m_len << blkbits; | ||
3473 | 3514 | ||
3474 | if (ret == 0) { | 3515 | if (ret == 0) { |
3475 | iomap->type = IOMAP_HOLE; | 3516 | iomap->type = delalloc ? IOMAP_DELALLOC : IOMAP_HOLE; |
3476 | iomap->blkno = IOMAP_NULL_BLOCK; | 3517 | iomap->addr = IOMAP_NULL_ADDR; |
3477 | iomap->length = (u64)map.m_len << blkbits; | ||
3478 | } else { | 3518 | } else { |
3479 | if (map.m_flags & EXT4_MAP_MAPPED) { | 3519 | if (map.m_flags & EXT4_MAP_MAPPED) { |
3480 | iomap->type = IOMAP_MAPPED; | 3520 | iomap->type = IOMAP_MAPPED; |
@@ -3484,12 +3524,12 @@ retry: | |||
3484 | WARN_ON_ONCE(1); | 3524 | WARN_ON_ONCE(1); |
3485 | return -EIO; | 3525 | return -EIO; |
3486 | } | 3526 | } |
3487 | iomap->blkno = (sector_t)map.m_pblk << (blkbits - 9); | 3527 | iomap->addr = (u64)map.m_pblk << blkbits; |
3488 | iomap->length = (u64)map.m_len << blkbits; | ||
3489 | } | 3528 | } |
3490 | 3529 | ||
3491 | if (map.m_flags & EXT4_MAP_NEW) | 3530 | if (map.m_flags & EXT4_MAP_NEW) |
3492 | iomap->flags |= IOMAP_F_NEW; | 3531 | iomap->flags |= IOMAP_F_NEW; |
3532 | |||
3493 | return 0; | 3533 | return 0; |
3494 | } | 3534 | } |
3495 | 3535 | ||
@@ -3550,8 +3590,6 @@ const struct iomap_ops ext4_iomap_ops = { | |||
3550 | .iomap_end = ext4_iomap_end, | 3590 | .iomap_end = ext4_iomap_end, |
3551 | }; | 3591 | }; |
3552 | 3592 | ||
3553 | #endif | ||
3554 | |||
3555 | static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | 3593 | static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset, |
3556 | ssize_t size, void *private) | 3594 | ssize_t size, void *private) |
3557 | { | 3595 | { |
@@ -4573,6 +4611,21 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) | |||
4573 | !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); | 4611 | !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); |
4574 | } | 4612 | } |
4575 | 4613 | ||
4614 | static bool ext4_should_use_dax(struct inode *inode) | ||
4615 | { | ||
4616 | if (!test_opt(inode->i_sb, DAX)) | ||
4617 | return false; | ||
4618 | if (!S_ISREG(inode->i_mode)) | ||
4619 | return false; | ||
4620 | if (ext4_should_journal_data(inode)) | ||
4621 | return false; | ||
4622 | if (ext4_has_inline_data(inode)) | ||
4623 | return false; | ||
4624 | if (ext4_encrypted_inode(inode)) | ||
4625 | return false; | ||
4626 | return true; | ||
4627 | } | ||
4628 | |||
4576 | void ext4_set_inode_flags(struct inode *inode) | 4629 | void ext4_set_inode_flags(struct inode *inode) |
4577 | { | 4630 | { |
4578 | unsigned int flags = EXT4_I(inode)->i_flags; | 4631 | unsigned int flags = EXT4_I(inode)->i_flags; |
@@ -4588,9 +4641,7 @@ void ext4_set_inode_flags(struct inode *inode) | |||
4588 | new_fl |= S_NOATIME; | 4641 | new_fl |= S_NOATIME; |
4589 | if (flags & EXT4_DIRSYNC_FL) | 4642 | if (flags & EXT4_DIRSYNC_FL) |
4590 | new_fl |= S_DIRSYNC; | 4643 | new_fl |= S_DIRSYNC; |
4591 | if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode) && | 4644 | if (ext4_should_use_dax(inode)) |
4592 | !ext4_should_journal_data(inode) && !ext4_has_inline_data(inode) && | ||
4593 | !(flags & EXT4_ENCRYPT_FL)) | ||
4594 | new_fl |= S_DAX; | 4645 | new_fl |= S_DAX; |
4595 | if (flags & EXT4_ENCRYPT_FL) | 4646 | if (flags & EXT4_ENCRYPT_FL) |
4596 | new_fl |= S_ENCRYPTED; | 4647 | new_fl |= S_ENCRYPTED; |
@@ -5966,11 +6017,6 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) | |||
5966 | ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); | 6017 | ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); |
5967 | } | 6018 | } |
5968 | ext4_set_aops(inode); | 6019 | ext4_set_aops(inode); |
5969 | /* | ||
5970 | * Update inode->i_flags after EXT4_INODE_JOURNAL_DATA was updated. | ||
5971 | * E.g. S_DAX may get cleared / set. | ||
5972 | */ | ||
5973 | ext4_set_inode_flags(inode); | ||
5974 | 6020 | ||
5975 | jbd2_journal_unlock_updates(journal); | 6021 | jbd2_journal_unlock_updates(journal); |
5976 | percpu_up_write(&sbi->s_journal_flag_rwsem); | 6022 | percpu_up_write(&sbi->s_journal_flag_rwsem); |
@@ -6106,70 +6152,3 @@ int ext4_filemap_fault(struct vm_fault *vmf) | |||
6106 | 6152 | ||
6107 | return err; | 6153 | return err; |
6108 | } | 6154 | } |
6109 | |||
6110 | /* | ||
6111 | * Find the first extent at or after @lblk in an inode that is not a hole. | ||
6112 | * Search for @map_len blocks at most. The extent is returned in @result. | ||
6113 | * | ||
6114 | * The function returns 1 if we found an extent. The function returns 0 in | ||
6115 | * case there is no extent at or after @lblk and in that case also sets | ||
6116 | * @result->es_len to 0. In case of error, the error code is returned. | ||
6117 | */ | ||
6118 | int ext4_get_next_extent(struct inode *inode, ext4_lblk_t lblk, | ||
6119 | unsigned int map_len, struct extent_status *result) | ||
6120 | { | ||
6121 | struct ext4_map_blocks map; | ||
6122 | struct extent_status es = {}; | ||
6123 | int ret; | ||
6124 | |||
6125 | map.m_lblk = lblk; | ||
6126 | map.m_len = map_len; | ||
6127 | |||
6128 | /* | ||
6129 | * For non-extent based files this loop may iterate several times since | ||
6130 | * we do not determine full hole size. | ||
6131 | */ | ||
6132 | while (map.m_len > 0) { | ||
6133 | ret = ext4_map_blocks(NULL, inode, &map, 0); | ||
6134 | if (ret < 0) | ||
6135 | return ret; | ||
6136 | /* There's extent covering m_lblk? Just return it. */ | ||
6137 | if (ret > 0) { | ||
6138 | int status; | ||
6139 | |||
6140 | ext4_es_store_pblock(result, map.m_pblk); | ||
6141 | result->es_lblk = map.m_lblk; | ||
6142 | result->es_len = map.m_len; | ||
6143 | if (map.m_flags & EXT4_MAP_UNWRITTEN) | ||
6144 | status = EXTENT_STATUS_UNWRITTEN; | ||
6145 | else | ||
6146 | status = EXTENT_STATUS_WRITTEN; | ||
6147 | ext4_es_store_status(result, status); | ||
6148 | return 1; | ||
6149 | } | ||
6150 | ext4_es_find_delayed_extent_range(inode, map.m_lblk, | ||
6151 | map.m_lblk + map.m_len - 1, | ||
6152 | &es); | ||
6153 | /* Is delalloc data before next block in extent tree? */ | ||
6154 | if (es.es_len && es.es_lblk < map.m_lblk + map.m_len) { | ||
6155 | ext4_lblk_t offset = 0; | ||
6156 | |||
6157 | if (es.es_lblk < lblk) | ||
6158 | offset = lblk - es.es_lblk; | ||
6159 | result->es_lblk = es.es_lblk + offset; | ||
6160 | ext4_es_store_pblock(result, | ||
6161 | ext4_es_pblock(&es) + offset); | ||
6162 | result->es_len = es.es_len - offset; | ||
6163 | ext4_es_store_status(result, ext4_es_status(&es)); | ||
6164 | |||
6165 | return 1; | ||
6166 | } | ||
6167 | /* There's a hole at m_lblk, advance us after it */ | ||
6168 | map.m_lblk += map.m_len; | ||
6169 | map_len -= map.m_len; | ||
6170 | map.m_len = map_len; | ||
6171 | cond_resched(); | ||
6172 | } | ||
6173 | result->es_len = 0; | ||
6174 | return 0; | ||
6175 | } | ||
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 75d83471f65c..b7558f292420 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/mount.h> | 15 | #include <linux/mount.h> |
16 | #include <linux/file.h> | 16 | #include <linux/file.h> |
17 | #include <linux/quotaops.h> | 17 | #include <linux/quotaops.h> |
18 | #include <linux/random.h> | ||
18 | #include <linux/uuid.h> | 19 | #include <linux/uuid.h> |
19 | #include <linux/uaccess.h> | 20 | #include <linux/uaccess.h> |
20 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
@@ -99,7 +100,6 @@ static long swap_inode_boot_loader(struct super_block *sb, | |||
99 | int err; | 100 | int err; |
100 | struct inode *inode_bl; | 101 | struct inode *inode_bl; |
101 | struct ext4_inode_info *ei_bl; | 102 | struct ext4_inode_info *ei_bl; |
102 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
103 | 103 | ||
104 | if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode)) | 104 | if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode)) |
105 | return -EINVAL; | 105 | return -EINVAL; |
@@ -158,10 +158,8 @@ static long swap_inode_boot_loader(struct super_block *sb, | |||
158 | 158 | ||
159 | inode->i_ctime = inode_bl->i_ctime = current_time(inode); | 159 | inode->i_ctime = inode_bl->i_ctime = current_time(inode); |
160 | 160 | ||
161 | spin_lock(&sbi->s_next_gen_lock); | 161 | inode->i_generation = prandom_u32(); |
162 | inode->i_generation = sbi->s_next_generation++; | 162 | inode_bl->i_generation = prandom_u32(); |
163 | inode_bl->i_generation = sbi->s_next_generation++; | ||
164 | spin_unlock(&sbi->s_next_gen_lock); | ||
165 | 163 | ||
166 | ext4_discard_preallocations(inode); | 164 | ext4_discard_preallocations(inode); |
167 | 165 | ||
@@ -291,10 +289,20 @@ flags_err: | |||
291 | if (err) | 289 | if (err) |
292 | goto flags_out; | 290 | goto flags_out; |
293 | 291 | ||
294 | if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) | 292 | if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { |
293 | /* | ||
294 | * Changes to the journaling mode can cause unsafe changes to | ||
295 | * S_DAX if we are using the DAX mount option. | ||
296 | */ | ||
297 | if (test_opt(inode->i_sb, DAX)) { | ||
298 | err = -EBUSY; | ||
299 | goto flags_out; | ||
300 | } | ||
301 | |||
295 | err = ext4_change_inode_journal_flag(inode, jflag); | 302 | err = ext4_change_inode_journal_flag(inode, jflag); |
296 | if (err) | 303 | if (err) |
297 | goto flags_out; | 304 | goto flags_out; |
305 | } | ||
298 | if (migrate) { | 306 | if (migrate) { |
299 | if (flags & EXT4_EXTENTS_FL) | 307 | if (flags & EXT4_EXTENTS_FL) |
300 | err = ext4_ext_migrate(inode); | 308 | err = ext4_ext_migrate(inode); |
@@ -862,12 +870,6 @@ group_add_out: | |||
862 | int err = 0, err2 = 0; | 870 | int err = 0, err2 = 0; |
863 | ext4_group_t o_group = EXT4_SB(sb)->s_groups_count; | 871 | ext4_group_t o_group = EXT4_SB(sb)->s_groups_count; |
864 | 872 | ||
865 | if (ext4_has_feature_bigalloc(sb)) { | ||
866 | ext4_msg(sb, KERN_ERR, | ||
867 | "Online resizing not (yet) supported with bigalloc"); | ||
868 | return -EOPNOTSUPP; | ||
869 | } | ||
870 | |||
871 | if (copy_from_user(&n_blocks_count, (__u64 __user *)arg, | 873 | if (copy_from_user(&n_blocks_count, (__u64 __user *)arg, |
872 | sizeof(__u64))) { | 874 | sizeof(__u64))) { |
873 | return -EFAULT; | 875 | return -EFAULT; |
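
Two behavioural changes in ioctl.c: the boot-loader inode swap now also draws i_generation from prandom_u32(), and toggling EXT4_JOURNAL_DATA_FL is refused with -EBUSY on a DAX mount, because a journaling-mode change could otherwise flip S_DAX under a running application (the related ext4_set_inode_flags() cleanups are in the inline.c and inode.c hunks above). Seen from userspace through the generic attribute ioctls, the refusal would surface roughly like this illustrative sketch:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(int argc, char **argv)
    {
            int fd, attr;

            if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                    return 1;
            if (ioctl(fd, FS_IOC_GETFLAGS, &attr) == 0) {
                    attr ^= FS_JOURNAL_DATA_FL;     /* chattr +j / -j */
                    if (ioctl(fd, FS_IOC_SETFLAGS, &attr) < 0 &&
                        errno == EBUSY)
                            printf("journaling change refused on DAX mount\n");
            }
            close(fd);
            return 0;
    }
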
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 701085620cd8..d9f8b90a93ed 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4994,8 +4994,11 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, | |||
4994 | struct ext4_group_desc *desc; | 4994 | struct ext4_group_desc *desc; |
4995 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 4995 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
4996 | struct ext4_buddy e4b; | 4996 | struct ext4_buddy e4b; |
4997 | int err = 0, ret, blk_free_count; | 4997 | int err = 0, ret, free_clusters_count; |
4998 | ext4_grpblk_t blocks_freed; | 4998 | ext4_grpblk_t clusters_freed; |
4999 | ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); | ||
5000 | ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); | ||
5001 | unsigned long cluster_count = last_cluster - first_cluster + 1; | ||
4999 | 5002 | ||
5000 | ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); | 5003 | ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); |
5001 | 5004 | ||
@@ -5007,8 +5010,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, | |||
5007 | * Check to see if we are freeing blocks across a group | 5010 | * Check to see if we are freeing blocks across a group |
5008 | * boundary. | 5011 | * boundary. |
5009 | */ | 5012 | */ |
5010 | if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { | 5013 | if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { |
5011 | ext4_warning(sb, "too much blocks added to group %u", | 5014 | ext4_warning(sb, "too many blocks added to group %u", |
5012 | block_group); | 5015 | block_group); |
5013 | err = -EINVAL; | 5016 | err = -EINVAL; |
5014 | goto error_return; | 5017 | goto error_return; |
@@ -5054,14 +5057,14 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, | |||
5054 | if (err) | 5057 | if (err) |
5055 | goto error_return; | 5058 | goto error_return; |
5056 | 5059 | ||
5057 | for (i = 0, blocks_freed = 0; i < count; i++) { | 5060 | for (i = 0, clusters_freed = 0; i < cluster_count; i++) { |
5058 | BUFFER_TRACE(bitmap_bh, "clear bit"); | 5061 | BUFFER_TRACE(bitmap_bh, "clear bit"); |
5059 | if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { | 5062 | if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { |
5060 | ext4_error(sb, "bit already cleared for block %llu", | 5063 | ext4_error(sb, "bit already cleared for block %llu", |
5061 | (ext4_fsblk_t)(block + i)); | 5064 | (ext4_fsblk_t)(block + i)); |
5062 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); | 5065 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); |
5063 | } else { | 5066 | } else { |
5064 | blocks_freed++; | 5067 | clusters_freed++; |
5065 | } | 5068 | } |
5066 | } | 5069 | } |
5067 | 5070 | ||
@@ -5075,19 +5078,20 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, | |||
5075 | * them with group lock_held | 5078 | * them with group lock_held |
5076 | */ | 5079 | */ |
5077 | ext4_lock_group(sb, block_group); | 5080 | ext4_lock_group(sb, block_group); |
5078 | mb_clear_bits(bitmap_bh->b_data, bit, count); | 5081 | mb_clear_bits(bitmap_bh->b_data, bit, cluster_count); |
5079 | mb_free_blocks(NULL, &e4b, bit, count); | 5082 | mb_free_blocks(NULL, &e4b, bit, cluster_count); |
5080 | blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc); | 5083 | free_clusters_count = clusters_freed + |
5081 | ext4_free_group_clusters_set(sb, desc, blk_free_count); | 5084 | ext4_free_group_clusters(sb, desc); |
5085 | ext4_free_group_clusters_set(sb, desc, free_clusters_count); | ||
5082 | ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh); | 5086 | ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh); |
5083 | ext4_group_desc_csum_set(sb, block_group, desc); | 5087 | ext4_group_desc_csum_set(sb, block_group, desc); |
5084 | ext4_unlock_group(sb, block_group); | 5088 | ext4_unlock_group(sb, block_group); |
5085 | percpu_counter_add(&sbi->s_freeclusters_counter, | 5089 | percpu_counter_add(&sbi->s_freeclusters_counter, |
5086 | EXT4_NUM_B2C(sbi, blocks_freed)); | 5090 | clusters_freed); |
5087 | 5091 | ||
5088 | if (sbi->s_log_groups_per_flex) { | 5092 | if (sbi->s_log_groups_per_flex) { |
5089 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); | 5093 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); |
5090 | atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), | 5094 | atomic64_add(clusters_freed, |
5091 | &sbi->s_flex_groups[flex_group].free_clusters); | 5095 | &sbi->s_flex_groups[flex_group].free_clusters); |
5092 | } | 5096 | } |
5093 | 5097 | ||
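
The mballoc.c and resize.c changes are the heart of "ext4: add support for online resizing with bigalloc": group bookkeeping moves from blocks to clusters, converted with EXT4_B2C()/EXT4_C2B()/EXT4_NUM_B2C(). The arithmetic behind those macros is a shift by s_cluster_bits; a minimal sketch with illustrative names:

    /* With bigalloc, allocation is tracked in clusters of 2^cluster_bits
     * filesystem blocks; cluster_bits is 0 without bigalloc, so these
     * collapse to the identity. */
    static inline unsigned long long block_to_cluster(unsigned long long blk,
                                                      unsigned int cluster_bits)
    {
            return blk >> cluster_bits;                     /* EXT4_B2C() */
    }

    static inline unsigned long long cluster_to_block(unsigned long long clu,
                                                      unsigned int cluster_bits)
    {
            return clu << cluster_bits;                     /* EXT4_C2B() */
    }

    /* Clusters needed to cover 'nblks' blocks, rounded up (EXT4_NUM_B2C). */
    static inline unsigned long long blocks_to_cluster_count(unsigned long long nblks,
                                                             unsigned int cluster_bits)
    {
            return (nblks + (1ULL << cluster_bits) - 1) >> cluster_bits;
    }
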
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 1dac59c24792..50443bda8e98 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -107,7 +107,7 @@ static int verify_group_input(struct super_block *sb, | |||
107 | 107 | ||
108 | overhead = ext4_group_overhead_blocks(sb, group); | 108 | overhead = ext4_group_overhead_blocks(sb, group); |
109 | metaend = start + overhead; | 109 | metaend = start + overhead; |
110 | input->free_blocks_count = free_blocks_count = | 110 | input->free_clusters_count = free_blocks_count = |
111 | input->blocks_count - 2 - overhead - sbi->s_itb_per_group; | 111 | input->blocks_count - 2 - overhead - sbi->s_itb_per_group; |
112 | 112 | ||
113 | if (test_opt(sb, DEBUG)) | 113 | if (test_opt(sb, DEBUG)) |
@@ -258,6 +258,7 @@ static int ext4_alloc_group_tables(struct super_block *sb, | |||
258 | ext4_group_t last_group; | 258 | ext4_group_t last_group; |
259 | unsigned overhead; | 259 | unsigned overhead; |
260 | __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; | 260 | __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; |
261 | int i; | ||
261 | 262 | ||
262 | BUG_ON(flex_gd->count == 0 || group_data == NULL); | 263 | BUG_ON(flex_gd->count == 0 || group_data == NULL); |
263 | 264 | ||
@@ -294,7 +295,7 @@ next_group: | |||
294 | group_data[bb_index].block_bitmap = start_blk++; | 295 | group_data[bb_index].block_bitmap = start_blk++; |
295 | group = ext4_get_group_number(sb, start_blk - 1); | 296 | group = ext4_get_group_number(sb, start_blk - 1); |
296 | group -= group_data[0].group; | 297 | group -= group_data[0].group; |
297 | group_data[group].free_blocks_count--; | 298 | group_data[group].mdata_blocks++; |
298 | flex_gd->bg_flags[group] &= uninit_mask; | 299 | flex_gd->bg_flags[group] &= uninit_mask; |
299 | } | 300 | } |
300 | 301 | ||
@@ -305,7 +306,7 @@ next_group: | |||
305 | group_data[ib_index].inode_bitmap = start_blk++; | 306 | group_data[ib_index].inode_bitmap = start_blk++; |
306 | group = ext4_get_group_number(sb, start_blk - 1); | 307 | group = ext4_get_group_number(sb, start_blk - 1); |
307 | group -= group_data[0].group; | 308 | group -= group_data[0].group; |
308 | group_data[group].free_blocks_count--; | 309 | group_data[group].mdata_blocks++; |
309 | flex_gd->bg_flags[group] &= uninit_mask; | 310 | flex_gd->bg_flags[group] &= uninit_mask; |
310 | } | 311 | } |
311 | 312 | ||
@@ -324,15 +325,22 @@ next_group: | |||
324 | if (start_blk + itb > next_group_start) { | 325 | if (start_blk + itb > next_group_start) { |
325 | flex_gd->bg_flags[group + 1] &= uninit_mask; | 326 | flex_gd->bg_flags[group + 1] &= uninit_mask; |
326 | overhead = start_blk + itb - next_group_start; | 327 | overhead = start_blk + itb - next_group_start; |
327 | group_data[group + 1].free_blocks_count -= overhead; | 328 | group_data[group + 1].mdata_blocks += overhead; |
328 | itb -= overhead; | 329 | itb -= overhead; |
329 | } | 330 | } |
330 | 331 | ||
331 | group_data[group].free_blocks_count -= itb; | 332 | group_data[group].mdata_blocks += itb; |
332 | flex_gd->bg_flags[group] &= uninit_mask; | 333 | flex_gd->bg_flags[group] &= uninit_mask; |
333 | start_blk += EXT4_SB(sb)->s_itb_per_group; | 334 | start_blk += EXT4_SB(sb)->s_itb_per_group; |
334 | } | 335 | } |
335 | 336 | ||
337 | /* Update free clusters count to exclude metadata blocks */ | ||
338 | for (i = 0; i < flex_gd->count; i++) { | ||
339 | group_data[i].free_clusters_count -= | ||
340 | EXT4_NUM_B2C(EXT4_SB(sb), | ||
341 | group_data[i].mdata_blocks); | ||
342 | } | ||
343 | |||
336 | if (test_opt(sb, DEBUG)) { | 344 | if (test_opt(sb, DEBUG)) { |
337 | int i; | 345 | int i; |
338 | group = group_data[0].group; | 346 | group = group_data[0].group; |
@@ -342,12 +350,13 @@ next_group: | |||
342 | flexbg_size); | 350 | flexbg_size); |
343 | 351 | ||
344 | for (i = 0; i < flex_gd->count; i++) { | 352 | for (i = 0; i < flex_gd->count; i++) { |
345 | printk(KERN_DEBUG "adding %s group %u: %u " | 353 | ext4_debug( |
346 | "blocks (%d free)\n", | 354 | "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n", |
347 | ext4_bg_has_super(sb, group + i) ? "normal" : | 355 | ext4_bg_has_super(sb, group + i) ? "normal" : |
348 | "no-super", group + i, | 356 | "no-super", group + i, |
349 | group_data[i].blocks_count, | 357 | group_data[i].blocks_count, |
350 | group_data[i].free_blocks_count); | 358 | group_data[i].free_clusters_count, |
359 | group_data[i].mdata_blocks); | ||
351 | } | 360 | } |
352 | } | 361 | } |
353 | return 0; | 362 | return 0; |
@@ -399,7 +408,7 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh) | |||
399 | } | 408 | } |
400 | 409 | ||
401 | /* | 410 | /* |
402 | * set_flexbg_block_bitmap() mark @count blocks starting from @block used. | 411 | * set_flexbg_block_bitmap() mark clusters [@first_cluster, @last_cluster] used. |
403 | * | 412 | * |
404 | * Helper function for ext4_setup_new_group_blocks() which set . | 413 | * Helper function for ext4_setup_new_group_blocks() which set . |
405 | * | 414 | * |
@@ -409,22 +418,26 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh) | |||
409 | */ | 418 | */ |
410 | static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, | 419 | static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, |
411 | struct ext4_new_flex_group_data *flex_gd, | 420 | struct ext4_new_flex_group_data *flex_gd, |
412 | ext4_fsblk_t block, ext4_group_t count) | 421 | ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster) |
413 | { | 422 | { |
423 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
424 | ext4_group_t count = last_cluster - first_cluster + 1; | ||
414 | ext4_group_t count2; | 425 | ext4_group_t count2; |
415 | 426 | ||
416 | ext4_debug("mark blocks [%llu/%u] used\n", block, count); | 427 | ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster, |
417 | for (count2 = count; count > 0; count -= count2, block += count2) { | 428 | last_cluster); |
429 | for (count2 = count; count > 0; | ||
430 | count -= count2, first_cluster += count2) { | ||
418 | ext4_fsblk_t start; | 431 | ext4_fsblk_t start; |
419 | struct buffer_head *bh; | 432 | struct buffer_head *bh; |
420 | ext4_group_t group; | 433 | ext4_group_t group; |
421 | int err; | 434 | int err; |
422 | 435 | ||
423 | group = ext4_get_group_number(sb, block); | 436 | group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster)); |
424 | start = ext4_group_first_block_no(sb, group); | 437 | start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group)); |
425 | group -= flex_gd->groups[0].group; | 438 | group -= flex_gd->groups[0].group; |
426 | 439 | ||
427 | count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start); | 440 | count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start); |
428 | if (count2 > count) | 441 | if (count2 > count) |
429 | count2 = count; | 442 | count2 = count; |
430 | 443 | ||
@@ -445,9 +458,9 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, | |||
445 | err = ext4_journal_get_write_access(handle, bh); | 458 | err = ext4_journal_get_write_access(handle, bh); |
446 | if (err) | 459 | if (err) |
447 | return err; | 460 | return err; |
448 | ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block, | 461 | ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", |
449 | block - start, count2); | 462 | first_cluster, first_cluster - start, count2); |
450 | ext4_set_bits(bh->b_data, block - start, count2); | 463 | ext4_set_bits(bh->b_data, first_cluster - start, count2); |
451 | 464 | ||
452 | err = ext4_handle_dirty_metadata(handle, NULL, bh); | 465 | err = ext4_handle_dirty_metadata(handle, NULL, bh); |
453 | if (unlikely(err)) | 466 | if (unlikely(err)) |
@@ -596,9 +609,10 @@ handle_bb: | |||
596 | if (overhead != 0) { | 609 | if (overhead != 0) { |
597 | ext4_debug("mark backup superblock %#04llx (+0)\n", | 610 | ext4_debug("mark backup superblock %#04llx (+0)\n", |
598 | start); | 611 | start); |
599 | ext4_set_bits(bh->b_data, 0, overhead); | 612 | ext4_set_bits(bh->b_data, 0, |
613 | EXT4_NUM_B2C(sbi, overhead)); | ||
600 | } | 614 | } |
601 | ext4_mark_bitmap_end(group_data[i].blocks_count, | 615 | ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count), |
602 | sb->s_blocksize * 8, bh->b_data); | 616 | sb->s_blocksize * 8, bh->b_data); |
603 | err = ext4_handle_dirty_metadata(handle, NULL, bh); | 617 | err = ext4_handle_dirty_metadata(handle, NULL, bh); |
604 | if (err) | 618 | if (err) |
@@ -643,7 +657,11 @@ handle_ib: | |||
643 | continue; | 657 | continue; |
644 | } | 658 | } |
645 | err = set_flexbg_block_bitmap(sb, handle, | 659 | err = set_flexbg_block_bitmap(sb, handle, |
646 | flex_gd, start, count); | 660 | flex_gd, |
661 | EXT4_B2C(sbi, start), | ||
662 | EXT4_B2C(sbi, | ||
663 | start + count | ||
664 | - 1)); | ||
647 | if (err) | 665 | if (err) |
648 | goto out; | 666 | goto out; |
649 | count = group_table_count[j]; | 667 | count = group_table_count[j]; |
@@ -653,7 +671,11 @@ handle_ib: | |||
653 | 671 | ||
654 | if (count) { | 672 | if (count) { |
655 | err = set_flexbg_block_bitmap(sb, handle, | 673 | err = set_flexbg_block_bitmap(sb, handle, |
656 | flex_gd, start, count); | 674 | flex_gd, |
675 | EXT4_B2C(sbi, start), | ||
676 | EXT4_B2C(sbi, | ||
677 | start + count | ||
678 | - 1)); | ||
657 | if (err) | 679 | if (err) |
658 | goto out; | 680 | goto out; |
659 | } | 681 | } |
@@ -841,7 +863,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
841 | ext4_std_error(sb, err); | 863 | ext4_std_error(sb, err); |
842 | goto exit_inode; | 864 | goto exit_inode; |
843 | } | 865 | } |
844 | inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9; | 866 | inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> |
867 | (9 - EXT4_SB(sb)->s_cluster_bits); | ||
845 | ext4_mark_iloc_dirty(handle, inode, &iloc); | 868 | ext4_mark_iloc_dirty(handle, inode, &iloc); |
846 | memset(gdb_bh->b_data, 0, sb->s_blocksize); | 869 | memset(gdb_bh->b_data, 0, sb->s_blocksize); |
847 | err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); | 870 | err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); |
@@ -936,6 +959,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, | |||
936 | { | 959 | { |
937 | struct super_block *sb = inode->i_sb; | 960 | struct super_block *sb = inode->i_sb; |
938 | int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); | 961 | int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); |
962 | int cluster_bits = EXT4_SB(sb)->s_cluster_bits; | ||
939 | struct buffer_head **primary; | 963 | struct buffer_head **primary; |
940 | struct buffer_head *dind; | 964 | struct buffer_head *dind; |
941 | struct ext4_iloc iloc; | 965 | struct ext4_iloc iloc; |
@@ -1011,7 +1035,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, | |||
1011 | if (!err) | 1035 | if (!err) |
1012 | err = err2; | 1036 | err = err2; |
1013 | } | 1037 | } |
1014 | inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9; | 1038 | |
1039 | inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits); | ||
1015 | ext4_mark_iloc_dirty(handle, inode, &iloc); | 1040 | ext4_mark_iloc_dirty(handle, inode, &iloc); |
1016 | 1041 | ||
1017 | exit_bh: | 1042 | exit_bh: |
@@ -1245,7 +1270,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, | |||
1245 | ext4_group_t group; | 1270 | ext4_group_t group; |
1246 | __u16 *bg_flags = flex_gd->bg_flags; | 1271 | __u16 *bg_flags = flex_gd->bg_flags; |
1247 | int i, gdb_off, gdb_num, err = 0; | 1272 | int i, gdb_off, gdb_num, err = 0; |
1248 | 1273 | ||
1249 | 1274 | ||
1250 | for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) { | 1275 | for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) { |
1251 | group = group_data->group; | 1276 | group = group_data->group; |
@@ -1272,7 +1297,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, | |||
1272 | 1297 | ||
1273 | ext4_inode_table_set(sb, gdp, group_data->inode_table); | 1298 | ext4_inode_table_set(sb, gdp, group_data->inode_table); |
1274 | ext4_free_group_clusters_set(sb, gdp, | 1299 | ext4_free_group_clusters_set(sb, gdp, |
1275 | EXT4_NUM_B2C(sbi, group_data->free_blocks_count)); | 1300 | group_data->free_clusters_count); |
1276 | ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); | 1301 | ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); |
1277 | if (ext4_has_group_desc_csum(sb)) | 1302 | if (ext4_has_group_desc_csum(sb)) |
1278 | ext4_itable_unused_set(sb, gdp, | 1303 | ext4_itable_unused_set(sb, gdp, |
@@ -1328,7 +1353,7 @@ static void ext4_update_super(struct super_block *sb, | |||
1328 | */ | 1353 | */ |
1329 | for (i = 0; i < flex_gd->count; i++) { | 1354 | for (i = 0; i < flex_gd->count; i++) { |
1330 | blocks_count += group_data[i].blocks_count; | 1355 | blocks_count += group_data[i].blocks_count; |
1331 | free_blocks += group_data[i].free_blocks_count; | 1356 | free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count); |
1332 | } | 1357 | } |
1333 | 1358 | ||
1334 | reserved_blocks = ext4_r_blocks_count(es) * 100; | 1359 | reserved_blocks = ext4_r_blocks_count(es) * 100; |
@@ -1500,17 +1525,18 @@ static int ext4_setup_next_flex_gd(struct super_block *sb, | |||
1500 | ext4_fsblk_t n_blocks_count, | 1525 | ext4_fsblk_t n_blocks_count, |
1501 | unsigned long flexbg_size) | 1526 | unsigned long flexbg_size) |
1502 | { | 1527 | { |
1503 | struct ext4_super_block *es = EXT4_SB(sb)->s_es; | 1528 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
1529 | struct ext4_super_block *es = sbi->s_es; | ||
1504 | struct ext4_new_group_data *group_data = flex_gd->groups; | 1530 | struct ext4_new_group_data *group_data = flex_gd->groups; |
1505 | ext4_fsblk_t o_blocks_count; | 1531 | ext4_fsblk_t o_blocks_count; |
1506 | ext4_group_t n_group; | 1532 | ext4_group_t n_group; |
1507 | ext4_group_t group; | 1533 | ext4_group_t group; |
1508 | ext4_group_t last_group; | 1534 | ext4_group_t last_group; |
1509 | ext4_grpblk_t last; | 1535 | ext4_grpblk_t last; |
1510 | ext4_grpblk_t blocks_per_group; | 1536 | ext4_grpblk_t clusters_per_group; |
1511 | unsigned long i; | 1537 | unsigned long i; |
1512 | 1538 | ||
1513 | blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb); | 1539 | clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb); |
1514 | 1540 | ||
1515 | o_blocks_count = ext4_blocks_count(es); | 1541 | o_blocks_count = ext4_blocks_count(es); |
1516 | 1542 | ||
@@ -1531,9 +1557,10 @@ static int ext4_setup_next_flex_gd(struct super_block *sb, | |||
1531 | int overhead; | 1557 | int overhead; |
1532 | 1558 | ||
1533 | group_data[i].group = group + i; | 1559 | group_data[i].group = group + i; |
1534 | group_data[i].blocks_count = blocks_per_group; | 1560 | group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb); |
1535 | overhead = ext4_group_overhead_blocks(sb, group + i); | 1561 | overhead = ext4_group_overhead_blocks(sb, group + i); |
1536 | group_data[i].free_blocks_count = blocks_per_group - overhead; | 1562 | group_data[i].mdata_blocks = overhead; |
1563 | group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb); | ||
1537 | if (ext4_has_group_desc_csum(sb)) { | 1564 | if (ext4_has_group_desc_csum(sb)) { |
1538 | flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT | | 1565 | flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT | |
1539 | EXT4_BG_INODE_UNINIT; | 1566 | EXT4_BG_INODE_UNINIT; |
@@ -1547,10 +1574,10 @@ static int ext4_setup_next_flex_gd(struct super_block *sb, | |||
1547 | /* We need to initialize block bitmap of last group. */ | 1574 | /* We need to initialize block bitmap of last group. */ |
1548 | flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT; | 1575 | flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT; |
1549 | 1576 | ||
1550 | if ((last_group == n_group) && (last != blocks_per_group - 1)) { | 1577 | if ((last_group == n_group) && (last != clusters_per_group - 1)) { |
1551 | group_data[i - 1].blocks_count = last + 1; | 1578 | group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1); |
1552 | group_data[i - 1].free_blocks_count -= blocks_per_group- | 1579 | group_data[i - 1].free_clusters_count -= clusters_per_group - |
1553 | last - 1; | 1580 | last - 1; |
1554 | } | 1581 | } |
1555 | 1582 | ||
1556 | return 1; | 1583 | return 1; |
@@ -1797,7 +1824,8 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode) | |||
1797 | } | 1824 | } |
1798 | 1825 | ||
1799 | /* Do a quick sanity check of the resize inode */ | 1826 | /* Do a quick sanity check of the resize inode */ |
1800 | if (inode->i_blocks != 1 << (inode->i_blkbits - 9)) | 1827 | if (inode->i_blocks != 1 << (inode->i_blkbits - |
1828 | (9 - sbi->s_cluster_bits))) | ||
1801 | goto invalid_resize_inode; | 1829 | goto invalid_resize_inode; |
1802 | for (i = 0; i < EXT4_N_BLOCKS; i++) { | 1830 | for (i = 0; i < EXT4_N_BLOCKS; i++) { |
1803 | if (i == EXT4_DIND_BLOCK) { | 1831 | if (i == EXT4_DIND_BLOCK) { |
@@ -1960,7 +1988,7 @@ retry: | |||
1960 | if (n_group == o_group) | 1988 | if (n_group == o_group) |
1961 | add = n_blocks_count - o_blocks_count; | 1989 | add = n_blocks_count - o_blocks_count; |
1962 | else | 1990 | else |
1963 | add = EXT4_BLOCKS_PER_GROUP(sb) - (offset + 1); | 1991 | add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1)); |
1964 | if (add > 0) { | 1992 | if (add > 0) { |
1965 | err = ext4_group_extend_no_check(sb, o_blocks_count, add); | 1993 | err = ext4_group_extend_no_check(sb, o_blocks_count, add); |
1966 | if (err) | 1994 | if (err) |
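Note on the fs/ext4/resize.c hunks above: they are the mechanical core of "add support for online resizing with bigalloc". Every count that ends up in a block bitmap is first converted from blocks to clusters (EXT4_B2C()/EXT4_NUM_B2C()), free space is tracked as free_clusters_count instead of free_blocks_count, and the i_blocks accounting for the resize inode is scaled by s_cluster_bits. A minimal sketch of that arithmetic, using hypothetical names and assuming one cluster spans 2^cluster_bits blocks:

/*
 * Minimal sketch (hypothetical names, not the ext4 macros) of the
 * block/cluster arithmetic used throughout the resize.c hunks above.
 * Assumption: on a bigalloc file system one allocation cluster covers
 * 2^cluster_bits file system blocks.
 */
typedef unsigned long long fsblk_t;

struct geometry {
        unsigned int block_size;        /* bytes per block, e.g. 4096 */
        unsigned int cluster_bits;      /* log2(blocks per cluster), 0 without bigalloc */
};

/* blocks -> clusters, rounding down (what EXT4_B2C() does) */
static inline fsblk_t blocks_to_clusters(const struct geometry *g, fsblk_t blk)
{
        return blk >> g->cluster_bits;
}

/* clusters -> blocks (what EXT4_C2B() does) */
static inline fsblk_t clusters_to_blocks(const struct geometry *g, fsblk_t clu)
{
        return clu << g->cluster_bits;
}

/*
 * i_blocks is kept in 512-byte units.  With bigalloc each metadata block
 * charged to the resize inode accounts for a whole cluster, which is why
 * add_new_gdb() and reserve_backup_gdb() above shift by (9 - cluster_bits)
 * instead of the plain >> 9 used before.
 */
static inline unsigned long long gdb_charge_in_sectors(const struct geometry *g,
                                                       unsigned int nr_blocks)
{
        return (unsigned long long)nr_blocks * g->block_size >> (9 - g->cluster_bits);
}

For example, with 4 KiB blocks and cluster_bits == 4 (64 KiB clusters), one group-descriptor block charged to the resize inode accounts for 4096 >> (9 - 4) = 128 of those 512-byte units, instead of the 8 that the old >> 9 produced.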
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index e2557711a11c..0556cd036b69 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -1159,6 +1159,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, | |||
1159 | if (inode->i_ino == EXT4_ROOT_INO) | 1159 | if (inode->i_ino == EXT4_ROOT_INO) |
1160 | return -EPERM; | 1160 | return -EPERM; |
1161 | 1161 | ||
1162 | if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode))) | ||
1163 | return -EINVAL; | ||
1164 | |||
1162 | res = ext4_convert_inline_data(inode); | 1165 | res = ext4_convert_inline_data(inode); |
1163 | if (res) | 1166 | if (res) |
1164 | return res; | 1167 | return res; |
@@ -2790,14 +2793,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly) | |||
2790 | * This function is called once a day if we have errors logged | 2793 | * This function is called once a day if we have errors logged |
2791 | * on the file system | 2794 | * on the file system |
2792 | */ | 2795 | */ |
2793 | static void print_daily_error_info(unsigned long arg) | 2796 | static void print_daily_error_info(struct timer_list *t) |
2794 | { | 2797 | { |
2795 | struct super_block *sb = (struct super_block *) arg; | 2798 | struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report); |
2796 | struct ext4_sb_info *sbi; | 2799 | struct super_block *sb = sbi->s_sb; |
2797 | struct ext4_super_block *es; | 2800 | struct ext4_super_block *es = sbi->s_es; |
2798 | |||
2799 | sbi = EXT4_SB(sb); | ||
2800 | es = sbi->s_es; | ||
2801 | 2801 | ||
2802 | if (es->s_error_count) | 2802 | if (es->s_error_count) |
2803 | /* fsck newer than v1.41.13 is needed to clean this condition. */ | 2803 | /* fsck newer than v1.41.13 is needed to clean this condition. */ |
@@ -3707,6 +3707,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3707 | } | 3707 | } |
3708 | 3708 | ||
3709 | if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { | 3709 | if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { |
3710 | if (ext4_has_feature_inline_data(sb)) { | ||
3711 | ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem" | ||
3712 | " that may contain inline data"); | ||
3713 | goto failed_mount; | ||
3714 | } | ||
3710 | err = bdev_dax_supported(sb, blocksize); | 3715 | err = bdev_dax_supported(sb, blocksize); |
3711 | if (err) | 3716 | if (err) |
3712 | goto failed_mount; | 3717 | goto failed_mount; |
@@ -3976,11 +3981,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3976 | } | 3981 | } |
3977 | 3982 | ||
3978 | sbi->s_gdb_count = db_count; | 3983 | sbi->s_gdb_count = db_count; |
3979 | get_random_bytes(&sbi->s_next_generation, sizeof(u32)); | ||
3980 | spin_lock_init(&sbi->s_next_gen_lock); | ||
3981 | 3984 | ||
3982 | setup_timer(&sbi->s_err_report, print_daily_error_info, | 3985 | timer_setup(&sbi->s_err_report, print_daily_error_info, 0); |
3983 | (unsigned long) sb); | ||
3984 | 3986 | ||
3985 | /* Register extent status tree shrinker */ | 3987 | /* Register extent status tree shrinker */ |
3986 | if (ext4_es_register_shrinker(sbi)) | 3988 | if (ext4_es_register_shrinker(sbi)) |
@@ -4613,7 +4615,8 @@ static int ext4_load_journal(struct super_block *sb, | |||
4613 | "required on readonly filesystem"); | 4615 | "required on readonly filesystem"); |
4614 | if (really_read_only) { | 4616 | if (really_read_only) { |
4615 | ext4_msg(sb, KERN_ERR, "write access " | 4617 | ext4_msg(sb, KERN_ERR, "write access " |
4616 | "unavailable, cannot proceed"); | 4618 | "unavailable, cannot proceed " |
4619 | "(try mounting with noload)"); | ||
4617 | return -EROFS; | 4620 | return -EROFS; |
4618 | } | 4621 | } |
4619 | ext4_msg(sb, KERN_INFO, "write access will " | 4622 | ext4_msg(sb, KERN_INFO, "write access will " |
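The two fs/ext4/super.c additions above come from the DAX hardening patches in this pull ("add sanity check for encryption + DAX", "prevent data corruption with inline data + DAX"): an encryption context can no longer be set on a non-empty DAX file, and a DAX mount is refused outright when the inline_data feature is enabled. The series centralizes the per-inode side of these rules in a new ext4_should_use_dax() helper; the following is a hedged sketch of that kind of predicate, built from existing ext4 predicates but not a verbatim copy of the real helper in fs/ext4/inode.c:

/*
 * Hedged sketch of an ext4_should_use_dax()-style predicate.  Assumes
 * ext4's internal headers (ext4.h, ext4_jbd2.h) are in scope; the real
 * helper may check additional conditions.
 */
static bool sketch_should_use_dax(struct inode *inode)
{
        if (!test_opt(inode->i_sb, DAX))
                return false;                   /* DAX not requested at mount time */
        if (!S_ISREG(inode->i_mode))
                return false;                   /* only regular files get DAX I/O */
        if (ext4_should_journal_data(inode))
                return false;                   /* journaled data + DAX corrupts data */
        if (ext4_has_inline_data(inode))
                return false;                   /* inline data + DAX corrupts data */
        if (ext4_encrypted_inode(inode))
                return false;                   /* encryption + DAX is not supported */
        return true;
}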
diff --git a/fs/iomap.c b/fs/iomap.c index d4801f8dd4fd..5011a964a550 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
@@ -350,8 +350,8 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset, | |||
350 | static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes, | 350 | static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes, |
351 | struct iomap *iomap) | 351 | struct iomap *iomap) |
352 | { | 352 | { |
353 | sector_t sector = iomap->blkno + | 353 | sector_t sector = (iomap->addr + |
354 | (((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9); | 354 | (pos & PAGE_MASK) - iomap->offset) >> 9; |
355 | 355 | ||
356 | return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector, | 356 | return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector, |
357 | offset, bytes); | 357 | offset, bytes); |
@@ -510,11 +510,12 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi, | |||
510 | flags |= FIEMAP_EXTENT_MERGED; | 510 | flags |= FIEMAP_EXTENT_MERGED; |
511 | if (iomap->flags & IOMAP_F_SHARED) | 511 | if (iomap->flags & IOMAP_F_SHARED) |
512 | flags |= FIEMAP_EXTENT_SHARED; | 512 | flags |= FIEMAP_EXTENT_SHARED; |
513 | if (iomap->flags & IOMAP_F_DATA_INLINE) | ||
514 | flags |= FIEMAP_EXTENT_DATA_INLINE; | ||
513 | 515 | ||
514 | return fiemap_fill_next_extent(fi, iomap->offset, | 516 | return fiemap_fill_next_extent(fi, iomap->offset, |
515 | iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9: 0, | 517 | iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0, |
516 | iomap->length, flags); | 518 | iomap->length, flags); |
517 | |||
518 | } | 519 | } |
519 | 520 | ||
520 | static loff_t | 521 | static loff_t |
@@ -830,7 +831,7 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos, | |||
830 | bio = bio_alloc(GFP_KERNEL, 1); | 831 | bio = bio_alloc(GFP_KERNEL, 1); |
831 | bio_set_dev(bio, iomap->bdev); | 832 | bio_set_dev(bio, iomap->bdev); |
832 | bio->bi_iter.bi_sector = | 833 | bio->bi_iter.bi_sector = |
833 | iomap->blkno + ((pos - iomap->offset) >> 9); | 834 | (iomap->addr + pos - iomap->offset) >> 9; |
834 | bio->bi_private = dio; | 835 | bio->bi_private = dio; |
835 | bio->bi_end_io = iomap_dio_bio_end_io; | 836 | bio->bi_end_io = iomap_dio_bio_end_io; |
836 | 837 | ||
@@ -909,7 +910,7 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, | |||
909 | bio = bio_alloc(GFP_KERNEL, nr_pages); | 910 | bio = bio_alloc(GFP_KERNEL, nr_pages); |
910 | bio_set_dev(bio, iomap->bdev); | 911 | bio_set_dev(bio, iomap->bdev); |
911 | bio->bi_iter.bi_sector = | 912 | bio->bi_iter.bi_sector = |
912 | iomap->blkno + ((pos - iomap->offset) >> 9); | 913 | (iomap->addr + pos - iomap->offset) >> 9; |
913 | bio->bi_write_hint = dio->iocb->ki_hint; | 914 | bio->bi_write_hint = dio->iocb->ki_hint; |
914 | bio->bi_private = dio; | 915 | bio->bi_private = dio; |
915 | bio->bi_end_io = iomap_dio_bio_end_io; | 916 | bio->bi_end_io = iomap_dio_bio_end_io; |
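The fs/iomap.c changes above are consumers of the new byte-based addressing from "iomap: Switch from blkno to disk offset": instead of mixing a 512-byte sector number (blkno) with byte offsets, the sector for a given file position is now derived purely from byte arithmetic followed by a single >> 9 (the nfsd and xfs hunks below make the matching producer-side change). A small standalone sketch of that formula, with a hypothetical struct and values:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for the relevant struct iomap fields */
struct iomap_sketch {
        uint64_t addr;          /* disk offset of mapping, bytes */
        uint64_t offset;        /* file offset of mapping, bytes */
        uint64_t length;        /* length of mapping, bytes */
};

/* same formula as iomap_dio_zero()/iomap_dio_actor() after the change */
static uint64_t pos_to_sector(const struct iomap_sketch *im, uint64_t pos)
{
        return (im->addr + pos - im->offset) >> 9;
}

int main(void)
{
        /* hypothetical mapping: file bytes [1 MiB, 2 MiB) stored at disk byte 8 MiB */
        struct iomap_sketch im = {
                .addr = 8ull << 20,
                .offset = 1ull << 20,
                .length = 1ull << 20,
        };

        /* file position 1.5 MiB -> disk byte 8.5 MiB -> sector 17408 */
        printf("sector = %llu\n",
               (unsigned long long)pos_to_sector(&im, 3ull << 19));
        return 0;
}

Keeping addr in bytes (with IOMAP_NULL_ADDR for holes and delalloc) means each call site no longer has to remember which quantities are sectors and which are bytes; the conversion happens exactly once, at the point where a bio or DAX sector is needed.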
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 7d5ef3bf3f3e..d2a85c9720e9 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -165,11 +165,11 @@ static void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb) | |||
165 | * Helper function used to manage commit timeouts | 165 | * Helper function used to manage commit timeouts |
166 | */ | 166 | */ |
167 | 167 | ||
168 | static void commit_timeout(unsigned long __data) | 168 | static void commit_timeout(struct timer_list *t) |
169 | { | 169 | { |
170 | struct task_struct * p = (struct task_struct *) __data; | 170 | journal_t *journal = from_timer(journal, t, j_commit_timer); |
171 | 171 | ||
172 | wake_up_process(p); | 172 | wake_up_process(journal->j_task); |
173 | } | 173 | } |
174 | 174 | ||
175 | /* | 175 | /* |
@@ -197,8 +197,7 @@ static int kjournald2(void *arg) | |||
197 | * Set up an interval timer which can be used to trigger a commit wakeup | 197 | * Set up an interval timer which can be used to trigger a commit wakeup |
198 | * after the commit interval expires | 198 | * after the commit interval expires |
199 | */ | 199 | */ |
200 | setup_timer(&journal->j_commit_timer, commit_timeout, | 200 | timer_setup(&journal->j_commit_timer, commit_timeout, 0); |
201 | (unsigned long)current); | ||
202 | 201 | ||
203 | set_freezable(); | 202 | set_freezable(); |
204 | 203 | ||
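Both timer conversions in this pull (s_err_report in super.c above and j_commit_timer here) follow the same timer_setup()/from_timer() pattern: the callback receives the timer_list itself and recovers its containing object with from_timer(), so no state has to be smuggled through an unsigned long cast. A minimal, hedged sketch of the pattern with made-up names:

#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

/* hypothetical owner structure; the timer is embedded in it */
struct demo_sb_info {
        struct super_block *s_sb;
        struct timer_list s_report_timer;
};

/* new-style callback: gets the timer, not a casted unsigned long */
static void demo_report(struct timer_list *t)
{
        /* from_timer() is container_of() for timers */
        struct demo_sb_info *sbi = from_timer(sbi, t, s_report_timer);

        pr_info("demo: periodic report for %s\n", sbi->s_sb->s_id);
        /* re-arm for tomorrow, as print_daily_error_info() does */
        mod_timer(&sbi->s_report_timer, jiffies + 24 * 60 * 60 * HZ);
}

static void demo_start_report_timer(struct demo_sb_info *sbi)
{
        /* no more passing (unsigned long)sb as the callback argument */
        timer_setup(&sbi->s_report_timer, demo_report, 0);
        mod_timer(&sbi->s_report_timer, jiffies + 24 * 60 * 60 * HZ);
}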
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index 3f880ae0966b..70b8bf781fce 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c | |||
@@ -66,7 +66,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, | |||
66 | bex->es = PNFS_BLOCK_READ_DATA; | 66 | bex->es = PNFS_BLOCK_READ_DATA; |
67 | else | 67 | else |
68 | bex->es = PNFS_BLOCK_READWRITE_DATA; | 68 | bex->es = PNFS_BLOCK_READWRITE_DATA; |
69 | bex->soff = (iomap.blkno << 9); | 69 | bex->soff = iomap.addr; |
70 | break; | 70 | break; |
71 | case IOMAP_UNWRITTEN: | 71 | case IOMAP_UNWRITTEN: |
72 | if (seg->iomode & IOMODE_RW) { | 72 | if (seg->iomode & IOMODE_RW) { |
@@ -79,7 +79,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, | |||
79 | } | 79 | } |
80 | 80 | ||
81 | bex->es = PNFS_BLOCK_INVALID_DATA; | 81 | bex->es = PNFS_BLOCK_INVALID_DATA; |
82 | bex->soff = (iomap.blkno << 9); | 82 | bex->soff = iomap.addr; |
83 | break; | 83 | break; |
84 | } | 84 | } |
85 | /*FALLTHRU*/ | 85 | /*FALLTHRU*/ |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index f179bdf1644d..9744b4819e0d 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -54,13 +54,13 @@ xfs_bmbt_to_iomap( | |||
54 | struct xfs_mount *mp = ip->i_mount; | 54 | struct xfs_mount *mp = ip->i_mount; |
55 | 55 | ||
56 | if (imap->br_startblock == HOLESTARTBLOCK) { | 56 | if (imap->br_startblock == HOLESTARTBLOCK) { |
57 | iomap->blkno = IOMAP_NULL_BLOCK; | 57 | iomap->addr = IOMAP_NULL_ADDR; |
58 | iomap->type = IOMAP_HOLE; | 58 | iomap->type = IOMAP_HOLE; |
59 | } else if (imap->br_startblock == DELAYSTARTBLOCK) { | 59 | } else if (imap->br_startblock == DELAYSTARTBLOCK) { |
60 | iomap->blkno = IOMAP_NULL_BLOCK; | 60 | iomap->addr = IOMAP_NULL_ADDR; |
61 | iomap->type = IOMAP_DELALLOC; | 61 | iomap->type = IOMAP_DELALLOC; |
62 | } else { | 62 | } else { |
63 | iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock); | 63 | iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock)); |
64 | if (imap->br_state == XFS_EXT_UNWRITTEN) | 64 | if (imap->br_state == XFS_EXT_UNWRITTEN) |
65 | iomap->type = IOMAP_UNWRITTEN; | 65 | iomap->type = IOMAP_UNWRITTEN; |
66 | else | 66 | else |
diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 8a7c6d26b147..76ce247d3d4b 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h | |||
@@ -16,8 +16,8 @@ struct vm_fault; | |||
16 | */ | 16 | */ |
17 | #define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */ | 17 | #define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */ |
18 | #define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */ | 18 | #define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */ |
19 | #define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */ | 19 | #define IOMAP_MAPPED 0x03 /* blocks allocated at @addr */ |
20 | #define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */ | 20 | #define IOMAP_UNWRITTEN 0x04 /* blocks allocated at @addr in unwritten state */ |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * Flags for all iomap mappings: | 23 | * Flags for all iomap mappings: |
@@ -27,16 +27,17 @@ struct vm_fault; | |||
27 | /* | 27 | /* |
28 | * Flags that only need to be reported for IOMAP_REPORT requests: | 28 | * Flags that only need to be reported for IOMAP_REPORT requests: |
29 | */ | 29 | */ |
30 | #define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */ | 30 | #define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */ |
31 | #define IOMAP_F_SHARED 0x20 /* block shared with another file */ | 31 | #define IOMAP_F_SHARED 0x20 /* block shared with another file */ |
32 | #define IOMAP_F_DATA_INLINE 0x40 /* data inline in the inode */ | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * Magic value for blkno: | 35 | * Magic value for addr: |
35 | */ | 36 | */ |
36 | #define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */ | 37 | #define IOMAP_NULL_ADDR -1ULL /* addr is not valid */ |
37 | 38 | ||
38 | struct iomap { | 39 | struct iomap { |
39 | sector_t blkno; /* 1st sector of mapping, 512b units */ | 40 | u64 addr; /* disk offset of mapping, bytes */ |
40 | loff_t offset; /* file offset of mapping, bytes */ | 41 | loff_t offset; /* file offset of mapping, bytes */ |
41 | u64 length; /* length of mapping, bytes */ | 42 | u64 length; /* length of mapping, bytes */ |
42 | u16 type; /* type of mapping */ | 43 | u16 type; /* type of mapping */ |
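With addr expressed in bytes, a mapping no longer has to start on a sector boundary, which is part of what makes the new IOMAP_F_DATA_INLINE flag and "ext4: Add iomap support for inline data" possible: inline data lives inside the inode itself, at an arbitrary byte position on disk. A hedged sketch of how a ->iomap_begin implementation might report such an extent (hypothetical names and parameters; the real ext4 code is in fs/ext4/inline.c and computes the on-disk position from the inode's buffer head):

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/iomap.h>

/*
 * Hypothetical: @inline_addr is the byte position of this inode's inline
 * data area on disk and @inline_len its length; a real filesystem derives
 * both from its inode table layout.
 */
static int demo_inline_iomap_begin(struct inode *inode, u64 inline_addr,
                                   u64 inline_len, struct iomap *iomap)
{
        iomap->bdev = inode->i_sb->s_bdev;
        iomap->offset = 0;                      /* inline data covers the start of the file */
        iomap->length = min_t(u64, inline_len, i_size_read(inode));
        iomap->type = IOMAP_MAPPED;             /* treated as ordinary mapped data in this sketch */
        iomap->flags = IOMAP_F_DATA_INLINE;     /* surfaces as FIEMAP_EXTENT_DATA_INLINE */
        iomap->addr = inline_addr;              /* byte-granular, need not be sector aligned */
        return 0;
}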