diff options
Diffstat (limited to 'fs')
101 files changed, 1956 insertions, 1495 deletions
diff --git a/fs/Kconfig b/fs/Kconfig index f8fccaaad628..64d44efad7a5 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
| @@ -6,10 +6,6 @@ menu "File systems" | |||
| 6 | 6 | ||
| 7 | if BLOCK | 7 | if BLOCK |
| 8 | 8 | ||
| 9 | config FS_JOURNAL_INFO | ||
| 10 | bool | ||
| 11 | default n | ||
| 12 | |||
| 13 | source "fs/ext2/Kconfig" | 9 | source "fs/ext2/Kconfig" |
| 14 | source "fs/ext3/Kconfig" | 10 | source "fs/ext3/Kconfig" |
| 15 | source "fs/ext4/Kconfig" | 11 | source "fs/ext4/Kconfig" |
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 94f5110c4655..9f0bf13291e5 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c | |||
| @@ -35,14 +35,13 @@ static int anon_inodefs_get_sb(struct file_system_type *fs_type, int flags, | |||
| 35 | mnt); | 35 | mnt); |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | static int anon_inodefs_delete_dentry(struct dentry *dentry) | 38 | /* |
| 39 | * anon_inodefs_dname() is called from d_path(). | ||
| 40 | */ | ||
| 41 | static char *anon_inodefs_dname(struct dentry *dentry, char *buffer, int buflen) | ||
| 39 | { | 42 | { |
| 40 | /* | 43 | return dynamic_dname(dentry, buffer, buflen, "anon_inode:%s", |
| 41 | * We faked vfs to believe the dentry was hashed when we created it. | 44 | dentry->d_name.name); |
| 42 | * Now we restore the flag so that dput() will work correctly. | ||
| 43 | */ | ||
| 44 | dentry->d_flags |= DCACHE_UNHASHED; | ||
| 45 | return 1; | ||
| 46 | } | 45 | } |
| 47 | 46 | ||
| 48 | static struct file_system_type anon_inode_fs_type = { | 47 | static struct file_system_type anon_inode_fs_type = { |
| @@ -51,7 +50,7 @@ static struct file_system_type anon_inode_fs_type = { | |||
| 51 | .kill_sb = kill_anon_super, | 50 | .kill_sb = kill_anon_super, |
| 52 | }; | 51 | }; |
| 53 | static const struct dentry_operations anon_inodefs_dentry_operations = { | 52 | static const struct dentry_operations anon_inodefs_dentry_operations = { |
| 54 | .d_delete = anon_inodefs_delete_dentry, | 53 | .d_dname = anon_inodefs_dname, |
| 55 | }; | 54 | }; |
| 56 | 55 | ||
| 57 | /* | 56 | /* |
| @@ -119,18 +118,16 @@ struct file *anon_inode_getfile(const char *name, | |||
| 119 | atomic_inc(&anon_inode_inode->i_count); | 118 | atomic_inc(&anon_inode_inode->i_count); |
| 120 | 119 | ||
| 121 | path.dentry->d_op = &anon_inodefs_dentry_operations; | 120 | path.dentry->d_op = &anon_inodefs_dentry_operations; |
| 122 | /* Do not publish this dentry inside the global dentry hash table */ | ||
| 123 | path.dentry->d_flags &= ~DCACHE_UNHASHED; | ||
| 124 | d_instantiate(path.dentry, anon_inode_inode); | 121 | d_instantiate(path.dentry, anon_inode_inode); |
| 125 | 122 | ||
| 126 | error = -ENFILE; | 123 | error = -ENFILE; |
| 127 | file = alloc_file(&path, FMODE_READ | FMODE_WRITE, fops); | 124 | file = alloc_file(&path, OPEN_FMODE(flags), fops); |
| 128 | if (!file) | 125 | if (!file) |
| 129 | goto err_dput; | 126 | goto err_dput; |
| 130 | file->f_mapping = anon_inode_inode->i_mapping; | 127 | file->f_mapping = anon_inode_inode->i_mapping; |
| 131 | 128 | ||
| 132 | file->f_pos = 0; | 129 | file->f_pos = 0; |
| 133 | file->f_flags = O_RDWR | (flags & O_NONBLOCK); | 130 | file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); |
| 134 | file->f_version = 0; | 131 | file->f_version = 0; |
| 135 | file->private_data = priv; | 132 | file->private_data = priv; |
| 136 | 133 | ||
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index b639dcf7c778..346b69405363 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | 32 | ||
| 33 | static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs); | 33 | static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs); |
| 34 | static int load_aout_library(struct file*); | 34 | static int load_aout_library(struct file*); |
| 35 | static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit); | 35 | static int aout_core_dump(struct coredump_params *cprm); |
| 36 | 36 | ||
| 37 | static struct linux_binfmt aout_format = { | 37 | static struct linux_binfmt aout_format = { |
| 38 | .module = THIS_MODULE, | 38 | .module = THIS_MODULE, |
| @@ -89,8 +89,9 @@ if (file->f_op->llseek) { \ | |||
| 89 | * dumping of the process results in another error.. | 89 | * dumping of the process results in another error.. |
| 90 | */ | 90 | */ |
| 91 | 91 | ||
| 92 | static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit) | 92 | static int aout_core_dump(struct coredump_params *cprm) |
| 93 | { | 93 | { |
| 94 | struct file *file = cprm->file; | ||
| 94 | mm_segment_t fs; | 95 | mm_segment_t fs; |
| 95 | int has_dumped = 0; | 96 | int has_dumped = 0; |
| 96 | unsigned long dump_start, dump_size; | 97 | unsigned long dump_start, dump_size; |
| @@ -108,16 +109,16 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u | |||
| 108 | current->flags |= PF_DUMPCORE; | 109 | current->flags |= PF_DUMPCORE; |
| 109 | strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm)); | 110 | strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm)); |
| 110 | dump.u_ar0 = offsetof(struct user, regs); | 111 | dump.u_ar0 = offsetof(struct user, regs); |
| 111 | dump.signal = signr; | 112 | dump.signal = cprm->signr; |
| 112 | aout_dump_thread(regs, &dump); | 113 | aout_dump_thread(cprm->regs, &dump); |
| 113 | 114 | ||
| 114 | /* If the size of the dump file exceeds the rlimit, then see what would happen | 115 | /* If the size of the dump file exceeds the rlimit, then see what would happen |
| 115 | if we wrote the stack, but not the data area. */ | 116 | if we wrote the stack, but not the data area. */ |
| 116 | if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit) | 117 | if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit) |
| 117 | dump.u_dsize = 0; | 118 | dump.u_dsize = 0; |
| 118 | 119 | ||
| 119 | /* Make sure we have enough room to write the stack and data areas. */ | 120 | /* Make sure we have enough room to write the stack and data areas. */ |
| 120 | if ((dump.u_ssize + 1) * PAGE_SIZE > limit) | 121 | if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) |
| 121 | dump.u_ssize = 0; | 122 | dump.u_ssize = 0; |
| 122 | 123 | ||
| 123 | /* make sure we actually have a data and stack area to dump */ | 124 | /* make sure we actually have a data and stack area to dump */ |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 97b6e9efeb7f..edd90c49003c 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
| @@ -45,7 +45,7 @@ static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, | |||
| 45 | * don't even try. | 45 | * don't even try. |
| 46 | */ | 46 | */ |
| 47 | #ifdef CONFIG_ELF_CORE | 47 | #ifdef CONFIG_ELF_CORE |
| 48 | static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit); | 48 | static int elf_core_dump(struct coredump_params *cprm); |
| 49 | #else | 49 | #else |
| 50 | #define elf_core_dump NULL | 50 | #define elf_core_dump NULL |
| 51 | #endif | 51 | #endif |
| @@ -1272,8 +1272,9 @@ static int writenote(struct memelfnote *men, struct file *file, | |||
| 1272 | } | 1272 | } |
| 1273 | #undef DUMP_WRITE | 1273 | #undef DUMP_WRITE |
| 1274 | 1274 | ||
| 1275 | #define DUMP_WRITE(addr, nr) \ | 1275 | #define DUMP_WRITE(addr, nr) \ |
| 1276 | if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \ | 1276 | if ((size += (nr)) > cprm->limit || \ |
| 1277 | !dump_write(cprm->file, (addr), (nr))) \ | ||
| 1277 | goto end_coredump; | 1278 | goto end_coredump; |
| 1278 | 1279 | ||
| 1279 | static void fill_elf_header(struct elfhdr *elf, int segs, | 1280 | static void fill_elf_header(struct elfhdr *elf, int segs, |
| @@ -1901,7 +1902,7 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, | |||
| 1901 | * and then they are actually written out. If we run out of core limit | 1902 | * and then they are actually written out. If we run out of core limit |
| 1902 | * we just truncate. | 1903 | * we just truncate. |
| 1903 | */ | 1904 | */ |
| 1904 | static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit) | 1905 | static int elf_core_dump(struct coredump_params *cprm) |
| 1905 | { | 1906 | { |
| 1906 | int has_dumped = 0; | 1907 | int has_dumped = 0; |
| 1907 | mm_segment_t fs; | 1908 | mm_segment_t fs; |
| @@ -1947,7 +1948,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un | |||
| 1947 | * notes. This also sets up the file header. | 1948 | * notes. This also sets up the file header. |
| 1948 | */ | 1949 | */ |
| 1949 | if (!fill_note_info(elf, segs + 1, /* including notes section */ | 1950 | if (!fill_note_info(elf, segs + 1, /* including notes section */ |
| 1950 | &info, signr, regs)) | 1951 | &info, cprm->signr, cprm->regs)) |
| 1951 | goto cleanup; | 1952 | goto cleanup; |
| 1952 | 1953 | ||
| 1953 | has_dumped = 1; | 1954 | has_dumped = 1; |
| @@ -2009,14 +2010,14 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un | |||
| 2009 | #endif | 2010 | #endif |
| 2010 | 2011 | ||
| 2011 | /* write out the notes section */ | 2012 | /* write out the notes section */ |
| 2012 | if (!write_note_info(&info, file, &foffset)) | 2013 | if (!write_note_info(&info, cprm->file, &foffset)) |
| 2013 | goto end_coredump; | 2014 | goto end_coredump; |
| 2014 | 2015 | ||
| 2015 | if (elf_coredump_extra_notes_write(file, &foffset)) | 2016 | if (elf_coredump_extra_notes_write(cprm->file, &foffset)) |
| 2016 | goto end_coredump; | 2017 | goto end_coredump; |
| 2017 | 2018 | ||
| 2018 | /* Align to page */ | 2019 | /* Align to page */ |
| 2019 | if (!dump_seek(file, dataoff - foffset)) | 2020 | if (!dump_seek(cprm->file, dataoff - foffset)) |
| 2020 | goto end_coredump; | 2021 | goto end_coredump; |
| 2021 | 2022 | ||
| 2022 | for (vma = first_vma(current, gate_vma); vma != NULL; | 2023 | for (vma = first_vma(current, gate_vma); vma != NULL; |
| @@ -2033,12 +2034,13 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un | |||
| 2033 | page = get_dump_page(addr); | 2034 | page = get_dump_page(addr); |
| 2034 | if (page) { | 2035 | if (page) { |
| 2035 | void *kaddr = kmap(page); | 2036 | void *kaddr = kmap(page); |
| 2036 | stop = ((size += PAGE_SIZE) > limit) || | 2037 | stop = ((size += PAGE_SIZE) > cprm->limit) || |
| 2037 | !dump_write(file, kaddr, PAGE_SIZE); | 2038 | !dump_write(cprm->file, kaddr, |
| 2039 | PAGE_SIZE); | ||
| 2038 | kunmap(page); | 2040 | kunmap(page); |
| 2039 | page_cache_release(page); | 2041 | page_cache_release(page); |
| 2040 | } else | 2042 | } else |
| 2041 | stop = !dump_seek(file, PAGE_SIZE); | 2043 | stop = !dump_seek(cprm->file, PAGE_SIZE); |
| 2042 | if (stop) | 2044 | if (stop) |
| 2043 | goto end_coredump; | 2045 | goto end_coredump; |
| 2044 | } | 2046 | } |
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 7b055385db8e..c25256a5c5b0 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c | |||
| @@ -76,7 +76,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *, | |||
| 76 | struct file *, struct mm_struct *); | 76 | struct file *, struct mm_struct *); |
| 77 | 77 | ||
| 78 | #ifdef CONFIG_ELF_CORE | 78 | #ifdef CONFIG_ELF_CORE |
| 79 | static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *, unsigned long limit); | 79 | static int elf_fdpic_core_dump(struct coredump_params *cprm); |
| 80 | #endif | 80 | #endif |
| 81 | 81 | ||
| 82 | static struct linux_binfmt elf_fdpic_format = { | 82 | static struct linux_binfmt elf_fdpic_format = { |
| @@ -1326,8 +1326,9 @@ static int writenote(struct memelfnote *men, struct file *file) | |||
| 1326 | #undef DUMP_WRITE | 1326 | #undef DUMP_WRITE |
| 1327 | #undef DUMP_SEEK | 1327 | #undef DUMP_SEEK |
| 1328 | 1328 | ||
| 1329 | #define DUMP_WRITE(addr, nr) \ | 1329 | #define DUMP_WRITE(addr, nr) \ |
| 1330 | if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \ | 1330 | if ((size += (nr)) > cprm->limit || \ |
| 1331 | !dump_write(cprm->file, (addr), (nr))) \ | ||
| 1331 | goto end_coredump; | 1332 | goto end_coredump; |
| 1332 | 1333 | ||
| 1333 | static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs) | 1334 | static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs) |
| @@ -1582,8 +1583,7 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size, | |||
| 1582 | * and then they are actually written out. If we run out of core limit | 1583 | * and then they are actually written out. If we run out of core limit |
| 1583 | * we just truncate. | 1584 | * we just truncate. |
| 1584 | */ | 1585 | */ |
| 1585 | static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, | 1586 | static int elf_fdpic_core_dump(struct coredump_params *cprm) |
| 1586 | struct file *file, unsigned long limit) | ||
| 1587 | { | 1587 | { |
| 1588 | #define NUM_NOTES 6 | 1588 | #define NUM_NOTES 6 |
| 1589 | int has_dumped = 0; | 1589 | int has_dumped = 0; |
| @@ -1642,7 +1642,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, | |||
| 1642 | goto cleanup; | 1642 | goto cleanup; |
| 1643 | #endif | 1643 | #endif |
| 1644 | 1644 | ||
| 1645 | if (signr) { | 1645 | if (cprm->signr) { |
| 1646 | struct core_thread *ct; | 1646 | struct core_thread *ct; |
| 1647 | struct elf_thread_status *tmp; | 1647 | struct elf_thread_status *tmp; |
| 1648 | 1648 | ||
| @@ -1661,14 +1661,14 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, | |||
| 1661 | int sz; | 1661 | int sz; |
| 1662 | 1662 | ||
| 1663 | tmp = list_entry(t, struct elf_thread_status, list); | 1663 | tmp = list_entry(t, struct elf_thread_status, list); |
| 1664 | sz = elf_dump_thread_status(signr, tmp); | 1664 | sz = elf_dump_thread_status(cprm->signr, tmp); |
| 1665 | thread_status_size += sz; | 1665 | thread_status_size += sz; |
| 1666 | } | 1666 | } |
| 1667 | } | 1667 | } |
| 1668 | 1668 | ||
| 1669 | /* now collect the dump for the current */ | 1669 | /* now collect the dump for the current */ |
| 1670 | fill_prstatus(prstatus, current, signr); | 1670 | fill_prstatus(prstatus, current, cprm->signr); |
| 1671 | elf_core_copy_regs(&prstatus->pr_reg, regs); | 1671 | elf_core_copy_regs(&prstatus->pr_reg, cprm->regs); |
| 1672 | 1672 | ||
| 1673 | segs = current->mm->map_count; | 1673 | segs = current->mm->map_count; |
| 1674 | #ifdef ELF_CORE_EXTRA_PHDRS | 1674 | #ifdef ELF_CORE_EXTRA_PHDRS |
| @@ -1703,7 +1703,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, | |||
| 1703 | 1703 | ||
| 1704 | /* Try to dump the FPU. */ | 1704 | /* Try to dump the FPU. */ |
| 1705 | if ((prstatus->pr_fpvalid = | 1705 | if ((prstatus->pr_fpvalid = |
| 1706 | elf_core_copy_task_fpregs(current, regs, fpu))) | 1706 | elf_core_copy_task_fpregs(current, cprm->regs, fpu))) |
| 1707 | fill_note(notes + numnote++, | 1707 | fill_note(notes + numnote++, |
| 1708 | "CORE", NT_PRFPREG, sizeof(*fpu), fpu); | 1708 | "CORE", NT_PRFPREG, sizeof(*fpu), fpu); |
| 1709 | #ifdef ELF_CORE_COPY_XFPREGS | 1709 | #ifdef ELF_CORE_COPY_XFPREGS |
| @@ -1774,7 +1774,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, | |||
| 1774 | 1774 | ||
| 1775 | /* write out the notes section */ | 1775 | /* write out the notes section */ |
| 1776 | for (i = 0; i < numnote; i++) | 1776 | for (i = 0; i < numnote; i++) |
| 1777 | if (!writenote(notes + i, file)) | 1777 | if (!writenote(notes + i, cprm->file)) |
| 1778 | goto end_coredump; | 1778 | goto end_coredump; |
| 1779 | 1779 | ||
| 1780 | /* write out the thread status notes section */ | 1780 | /* write out the thread status notes section */ |
| @@ -1783,14 +1783,15 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, | |||
| 1783 | list_entry(t, struct elf_thread_status, list); | 1783 | list_entry(t, struct elf_thread_status, list); |
| 1784 | 1784 | ||
| 1785 | for (i = 0; i < tmp->num_notes; i++) | 1785 | for (i = 0; i < tmp->num_notes; i++) |
| 1786 | if (!writenote(&tmp->notes[i], file)) | 1786 | if (!writenote(&tmp->notes[i], cprm->file)) |
| 1787 | goto end_coredump; | 1787 | goto end_coredump; |
| 1788 | } | 1788 | } |
| 1789 | 1789 | ||
| 1790 | if (!dump_seek(file, dataoff)) | 1790 | if (!dump_seek(cprm->file, dataoff)) |
| 1791 | goto end_coredump; | 1791 | goto end_coredump; |
| 1792 | 1792 | ||
| 1793 | if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0) | 1793 | if (elf_fdpic_dump_segments(cprm->file, &size, &cprm->limit, |
| 1794 | mm_flags) < 0) | ||
| 1794 | goto end_coredump; | 1795 | goto end_coredump; |
| 1795 | 1796 | ||
| 1796 | #ifdef ELF_CORE_WRITE_EXTRA_DATA | 1797 | #ifdef ELF_CORE_WRITE_EXTRA_DATA |
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index a2796651e756..d4a00ea1054c 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
| @@ -87,7 +87,7 @@ static int load_flat_shared_library(int id, struct lib_info *p); | |||
| 87 | #endif | 87 | #endif |
| 88 | 88 | ||
| 89 | static int load_flat_binary(struct linux_binprm *, struct pt_regs * regs); | 89 | static int load_flat_binary(struct linux_binprm *, struct pt_regs * regs); |
| 90 | static int flat_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit); | 90 | static int flat_core_dump(struct coredump_params *cprm); |
| 91 | 91 | ||
| 92 | static struct linux_binfmt flat_format = { | 92 | static struct linux_binfmt flat_format = { |
| 93 | .module = THIS_MODULE, | 93 | .module = THIS_MODULE, |
| @@ -102,10 +102,10 @@ static struct linux_binfmt flat_format = { | |||
| 102 | * Currently only a stub-function. | 102 | * Currently only a stub-function. |
| 103 | */ | 103 | */ |
| 104 | 104 | ||
| 105 | static int flat_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit) | 105 | static int flat_core_dump(struct coredump_params *cprm) |
| 106 | { | 106 | { |
| 107 | printk("Process %s:%d received signr %d and should have core dumped\n", | 107 | printk("Process %s:%d received signr %d and should have core dumped\n", |
| 108 | current->comm, current->pid, (int) signr); | 108 | current->comm, current->pid, (int) cprm->signr); |
| 109 | return(1); | 109 | return(1); |
| 110 | } | 110 | } |
| 111 | 111 | ||
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c index eff74b9c9e77..2a9b5330cc5e 100644 --- a/fs/binfmt_som.c +++ b/fs/binfmt_som.c | |||
| @@ -43,7 +43,7 @@ static int load_som_library(struct file *); | |||
| 43 | * don't even try. | 43 | * don't even try. |
| 44 | */ | 44 | */ |
| 45 | #if 0 | 45 | #if 0 |
| 46 | static int som_core_dump(long signr, struct pt_regs *regs, unsigned long limit); | 46 | static int som_core_dump(struct coredump_params *cprm); |
| 47 | #else | 47 | #else |
| 48 | #define som_core_dump NULL | 48 | #define som_core_dump NULL |
| 49 | #endif | 49 | #endif |
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 402afe0a0bfb..7bb3c020e570 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig | |||
| @@ -4,7 +4,6 @@ config BTRFS_FS | |||
| 4 | select LIBCRC32C | 4 | select LIBCRC32C |
| 5 | select ZLIB_INFLATE | 5 | select ZLIB_INFLATE |
| 6 | select ZLIB_DEFLATE | 6 | select ZLIB_DEFLATE |
| 7 | select FS_JOURNAL_INFO | ||
| 8 | help | 7 | help |
| 9 | Btrfs is a new filesystem with extents, writable snapshotting, | 8 | Btrfs is a new filesystem with extents, writable snapshotting, |
| 10 | support for multiple devices and many more features. | 9 | support for multiple devices and many more features. |
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 52cbe47022bf..2e9e69987a82 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c | |||
| @@ -94,7 +94,8 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name, | |||
| 94 | /* | 94 | /* |
| 95 | * Needs to be called with fs_mutex held | 95 | * Needs to be called with fs_mutex held |
| 96 | */ | 96 | */ |
| 97 | static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) | 97 | static int btrfs_set_acl(struct btrfs_trans_handle *trans, |
| 98 | struct inode *inode, struct posix_acl *acl, int type) | ||
| 98 | { | 99 | { |
| 99 | int ret, size = 0; | 100 | int ret, size = 0; |
| 100 | const char *name; | 101 | const char *name; |
| @@ -140,8 +141,7 @@ static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) | |||
| 140 | goto out; | 141 | goto out; |
| 141 | } | 142 | } |
| 142 | 143 | ||
| 143 | ret = __btrfs_setxattr(inode, name, value, size, 0); | 144 | ret = __btrfs_setxattr(trans, inode, name, value, size, 0); |
| 144 | |||
| 145 | out: | 145 | out: |
| 146 | kfree(value); | 146 | kfree(value); |
| 147 | 147 | ||
| @@ -154,7 +154,7 @@ out: | |||
| 154 | static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, | 154 | static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, |
| 155 | const void *value, size_t size, int flags, int type) | 155 | const void *value, size_t size, int flags, int type) |
| 156 | { | 156 | { |
| 157 | int ret = 0; | 157 | int ret; |
| 158 | struct posix_acl *acl = NULL; | 158 | struct posix_acl *acl = NULL; |
| 159 | 159 | ||
| 160 | if (value) { | 160 | if (value) { |
| @@ -167,7 +167,7 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, | |||
| 167 | } | 167 | } |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | ret = btrfs_set_acl(dentry->d_inode, acl, type); | 170 | ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type); |
| 171 | 171 | ||
| 172 | posix_acl_release(acl); | 172 | posix_acl_release(acl); |
| 173 | 173 | ||
| @@ -196,7 +196,8 @@ int btrfs_check_acl(struct inode *inode, int mask) | |||
| 196 | * stuff has been fixed to work with that. If the locking stuff changes, we | 196 | * stuff has been fixed to work with that. If the locking stuff changes, we |
| 197 | * need to re-evaluate the acl locking stuff. | 197 | * need to re-evaluate the acl locking stuff. |
| 198 | */ | 198 | */ |
| 199 | int btrfs_init_acl(struct inode *inode, struct inode *dir) | 199 | int btrfs_init_acl(struct btrfs_trans_handle *trans, |
| 200 | struct inode *inode, struct inode *dir) | ||
| 200 | { | 201 | { |
| 201 | struct posix_acl *acl = NULL; | 202 | struct posix_acl *acl = NULL; |
| 202 | int ret = 0; | 203 | int ret = 0; |
| @@ -221,7 +222,8 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir) | |||
| 221 | mode_t mode; | 222 | mode_t mode; |
| 222 | 223 | ||
| 223 | if (S_ISDIR(inode->i_mode)) { | 224 | if (S_ISDIR(inode->i_mode)) { |
| 224 | ret = btrfs_set_acl(inode, acl, ACL_TYPE_DEFAULT); | 225 | ret = btrfs_set_acl(trans, inode, acl, |
| 226 | ACL_TYPE_DEFAULT); | ||
| 225 | if (ret) | 227 | if (ret) |
| 226 | goto failed; | 228 | goto failed; |
| 227 | } | 229 | } |
| @@ -236,7 +238,7 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir) | |||
| 236 | inode->i_mode = mode; | 238 | inode->i_mode = mode; |
| 237 | if (ret > 0) { | 239 | if (ret > 0) { |
| 238 | /* we need an acl */ | 240 | /* we need an acl */ |
| 239 | ret = btrfs_set_acl(inode, clone, | 241 | ret = btrfs_set_acl(trans, inode, clone, |
| 240 | ACL_TYPE_ACCESS); | 242 | ACL_TYPE_ACCESS); |
| 241 | } | 243 | } |
| 242 | } | 244 | } |
| @@ -269,7 +271,7 @@ int btrfs_acl_chmod(struct inode *inode) | |||
| 269 | 271 | ||
| 270 | ret = posix_acl_chmod_masq(clone, inode->i_mode); | 272 | ret = posix_acl_chmod_masq(clone, inode->i_mode); |
| 271 | if (!ret) | 273 | if (!ret) |
| 272 | ret = btrfs_set_acl(inode, clone, ACL_TYPE_ACCESS); | 274 | ret = btrfs_set_acl(NULL, inode, clone, ACL_TYPE_ACCESS); |
| 273 | 275 | ||
| 274 | posix_acl_release(clone); | 276 | posix_acl_release(clone); |
| 275 | 277 | ||
| @@ -297,7 +299,8 @@ int btrfs_acl_chmod(struct inode *inode) | |||
| 297 | return 0; | 299 | return 0; |
| 298 | } | 300 | } |
| 299 | 301 | ||
| 300 | int btrfs_init_acl(struct inode *inode, struct inode *dir) | 302 | int btrfs_init_acl(struct btrfs_trans_handle *trans, |
| 303 | struct inode *inode, struct inode *dir) | ||
| 301 | { | 304 | { |
| 302 | return 0; | 305 | return 0; |
| 303 | } | 306 | } |
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index f6783a42f010..3f1f50d9d916 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
| @@ -44,9 +44,6 @@ struct btrfs_inode { | |||
| 44 | */ | 44 | */ |
| 45 | struct extent_io_tree io_failure_tree; | 45 | struct extent_io_tree io_failure_tree; |
| 46 | 46 | ||
| 47 | /* held while inesrting or deleting extents from files */ | ||
| 48 | struct mutex extent_mutex; | ||
| 49 | |||
| 50 | /* held while logging the inode in tree-log.c */ | 47 | /* held while logging the inode in tree-log.c */ |
| 51 | struct mutex log_mutex; | 48 | struct mutex log_mutex; |
| 52 | 49 | ||
| @@ -166,7 +163,7 @@ static inline struct btrfs_inode *BTRFS_I(struct inode *inode) | |||
| 166 | 163 | ||
| 167 | static inline void btrfs_i_size_write(struct inode *inode, u64 size) | 164 | static inline void btrfs_i_size_write(struct inode *inode, u64 size) |
| 168 | { | 165 | { |
| 169 | inode->i_size = size; | 166 | i_size_write(inode, size); |
| 170 | BTRFS_I(inode)->disk_i_size = size; | 167 | BTRFS_I(inode)->disk_i_size = size; |
| 171 | } | 168 | } |
| 172 | 169 | ||
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index ec96f3a6d536..c4bc570a396e 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
| @@ -37,6 +37,11 @@ static int balance_node_right(struct btrfs_trans_handle *trans, | |||
| 37 | struct extent_buffer *src_buf); | 37 | struct extent_buffer *src_buf); |
| 38 | static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, | 38 | static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
| 39 | struct btrfs_path *path, int level, int slot); | 39 | struct btrfs_path *path, int level, int slot); |
| 40 | static int setup_items_for_insert(struct btrfs_trans_handle *trans, | ||
| 41 | struct btrfs_root *root, struct btrfs_path *path, | ||
| 42 | struct btrfs_key *cpu_key, u32 *data_size, | ||
| 43 | u32 total_data, u32 total_size, int nr); | ||
| 44 | |||
| 40 | 45 | ||
| 41 | struct btrfs_path *btrfs_alloc_path(void) | 46 | struct btrfs_path *btrfs_alloc_path(void) |
| 42 | { | 47 | { |
| @@ -451,9 +456,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, | |||
| 451 | extent_buffer_get(cow); | 456 | extent_buffer_get(cow); |
| 452 | spin_unlock(&root->node_lock); | 457 | spin_unlock(&root->node_lock); |
| 453 | 458 | ||
| 454 | btrfs_free_extent(trans, root, buf->start, buf->len, | 459 | btrfs_free_tree_block(trans, root, buf->start, buf->len, |
| 455 | parent_start, root->root_key.objectid, | 460 | parent_start, root->root_key.objectid, level); |
| 456 | level, 0); | ||
| 457 | free_extent_buffer(buf); | 461 | free_extent_buffer(buf); |
| 458 | add_root_to_dirty_list(root); | 462 | add_root_to_dirty_list(root); |
| 459 | } else { | 463 | } else { |
| @@ -468,9 +472,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, | |||
| 468 | btrfs_set_node_ptr_generation(parent, parent_slot, | 472 | btrfs_set_node_ptr_generation(parent, parent_slot, |
| 469 | trans->transid); | 473 | trans->transid); |
| 470 | btrfs_mark_buffer_dirty(parent); | 474 | btrfs_mark_buffer_dirty(parent); |
| 471 | btrfs_free_extent(trans, root, buf->start, buf->len, | 475 | btrfs_free_tree_block(trans, root, buf->start, buf->len, |
| 472 | parent_start, root->root_key.objectid, | 476 | parent_start, root->root_key.objectid, level); |
| 473 | level, 0); | ||
| 474 | } | 477 | } |
| 475 | if (unlock_orig) | 478 | if (unlock_orig) |
| 476 | btrfs_tree_unlock(buf); | 479 | btrfs_tree_unlock(buf); |
| @@ -1030,8 +1033,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
| 1030 | btrfs_tree_unlock(mid); | 1033 | btrfs_tree_unlock(mid); |
| 1031 | /* once for the path */ | 1034 | /* once for the path */ |
| 1032 | free_extent_buffer(mid); | 1035 | free_extent_buffer(mid); |
| 1033 | ret = btrfs_free_extent(trans, root, mid->start, mid->len, | 1036 | ret = btrfs_free_tree_block(trans, root, mid->start, mid->len, |
| 1034 | 0, root->root_key.objectid, level, 1); | 1037 | 0, root->root_key.objectid, level); |
| 1035 | /* once for the root ptr */ | 1038 | /* once for the root ptr */ |
| 1036 | free_extent_buffer(mid); | 1039 | free_extent_buffer(mid); |
| 1037 | return ret; | 1040 | return ret; |
| @@ -1095,10 +1098,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
| 1095 | 1); | 1098 | 1); |
| 1096 | if (wret) | 1099 | if (wret) |
| 1097 | ret = wret; | 1100 | ret = wret; |
| 1098 | wret = btrfs_free_extent(trans, root, bytenr, | 1101 | wret = btrfs_free_tree_block(trans, root, |
| 1099 | blocksize, 0, | 1102 | bytenr, blocksize, 0, |
| 1100 | root->root_key.objectid, | 1103 | root->root_key.objectid, |
| 1101 | level, 0); | 1104 | level); |
| 1102 | if (wret) | 1105 | if (wret) |
| 1103 | ret = wret; | 1106 | ret = wret; |
| 1104 | } else { | 1107 | } else { |
| @@ -1143,9 +1146,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
| 1143 | wret = del_ptr(trans, root, path, level + 1, pslot); | 1146 | wret = del_ptr(trans, root, path, level + 1, pslot); |
| 1144 | if (wret) | 1147 | if (wret) |
| 1145 | ret = wret; | 1148 | ret = wret; |
| 1146 | wret = btrfs_free_extent(trans, root, bytenr, blocksize, | 1149 | wret = btrfs_free_tree_block(trans, root, bytenr, blocksize, |
| 1147 | 0, root->root_key.objectid, | 1150 | 0, root->root_key.objectid, level); |
| 1148 | level, 0); | ||
| 1149 | if (wret) | 1151 | if (wret) |
| 1150 | ret = wret; | 1152 | ret = wret; |
| 1151 | } else { | 1153 | } else { |
| @@ -2997,75 +2999,85 @@ again: | |||
| 2997 | return ret; | 2999 | return ret; |
| 2998 | } | 3000 | } |
| 2999 | 3001 | ||
| 3000 | /* | 3002 | static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, |
| 3001 | * This function splits a single item into two items, | 3003 | struct btrfs_root *root, |
| 3002 | * giving 'new_key' to the new item and splitting the | 3004 | struct btrfs_path *path, int ins_len) |
| 3003 | * old one at split_offset (from the start of the item). | ||
| 3004 | * | ||
| 3005 | * The path may be released by this operation. After | ||
| 3006 | * the split, the path is pointing to the old item. The | ||
| 3007 | * new item is going to be in the same node as the old one. | ||
| 3008 | * | ||
| 3009 | * Note, the item being split must be smaller enough to live alone on | ||
| 3010 | * a tree block with room for one extra struct btrfs_item | ||
| 3011 | * | ||
| 3012 | * This allows us to split the item in place, keeping a lock on the | ||
| 3013 | * leaf the entire time. | ||
| 3014 | */ | ||
| 3015 | int btrfs_split_item(struct btrfs_trans_handle *trans, | ||
| 3016 | struct btrfs_root *root, | ||
| 3017 | struct btrfs_path *path, | ||
| 3018 | struct btrfs_key *new_key, | ||
| 3019 | unsigned long split_offset) | ||
| 3020 | { | 3005 | { |
| 3021 | u32 item_size; | 3006 | struct btrfs_key key; |
| 3022 | struct extent_buffer *leaf; | 3007 | struct extent_buffer *leaf; |
| 3023 | struct btrfs_key orig_key; | 3008 | struct btrfs_file_extent_item *fi; |
| 3024 | struct btrfs_item *item; | 3009 | u64 extent_len = 0; |
| 3025 | struct btrfs_item *new_item; | 3010 | u32 item_size; |
| 3026 | int ret = 0; | 3011 | int ret; |
| 3027 | int slot; | ||
| 3028 | u32 nritems; | ||
| 3029 | u32 orig_offset; | ||
| 3030 | struct btrfs_disk_key disk_key; | ||
| 3031 | char *buf; | ||
| 3032 | 3012 | ||
| 3033 | leaf = path->nodes[0]; | 3013 | leaf = path->nodes[0]; |
| 3034 | btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]); | 3014 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); |
| 3035 | if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item)) | 3015 | |
| 3036 | goto split; | 3016 | BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && |
| 3017 | key.type != BTRFS_EXTENT_CSUM_KEY); | ||
| 3018 | |||
| 3019 | if (btrfs_leaf_free_space(root, leaf) >= ins_len) | ||
| 3020 | return 0; | ||
| 3037 | 3021 | ||
| 3038 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | 3022 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); |
| 3023 | if (key.type == BTRFS_EXTENT_DATA_KEY) { | ||
| 3024 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
| 3025 | struct btrfs_file_extent_item); | ||
| 3026 | extent_len = btrfs_file_extent_num_bytes(leaf, fi); | ||
| 3027 | } | ||
| 3039 | btrfs_release_path(root, path); | 3028 | btrfs_release_path(root, path); |
| 3040 | 3029 | ||
| 3041 | path->search_for_split = 1; | ||
| 3042 | path->keep_locks = 1; | 3030 | path->keep_locks = 1; |
| 3043 | 3031 | path->search_for_split = 1; | |
| 3044 | ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1); | 3032 | ret = btrfs_search_slot(trans, root, &key, path, 0, 1); |
| 3045 | path->search_for_split = 0; | 3033 | path->search_for_split = 0; |
| 3034 | if (ret < 0) | ||
| 3035 | goto err; | ||
| 3046 | 3036 | ||
| 3037 | ret = -EAGAIN; | ||
| 3038 | leaf = path->nodes[0]; | ||
| 3047 | /* if our item isn't there or got smaller, return now */ | 3039 | /* if our item isn't there or got smaller, return now */ |
| 3048 | if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0], | 3040 | if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0])) |
| 3049 | path->slots[0])) { | 3041 | goto err; |
| 3050 | path->keep_locks = 0; | 3042 | |
| 3051 | return -EAGAIN; | 3043 | if (key.type == BTRFS_EXTENT_DATA_KEY) { |
| 3044 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
| 3045 | struct btrfs_file_extent_item); | ||
| 3046 | if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) | ||
| 3047 | goto err; | ||
| 3052 | } | 3048 | } |
| 3053 | 3049 | ||
| 3054 | btrfs_set_path_blocking(path); | 3050 | btrfs_set_path_blocking(path); |
| 3055 | ret = split_leaf(trans, root, &orig_key, path, | 3051 | ret = split_leaf(trans, root, &key, path, ins_len, 1); |
| 3056 | sizeof(struct btrfs_item), 1); | ||
| 3057 | path->keep_locks = 0; | ||
| 3058 | BUG_ON(ret); | 3052 | BUG_ON(ret); |
| 3059 | 3053 | ||
| 3054 | path->keep_locks = 0; | ||
| 3060 | btrfs_unlock_up_safe(path, 1); | 3055 | btrfs_unlock_up_safe(path, 1); |
| 3056 | return 0; | ||
| 3057 | err: | ||
| 3058 | path->keep_locks = 0; | ||
| 3059 | return ret; | ||
| 3060 | } | ||
| 3061 | |||
| 3062 | static noinline int split_item(struct btrfs_trans_handle *trans, | ||
| 3063 | struct btrfs_root *root, | ||
| 3064 | struct btrfs_path *path, | ||
| 3065 | struct btrfs_key *new_key, | ||
| 3066 | unsigned long split_offset) | ||
| 3067 | { | ||
| 3068 | struct extent_buffer *leaf; | ||
| 3069 | struct btrfs_item *item; | ||
| 3070 | struct btrfs_item *new_item; | ||
| 3071 | int slot; | ||
| 3072 | char *buf; | ||
| 3073 | u32 nritems; | ||
| 3074 | u32 item_size; | ||
| 3075 | u32 orig_offset; | ||
| 3076 | struct btrfs_disk_key disk_key; | ||
| 3077 | |||
| 3061 | leaf = path->nodes[0]; | 3078 | leaf = path->nodes[0]; |
| 3062 | BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item)); | 3079 | BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item)); |
| 3063 | 3080 | ||
| 3064 | split: | ||
| 3065 | /* | ||
| 3066 | * make sure any changes to the path from split_leaf leave it | ||
| 3067 | * in a blocking state | ||
| 3068 | */ | ||
| 3069 | btrfs_set_path_blocking(path); | 3081 | btrfs_set_path_blocking(path); |
| 3070 | 3082 | ||
| 3071 | item = btrfs_item_nr(leaf, path->slots[0]); | 3083 | item = btrfs_item_nr(leaf, path->slots[0]); |
| @@ -3073,19 +3085,19 @@ split: | |||
| 3073 | item_size = btrfs_item_size(leaf, item); | 3085 | item_size = btrfs_item_size(leaf, item); |
| 3074 | 3086 | ||
| 3075 | buf = kmalloc(item_size, GFP_NOFS); | 3087 | buf = kmalloc(item_size, GFP_NOFS); |
| 3088 | if (!buf) | ||
| 3089 | return -ENOMEM; | ||
| 3090 | |||
| 3076 | read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, | 3091 | read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, |
| 3077 | path->slots[0]), item_size); | 3092 | path->slots[0]), item_size); |
| 3078 | slot = path->slots[0] + 1; | ||
| 3079 | leaf = path->nodes[0]; | ||
| 3080 | 3093 | ||
| 3094 | slot = path->slots[0] + 1; | ||
| 3081 | nritems = btrfs_header_nritems(leaf); | 3095 | nritems = btrfs_header_nritems(leaf); |
| 3082 | |||
| 3083 | if (slot != nritems) { | 3096 | if (slot != nritems) { |
| 3084 | /* shift the items */ | 3097 | /* shift the items */ |
| 3085 | memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1), | 3098 | memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1), |
| 3086 | btrfs_item_nr_offset(slot), | 3099 | btrfs_item_nr_offset(slot), |
| 3087 | (nritems - slot) * sizeof(struct btrfs_item)); | 3100 | (nritems - slot) * sizeof(struct btrfs_item)); |
| 3088 | |||
| 3089 | } | 3101 | } |
| 3090 | 3102 | ||
| 3091 | btrfs_cpu_key_to_disk(&disk_key, new_key); | 3103 | btrfs_cpu_key_to_disk(&disk_key, new_key); |
| @@ -3113,16 +3125,81 @@ split: | |||
| 3113 | item_size - split_offset); | 3125 | item_size - split_offset); |
| 3114 | btrfs_mark_buffer_dirty(leaf); | 3126 | btrfs_mark_buffer_dirty(leaf); |
| 3115 | 3127 | ||
| 3116 | ret = 0; | 3128 | BUG_ON(btrfs_leaf_free_space(root, leaf) < 0); |
| 3117 | if (btrfs_leaf_free_space(root, leaf) < 0) { | ||
| 3118 | btrfs_print_leaf(root, leaf); | ||
| 3119 | BUG(); | ||
| 3120 | } | ||
| 3121 | kfree(buf); | 3129 | kfree(buf); |
| 3130 | return 0; | ||
| 3131 | } | ||
| 3132 | |||
| 3133 | /* | ||
| 3134 | * This function splits a single item into two items, | ||
| 3135 | * giving 'new_key' to the new item and splitting the | ||
| 3136 | * old one at split_offset (from the start of the item). | ||
| 3137 | * | ||
| 3138 | * The path may be released by this operation. After | ||
| 3139 | * the split, the path is pointing to the old item. The | ||
| 3140 | * new item is going to be in the same node as the old one. | ||
| 3141 | * | ||
| 3142 | * Note, the item being split must be smaller enough to live alone on | ||
| 3143 | * a tree block with room for one extra struct btrfs_item | ||
| 3144 | * | ||
| 3145 | * This allows us to split the item in place, keeping a lock on the | ||
| 3146 | * leaf the entire time. | ||
| 3147 | */ | ||
| 3148 | int btrfs_split_item(struct btrfs_trans_handle *trans, | ||
| 3149 | struct btrfs_root *root, | ||
| 3150 | struct btrfs_path *path, | ||
| 3151 | struct btrfs_key *new_key, | ||
| 3152 | unsigned long split_offset) | ||
| 3153 | { | ||
| 3154 | int ret; | ||
| 3155 | ret = setup_leaf_for_split(trans, root, path, | ||
| 3156 | sizeof(struct btrfs_item)); | ||
| 3157 | if (ret) | ||
| 3158 | return ret; | ||
| 3159 | |||
| 3160 | ret = split_item(trans, root, path, new_key, split_offset); | ||
| 3122 | return ret; | 3161 | return ret; |
| 3123 | } | 3162 | } |
| 3124 | 3163 | ||
| 3125 | /* | 3164 | /* |
| 3165 | * This function duplicate a item, giving 'new_key' to the new item. | ||
| 3166 | * It guarantees both items live in the same tree leaf and the new item | ||
| 3167 | * is contiguous with the original item. | ||
| 3168 | * | ||
| 3169 | * This allows us to split file extent in place, keeping a lock on the | ||
| 3170 | * leaf the entire time. | ||
| 3171 | */ | ||
| 3172 | int btrfs_duplicate_item(struct btrfs_trans_handle *trans, | ||
| 3173 | struct btrfs_root *root, | ||
| 3174 | struct btrfs_path *path, | ||
| 3175 | struct btrfs_key *new_key) | ||
| 3176 | { | ||
| 3177 | struct extent_buffer *leaf; | ||
| 3178 | int ret; | ||
| 3179 | u32 item_size; | ||
| 3180 | |||
| 3181 | leaf = path->nodes[0]; | ||
| 3182 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | ||
| 3183 | ret = setup_leaf_for_split(trans, root, path, | ||
| 3184 | item_size + sizeof(struct btrfs_item)); | ||
| 3185 | if (ret) | ||
| 3186 | return ret; | ||
| 3187 | |||
| 3188 | path->slots[0]++; | ||
| 3189 | ret = setup_items_for_insert(trans, root, path, new_key, &item_size, | ||
| 3190 | item_size, item_size + | ||
| 3191 | sizeof(struct btrfs_item), 1); | ||
| 3192 | BUG_ON(ret); | ||
| 3193 | |||
| 3194 | leaf = path->nodes[0]; | ||
| 3195 | memcpy_extent_buffer(leaf, | ||
| 3196 | btrfs_item_ptr_offset(leaf, path->slots[0]), | ||
| 3197 | btrfs_item_ptr_offset(leaf, path->slots[0] - 1), | ||
| 3198 | item_size); | ||
| 3199 | return 0; | ||
| 3200 | } | ||
| 3201 | |||
| 3202 | /* | ||
| 3126 | * make the item pointed to by the path smaller. new_size indicates | 3203 | * make the item pointed to by the path smaller. new_size indicates |
| 3127 | * how small to make it, and from_end tells us if we just chop bytes | 3204 | * how small to make it, and from_end tells us if we just chop bytes |
| 3128 | * off the end of the item or if we shift the item to chop bytes off | 3205 | * off the end of the item or if we shift the item to chop bytes off |
| @@ -3714,8 +3791,8 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans, | |||
| 3714 | */ | 3791 | */ |
| 3715 | btrfs_unlock_up_safe(path, 0); | 3792 | btrfs_unlock_up_safe(path, 0); |
| 3716 | 3793 | ||
| 3717 | ret = btrfs_free_extent(trans, root, leaf->start, leaf->len, | 3794 | ret = btrfs_free_tree_block(trans, root, leaf->start, leaf->len, |
| 3718 | 0, root->root_key.objectid, 0, 0); | 3795 | 0, root->root_key.objectid, 0); |
| 3719 | return ret; | 3796 | return ret; |
| 3720 | } | 3797 | } |
| 3721 | /* | 3798 | /* |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 444b3e9b92a4..9f806dd04c27 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -310,6 +310,9 @@ struct btrfs_header { | |||
| 310 | #define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ | 310 | #define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ |
| 311 | sizeof(struct btrfs_item) - \ | 311 | sizeof(struct btrfs_item) - \ |
| 312 | sizeof(struct btrfs_file_extent_item)) | 312 | sizeof(struct btrfs_file_extent_item)) |
| 313 | #define BTRFS_MAX_XATTR_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ | ||
| 314 | sizeof(struct btrfs_item) -\ | ||
| 315 | sizeof(struct btrfs_dir_item)) | ||
| 313 | 316 | ||
| 314 | 317 | ||
| 315 | /* | 318 | /* |
| @@ -859,8 +862,9 @@ struct btrfs_fs_info { | |||
| 859 | struct mutex ordered_operations_mutex; | 862 | struct mutex ordered_operations_mutex; |
| 860 | struct rw_semaphore extent_commit_sem; | 863 | struct rw_semaphore extent_commit_sem; |
| 861 | 864 | ||
| 862 | struct rw_semaphore subvol_sem; | 865 | struct rw_semaphore cleanup_work_sem; |
| 863 | 866 | ||
| 867 | struct rw_semaphore subvol_sem; | ||
| 864 | struct srcu_struct subvol_srcu; | 868 | struct srcu_struct subvol_srcu; |
| 865 | 869 | ||
| 866 | struct list_head trans_list; | 870 | struct list_head trans_list; |
| @@ -868,6 +872,9 @@ struct btrfs_fs_info { | |||
| 868 | struct list_head dead_roots; | 872 | struct list_head dead_roots; |
| 869 | struct list_head caching_block_groups; | 873 | struct list_head caching_block_groups; |
| 870 | 874 | ||
| 875 | spinlock_t delayed_iput_lock; | ||
| 876 | struct list_head delayed_iputs; | ||
| 877 | |||
| 871 | atomic_t nr_async_submits; | 878 | atomic_t nr_async_submits; |
| 872 | atomic_t async_submit_draining; | 879 | atomic_t async_submit_draining; |
| 873 | atomic_t nr_async_bios; | 880 | atomic_t nr_async_bios; |
| @@ -1034,12 +1041,12 @@ struct btrfs_root { | |||
| 1034 | int ref_cows; | 1041 | int ref_cows; |
| 1035 | int track_dirty; | 1042 | int track_dirty; |
| 1036 | int in_radix; | 1043 | int in_radix; |
| 1044 | int clean_orphans; | ||
| 1037 | 1045 | ||
| 1038 | u64 defrag_trans_start; | 1046 | u64 defrag_trans_start; |
| 1039 | struct btrfs_key defrag_progress; | 1047 | struct btrfs_key defrag_progress; |
| 1040 | struct btrfs_key defrag_max; | 1048 | struct btrfs_key defrag_max; |
| 1041 | int defrag_running; | 1049 | int defrag_running; |
| 1042 | int defrag_level; | ||
| 1043 | char *name; | 1050 | char *name; |
| 1044 | int in_sysfs; | 1051 | int in_sysfs; |
| 1045 | 1052 | ||
| @@ -1975,6 +1982,10 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, | |||
| 1975 | u64 parent, u64 root_objectid, | 1982 | u64 parent, u64 root_objectid, |
| 1976 | struct btrfs_disk_key *key, int level, | 1983 | struct btrfs_disk_key *key, int level, |
| 1977 | u64 hint, u64 empty_size); | 1984 | u64 hint, u64 empty_size); |
| 1985 | int btrfs_free_tree_block(struct btrfs_trans_handle *trans, | ||
| 1986 | struct btrfs_root *root, | ||
| 1987 | u64 bytenr, u32 blocksize, | ||
| 1988 | u64 parent, u64 root_objectid, int level); | ||
| 1978 | struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, | 1989 | struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, |
| 1979 | struct btrfs_root *root, | 1990 | struct btrfs_root *root, |
| 1980 | u64 bytenr, u32 blocksize, | 1991 | u64 bytenr, u32 blocksize, |
| @@ -2089,6 +2100,10 @@ int btrfs_split_item(struct btrfs_trans_handle *trans, | |||
| 2089 | struct btrfs_path *path, | 2100 | struct btrfs_path *path, |
| 2090 | struct btrfs_key *new_key, | 2101 | struct btrfs_key *new_key, |
| 2091 | unsigned long split_offset); | 2102 | unsigned long split_offset); |
| 2103 | int btrfs_duplicate_item(struct btrfs_trans_handle *trans, | ||
| 2104 | struct btrfs_root *root, | ||
| 2105 | struct btrfs_path *path, | ||
| 2106 | struct btrfs_key *new_key); | ||
| 2092 | int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root | 2107 | int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root |
| 2093 | *root, struct btrfs_key *key, struct btrfs_path *p, int | 2108 | *root, struct btrfs_key *key, struct btrfs_path *p, int |
| 2094 | ins_len, int cow); | 2109 | ins_len, int cow); |
| @@ -2196,9 +2211,10 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, | |||
| 2196 | struct btrfs_path *path, | 2211 | struct btrfs_path *path, |
| 2197 | struct btrfs_dir_item *di); | 2212 | struct btrfs_dir_item *di); |
| 2198 | int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, | 2213 | int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, |
| 2199 | struct btrfs_root *root, const char *name, | 2214 | struct btrfs_root *root, |
| 2200 | u16 name_len, const void *data, u16 data_len, | 2215 | struct btrfs_path *path, u64 objectid, |
| 2201 | u64 dir); | 2216 | const char *name, u16 name_len, |
| 2217 | const void *data, u16 data_len); | ||
| 2202 | struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, | 2218 | struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, |
| 2203 | struct btrfs_root *root, | 2219 | struct btrfs_root *root, |
| 2204 | struct btrfs_path *path, u64 dir, | 2220 | struct btrfs_path *path, u64 dir, |
| @@ -2292,7 +2308,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, | |||
| 2292 | struct inode *inode, u64 new_size, | 2308 | struct inode *inode, u64 new_size, |
| 2293 | u32 min_type); | 2309 | u32 min_type); |
| 2294 | 2310 | ||
| 2295 | int btrfs_start_delalloc_inodes(struct btrfs_root *root); | 2311 | int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); |
| 2296 | int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end); | 2312 | int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end); |
| 2297 | int btrfs_writepages(struct address_space *mapping, | 2313 | int btrfs_writepages(struct address_space *mapping, |
| 2298 | struct writeback_control *wbc); | 2314 | struct writeback_control *wbc); |
| @@ -2332,6 +2348,8 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); | |||
| 2332 | void btrfs_orphan_cleanup(struct btrfs_root *root); | 2348 | void btrfs_orphan_cleanup(struct btrfs_root *root); |
| 2333 | int btrfs_cont_expand(struct inode *inode, loff_t size); | 2349 | int btrfs_cont_expand(struct inode *inode, loff_t size); |
| 2334 | int btrfs_invalidate_inodes(struct btrfs_root *root); | 2350 | int btrfs_invalidate_inodes(struct btrfs_root *root); |
| 2351 | void btrfs_add_delayed_iput(struct inode *inode); | ||
| 2352 | void btrfs_run_delayed_iputs(struct btrfs_root *root); | ||
| 2335 | extern const struct dentry_operations btrfs_dentry_operations; | 2353 | extern const struct dentry_operations btrfs_dentry_operations; |
| 2336 | 2354 | ||
| 2337 | /* ioctl.c */ | 2355 | /* ioctl.c */ |
| @@ -2345,12 +2363,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 2345 | int skip_pinned); | 2363 | int skip_pinned); |
| 2346 | int btrfs_check_file(struct btrfs_root *root, struct inode *inode); | 2364 | int btrfs_check_file(struct btrfs_root *root, struct inode *inode); |
| 2347 | extern const struct file_operations btrfs_file_operations; | 2365 | extern const struct file_operations btrfs_file_operations; |
| 2348 | int btrfs_drop_extents(struct btrfs_trans_handle *trans, | 2366 | int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, |
| 2349 | struct btrfs_root *root, struct inode *inode, | 2367 | u64 start, u64 end, u64 *hint_byte, int drop_cache); |
| 2350 | u64 start, u64 end, u64 locked_end, | ||
| 2351 | u64 inline_limit, u64 *hint_block, int drop_cache); | ||
| 2352 | int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, | 2368 | int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, |
| 2353 | struct btrfs_root *root, | ||
| 2354 | struct inode *inode, u64 start, u64 end); | 2369 | struct inode *inode, u64 start, u64 end); |
| 2355 | int btrfs_release_file(struct inode *inode, struct file *file); | 2370 | int btrfs_release_file(struct inode *inode, struct file *file); |
| 2356 | 2371 | ||
| @@ -2380,7 +2395,8 @@ int btrfs_check_acl(struct inode *inode, int mask); | |||
| 2380 | #else | 2395 | #else |
| 2381 | #define btrfs_check_acl NULL | 2396 | #define btrfs_check_acl NULL |
| 2382 | #endif | 2397 | #endif |
| 2383 | int btrfs_init_acl(struct inode *inode, struct inode *dir); | 2398 | int btrfs_init_acl(struct btrfs_trans_handle *trans, |
| 2399 | struct inode *inode, struct inode *dir); | ||
| 2384 | int btrfs_acl_chmod(struct inode *inode); | 2400 | int btrfs_acl_chmod(struct inode *inode); |
| 2385 | 2401 | ||
| 2386 | /* relocation.c */ | 2402 | /* relocation.c */ |
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index f3a6075519cc..e9103b3baa49 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c | |||
| @@ -68,12 +68,12 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle | |||
| 68 | * into the tree | 68 | * into the tree |
| 69 | */ | 69 | */ |
| 70 | int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, | 70 | int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, |
| 71 | struct btrfs_root *root, const char *name, | 71 | struct btrfs_root *root, |
| 72 | u16 name_len, const void *data, u16 data_len, | 72 | struct btrfs_path *path, u64 objectid, |
| 73 | u64 dir) | 73 | const char *name, u16 name_len, |
| 74 | const void *data, u16 data_len) | ||
| 74 | { | 75 | { |
| 75 | int ret = 0; | 76 | int ret = 0; |
| 76 | struct btrfs_path *path; | ||
| 77 | struct btrfs_dir_item *dir_item; | 77 | struct btrfs_dir_item *dir_item; |
| 78 | unsigned long name_ptr, data_ptr; | 78 | unsigned long name_ptr, data_ptr; |
| 79 | struct btrfs_key key, location; | 79 | struct btrfs_key key, location; |
| @@ -81,15 +81,11 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, | |||
| 81 | struct extent_buffer *leaf; | 81 | struct extent_buffer *leaf; |
| 82 | u32 data_size; | 82 | u32 data_size; |
| 83 | 83 | ||
| 84 | key.objectid = dir; | 84 | BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)); |
| 85 | |||
| 86 | key.objectid = objectid; | ||
| 85 | btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); | 87 | btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); |
| 86 | key.offset = btrfs_name_hash(name, name_len); | 88 | key.offset = btrfs_name_hash(name, name_len); |
| 87 | path = btrfs_alloc_path(); | ||
| 88 | if (!path) | ||
| 89 | return -ENOMEM; | ||
| 90 | if (name_len + data_len + sizeof(struct btrfs_dir_item) > | ||
| 91 | BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item)) | ||
| 92 | return -ENOSPC; | ||
| 93 | 89 | ||
| 94 | data_size = sizeof(*dir_item) + name_len + data_len; | 90 | data_size = sizeof(*dir_item) + name_len + data_len; |
| 95 | dir_item = insert_with_overflow(trans, root, path, &key, data_size, | 91 | dir_item = insert_with_overflow(trans, root, path, &key, data_size, |
| @@ -117,7 +113,6 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, | |||
| 117 | write_extent_buffer(leaf, data, data_ptr, data_len); | 113 | write_extent_buffer(leaf, data, data_ptr, data_len); |
| 118 | btrfs_mark_buffer_dirty(path->nodes[0]); | 114 | btrfs_mark_buffer_dirty(path->nodes[0]); |
| 119 | 115 | ||
| 120 | btrfs_free_path(path); | ||
| 121 | return ret; | 116 | return ret; |
| 122 | } | 117 | } |
| 123 | 118 | ||
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 02b6afbd7450..009e3bd18f23 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -892,6 +892,8 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, | |||
| 892 | root->stripesize = stripesize; | 892 | root->stripesize = stripesize; |
| 893 | root->ref_cows = 0; | 893 | root->ref_cows = 0; |
| 894 | root->track_dirty = 0; | 894 | root->track_dirty = 0; |
| 895 | root->in_radix = 0; | ||
| 896 | root->clean_orphans = 0; | ||
| 895 | 897 | ||
| 896 | root->fs_info = fs_info; | 898 | root->fs_info = fs_info; |
| 897 | root->objectid = objectid; | 899 | root->objectid = objectid; |
| @@ -928,7 +930,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, | |||
| 928 | root->defrag_trans_start = fs_info->generation; | 930 | root->defrag_trans_start = fs_info->generation; |
| 929 | init_completion(&root->kobj_unregister); | 931 | init_completion(&root->kobj_unregister); |
| 930 | root->defrag_running = 0; | 932 | root->defrag_running = 0; |
| 931 | root->defrag_level = 0; | ||
| 932 | root->root_key.objectid = objectid; | 933 | root->root_key.objectid = objectid; |
| 933 | root->anon_super.s_root = NULL; | 934 | root->anon_super.s_root = NULL; |
| 934 | root->anon_super.s_dev = 0; | 935 | root->anon_super.s_dev = 0; |
| @@ -980,12 +981,12 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, | |||
| 980 | 981 | ||
| 981 | while (1) { | 982 | while (1) { |
| 982 | ret = find_first_extent_bit(&log_root_tree->dirty_log_pages, | 983 | ret = find_first_extent_bit(&log_root_tree->dirty_log_pages, |
| 983 | 0, &start, &end, EXTENT_DIRTY); | 984 | 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW); |
| 984 | if (ret) | 985 | if (ret) |
| 985 | break; | 986 | break; |
| 986 | 987 | ||
| 987 | clear_extent_dirty(&log_root_tree->dirty_log_pages, | 988 | clear_extent_bits(&log_root_tree->dirty_log_pages, start, end, |
| 988 | start, end, GFP_NOFS); | 989 | EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); |
| 989 | } | 990 | } |
| 990 | eb = fs_info->log_root_tree->node; | 991 | eb = fs_info->log_root_tree->node; |
| 991 | 992 | ||
| @@ -1210,8 +1211,10 @@ again: | |||
| 1210 | ret = radix_tree_insert(&fs_info->fs_roots_radix, | 1211 | ret = radix_tree_insert(&fs_info->fs_roots_radix, |
| 1211 | (unsigned long)root->root_key.objectid, | 1212 | (unsigned long)root->root_key.objectid, |
| 1212 | root); | 1213 | root); |
| 1213 | if (ret == 0) | 1214 | if (ret == 0) { |
| 1214 | root->in_radix = 1; | 1215 | root->in_radix = 1; |
| 1216 | root->clean_orphans = 1; | ||
| 1217 | } | ||
| 1215 | spin_unlock(&fs_info->fs_roots_radix_lock); | 1218 | spin_unlock(&fs_info->fs_roots_radix_lock); |
| 1216 | radix_tree_preload_end(); | 1219 | radix_tree_preload_end(); |
| 1217 | if (ret) { | 1220 | if (ret) { |
| @@ -1225,10 +1228,6 @@ again: | |||
| 1225 | ret = btrfs_find_dead_roots(fs_info->tree_root, | 1228 | ret = btrfs_find_dead_roots(fs_info->tree_root, |
| 1226 | root->root_key.objectid); | 1229 | root->root_key.objectid); |
| 1227 | WARN_ON(ret); | 1230 | WARN_ON(ret); |
| 1228 | |||
| 1229 | if (!(fs_info->sb->s_flags & MS_RDONLY)) | ||
| 1230 | btrfs_orphan_cleanup(root); | ||
| 1231 | |||
| 1232 | return root; | 1231 | return root; |
| 1233 | fail: | 1232 | fail: |
| 1234 | free_fs_root(root); | 1233 | free_fs_root(root); |
| @@ -1477,6 +1476,7 @@ static int cleaner_kthread(void *arg) | |||
| 1477 | 1476 | ||
| 1478 | if (!(root->fs_info->sb->s_flags & MS_RDONLY) && | 1477 | if (!(root->fs_info->sb->s_flags & MS_RDONLY) && |
| 1479 | mutex_trylock(&root->fs_info->cleaner_mutex)) { | 1478 | mutex_trylock(&root->fs_info->cleaner_mutex)) { |
| 1479 | btrfs_run_delayed_iputs(root); | ||
| 1480 | btrfs_clean_old_snapshots(root); | 1480 | btrfs_clean_old_snapshots(root); |
| 1481 | mutex_unlock(&root->fs_info->cleaner_mutex); | 1481 | mutex_unlock(&root->fs_info->cleaner_mutex); |
| 1482 | } | 1482 | } |
| @@ -1606,6 +1606,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
| 1606 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); | 1606 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); |
| 1607 | INIT_LIST_HEAD(&fs_info->trans_list); | 1607 | INIT_LIST_HEAD(&fs_info->trans_list); |
| 1608 | INIT_LIST_HEAD(&fs_info->dead_roots); | 1608 | INIT_LIST_HEAD(&fs_info->dead_roots); |
| 1609 | INIT_LIST_HEAD(&fs_info->delayed_iputs); | ||
| 1609 | INIT_LIST_HEAD(&fs_info->hashers); | 1610 | INIT_LIST_HEAD(&fs_info->hashers); |
| 1610 | INIT_LIST_HEAD(&fs_info->delalloc_inodes); | 1611 | INIT_LIST_HEAD(&fs_info->delalloc_inodes); |
| 1611 | INIT_LIST_HEAD(&fs_info->ordered_operations); | 1612 | INIT_LIST_HEAD(&fs_info->ordered_operations); |
| @@ -1614,6 +1615,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
| 1614 | spin_lock_init(&fs_info->new_trans_lock); | 1615 | spin_lock_init(&fs_info->new_trans_lock); |
| 1615 | spin_lock_init(&fs_info->ref_cache_lock); | 1616 | spin_lock_init(&fs_info->ref_cache_lock); |
| 1616 | spin_lock_init(&fs_info->fs_roots_radix_lock); | 1617 | spin_lock_init(&fs_info->fs_roots_radix_lock); |
| 1618 | spin_lock_init(&fs_info->delayed_iput_lock); | ||
| 1617 | 1619 | ||
| 1618 | init_completion(&fs_info->kobj_unregister); | 1620 | init_completion(&fs_info->kobj_unregister); |
| 1619 | fs_info->tree_root = tree_root; | 1621 | fs_info->tree_root = tree_root; |
| @@ -1689,6 +1691,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
| 1689 | mutex_init(&fs_info->cleaner_mutex); | 1691 | mutex_init(&fs_info->cleaner_mutex); |
| 1690 | mutex_init(&fs_info->volume_mutex); | 1692 | mutex_init(&fs_info->volume_mutex); |
| 1691 | init_rwsem(&fs_info->extent_commit_sem); | 1693 | init_rwsem(&fs_info->extent_commit_sem); |
| 1694 | init_rwsem(&fs_info->cleanup_work_sem); | ||
| 1692 | init_rwsem(&fs_info->subvol_sem); | 1695 | init_rwsem(&fs_info->subvol_sem); |
| 1693 | 1696 | ||
| 1694 | btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); | 1697 | btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); |
| @@ -2386,8 +2389,14 @@ int btrfs_commit_super(struct btrfs_root *root) | |||
| 2386 | int ret; | 2389 | int ret; |
| 2387 | 2390 | ||
| 2388 | mutex_lock(&root->fs_info->cleaner_mutex); | 2391 | mutex_lock(&root->fs_info->cleaner_mutex); |
| 2392 | btrfs_run_delayed_iputs(root); | ||
| 2389 | btrfs_clean_old_snapshots(root); | 2393 | btrfs_clean_old_snapshots(root); |
| 2390 | mutex_unlock(&root->fs_info->cleaner_mutex); | 2394 | mutex_unlock(&root->fs_info->cleaner_mutex); |
| 2395 | |||
| 2396 | /* wait until ongoing cleanup work done */ | ||
| 2397 | down_write(&root->fs_info->cleanup_work_sem); | ||
| 2398 | up_write(&root->fs_info->cleanup_work_sem); | ||
| 2399 | |||
| 2391 | trans = btrfs_start_transaction(root, 1); | 2400 | trans = btrfs_start_transaction(root, 1); |
| 2392 | ret = btrfs_commit_transaction(trans, root); | 2401 | ret = btrfs_commit_transaction(trans, root); |
| 2393 | BUG_ON(ret); | 2402 | BUG_ON(ret); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 94627c4cc193..56e50137d0e6 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -195,6 +195,14 @@ static int exclude_super_stripes(struct btrfs_root *root, | |||
| 195 | int stripe_len; | 195 | int stripe_len; |
| 196 | int i, nr, ret; | 196 | int i, nr, ret; |
| 197 | 197 | ||
| 198 | if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) { | ||
| 199 | stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid; | ||
| 200 | cache->bytes_super += stripe_len; | ||
| 201 | ret = add_excluded_extent(root, cache->key.objectid, | ||
| 202 | stripe_len); | ||
| 203 | BUG_ON(ret); | ||
| 204 | } | ||
| 205 | |||
| 198 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | 206 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { |
| 199 | bytenr = btrfs_sb_offset(i); | 207 | bytenr = btrfs_sb_offset(i); |
| 200 | ret = btrfs_rmap_block(&root->fs_info->mapping_tree, | 208 | ret = btrfs_rmap_block(&root->fs_info->mapping_tree, |
| @@ -255,7 +263,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, | |||
| 255 | if (ret) | 263 | if (ret) |
| 256 | break; | 264 | break; |
| 257 | 265 | ||
| 258 | if (extent_start == start) { | 266 | if (extent_start <= start) { |
| 259 | start = extent_end + 1; | 267 | start = extent_end + 1; |
| 260 | } else if (extent_start > start && extent_start < end) { | 268 | } else if (extent_start > start && extent_start < end) { |
| 261 | size = extent_start - start; | 269 | size = extent_start - start; |
| @@ -2880,9 +2888,9 @@ static noinline void flush_delalloc_async(struct btrfs_work *work) | |||
| 2880 | root = async->root; | 2888 | root = async->root; |
| 2881 | info = async->info; | 2889 | info = async->info; |
| 2882 | 2890 | ||
| 2883 | btrfs_start_delalloc_inodes(root); | 2891 | btrfs_start_delalloc_inodes(root, 0); |
| 2884 | wake_up(&info->flush_wait); | 2892 | wake_up(&info->flush_wait); |
| 2885 | btrfs_wait_ordered_extents(root, 0); | 2893 | btrfs_wait_ordered_extents(root, 0, 0); |
| 2886 | 2894 | ||
| 2887 | spin_lock(&info->lock); | 2895 | spin_lock(&info->lock); |
| 2888 | info->flushing = 0; | 2896 | info->flushing = 0; |
| @@ -2956,8 +2964,8 @@ static void flush_delalloc(struct btrfs_root *root, | |||
| 2956 | return; | 2964 | return; |
| 2957 | 2965 | ||
| 2958 | flush: | 2966 | flush: |
| 2959 | btrfs_start_delalloc_inodes(root); | 2967 | btrfs_start_delalloc_inodes(root, 0); |
| 2960 | btrfs_wait_ordered_extents(root, 0); | 2968 | btrfs_wait_ordered_extents(root, 0, 0); |
| 2961 | 2969 | ||
| 2962 | spin_lock(&info->lock); | 2970 | spin_lock(&info->lock); |
| 2963 | info->flushing = 0; | 2971 | info->flushing = 0; |
| @@ -3454,14 +3462,6 @@ static int update_block_group(struct btrfs_trans_handle *trans, | |||
| 3454 | else | 3462 | else |
| 3455 | old_val -= num_bytes; | 3463 | old_val -= num_bytes; |
| 3456 | btrfs_set_super_bytes_used(&info->super_copy, old_val); | 3464 | btrfs_set_super_bytes_used(&info->super_copy, old_val); |
| 3457 | |||
| 3458 | /* block accounting for root item */ | ||
| 3459 | old_val = btrfs_root_used(&root->root_item); | ||
| 3460 | if (alloc) | ||
| 3461 | old_val += num_bytes; | ||
| 3462 | else | ||
| 3463 | old_val -= num_bytes; | ||
| 3464 | btrfs_set_root_used(&root->root_item, old_val); | ||
| 3465 | spin_unlock(&info->delalloc_lock); | 3465 | spin_unlock(&info->delalloc_lock); |
| 3466 | 3466 | ||
| 3467 | while (total) { | 3467 | while (total) { |
| @@ -4049,6 +4049,21 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
| 4049 | return ret; | 4049 | return ret; |
| 4050 | } | 4050 | } |
| 4051 | 4051 | ||
| 4052 | int btrfs_free_tree_block(struct btrfs_trans_handle *trans, | ||
| 4053 | struct btrfs_root *root, | ||
| 4054 | u64 bytenr, u32 blocksize, | ||
| 4055 | u64 parent, u64 root_objectid, int level) | ||
| 4056 | { | ||
| 4057 | u64 used; | ||
| 4058 | spin_lock(&root->node_lock); | ||
| 4059 | used = btrfs_root_used(&root->root_item) - blocksize; | ||
| 4060 | btrfs_set_root_used(&root->root_item, used); | ||
| 4061 | spin_unlock(&root->node_lock); | ||
| 4062 | |||
| 4063 | return btrfs_free_extent(trans, root, bytenr, blocksize, | ||
| 4064 | parent, root_objectid, level, 0); | ||
| 4065 | } | ||
| 4066 | |||
| 4052 | static u64 stripe_align(struct btrfs_root *root, u64 val) | 4067 | static u64 stripe_align(struct btrfs_root *root, u64 val) |
| 4053 | { | 4068 | { |
| 4054 | u64 mask = ((u64)root->stripesize - 1); | 4069 | u64 mask = ((u64)root->stripesize - 1); |
| @@ -4578,7 +4593,6 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans, | |||
| 4578 | { | 4593 | { |
| 4579 | int ret; | 4594 | int ret; |
| 4580 | u64 search_start = 0; | 4595 | u64 search_start = 0; |
| 4581 | struct btrfs_fs_info *info = root->fs_info; | ||
| 4582 | 4596 | ||
| 4583 | data = btrfs_get_alloc_profile(root, data); | 4597 | data = btrfs_get_alloc_profile(root, data); |
| 4584 | again: | 4598 | again: |
| @@ -4586,17 +4600,9 @@ again: | |||
| 4586 | * the only place that sets empty_size is btrfs_realloc_node, which | 4600 | * the only place that sets empty_size is btrfs_realloc_node, which |
| 4587 | * is not called recursively on allocations | 4601 | * is not called recursively on allocations |
| 4588 | */ | 4602 | */ |
| 4589 | if (empty_size || root->ref_cows) { | 4603 | if (empty_size || root->ref_cows) |
| 4590 | if (!(data & BTRFS_BLOCK_GROUP_METADATA)) { | ||
| 4591 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | ||
| 4592 | 2 * 1024 * 1024, | ||
| 4593 | BTRFS_BLOCK_GROUP_METADATA | | ||
| 4594 | (info->metadata_alloc_profile & | ||
| 4595 | info->avail_metadata_alloc_bits), 0); | ||
| 4596 | } | ||
| 4597 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | 4604 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, |
| 4598 | num_bytes + 2 * 1024 * 1024, data, 0); | 4605 | num_bytes + 2 * 1024 * 1024, data, 0); |
| 4599 | } | ||
| 4600 | 4606 | ||
| 4601 | WARN_ON(num_bytes < root->sectorsize); | 4607 | WARN_ON(num_bytes < root->sectorsize); |
| 4602 | ret = find_free_extent(trans, root, num_bytes, empty_size, | 4608 | ret = find_free_extent(trans, root, num_bytes, empty_size, |
| @@ -4897,6 +4903,14 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans, | |||
| 4897 | extent_op); | 4903 | extent_op); |
| 4898 | BUG_ON(ret); | 4904 | BUG_ON(ret); |
| 4899 | } | 4905 | } |
| 4906 | |||
| 4907 | if (root_objectid == root->root_key.objectid) { | ||
| 4908 | u64 used; | ||
| 4909 | spin_lock(&root->node_lock); | ||
| 4910 | used = btrfs_root_used(&root->root_item) + num_bytes; | ||
| 4911 | btrfs_set_root_used(&root->root_item, used); | ||
| 4912 | spin_unlock(&root->node_lock); | ||
| 4913 | } | ||
| 4900 | return ret; | 4914 | return ret; |
| 4901 | } | 4915 | } |
| 4902 | 4916 | ||
| @@ -4919,8 +4933,16 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, | |||
| 4919 | btrfs_set_buffer_uptodate(buf); | 4933 | btrfs_set_buffer_uptodate(buf); |
| 4920 | 4934 | ||
| 4921 | if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { | 4935 | if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { |
| 4922 | set_extent_dirty(&root->dirty_log_pages, buf->start, | 4936 | /* |
| 4923 | buf->start + buf->len - 1, GFP_NOFS); | 4937 | * we allow two log transactions at a time, use different |
| 4938 | * EXENT bit to differentiate dirty pages. | ||
| 4939 | */ | ||
| 4940 | if (root->log_transid % 2 == 0) | ||
| 4941 | set_extent_dirty(&root->dirty_log_pages, buf->start, | ||
| 4942 | buf->start + buf->len - 1, GFP_NOFS); | ||
| 4943 | else | ||
| 4944 | set_extent_new(&root->dirty_log_pages, buf->start, | ||
| 4945 | buf->start + buf->len - 1, GFP_NOFS); | ||
| 4924 | } else { | 4946 | } else { |
| 4925 | set_extent_dirty(&trans->transaction->dirty_pages, buf->start, | 4947 | set_extent_dirty(&trans->transaction->dirty_pages, buf->start, |
| 4926 | buf->start + buf->len - 1, GFP_NOFS); | 4948 | buf->start + buf->len - 1, GFP_NOFS); |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 77f759302e12..feaa13b105d9 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
| @@ -179,18 +179,14 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 179 | } | 179 | } |
| 180 | flags = em->flags; | 180 | flags = em->flags; |
| 181 | if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) { | 181 | if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) { |
| 182 | if (em->start <= start && | 182 | if (testend && em->start + em->len >= start + len) { |
| 183 | (!testend || em->start + em->len >= start + len)) { | ||
| 184 | free_extent_map(em); | 183 | free_extent_map(em); |
| 185 | write_unlock(&em_tree->lock); | 184 | write_unlock(&em_tree->lock); |
| 186 | break; | 185 | break; |
| 187 | } | 186 | } |
| 188 | if (start < em->start) { | 187 | start = em->start + em->len; |
| 189 | len = em->start - start; | 188 | if (testend) |
| 190 | } else { | ||
| 191 | len = start + len - (em->start + em->len); | 189 | len = start + len - (em->start + em->len); |
| 192 | start = em->start + em->len; | ||
| 193 | } | ||
| 194 | free_extent_map(em); | 190 | free_extent_map(em); |
| 195 | write_unlock(&em_tree->lock); | 191 | write_unlock(&em_tree->lock); |
| 196 | continue; | 192 | continue; |
| @@ -265,319 +261,247 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
| 265 | * If an extent intersects the range but is not entirely inside the range | 261 | * If an extent intersects the range but is not entirely inside the range |
| 266 | * it is either truncated or split. Anything entirely inside the range | 262 | * it is either truncated or split. Anything entirely inside the range |
| 267 | * is deleted from the tree. | 263 | * is deleted from the tree. |
| 268 | * | ||
| 269 | * inline_limit is used to tell this code which offsets in the file to keep | ||
| 270 | * if they contain inline extents. | ||
| 271 | */ | 264 | */ |
| 272 | noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans, | 265 | int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, |
| 273 | struct btrfs_root *root, struct inode *inode, | 266 | u64 start, u64 end, u64 *hint_byte, int drop_cache) |
| 274 | u64 start, u64 end, u64 locked_end, | ||
| 275 | u64 inline_limit, u64 *hint_byte, int drop_cache) | ||
| 276 | { | 267 | { |
| 277 | u64 extent_end = 0; | 268 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 278 | u64 search_start = start; | ||
| 279 | u64 ram_bytes = 0; | ||
| 280 | u64 disk_bytenr = 0; | ||
| 281 | u64 orig_locked_end = locked_end; | ||
| 282 | u8 compression; | ||
| 283 | u8 encryption; | ||
| 284 | u16 other_encoding = 0; | ||
| 285 | struct extent_buffer *leaf; | 269 | struct extent_buffer *leaf; |
| 286 | struct btrfs_file_extent_item *extent; | 270 | struct btrfs_file_extent_item *fi; |
| 287 | struct btrfs_path *path; | 271 | struct btrfs_path *path; |
| 288 | struct btrfs_key key; | 272 | struct btrfs_key key; |
| 289 | struct btrfs_file_extent_item old; | 273 | struct btrfs_key new_key; |
| 290 | int keep; | 274 | u64 search_start = start; |
| 291 | int slot; | 275 | u64 disk_bytenr = 0; |
| 292 | int bookend; | 276 | u64 num_bytes = 0; |
| 293 | int found_type = 0; | 277 | u64 extent_offset = 0; |
| 294 | int found_extent; | 278 | u64 extent_end = 0; |
| 295 | int found_inline; | 279 | int del_nr = 0; |
| 280 | int del_slot = 0; | ||
| 281 | int extent_type; | ||
| 296 | int recow; | 282 | int recow; |
| 297 | int ret; | 283 | int ret; |
| 298 | 284 | ||
| 299 | inline_limit = 0; | ||
| 300 | if (drop_cache) | 285 | if (drop_cache) |
| 301 | btrfs_drop_extent_cache(inode, start, end - 1, 0); | 286 | btrfs_drop_extent_cache(inode, start, end - 1, 0); |
| 302 | 287 | ||
| 303 | path = btrfs_alloc_path(); | 288 | path = btrfs_alloc_path(); |
| 304 | if (!path) | 289 | if (!path) |
| 305 | return -ENOMEM; | 290 | return -ENOMEM; |
| 291 | |||
| 306 | while (1) { | 292 | while (1) { |
| 307 | recow = 0; | 293 | recow = 0; |
| 308 | btrfs_release_path(root, path); | ||
| 309 | ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, | 294 | ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, |
| 310 | search_start, -1); | 295 | search_start, -1); |
| 311 | if (ret < 0) | 296 | if (ret < 0) |
| 312 | goto out; | 297 | break; |
| 313 | if (ret > 0) { | 298 | if (ret > 0 && path->slots[0] > 0 && search_start == start) { |
| 314 | if (path->slots[0] == 0) { | 299 | leaf = path->nodes[0]; |
| 315 | ret = 0; | 300 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); |
| 316 | goto out; | 301 | if (key.objectid == inode->i_ino && |
| 317 | } | 302 | key.type == BTRFS_EXTENT_DATA_KEY) |
| 318 | path->slots[0]--; | 303 | path->slots[0]--; |
| 319 | } | 304 | } |
| 305 | ret = 0; | ||
| 320 | next_slot: | 306 | next_slot: |
| 321 | keep = 0; | ||
| 322 | bookend = 0; | ||
| 323 | found_extent = 0; | ||
| 324 | found_inline = 0; | ||
| 325 | compression = 0; | ||
| 326 | encryption = 0; | ||
| 327 | extent = NULL; | ||
| 328 | leaf = path->nodes[0]; | 307 | leaf = path->nodes[0]; |
| 329 | slot = path->slots[0]; | 308 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { |
| 330 | ret = 0; | 309 | BUG_ON(del_nr > 0); |
| 331 | btrfs_item_key_to_cpu(leaf, &key, slot); | 310 | ret = btrfs_next_leaf(root, path); |
| 332 | if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY && | 311 | if (ret < 0) |
| 333 | key.offset >= end) { | 312 | break; |
| 334 | goto out; | 313 | if (ret > 0) { |
| 335 | } | 314 | ret = 0; |
| 336 | if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY || | 315 | break; |
| 337 | key.objectid != inode->i_ino) { | ||
| 338 | goto out; | ||
| 339 | } | ||
| 340 | if (recow) { | ||
| 341 | search_start = max(key.offset, start); | ||
| 342 | continue; | ||
| 343 | } | ||
| 344 | if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) { | ||
| 345 | extent = btrfs_item_ptr(leaf, slot, | ||
| 346 | struct btrfs_file_extent_item); | ||
| 347 | found_type = btrfs_file_extent_type(leaf, extent); | ||
| 348 | compression = btrfs_file_extent_compression(leaf, | ||
| 349 | extent); | ||
| 350 | encryption = btrfs_file_extent_encryption(leaf, | ||
| 351 | extent); | ||
| 352 | other_encoding = btrfs_file_extent_other_encoding(leaf, | ||
| 353 | extent); | ||
| 354 | if (found_type == BTRFS_FILE_EXTENT_REG || | ||
| 355 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | ||
| 356 | extent_end = | ||
| 357 | btrfs_file_extent_disk_bytenr(leaf, | ||
| 358 | extent); | ||
| 359 | if (extent_end) | ||
| 360 | *hint_byte = extent_end; | ||
| 361 | |||
| 362 | extent_end = key.offset + | ||
| 363 | btrfs_file_extent_num_bytes(leaf, extent); | ||
| 364 | ram_bytes = btrfs_file_extent_ram_bytes(leaf, | ||
| 365 | extent); | ||
| 366 | found_extent = 1; | ||
| 367 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | ||
| 368 | found_inline = 1; | ||
| 369 | extent_end = key.offset + | ||
| 370 | btrfs_file_extent_inline_len(leaf, extent); | ||
| 371 | } | 316 | } |
| 317 | leaf = path->nodes[0]; | ||
| 318 | recow = 1; | ||
| 319 | } | ||
| 320 | |||
| 321 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | ||
| 322 | if (key.objectid > inode->i_ino || | ||
| 323 | key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) | ||
| 324 | break; | ||
| 325 | |||
| 326 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
| 327 | struct btrfs_file_extent_item); | ||
| 328 | extent_type = btrfs_file_extent_type(leaf, fi); | ||
| 329 | |||
| 330 | if (extent_type == BTRFS_FILE_EXTENT_REG || | ||
| 331 | extent_type == BTRFS_FILE_EXTENT_PREALLOC) { | ||
| 332 | disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); | ||
| 333 | num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); | ||
| 334 | extent_offset = btrfs_file_extent_offset(leaf, fi); | ||
| 335 | extent_end = key.offset + | ||
| 336 | btrfs_file_extent_num_bytes(leaf, fi); | ||
| 337 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { | ||
| 338 | extent_end = key.offset + | ||
| 339 | btrfs_file_extent_inline_len(leaf, fi); | ||
| 372 | } else { | 340 | } else { |
| 341 | WARN_ON(1); | ||
| 373 | extent_end = search_start; | 342 | extent_end = search_start; |
| 374 | } | 343 | } |
| 375 | 344 | ||
| 376 | /* we found nothing we can drop */ | 345 | if (extent_end <= search_start) { |
| 377 | if ((!found_extent && !found_inline) || | 346 | path->slots[0]++; |
| 378 | search_start >= extent_end) { | ||
| 379 | int nextret; | ||
| 380 | u32 nritems; | ||
| 381 | nritems = btrfs_header_nritems(leaf); | ||
| 382 | if (slot >= nritems - 1) { | ||
| 383 | nextret = btrfs_next_leaf(root, path); | ||
| 384 | if (nextret) | ||
| 385 | goto out; | ||
| 386 | recow = 1; | ||
| 387 | } else { | ||
| 388 | path->slots[0]++; | ||
| 389 | } | ||
| 390 | goto next_slot; | 347 | goto next_slot; |
| 391 | } | 348 | } |
| 392 | 349 | ||
| 393 | if (end <= extent_end && start >= key.offset && found_inline) | 350 | search_start = max(key.offset, start); |
| 394 | *hint_byte = EXTENT_MAP_INLINE; | 351 | if (recow) { |
| 395 | 352 | btrfs_release_path(root, path); | |
| 396 | if (found_extent) { | 353 | continue; |
| 397 | read_extent_buffer(leaf, &old, (unsigned long)extent, | ||
| 398 | sizeof(old)); | ||
| 399 | } | ||
| 400 | |||
| 401 | if (end < extent_end && end >= key.offset) { | ||
| 402 | bookend = 1; | ||
| 403 | if (found_inline && start <= key.offset) | ||
| 404 | keep = 1; | ||
| 405 | } | 354 | } |
| 406 | 355 | ||
| 407 | if (bookend && found_extent) { | 356 | /* |
| 408 | if (locked_end < extent_end) { | 357 | * | - range to drop - | |
| 409 | ret = try_lock_extent(&BTRFS_I(inode)->io_tree, | 358 | * | -------- extent -------- | |
| 410 | locked_end, extent_end - 1, | 359 | */ |
| 411 | GFP_NOFS); | 360 | if (start > key.offset && end < extent_end) { |
| 412 | if (!ret) { | 361 | BUG_ON(del_nr > 0); |
| 413 | btrfs_release_path(root, path); | 362 | BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE); |
| 414 | lock_extent(&BTRFS_I(inode)->io_tree, | 363 | |
| 415 | locked_end, extent_end - 1, | 364 | memcpy(&new_key, &key, sizeof(new_key)); |
| 416 | GFP_NOFS); | 365 | new_key.offset = start; |
| 417 | locked_end = extent_end; | 366 | ret = btrfs_duplicate_item(trans, root, path, |
| 418 | continue; | 367 | &new_key); |
| 419 | } | 368 | if (ret == -EAGAIN) { |
| 420 | locked_end = extent_end; | 369 | btrfs_release_path(root, path); |
| 370 | continue; | ||
| 421 | } | 371 | } |
| 422 | disk_bytenr = le64_to_cpu(old.disk_bytenr); | 372 | if (ret < 0) |
| 423 | if (disk_bytenr != 0) { | 373 | break; |
| 374 | |||
| 375 | leaf = path->nodes[0]; | ||
| 376 | fi = btrfs_item_ptr(leaf, path->slots[0] - 1, | ||
| 377 | struct btrfs_file_extent_item); | ||
| 378 | btrfs_set_file_extent_num_bytes(leaf, fi, | ||
| 379 | start - key.offset); | ||
| 380 | |||
| 381 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
| 382 | struct btrfs_file_extent_item); | ||
| 383 | |||
| 384 | extent_offset += start - key.offset; | ||
| 385 | btrfs_set_file_extent_offset(leaf, fi, extent_offset); | ||
| 386 | btrfs_set_file_extent_num_bytes(leaf, fi, | ||
| 387 | extent_end - start); | ||
| 388 | btrfs_mark_buffer_dirty(leaf); | ||
| 389 | |||
| 390 | if (disk_bytenr > 0) { | ||
| 424 | ret = btrfs_inc_extent_ref(trans, root, | 391 | ret = btrfs_inc_extent_ref(trans, root, |
| 425 | disk_bytenr, | 392 | disk_bytenr, num_bytes, 0, |
| 426 | le64_to_cpu(old.disk_num_bytes), 0, | 393 | root->root_key.objectid, |
| 427 | root->root_key.objectid, | 394 | new_key.objectid, |
| 428 | key.objectid, key.offset - | 395 | start - extent_offset); |
| 429 | le64_to_cpu(old.offset)); | ||
| 430 | BUG_ON(ret); | 396 | BUG_ON(ret); |
| 397 | *hint_byte = disk_bytenr; | ||
| 431 | } | 398 | } |
| 399 | key.offset = start; | ||
| 432 | } | 400 | } |
| 401 | /* | ||
| 402 | * | ---- range to drop ----- | | ||
| 403 | * | -------- extent -------- | | ||
| 404 | */ | ||
| 405 | if (start <= key.offset && end < extent_end) { | ||
| 406 | BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE); | ||
| 433 | 407 | ||
| 434 | if (found_inline) { | 408 | memcpy(&new_key, &key, sizeof(new_key)); |
| 435 | u64 mask = root->sectorsize - 1; | 409 | new_key.offset = end; |
| 436 | search_start = (extent_end + mask) & ~mask; | 410 | btrfs_set_item_key_safe(trans, root, path, &new_key); |
| 437 | } else | 411 | |
| 438 | search_start = extent_end; | 412 | extent_offset += end - key.offset; |
| 439 | 413 | btrfs_set_file_extent_offset(leaf, fi, extent_offset); | |
| 440 | /* truncate existing extent */ | 414 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 441 | if (start > key.offset) { | 415 | extent_end - end); |
| 442 | u64 new_num; | 416 | btrfs_mark_buffer_dirty(leaf); |
| 443 | u64 old_num; | 417 | if (disk_bytenr > 0) { |
| 444 | keep = 1; | 418 | inode_sub_bytes(inode, end - key.offset); |
| 445 | WARN_ON(start & (root->sectorsize - 1)); | 419 | *hint_byte = disk_bytenr; |
| 446 | if (found_extent) { | ||
| 447 | new_num = start - key.offset; | ||
| 448 | old_num = btrfs_file_extent_num_bytes(leaf, | ||
| 449 | extent); | ||
| 450 | *hint_byte = | ||
| 451 | btrfs_file_extent_disk_bytenr(leaf, | ||
| 452 | extent); | ||
| 453 | if (btrfs_file_extent_disk_bytenr(leaf, | ||
| 454 | extent)) { | ||
| 455 | inode_sub_bytes(inode, old_num - | ||
| 456 | new_num); | ||
| 457 | } | ||
| 458 | btrfs_set_file_extent_num_bytes(leaf, | ||
| 459 | extent, new_num); | ||
| 460 | btrfs_mark_buffer_dirty(leaf); | ||
| 461 | } else if (key.offset < inline_limit && | ||
| 462 | (end > extent_end) && | ||
| 463 | (inline_limit < extent_end)) { | ||
| 464 | u32 new_size; | ||
| 465 | new_size = btrfs_file_extent_calc_inline_size( | ||
| 466 | inline_limit - key.offset); | ||
| 467 | inode_sub_bytes(inode, extent_end - | ||
| 468 | inline_limit); | ||
| 469 | btrfs_set_file_extent_ram_bytes(leaf, extent, | ||
| 470 | new_size); | ||
| 471 | if (!compression && !encryption) { | ||
| 472 | btrfs_truncate_item(trans, root, path, | ||
| 473 | new_size, 1); | ||
| 474 | } | ||
| 475 | } | 420 | } |
| 421 | break; | ||
| 476 | } | 422 | } |
| 477 | /* delete the entire extent */ | ||
| 478 | if (!keep) { | ||
| 479 | if (found_inline) | ||
| 480 | inode_sub_bytes(inode, extent_end - | ||
| 481 | key.offset); | ||
| 482 | ret = btrfs_del_item(trans, root, path); | ||
| 483 | /* TODO update progress marker and return */ | ||
| 484 | BUG_ON(ret); | ||
| 485 | extent = NULL; | ||
| 486 | btrfs_release_path(root, path); | ||
| 487 | /* the extent will be freed later */ | ||
| 488 | } | ||
| 489 | if (bookend && found_inline && start <= key.offset) { | ||
| 490 | u32 new_size; | ||
| 491 | new_size = btrfs_file_extent_calc_inline_size( | ||
| 492 | extent_end - end); | ||
| 493 | inode_sub_bytes(inode, end - key.offset); | ||
| 494 | btrfs_set_file_extent_ram_bytes(leaf, extent, | ||
| 495 | new_size); | ||
| 496 | if (!compression && !encryption) | ||
| 497 | ret = btrfs_truncate_item(trans, root, path, | ||
| 498 | new_size, 0); | ||
| 499 | BUG_ON(ret); | ||
| 500 | } | ||
| 501 | /* create bookend, splitting the extent in two */ | ||
| 502 | if (bookend && found_extent) { | ||
| 503 | struct btrfs_key ins; | ||
| 504 | ins.objectid = inode->i_ino; | ||
| 505 | ins.offset = end; | ||
| 506 | btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY); | ||
| 507 | 423 | ||
| 508 | btrfs_release_path(root, path); | 424 | search_start = extent_end; |
| 509 | path->leave_spinning = 1; | 425 | /* |
| 510 | ret = btrfs_insert_empty_item(trans, root, path, &ins, | 426 | * | ---- range to drop ----- | |
| 511 | sizeof(*extent)); | 427 | * | -------- extent -------- | |
| 512 | BUG_ON(ret); | 428 | */ |
| 429 | if (start > key.offset && end >= extent_end) { | ||
| 430 | BUG_ON(del_nr > 0); | ||
| 431 | BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE); | ||
| 513 | 432 | ||
| 514 | leaf = path->nodes[0]; | 433 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 515 | extent = btrfs_item_ptr(leaf, path->slots[0], | 434 | start - key.offset); |
| 516 | struct btrfs_file_extent_item); | 435 | btrfs_mark_buffer_dirty(leaf); |
| 517 | write_extent_buffer(leaf, &old, | 436 | if (disk_bytenr > 0) { |
| 518 | (unsigned long)extent, sizeof(old)); | 437 | inode_sub_bytes(inode, extent_end - start); |
| 519 | 438 | *hint_byte = disk_bytenr; | |
| 520 | btrfs_set_file_extent_compression(leaf, extent, | 439 | } |
| 521 | compression); | 440 | if (end == extent_end) |
| 522 | btrfs_set_file_extent_encryption(leaf, extent, | 441 | break; |
| 523 | encryption); | ||
| 524 | btrfs_set_file_extent_other_encoding(leaf, extent, | ||
| 525 | other_encoding); | ||
| 526 | btrfs_set_file_extent_offset(leaf, extent, | ||
| 527 | le64_to_cpu(old.offset) + end - key.offset); | ||
| 528 | WARN_ON(le64_to_cpu(old.num_bytes) < | ||
| 529 | (extent_end - end)); | ||
| 530 | btrfs_set_file_extent_num_bytes(leaf, extent, | ||
| 531 | extent_end - end); | ||
| 532 | 442 | ||
| 533 | /* | 443 | path->slots[0]++; |
| 534 | * set the ram bytes to the size of the full extent | 444 | goto next_slot; |
| 535 | * before splitting. This is a worst case flag, | ||
| 536 | * but its the best we can do because we don't know | ||
| 537 | * how splitting affects compression | ||
| 538 | */ | ||
| 539 | btrfs_set_file_extent_ram_bytes(leaf, extent, | ||
| 540 | ram_bytes); | ||
| 541 | btrfs_set_file_extent_type(leaf, extent, found_type); | ||
| 542 | |||
| 543 | btrfs_unlock_up_safe(path, 1); | ||
| 544 | btrfs_mark_buffer_dirty(path->nodes[0]); | ||
| 545 | btrfs_set_lock_blocking(path->nodes[0]); | ||
| 546 | |||
| 547 | path->leave_spinning = 0; | ||
| 548 | btrfs_release_path(root, path); | ||
| 549 | if (disk_bytenr != 0) | ||
| 550 | inode_add_bytes(inode, extent_end - end); | ||
| 551 | } | 445 | } |
| 552 | 446 | ||
| 553 | if (found_extent && !keep) { | 447 | /* |
| 554 | u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr); | 448 | * | ---- range to drop ----- | |
| 449 | * | ------ extent ------ | | ||
| 450 | */ | ||
| 451 | if (start <= key.offset && end >= extent_end) { | ||
| 452 | if (del_nr == 0) { | ||
| 453 | del_slot = path->slots[0]; | ||
| 454 | del_nr = 1; | ||
| 455 | } else { | ||
| 456 | BUG_ON(del_slot + del_nr != path->slots[0]); | ||
| 457 | del_nr++; | ||
| 458 | } | ||
| 555 | 459 | ||
| 556 | if (old_disk_bytenr != 0) { | 460 | if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
| 557 | inode_sub_bytes(inode, | 461 | inode_sub_bytes(inode, |
| 558 | le64_to_cpu(old.num_bytes)); | 462 | extent_end - key.offset); |
| 463 | extent_end = ALIGN(extent_end, | ||
| 464 | root->sectorsize); | ||
| 465 | } else if (disk_bytenr > 0) { | ||
| 559 | ret = btrfs_free_extent(trans, root, | 466 | ret = btrfs_free_extent(trans, root, |
| 560 | old_disk_bytenr, | 467 | disk_bytenr, num_bytes, 0, |
| 561 | le64_to_cpu(old.disk_num_bytes), | 468 | root->root_key.objectid, |
| 562 | 0, root->root_key.objectid, | ||
| 563 | key.objectid, key.offset - | 469 | key.objectid, key.offset - |
| 564 | le64_to_cpu(old.offset)); | 470 | extent_offset); |
| 565 | BUG_ON(ret); | 471 | BUG_ON(ret); |
| 566 | *hint_byte = old_disk_bytenr; | 472 | inode_sub_bytes(inode, |
| 473 | extent_end - key.offset); | ||
| 474 | *hint_byte = disk_bytenr; | ||
| 567 | } | 475 | } |
| 568 | } | ||
| 569 | 476 | ||
| 570 | if (search_start >= end) { | 477 | if (end == extent_end) |
| 571 | ret = 0; | 478 | break; |
| 572 | goto out; | 479 | |
| 480 | if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) { | ||
| 481 | path->slots[0]++; | ||
| 482 | goto next_slot; | ||
| 483 | } | ||
| 484 | |||
| 485 | ret = btrfs_del_items(trans, root, path, del_slot, | ||
| 486 | del_nr); | ||
| 487 | BUG_ON(ret); | ||
| 488 | |||
| 489 | del_nr = 0; | ||
| 490 | del_slot = 0; | ||
| 491 | |||
| 492 | btrfs_release_path(root, path); | ||
| 493 | continue; | ||
| 573 | } | 494 | } |
| 495 | |||
| 496 | BUG_ON(1); | ||
| 574 | } | 497 | } |
| 575 | out: | 498 | |
| 576 | btrfs_free_path(path); | 499 | if (del_nr > 0) { |
| 577 | if (locked_end > orig_locked_end) { | 500 | ret = btrfs_del_items(trans, root, path, del_slot, del_nr); |
| 578 | unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end, | 501 | BUG_ON(ret); |
| 579 | locked_end - 1, GFP_NOFS); | ||
| 580 | } | 502 | } |
| 503 | |||
| 504 | btrfs_free_path(path); | ||
| 581 | return ret; | 505 | return ret; |
| 582 | } | 506 | } |
| 583 | 507 | ||
| @@ -620,23 +544,23 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot, | |||
| 620 | * two or three. | 544 | * two or three. |
| 621 | */ | 545 | */ |
| 622 | int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, | 546 | int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, |
| 623 | struct btrfs_root *root, | ||
| 624 | struct inode *inode, u64 start, u64 end) | 547 | struct inode *inode, u64 start, u64 end) |
| 625 | { | 548 | { |
| 549 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
| 626 | struct extent_buffer *leaf; | 550 | struct extent_buffer *leaf; |
| 627 | struct btrfs_path *path; | 551 | struct btrfs_path *path; |
| 628 | struct btrfs_file_extent_item *fi; | 552 | struct btrfs_file_extent_item *fi; |
| 629 | struct btrfs_key key; | 553 | struct btrfs_key key; |
| 554 | struct btrfs_key new_key; | ||
| 630 | u64 bytenr; | 555 | u64 bytenr; |
| 631 | u64 num_bytes; | 556 | u64 num_bytes; |
| 632 | u64 extent_end; | 557 | u64 extent_end; |
| 633 | u64 orig_offset; | 558 | u64 orig_offset; |
| 634 | u64 other_start; | 559 | u64 other_start; |
| 635 | u64 other_end; | 560 | u64 other_end; |
| 636 | u64 split = start; | 561 | u64 split; |
| 637 | u64 locked_end = end; | 562 | int del_nr = 0; |
| 638 | int extent_type; | 563 | int del_slot = 0; |
| 639 | int split_end = 1; | ||
| 640 | int ret; | 564 | int ret; |
| 641 | 565 | ||
| 642 | btrfs_drop_extent_cache(inode, start, end - 1, 0); | 566 | btrfs_drop_extent_cache(inode, start, end - 1, 0); |
| @@ -644,12 +568,10 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, | |||
| 644 | path = btrfs_alloc_path(); | 568 | path = btrfs_alloc_path(); |
| 645 | BUG_ON(!path); | 569 | BUG_ON(!path); |
| 646 | again: | 570 | again: |
| 571 | split = start; | ||
| 647 | key.objectid = inode->i_ino; | 572 | key.objectid = inode->i_ino; |
| 648 | key.type = BTRFS_EXTENT_DATA_KEY; | 573 | key.type = BTRFS_EXTENT_DATA_KEY; |
| 649 | if (split == start) | 574 | key.offset = split; |
| 650 | key.offset = split; | ||
| 651 | else | ||
| 652 | key.offset = split - 1; | ||
| 653 | 575 | ||
| 654 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | 576 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
| 655 | if (ret > 0 && path->slots[0] > 0) | 577 | if (ret > 0 && path->slots[0] > 0) |
| @@ -661,8 +583,8 @@ again: | |||
| 661 | key.type != BTRFS_EXTENT_DATA_KEY); | 583 | key.type != BTRFS_EXTENT_DATA_KEY); |
| 662 | fi = btrfs_item_ptr(leaf, path->slots[0], | 584 | fi = btrfs_item_ptr(leaf, path->slots[0], |
| 663 | struct btrfs_file_extent_item); | 585 | struct btrfs_file_extent_item); |
| 664 | extent_type = btrfs_file_extent_type(leaf, fi); | 586 | BUG_ON(btrfs_file_extent_type(leaf, fi) != |
| 665 | BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC); | 587 | BTRFS_FILE_EXTENT_PREALLOC); |
| 666 | extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); | 588 | extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); |
| 667 | BUG_ON(key.offset > start || extent_end < end); | 589 | BUG_ON(key.offset > start || extent_end < end); |
| 668 | 590 | ||
| @@ -670,150 +592,91 @@ again: | |||
| 670 | num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); | 592 | num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); |
| 671 | orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi); | 593 | orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi); |
| 672 | 594 | ||
| 673 | if (key.offset == start) | 595 | while (start > key.offset || end < extent_end) { |
| 674 | split = end; | 596 | if (key.offset == start) |
| 675 | 597 | split = end; | |
| 676 | if (key.offset == start && extent_end == end) { | 598 | |
| 677 | int del_nr = 0; | 599 | memcpy(&new_key, &key, sizeof(new_key)); |
| 678 | int del_slot = 0; | 600 | new_key.offset = split; |
| 679 | other_start = end; | 601 | ret = btrfs_duplicate_item(trans, root, path, &new_key); |
| 680 | other_end = 0; | 602 | if (ret == -EAGAIN) { |
| 681 | if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino, | 603 | btrfs_release_path(root, path); |
| 682 | bytenr, &other_start, &other_end)) { | 604 | goto again; |
| 683 | extent_end = other_end; | ||
| 684 | del_slot = path->slots[0] + 1; | ||
| 685 | del_nr++; | ||
| 686 | ret = btrfs_free_extent(trans, root, bytenr, num_bytes, | ||
| 687 | 0, root->root_key.objectid, | ||
| 688 | inode->i_ino, orig_offset); | ||
| 689 | BUG_ON(ret); | ||
| 690 | } | ||
| 691 | other_start = 0; | ||
| 692 | other_end = start; | ||
| 693 | if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino, | ||
| 694 | bytenr, &other_start, &other_end)) { | ||
| 695 | key.offset = other_start; | ||
| 696 | del_slot = path->slots[0]; | ||
| 697 | del_nr++; | ||
| 698 | ret = btrfs_free_extent(trans, root, bytenr, num_bytes, | ||
| 699 | 0, root->root_key.objectid, | ||
| 700 | inode->i_ino, orig_offset); | ||
| 701 | BUG_ON(ret); | ||
| 702 | } | ||
| 703 | split_end = 0; | ||
| 704 | if (del_nr == 0) { | ||
| 705 | btrfs_set_file_extent_type(leaf, fi, | ||
| 706 | BTRFS_FILE_EXTENT_REG); | ||
| 707 | goto done; | ||
| 708 | } | 605 | } |
| 606 | BUG_ON(ret < 0); | ||
| 709 | 607 | ||
| 710 | fi = btrfs_item_ptr(leaf, del_slot - 1, | 608 | leaf = path->nodes[0]; |
| 609 | fi = btrfs_item_ptr(leaf, path->slots[0] - 1, | ||
| 711 | struct btrfs_file_extent_item); | 610 | struct btrfs_file_extent_item); |
| 712 | btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG); | ||
| 713 | btrfs_set_file_extent_num_bytes(leaf, fi, | 611 | btrfs_set_file_extent_num_bytes(leaf, fi, |
| 714 | extent_end - key.offset); | 612 | split - key.offset); |
| 613 | |||
| 614 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
| 615 | struct btrfs_file_extent_item); | ||
| 616 | |||
| 617 | btrfs_set_file_extent_offset(leaf, fi, split - orig_offset); | ||
| 618 | btrfs_set_file_extent_num_bytes(leaf, fi, | ||
| 619 | extent_end - split); | ||
| 715 | btrfs_mark_buffer_dirty(leaf); | 620 | btrfs_mark_buffer_dirty(leaf); |
| 716 | 621 | ||
| 717 | ret = btrfs_del_items(trans, root, path, del_slot, del_nr); | 622 | ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, |
| 623 | root->root_key.objectid, | ||
| 624 | inode->i_ino, orig_offset); | ||
| 718 | BUG_ON(ret); | 625 | BUG_ON(ret); |
| 719 | goto release; | ||
| 720 | } else if (split == start) { | ||
| 721 | if (locked_end < extent_end) { | ||
| 722 | ret = try_lock_extent(&BTRFS_I(inode)->io_tree, | ||
| 723 | locked_end, extent_end - 1, GFP_NOFS); | ||
| 724 | if (!ret) { | ||
| 725 | btrfs_release_path(root, path); | ||
| 726 | lock_extent(&BTRFS_I(inode)->io_tree, | ||
| 727 | locked_end, extent_end - 1, GFP_NOFS); | ||
| 728 | locked_end = extent_end; | ||
| 729 | goto again; | ||
| 730 | } | ||
| 731 | locked_end = extent_end; | ||
| 732 | } | ||
| 733 | btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset); | ||
| 734 | } else { | ||
| 735 | BUG_ON(key.offset != start); | ||
| 736 | key.offset = split; | ||
| 737 | btrfs_set_file_extent_offset(leaf, fi, key.offset - | ||
| 738 | orig_offset); | ||
| 739 | btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split); | ||
| 740 | btrfs_set_item_key_safe(trans, root, path, &key); | ||
| 741 | extent_end = split; | ||
| 742 | } | ||
| 743 | 626 | ||
| 744 | if (extent_end == end) { | 627 | if (split == start) { |
| 745 | split_end = 0; | 628 | key.offset = start; |
| 746 | extent_type = BTRFS_FILE_EXTENT_REG; | 629 | } else { |
| 747 | } | 630 | BUG_ON(start != key.offset); |
| 748 | if (extent_end == end && split == start) { | ||
| 749 | other_start = end; | ||
| 750 | other_end = 0; | ||
| 751 | if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino, | ||
| 752 | bytenr, &other_start, &other_end)) { | ||
| 753 | path->slots[0]++; | ||
| 754 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
| 755 | struct btrfs_file_extent_item); | ||
| 756 | key.offset = split; | ||
| 757 | btrfs_set_item_key_safe(trans, root, path, &key); | ||
| 758 | btrfs_set_file_extent_offset(leaf, fi, key.offset - | ||
| 759 | orig_offset); | ||
| 760 | btrfs_set_file_extent_num_bytes(leaf, fi, | ||
| 761 | other_end - split); | ||
| 762 | goto done; | ||
| 763 | } | ||
| 764 | } | ||
| 765 | if (extent_end == end && split == end) { | ||
| 766 | other_start = 0; | ||
| 767 | other_end = start; | ||
| 768 | if (extent_mergeable(leaf, path->slots[0] - 1 , inode->i_ino, | ||
| 769 | bytenr, &other_start, &other_end)) { | ||
| 770 | path->slots[0]--; | 631 | path->slots[0]--; |
| 771 | fi = btrfs_item_ptr(leaf, path->slots[0], | 632 | extent_end = end; |
| 772 | struct btrfs_file_extent_item); | ||
| 773 | btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - | ||
| 774 | other_start); | ||
| 775 | goto done; | ||
| 776 | } | 633 | } |
| 777 | } | 634 | } |
| 778 | 635 | ||
| 779 | btrfs_mark_buffer_dirty(leaf); | ||
| 780 | |||
| 781 | ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, | ||
| 782 | root->root_key.objectid, | ||
| 783 | inode->i_ino, orig_offset); | ||
| 784 | BUG_ON(ret); | ||
| 785 | btrfs_release_path(root, path); | ||
| 786 | |||
| 787 | key.offset = start; | ||
| 788 | ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi)); | ||
| 789 | BUG_ON(ret); | ||
| 790 | |||
| 791 | leaf = path->nodes[0]; | ||
| 792 | fi = btrfs_item_ptr(leaf, path->slots[0], | 636 | fi = btrfs_item_ptr(leaf, path->slots[0], |
| 793 | struct btrfs_file_extent_item); | 637 | struct btrfs_file_extent_item); |
| 794 | btrfs_set_file_extent_generation(leaf, fi, trans->transid); | ||
| 795 | btrfs_set_file_extent_type(leaf, fi, extent_type); | ||
| 796 | btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr); | ||
| 797 | btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes); | ||
| 798 | btrfs_set_file_extent_offset(leaf, fi, key.offset - orig_offset); | ||
| 799 | btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset); | ||
| 800 | btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes); | ||
| 801 | btrfs_set_file_extent_compression(leaf, fi, 0); | ||
| 802 | btrfs_set_file_extent_encryption(leaf, fi, 0); | ||
| 803 | btrfs_set_file_extent_other_encoding(leaf, fi, 0); | ||
| 804 | done: | ||
| 805 | btrfs_mark_buffer_dirty(leaf); | ||
| 806 | 638 | ||
| 807 | release: | 639 | other_start = end; |
| 808 | btrfs_release_path(root, path); | 640 | other_end = 0; |
| 809 | if (split_end && split == start) { | 641 | if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino, |
| 810 | split = end; | 642 | bytenr, &other_start, &other_end)) { |
| 811 | goto again; | 643 | extent_end = other_end; |
| 644 | del_slot = path->slots[0] + 1; | ||
| 645 | del_nr++; | ||
| 646 | ret = btrfs_free_extent(trans, root, bytenr, num_bytes, | ||
| 647 | 0, root->root_key.objectid, | ||
| 648 | inode->i_ino, orig_offset); | ||
| 649 | BUG_ON(ret); | ||
| 812 | } | 650 | } |
| 813 | if (locked_end > end) { | 651 | other_start = 0; |
| 814 | unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1, | 652 | other_end = start; |
| 815 | GFP_NOFS); | 653 | if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino, |
| 654 | bytenr, &other_start, &other_end)) { | ||
| 655 | key.offset = other_start; | ||
| 656 | del_slot = path->slots[0]; | ||
| 657 | del_nr++; | ||
| 658 | ret = btrfs_free_extent(trans, root, bytenr, num_bytes, | ||
| 659 | 0, root->root_key.objectid, | ||
| 660 | inode->i_ino, orig_offset); | ||
| 661 | BUG_ON(ret); | ||
| 816 | } | 662 | } |
| 663 | if (del_nr == 0) { | ||
| 664 | btrfs_set_file_extent_type(leaf, fi, | ||
| 665 | BTRFS_FILE_EXTENT_REG); | ||
| 666 | btrfs_mark_buffer_dirty(leaf); | ||
| 667 | goto out; | ||
| 668 | } | ||
| 669 | |||
| 670 | fi = btrfs_item_ptr(leaf, del_slot - 1, | ||
| 671 | struct btrfs_file_extent_item); | ||
| 672 | btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG); | ||
| 673 | btrfs_set_file_extent_num_bytes(leaf, fi, | ||
| 674 | extent_end - key.offset); | ||
| 675 | btrfs_mark_buffer_dirty(leaf); | ||
| 676 | |||
| 677 | ret = btrfs_del_items(trans, root, path, del_slot, del_nr); | ||
| 678 | BUG_ON(ret); | ||
| 679 | out: | ||
| 817 | btrfs_free_path(path); | 680 | btrfs_free_path(path); |
| 818 | return 0; | 681 | return 0; |
| 819 | } | 682 | } |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b3ad168a0bfc..5440bab23635 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -88,13 +88,14 @@ static noinline int cow_file_range(struct inode *inode, | |||
| 88 | u64 start, u64 end, int *page_started, | 88 | u64 start, u64 end, int *page_started, |
| 89 | unsigned long *nr_written, int unlock); | 89 | unsigned long *nr_written, int unlock); |
| 90 | 90 | ||
| 91 | static int btrfs_init_inode_security(struct inode *inode, struct inode *dir) | 91 | static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, |
| 92 | struct inode *inode, struct inode *dir) | ||
| 92 | { | 93 | { |
| 93 | int err; | 94 | int err; |
| 94 | 95 | ||
| 95 | err = btrfs_init_acl(inode, dir); | 96 | err = btrfs_init_acl(trans, inode, dir); |
| 96 | if (!err) | 97 | if (!err) |
| 97 | err = btrfs_xattr_security_init(inode, dir); | 98 | err = btrfs_xattr_security_init(trans, inode, dir); |
| 98 | return err; | 99 | return err; |
| 99 | } | 100 | } |
| 100 | 101 | ||
| @@ -188,8 +189,18 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, | |||
| 188 | btrfs_mark_buffer_dirty(leaf); | 189 | btrfs_mark_buffer_dirty(leaf); |
| 189 | btrfs_free_path(path); | 190 | btrfs_free_path(path); |
| 190 | 191 | ||
| 192 | /* | ||
| 193 | * we're an inline extent, so nobody can | ||
| 194 | * extend the file past i_size without locking | ||
| 195 | * a page we already have locked. | ||
| 196 | * | ||
| 197 | * We must do any isize and inode updates | ||
| 198 | * before we unlock the pages. Otherwise we | ||
| 199 | * could end up racing with unlink. | ||
| 200 | */ | ||
| 191 | BTRFS_I(inode)->disk_i_size = inode->i_size; | 201 | BTRFS_I(inode)->disk_i_size = inode->i_size; |
| 192 | btrfs_update_inode(trans, root, inode); | 202 | btrfs_update_inode(trans, root, inode); |
| 203 | |||
| 193 | return 0; | 204 | return 0; |
| 194 | fail: | 205 | fail: |
| 195 | btrfs_free_path(path); | 206 | btrfs_free_path(path); |
| @@ -230,8 +241,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, | |||
| 230 | return 1; | 241 | return 1; |
| 231 | } | 242 | } |
| 232 | 243 | ||
| 233 | ret = btrfs_drop_extents(trans, root, inode, start, | 244 | ret = btrfs_drop_extents(trans, inode, start, aligned_end, |
| 234 | aligned_end, aligned_end, start, | ||
| 235 | &hint_byte, 1); | 245 | &hint_byte, 1); |
| 236 | BUG_ON(ret); | 246 | BUG_ON(ret); |
| 237 | 247 | ||
| @@ -416,7 +426,6 @@ again: | |||
| 416 | start, end, | 426 | start, end, |
| 417 | total_compressed, pages); | 427 | total_compressed, pages); |
| 418 | } | 428 | } |
| 419 | btrfs_end_transaction(trans, root); | ||
| 420 | if (ret == 0) { | 429 | if (ret == 0) { |
| 421 | /* | 430 | /* |
| 422 | * inline extent creation worked, we don't need | 431 | * inline extent creation worked, we don't need |
| @@ -430,9 +439,11 @@ again: | |||
| 430 | EXTENT_CLEAR_DELALLOC | | 439 | EXTENT_CLEAR_DELALLOC | |
| 431 | EXTENT_CLEAR_ACCOUNTING | | 440 | EXTENT_CLEAR_ACCOUNTING | |
| 432 | EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK); | 441 | EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK); |
| 433 | ret = 0; | 442 | |
| 443 | btrfs_end_transaction(trans, root); | ||
| 434 | goto free_pages_out; | 444 | goto free_pages_out; |
| 435 | } | 445 | } |
| 446 | btrfs_end_transaction(trans, root); | ||
| 436 | } | 447 | } |
| 437 | 448 | ||
| 438 | if (will_compress) { | 449 | if (will_compress) { |
| @@ -543,7 +554,6 @@ static noinline int submit_compressed_extents(struct inode *inode, | |||
| 543 | if (list_empty(&async_cow->extents)) | 554 | if (list_empty(&async_cow->extents)) |
| 544 | return 0; | 555 | return 0; |
| 545 | 556 | ||
| 546 | trans = btrfs_join_transaction(root, 1); | ||
| 547 | 557 | ||
| 548 | while (!list_empty(&async_cow->extents)) { | 558 | while (!list_empty(&async_cow->extents)) { |
| 549 | async_extent = list_entry(async_cow->extents.next, | 559 | async_extent = list_entry(async_cow->extents.next, |
| @@ -590,19 +600,15 @@ retry: | |||
| 590 | lock_extent(io_tree, async_extent->start, | 600 | lock_extent(io_tree, async_extent->start, |
| 591 | async_extent->start + async_extent->ram_size - 1, | 601 | async_extent->start + async_extent->ram_size - 1, |
| 592 | GFP_NOFS); | 602 | GFP_NOFS); |
| 593 | /* | ||
| 594 | * here we're doing allocation and writeback of the | ||
| 595 | * compressed pages | ||
| 596 | */ | ||
| 597 | btrfs_drop_extent_cache(inode, async_extent->start, | ||
| 598 | async_extent->start + | ||
| 599 | async_extent->ram_size - 1, 0); | ||
| 600 | 603 | ||
| 604 | trans = btrfs_join_transaction(root, 1); | ||
| 601 | ret = btrfs_reserve_extent(trans, root, | 605 | ret = btrfs_reserve_extent(trans, root, |
| 602 | async_extent->compressed_size, | 606 | async_extent->compressed_size, |
| 603 | async_extent->compressed_size, | 607 | async_extent->compressed_size, |
| 604 | 0, alloc_hint, | 608 | 0, alloc_hint, |
| 605 | (u64)-1, &ins, 1); | 609 | (u64)-1, &ins, 1); |
| 610 | btrfs_end_transaction(trans, root); | ||
| 611 | |||
| 606 | if (ret) { | 612 | if (ret) { |
| 607 | int i; | 613 | int i; |
| 608 | for (i = 0; i < async_extent->nr_pages; i++) { | 614 | for (i = 0; i < async_extent->nr_pages; i++) { |
| @@ -618,6 +624,14 @@ retry: | |||
| 618 | goto retry; | 624 | goto retry; |
| 619 | } | 625 | } |
| 620 | 626 | ||
| 627 | /* | ||
| 628 | * here we're doing allocation and writeback of the | ||
| 629 | * compressed pages | ||
| 630 | */ | ||
| 631 | btrfs_drop_extent_cache(inode, async_extent->start, | ||
| 632 | async_extent->start + | ||
| 633 | async_extent->ram_size - 1, 0); | ||
| 634 | |||
| 621 | em = alloc_extent_map(GFP_NOFS); | 635 | em = alloc_extent_map(GFP_NOFS); |
| 622 | em->start = async_extent->start; | 636 | em->start = async_extent->start; |
| 623 | em->len = async_extent->ram_size; | 637 | em->len = async_extent->ram_size; |
| @@ -649,8 +663,6 @@ retry: | |||
| 649 | BTRFS_ORDERED_COMPRESSED); | 663 | BTRFS_ORDERED_COMPRESSED); |
| 650 | BUG_ON(ret); | 664 | BUG_ON(ret); |
| 651 | 665 | ||
| 652 | btrfs_end_transaction(trans, root); | ||
| 653 | |||
| 654 | /* | 666 | /* |
| 655 | * clear dirty, set writeback and unlock the pages. | 667 | * clear dirty, set writeback and unlock the pages. |
| 656 | */ | 668 | */ |
| @@ -672,13 +684,11 @@ retry: | |||
| 672 | async_extent->nr_pages); | 684 | async_extent->nr_pages); |
| 673 | 685 | ||
| 674 | BUG_ON(ret); | 686 | BUG_ON(ret); |
| 675 | trans = btrfs_join_transaction(root, 1); | ||
| 676 | alloc_hint = ins.objectid + ins.offset; | 687 | alloc_hint = ins.objectid + ins.offset; |
| 677 | kfree(async_extent); | 688 | kfree(async_extent); |
| 678 | cond_resched(); | 689 | cond_resched(); |
| 679 | } | 690 | } |
| 680 | 691 | ||
| 681 | btrfs_end_transaction(trans, root); | ||
| 682 | return 0; | 692 | return 0; |
| 683 | } | 693 | } |
| 684 | 694 | ||
| @@ -742,6 +752,7 @@ static noinline int cow_file_range(struct inode *inode, | |||
| 742 | EXTENT_CLEAR_DIRTY | | 752 | EXTENT_CLEAR_DIRTY | |
| 743 | EXTENT_SET_WRITEBACK | | 753 | EXTENT_SET_WRITEBACK | |
| 744 | EXTENT_END_WRITEBACK); | 754 | EXTENT_END_WRITEBACK); |
| 755 | |||
| 745 | *nr_written = *nr_written + | 756 | *nr_written = *nr_written + |
| 746 | (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; | 757 | (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; |
| 747 | *page_started = 1; | 758 | *page_started = 1; |
| @@ -1596,7 +1607,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, | |||
| 1596 | struct inode *inode, u64 file_pos, | 1607 | struct inode *inode, u64 file_pos, |
| 1597 | u64 disk_bytenr, u64 disk_num_bytes, | 1608 | u64 disk_bytenr, u64 disk_num_bytes, |
| 1598 | u64 num_bytes, u64 ram_bytes, | 1609 | u64 num_bytes, u64 ram_bytes, |
| 1599 | u64 locked_end, | ||
| 1600 | u8 compression, u8 encryption, | 1610 | u8 compression, u8 encryption, |
| 1601 | u16 other_encoding, int extent_type) | 1611 | u16 other_encoding, int extent_type) |
| 1602 | { | 1612 | { |
| @@ -1622,9 +1632,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, | |||
| 1622 | * the caller is expected to unpin it and allow it to be merged | 1632 | * the caller is expected to unpin it and allow it to be merged |
| 1623 | * with the others. | 1633 | * with the others. |
| 1624 | */ | 1634 | */ |
| 1625 | ret = btrfs_drop_extents(trans, root, inode, file_pos, | 1635 | ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes, |
| 1626 | file_pos + num_bytes, locked_end, | 1636 | &hint, 0); |
| 1627 | file_pos, &hint, 0); | ||
| 1628 | BUG_ON(ret); | 1637 | BUG_ON(ret); |
| 1629 | 1638 | ||
| 1630 | ins.objectid = inode->i_ino; | 1639 | ins.objectid = inode->i_ino; |
| @@ -1730,23 +1739,32 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
| 1730 | } | 1739 | } |
| 1731 | } | 1740 | } |
| 1732 | 1741 | ||
| 1733 | trans = btrfs_join_transaction(root, 1); | ||
| 1734 | |||
| 1735 | if (!ordered_extent) | 1742 | if (!ordered_extent) |
| 1736 | ordered_extent = btrfs_lookup_ordered_extent(inode, start); | 1743 | ordered_extent = btrfs_lookup_ordered_extent(inode, start); |
| 1737 | BUG_ON(!ordered_extent); | 1744 | BUG_ON(!ordered_extent); |
| 1738 | if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) | 1745 | if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { |
| 1739 | goto nocow; | 1746 | BUG_ON(!list_empty(&ordered_extent->list)); |
| 1747 | ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); | ||
| 1748 | if (!ret) { | ||
| 1749 | trans = btrfs_join_transaction(root, 1); | ||
| 1750 | ret = btrfs_update_inode(trans, root, inode); | ||
| 1751 | BUG_ON(ret); | ||
| 1752 | btrfs_end_transaction(trans, root); | ||
| 1753 | } | ||
| 1754 | goto out; | ||
| 1755 | } | ||
| 1740 | 1756 | ||
| 1741 | lock_extent(io_tree, ordered_extent->file_offset, | 1757 | lock_extent(io_tree, ordered_extent->file_offset, |
| 1742 | ordered_extent->file_offset + ordered_extent->len - 1, | 1758 | ordered_extent->file_offset + ordered_extent->len - 1, |
| 1743 | GFP_NOFS); | 1759 | GFP_NOFS); |
| 1744 | 1760 | ||
| 1761 | trans = btrfs_join_transaction(root, 1); | ||
| 1762 | |||
| 1745 | if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) | 1763 | if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) |
| 1746 | compressed = 1; | 1764 | compressed = 1; |
| 1747 | if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { | 1765 | if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { |
| 1748 | BUG_ON(compressed); | 1766 | BUG_ON(compressed); |
| 1749 | ret = btrfs_mark_extent_written(trans, root, inode, | 1767 | ret = btrfs_mark_extent_written(trans, inode, |
| 1750 | ordered_extent->file_offset, | 1768 | ordered_extent->file_offset, |
| 1751 | ordered_extent->file_offset + | 1769 | ordered_extent->file_offset + |
| 1752 | ordered_extent->len); | 1770 | ordered_extent->len); |
| @@ -1758,8 +1776,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
| 1758 | ordered_extent->disk_len, | 1776 | ordered_extent->disk_len, |
| 1759 | ordered_extent->len, | 1777 | ordered_extent->len, |
| 1760 | ordered_extent->len, | 1778 | ordered_extent->len, |
| 1761 | ordered_extent->file_offset + | ||
| 1762 | ordered_extent->len, | ||
| 1763 | compressed, 0, 0, | 1779 | compressed, 0, 0, |
| 1764 | BTRFS_FILE_EXTENT_REG); | 1780 | BTRFS_FILE_EXTENT_REG); |
| 1765 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, | 1781 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, |
| @@ -1770,22 +1786,20 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
| 1770 | unlock_extent(io_tree, ordered_extent->file_offset, | 1786 | unlock_extent(io_tree, ordered_extent->file_offset, |
| 1771 | ordered_extent->file_offset + ordered_extent->len - 1, | 1787 | ordered_extent->file_offset + ordered_extent->len - 1, |
| 1772 | GFP_NOFS); | 1788 | GFP_NOFS); |
| 1773 | nocow: | ||
| 1774 | add_pending_csums(trans, inode, ordered_extent->file_offset, | 1789 | add_pending_csums(trans, inode, ordered_extent->file_offset, |
| 1775 | &ordered_extent->list); | 1790 | &ordered_extent->list); |
| 1776 | 1791 | ||
| 1777 | mutex_lock(&BTRFS_I(inode)->extent_mutex); | 1792 | /* this also removes the ordered extent from the tree */ |
| 1778 | btrfs_ordered_update_i_size(inode, ordered_extent); | 1793 | btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
| 1779 | btrfs_update_inode(trans, root, inode); | 1794 | ret = btrfs_update_inode(trans, root, inode); |
| 1780 | btrfs_remove_ordered_extent(inode, ordered_extent); | 1795 | BUG_ON(ret); |
| 1781 | mutex_unlock(&BTRFS_I(inode)->extent_mutex); | 1796 | btrfs_end_transaction(trans, root); |
| 1782 | 1797 | out: | |
| 1783 | /* once for us */ | 1798 | /* once for us */ |
| 1784 | btrfs_put_ordered_extent(ordered_extent); | 1799 | btrfs_put_ordered_extent(ordered_extent); |
| 1785 | /* once for the tree */ | 1800 | /* once for the tree */ |
| 1786 | btrfs_put_ordered_extent(ordered_extent); | 1801 | btrfs_put_ordered_extent(ordered_extent); |
| 1787 | 1802 | ||
| 1788 | btrfs_end_transaction(trans, root); | ||
| 1789 | return 0; | 1803 | return 0; |
| 1790 | } | 1804 | } |
| 1791 | 1805 | ||
| @@ -2008,6 +2022,54 @@ zeroit: | |||
| 2008 | return -EIO; | 2022 | return -EIO; |
| 2009 | } | 2023 | } |
| 2010 | 2024 | ||
| 2025 | struct delayed_iput { | ||
| 2026 | struct list_head list; | ||
| 2027 | struct inode *inode; | ||
| 2028 | }; | ||
| 2029 | |||
| 2030 | void btrfs_add_delayed_iput(struct inode *inode) | ||
| 2031 | { | ||
| 2032 | struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; | ||
| 2033 | struct delayed_iput *delayed; | ||
| 2034 | |||
| 2035 | if (atomic_add_unless(&inode->i_count, -1, 1)) | ||
| 2036 | return; | ||
| 2037 | |||
| 2038 | delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL); | ||
| 2039 | delayed->inode = inode; | ||
| 2040 | |||
| 2041 | spin_lock(&fs_info->delayed_iput_lock); | ||
| 2042 | list_add_tail(&delayed->list, &fs_info->delayed_iputs); | ||
| 2043 | spin_unlock(&fs_info->delayed_iput_lock); | ||
| 2044 | } | ||
| 2045 | |||
| 2046 | void btrfs_run_delayed_iputs(struct btrfs_root *root) | ||
| 2047 | { | ||
| 2048 | LIST_HEAD(list); | ||
| 2049 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
| 2050 | struct delayed_iput *delayed; | ||
| 2051 | int empty; | ||
| 2052 | |||
| 2053 | spin_lock(&fs_info->delayed_iput_lock); | ||
| 2054 | empty = list_empty(&fs_info->delayed_iputs); | ||
| 2055 | spin_unlock(&fs_info->delayed_iput_lock); | ||
| 2056 | if (empty) | ||
| 2057 | return; | ||
| 2058 | |||
| 2059 | down_read(&root->fs_info->cleanup_work_sem); | ||
| 2060 | spin_lock(&fs_info->delayed_iput_lock); | ||
| 2061 | list_splice_init(&fs_info->delayed_iputs, &list); | ||
| 2062 | spin_unlock(&fs_info->delayed_iput_lock); | ||
| 2063 | |||
| 2064 | while (!list_empty(&list)) { | ||
| 2065 | delayed = list_entry(list.next, struct delayed_iput, list); | ||
| 2066 | list_del(&delayed->list); | ||
| 2067 | iput(delayed->inode); | ||
| 2068 | kfree(delayed); | ||
| 2069 | } | ||
| 2070 | up_read(&root->fs_info->cleanup_work_sem); | ||
| 2071 | } | ||
| 2072 | |||
| 2011 | /* | 2073 | /* |
| 2012 | * This creates an orphan entry for the given inode in case something goes | 2074 | * This creates an orphan entry for the given inode in case something goes |
| 2013 | * wrong in the middle of an unlink/truncate. | 2075 | * wrong in the middle of an unlink/truncate. |
| @@ -2080,16 +2142,17 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) | |||
| 2080 | struct inode *inode; | 2142 | struct inode *inode; |
| 2081 | int ret = 0, nr_unlink = 0, nr_truncate = 0; | 2143 | int ret = 0, nr_unlink = 0, nr_truncate = 0; |
| 2082 | 2144 | ||
| 2083 | path = btrfs_alloc_path(); | 2145 | if (!xchg(&root->clean_orphans, 0)) |
| 2084 | if (!path) | ||
| 2085 | return; | 2146 | return; |
| 2147 | |||
| 2148 | path = btrfs_alloc_path(); | ||
| 2149 | BUG_ON(!path); | ||
| 2086 | path->reada = -1; | 2150 | path->reada = -1; |
| 2087 | 2151 | ||
| 2088 | key.objectid = BTRFS_ORPHAN_OBJECTID; | 2152 | key.objectid = BTRFS_ORPHAN_OBJECTID; |
| 2089 | btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); | 2153 | btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); |
| 2090 | key.offset = (u64)-1; | 2154 | key.offset = (u64)-1; |
| 2091 | 2155 | ||
| 2092 | |||
| 2093 | while (1) { | 2156 | while (1) { |
| 2094 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 2157 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| 2095 | if (ret < 0) { | 2158 | if (ret < 0) { |
| @@ -2834,37 +2897,40 @@ out: | |||
| 2834 | * min_type is the minimum key type to truncate down to. If set to 0, this | 2897 | * min_type is the minimum key type to truncate down to. If set to 0, this |
| 2835 | * will kill all the items on this inode, including the INODE_ITEM_KEY. | 2898 | * will kill all the items on this inode, including the INODE_ITEM_KEY. |
| 2836 | */ | 2899 | */ |
| 2837 | noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, | 2900 | int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, |
| 2838 | struct btrfs_root *root, | 2901 | struct btrfs_root *root, |
| 2839 | struct inode *inode, | 2902 | struct inode *inode, |
| 2840 | u64 new_size, u32 min_type) | 2903 | u64 new_size, u32 min_type) |
| 2841 | { | 2904 | { |
| 2842 | int ret; | ||
| 2843 | struct btrfs_path *path; | 2905 | struct btrfs_path *path; |
| 2844 | struct btrfs_key key; | ||
| 2845 | struct btrfs_key found_key; | ||
| 2846 | u32 found_type = (u8)-1; | ||
| 2847 | struct extent_buffer *leaf; | 2906 | struct extent_buffer *leaf; |
| 2848 | struct btrfs_file_extent_item *fi; | 2907 | struct btrfs_file_extent_item *fi; |
| 2908 | struct btrfs_key key; | ||
| 2909 | struct btrfs_key found_key; | ||
| 2849 | u64 extent_start = 0; | 2910 | u64 extent_start = 0; |
| 2850 | u64 extent_num_bytes = 0; | 2911 | u64 extent_num_bytes = 0; |
| 2851 | u64 extent_offset = 0; | 2912 | u64 extent_offset = 0; |
| 2852 | u64 item_end = 0; | 2913 | u64 item_end = 0; |
| 2914 | u64 mask = root->sectorsize - 1; | ||
| 2915 | u32 found_type = (u8)-1; | ||
| 2853 | int found_extent; | 2916 | int found_extent; |
| 2854 | int del_item; | 2917 | int del_item; |
| 2855 | int pending_del_nr = 0; | 2918 | int pending_del_nr = 0; |
| 2856 | int pending_del_slot = 0; | 2919 | int pending_del_slot = 0; |
| 2857 | int extent_type = -1; | 2920 | int extent_type = -1; |
| 2858 | int encoding; | 2921 | int encoding; |
| 2859 | u64 mask = root->sectorsize - 1; | 2922 | int ret; |
| 2923 | int err = 0; | ||
| 2924 | |||
| 2925 | BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); | ||
| 2860 | 2926 | ||
| 2861 | if (root->ref_cows) | 2927 | if (root->ref_cows) |
| 2862 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); | 2928 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); |
| 2929 | |||
| 2863 | path = btrfs_alloc_path(); | 2930 | path = btrfs_alloc_path(); |
| 2864 | BUG_ON(!path); | 2931 | BUG_ON(!path); |
| 2865 | path->reada = -1; | 2932 | path->reada = -1; |
| 2866 | 2933 | ||
| 2867 | /* FIXME, add redo link to tree so we don't leak on crash */ | ||
| 2868 | key.objectid = inode->i_ino; | 2934 | key.objectid = inode->i_ino; |
| 2869 | key.offset = (u64)-1; | 2935 | key.offset = (u64)-1; |
| 2870 | key.type = (u8)-1; | 2936 | key.type = (u8)-1; |
| @@ -2872,17 +2938,17 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, | |||
| 2872 | search_again: | 2938 | search_again: |
| 2873 | path->leave_spinning = 1; | 2939 | path->leave_spinning = 1; |
| 2874 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | 2940 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
| 2875 | if (ret < 0) | 2941 | if (ret < 0) { |
| 2876 | goto error; | 2942 | err = ret; |
| 2943 | goto out; | ||
| 2944 | } | ||
| 2877 | 2945 | ||
| 2878 | if (ret > 0) { | 2946 | if (ret > 0) { |
| 2879 | /* there are no items in the tree for us to truncate, we're | 2947 | /* there are no items in the tree for us to truncate, we're |
| 2880 | * done | 2948 | * done |
| 2881 | */ | 2949 | */ |
| 2882 | if (path->slots[0] == 0) { | 2950 | if (path->slots[0] == 0) |
| 2883 | ret = 0; | 2951 | goto out; |
| 2884 | goto error; | ||
| 2885 | } | ||
| 2886 | path->slots[0]--; | 2952 | path->slots[0]--; |
| 2887 | } | 2953 | } |
| 2888 | 2954 | ||
| @@ -2917,28 +2983,17 @@ search_again: | |||
| 2917 | } | 2983 | } |
| 2918 | item_end--; | 2984 | item_end--; |
| 2919 | } | 2985 | } |
| 2920 | if (item_end < new_size) { | 2986 | if (found_type > min_type) { |
| 2921 | if (found_type == BTRFS_DIR_ITEM_KEY) | 2987 | del_item = 1; |
| 2922 | found_type = BTRFS_INODE_ITEM_KEY; | 2988 | } else { |
| 2923 | else if (found_type == BTRFS_EXTENT_ITEM_KEY) | 2989 | if (item_end < new_size) |
| 2924 | found_type = BTRFS_EXTENT_DATA_KEY; | ||
| 2925 | else if (found_type == BTRFS_EXTENT_DATA_KEY) | ||
| 2926 | found_type = BTRFS_XATTR_ITEM_KEY; | ||
| 2927 | else if (found_type == BTRFS_XATTR_ITEM_KEY) | ||
| 2928 | found_type = BTRFS_INODE_REF_KEY; | ||
| 2929 | else if (found_type) | ||
| 2930 | found_type--; | ||
| 2931 | else | ||
| 2932 | break; | 2990 | break; |
| 2933 | btrfs_set_key_type(&key, found_type); | 2991 | if (found_key.offset >= new_size) |
| 2934 | goto next; | 2992 | del_item = 1; |
| 2993 | else | ||
| 2994 | del_item = 0; | ||
| 2935 | } | 2995 | } |
| 2936 | if (found_key.offset >= new_size) | ||
| 2937 | del_item = 1; | ||
| 2938 | else | ||
| 2939 | del_item = 0; | ||
| 2940 | found_extent = 0; | 2996 | found_extent = 0; |
| 2941 | |||
| 2942 | /* FIXME, shrink the extent if the ref count is only 1 */ | 2997 | /* FIXME, shrink the extent if the ref count is only 1 */ |
| 2943 | if (found_type != BTRFS_EXTENT_DATA_KEY) | 2998 | if (found_type != BTRFS_EXTENT_DATA_KEY) |
| 2944 | goto delete; | 2999 | goto delete; |
| @@ -3025,42 +3080,36 @@ delete: | |||
| 3025 | inode->i_ino, extent_offset); | 3080 | inode->i_ino, extent_offset); |
| 3026 | BUG_ON(ret); | 3081 | BUG_ON(ret); |
| 3027 | } | 3082 | } |
| 3028 | next: | ||
| 3029 | if (path->slots[0] == 0) { | ||
| 3030 | if (pending_del_nr) | ||
| 3031 | goto del_pending; | ||
| 3032 | btrfs_release_path(root, path); | ||
| 3033 | if (found_type == BTRFS_INODE_ITEM_KEY) | ||
| 3034 | break; | ||
| 3035 | goto search_again; | ||
| 3036 | } | ||
| 3037 | 3083 | ||
| 3038 | path->slots[0]--; | 3084 | if (found_type == BTRFS_INODE_ITEM_KEY) |
| 3039 | if (pending_del_nr && | 3085 | break; |
| 3040 | path->slots[0] + 1 != pending_del_slot) { | 3086 | |
| 3041 | struct btrfs_key debug; | 3087 | if (path->slots[0] == 0 || |
| 3042 | del_pending: | 3088 | path->slots[0] != pending_del_slot) { |
| 3043 | btrfs_item_key_to_cpu(path->nodes[0], &debug, | 3089 | if (root->ref_cows) { |
| 3044 | pending_del_slot); | 3090 | err = -EAGAIN; |
| 3045 | ret = btrfs_del_items(trans, root, path, | 3091 | goto out; |
| 3046 | pending_del_slot, | 3092 | } |
| 3047 | pending_del_nr); | 3093 | if (pending_del_nr) { |
| 3048 | BUG_ON(ret); | 3094 | ret = btrfs_del_items(trans, root, path, |
| 3049 | pending_del_nr = 0; | 3095 | pending_del_slot, |
| 3096 | pending_del_nr); | ||
| 3097 | BUG_ON(ret); | ||
| 3098 | pending_del_nr = 0; | ||
| 3099 | } | ||
| 3050 | btrfs_release_path(root, path); | 3100 | btrfs_release_path(root, path); |
| 3051 | if (found_type == BTRFS_INODE_ITEM_KEY) | ||
| 3052 | break; | ||
| 3053 | goto search_again; | 3101 | goto search_again; |
| 3102 | } else { | ||
| 3103 | path->slots[0]--; | ||
| 3054 | } | 3104 | } |
| 3055 | } | 3105 | } |
| 3056 | ret = 0; | 3106 | out: |
| 3057 | error: | ||
| 3058 | if (pending_del_nr) { | 3107 | if (pending_del_nr) { |
| 3059 | ret = btrfs_del_items(trans, root, path, pending_del_slot, | 3108 | ret = btrfs_del_items(trans, root, path, pending_del_slot, |
| 3060 | pending_del_nr); | 3109 | pending_del_nr); |
| 3061 | } | 3110 | } |
| 3062 | btrfs_free_path(path); | 3111 | btrfs_free_path(path); |
| 3063 | return ret; | 3112 | return err; |
| 3064 | } | 3113 | } |
| 3065 | 3114 | ||
| 3066 | /* | 3115 | /* |
| @@ -3180,10 +3229,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) | |||
| 3180 | if (size <= hole_start) | 3229 | if (size <= hole_start) |
| 3181 | return 0; | 3230 | return 0; |
| 3182 | 3231 | ||
| 3183 | err = btrfs_truncate_page(inode->i_mapping, inode->i_size); | ||
| 3184 | if (err) | ||
| 3185 | return err; | ||
| 3186 | |||
| 3187 | while (1) { | 3232 | while (1) { |
| 3188 | struct btrfs_ordered_extent *ordered; | 3233 | struct btrfs_ordered_extent *ordered; |
| 3189 | btrfs_wait_ordered_range(inode, hole_start, | 3234 | btrfs_wait_ordered_range(inode, hole_start, |
| @@ -3196,9 +3241,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) | |||
| 3196 | btrfs_put_ordered_extent(ordered); | 3241 | btrfs_put_ordered_extent(ordered); |
| 3197 | } | 3242 | } |
| 3198 | 3243 | ||
| 3199 | trans = btrfs_start_transaction(root, 1); | ||
| 3200 | btrfs_set_trans_block_group(trans, inode); | ||
| 3201 | |||
| 3202 | cur_offset = hole_start; | 3244 | cur_offset = hole_start; |
| 3203 | while (1) { | 3245 | while (1) { |
| 3204 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, | 3246 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, |
| @@ -3206,40 +3248,120 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) | |||
| 3206 | BUG_ON(IS_ERR(em) || !em); | 3248 | BUG_ON(IS_ERR(em) || !em); |
| 3207 | last_byte = min(extent_map_end(em), block_end); | 3249 | last_byte = min(extent_map_end(em), block_end); |
| 3208 | last_byte = (last_byte + mask) & ~mask; | 3250 | last_byte = (last_byte + mask) & ~mask; |
| 3209 | if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { | 3251 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { |
| 3210 | u64 hint_byte = 0; | 3252 | u64 hint_byte = 0; |
| 3211 | hole_size = last_byte - cur_offset; | 3253 | hole_size = last_byte - cur_offset; |
| 3212 | err = btrfs_drop_extents(trans, root, inode, | ||
| 3213 | cur_offset, | ||
| 3214 | cur_offset + hole_size, | ||
| 3215 | block_end, | ||
| 3216 | cur_offset, &hint_byte, 1); | ||
| 3217 | if (err) | ||
| 3218 | break; | ||
| 3219 | 3254 | ||
| 3220 | err = btrfs_reserve_metadata_space(root, 1); | 3255 | err = btrfs_reserve_metadata_space(root, 2); |
| 3221 | if (err) | 3256 | if (err) |
| 3222 | break; | 3257 | break; |
| 3223 | 3258 | ||
| 3259 | trans = btrfs_start_transaction(root, 1); | ||
| 3260 | btrfs_set_trans_block_group(trans, inode); | ||
| 3261 | |||
| 3262 | err = btrfs_drop_extents(trans, inode, cur_offset, | ||
| 3263 | cur_offset + hole_size, | ||
| 3264 | &hint_byte, 1); | ||
| 3265 | BUG_ON(err); | ||
| 3266 | |||
| 3224 | err = btrfs_insert_file_extent(trans, root, | 3267 | err = btrfs_insert_file_extent(trans, root, |
| 3225 | inode->i_ino, cur_offset, 0, | 3268 | inode->i_ino, cur_offset, 0, |
| 3226 | 0, hole_size, 0, hole_size, | 3269 | 0, hole_size, 0, hole_size, |
| 3227 | 0, 0, 0); | 3270 | 0, 0, 0); |
| 3271 | BUG_ON(err); | ||
| 3272 | |||
| 3228 | btrfs_drop_extent_cache(inode, hole_start, | 3273 | btrfs_drop_extent_cache(inode, hole_start, |
| 3229 | last_byte - 1, 0); | 3274 | last_byte - 1, 0); |
| 3230 | btrfs_unreserve_metadata_space(root, 1); | 3275 | |
| 3276 | btrfs_end_transaction(trans, root); | ||
| 3277 | btrfs_unreserve_metadata_space(root, 2); | ||
| 3231 | } | 3278 | } |
| 3232 | free_extent_map(em); | 3279 | free_extent_map(em); |
| 3233 | cur_offset = last_byte; | 3280 | cur_offset = last_byte; |
| 3234 | if (err || cur_offset >= block_end) | 3281 | if (cur_offset >= block_end) |
| 3235 | break; | 3282 | break; |
| 3236 | } | 3283 | } |
| 3237 | 3284 | ||
| 3238 | btrfs_end_transaction(trans, root); | ||
| 3239 | unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); | 3285 | unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); |
| 3240 | return err; | 3286 | return err; |
| 3241 | } | 3287 | } |
| 3242 | 3288 | ||
| 3289 | static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) | ||
| 3290 | { | ||
| 3291 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
| 3292 | struct btrfs_trans_handle *trans; | ||
| 3293 | unsigned long nr; | ||
| 3294 | int ret; | ||
| 3295 | |||
| 3296 | if (attr->ia_size == inode->i_size) | ||
| 3297 | return 0; | ||
| 3298 | |||
| 3299 | if (attr->ia_size > inode->i_size) { | ||
| 3300 | unsigned long limit; | ||
| 3301 | limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; | ||
| 3302 | if (attr->ia_size > inode->i_sb->s_maxbytes) | ||
| 3303 | return -EFBIG; | ||
| 3304 | if (limit != RLIM_INFINITY && attr->ia_size > limit) { | ||
| 3305 | send_sig(SIGXFSZ, current, 0); | ||
| 3306 | return -EFBIG; | ||
| 3307 | } | ||
| 3308 | } | ||
| 3309 | |||
| 3310 | ret = btrfs_reserve_metadata_space(root, 1); | ||
| 3311 | if (ret) | ||
| 3312 | return ret; | ||
| 3313 | |||
| 3314 | trans = btrfs_start_transaction(root, 1); | ||
| 3315 | btrfs_set_trans_block_group(trans, inode); | ||
| 3316 | |||
| 3317 | ret = btrfs_orphan_add(trans, inode); | ||
| 3318 | BUG_ON(ret); | ||
| 3319 | |||
| 3320 | nr = trans->blocks_used; | ||
| 3321 | btrfs_end_transaction(trans, root); | ||
| 3322 | btrfs_unreserve_metadata_space(root, 1); | ||
| 3323 | btrfs_btree_balance_dirty(root, nr); | ||
| 3324 | |||
| 3325 | if (attr->ia_size > inode->i_size) { | ||
| 3326 | ret = btrfs_cont_expand(inode, attr->ia_size); | ||
| 3327 | if (ret) { | ||
| 3328 | btrfs_truncate(inode); | ||
| 3329 | return ret; | ||
| 3330 | } | ||
| 3331 | |||
| 3332 | i_size_write(inode, attr->ia_size); | ||
| 3333 | btrfs_ordered_update_i_size(inode, inode->i_size, NULL); | ||
| 3334 | |||
| 3335 | trans = btrfs_start_transaction(root, 1); | ||
| 3336 | btrfs_set_trans_block_group(trans, inode); | ||
| 3337 | |||
| 3338 | ret = btrfs_update_inode(trans, root, inode); | ||
| 3339 | BUG_ON(ret); | ||
| 3340 | if (inode->i_nlink > 0) { | ||
| 3341 | ret = btrfs_orphan_del(trans, inode); | ||
| 3342 | BUG_ON(ret); | ||
| 3343 | } | ||
| 3344 | nr = trans->blocks_used; | ||
| 3345 | btrfs_end_transaction(trans, root); | ||
| 3346 | btrfs_btree_balance_dirty(root, nr); | ||
| 3347 | return 0; | ||
| 3348 | } | ||
| 3349 | |||
| 3350 | /* | ||
| 3351 | * We're truncating a file that used to have good data down to | ||
| 3352 | * zero. Make sure it gets into the ordered flush list so that | ||
| 3353 | * any new writes get down to disk quickly. | ||
| 3354 | */ | ||
| 3355 | if (attr->ia_size == 0) | ||
| 3356 | BTRFS_I(inode)->ordered_data_close = 1; | ||
| 3357 | |||
| 3358 | /* we don't support swapfiles, so vmtruncate shouldn't fail */ | ||
| 3359 | ret = vmtruncate(inode, attr->ia_size); | ||
| 3360 | BUG_ON(ret); | ||
| 3361 | |||
| 3362 | return 0; | ||
| 3363 | } | ||
| 3364 | |||
| 3243 | static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) | 3365 | static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) |
| 3244 | { | 3366 | { |
| 3245 | struct inode *inode = dentry->d_inode; | 3367 | struct inode *inode = dentry->d_inode; |
| @@ -3250,23 +3372,14 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 3250 | return err; | 3372 | return err; |
| 3251 | 3373 | ||
| 3252 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { | 3374 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { |
| 3253 | if (attr->ia_size > inode->i_size) { | 3375 | err = btrfs_setattr_size(inode, attr); |
| 3254 | err = btrfs_cont_expand(inode, attr->ia_size); | 3376 | if (err) |
| 3255 | if (err) | 3377 | return err; |
| 3256 | return err; | ||
| 3257 | } else if (inode->i_size > 0 && | ||
| 3258 | attr->ia_size == 0) { | ||
| 3259 | |||
| 3260 | /* we're truncating a file that used to have good | ||
| 3261 | * data down to zero. Make sure it gets into | ||
| 3262 | * the ordered flush list so that any new writes | ||
| 3263 | * get down to disk quickly. | ||
| 3264 | */ | ||
| 3265 | BTRFS_I(inode)->ordered_data_close = 1; | ||
| 3266 | } | ||
| 3267 | } | 3378 | } |
| 3379 | attr->ia_valid &= ~ATTR_SIZE; | ||
| 3268 | 3380 | ||
| 3269 | err = inode_setattr(inode, attr); | 3381 | if (attr->ia_valid) |
| 3382 | err = inode_setattr(inode, attr); | ||
| 3270 | 3383 | ||
| 3271 | if (!err && ((attr->ia_valid & ATTR_MODE))) | 3384 | if (!err && ((attr->ia_valid & ATTR_MODE))) |
| 3272 | err = btrfs_acl_chmod(inode); | 3385 | err = btrfs_acl_chmod(inode); |
| @@ -3287,36 +3400,43 @@ void btrfs_delete_inode(struct inode *inode) | |||
| 3287 | } | 3400 | } |
| 3288 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | 3401 | btrfs_wait_ordered_range(inode, 0, (u64)-1); |
| 3289 | 3402 | ||
| 3403 | if (root->fs_info->log_root_recovering) { | ||
| 3404 | BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan)); | ||
| 3405 | goto no_delete; | ||
| 3406 | } | ||
| 3407 | |||
| 3290 | if (inode->i_nlink > 0) { | 3408 | if (inode->i_nlink > 0) { |
| 3291 | BUG_ON(btrfs_root_refs(&root->root_item) != 0); | 3409 | BUG_ON(btrfs_root_refs(&root->root_item) != 0); |
| 3292 | goto no_delete; | 3410 | goto no_delete; |
| 3293 | } | 3411 | } |
| 3294 | 3412 | ||
| 3295 | btrfs_i_size_write(inode, 0); | 3413 | btrfs_i_size_write(inode, 0); |
| 3296 | trans = btrfs_join_transaction(root, 1); | ||
| 3297 | 3414 | ||
| 3298 | btrfs_set_trans_block_group(trans, inode); | 3415 | while (1) { |
| 3299 | ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0); | 3416 | trans = btrfs_start_transaction(root, 1); |
| 3300 | if (ret) { | 3417 | btrfs_set_trans_block_group(trans, inode); |
| 3301 | btrfs_orphan_del(NULL, inode); | 3418 | ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); |
| 3302 | goto no_delete_lock; | ||
| 3303 | } | ||
| 3304 | 3419 | ||
| 3305 | btrfs_orphan_del(trans, inode); | 3420 | if (ret != -EAGAIN) |
| 3421 | break; | ||
| 3306 | 3422 | ||
| 3307 | nr = trans->blocks_used; | 3423 | nr = trans->blocks_used; |
| 3308 | clear_inode(inode); | 3424 | btrfs_end_transaction(trans, root); |
| 3425 | trans = NULL; | ||
| 3426 | btrfs_btree_balance_dirty(root, nr); | ||
| 3427 | } | ||
| 3309 | 3428 | ||
| 3310 | btrfs_end_transaction(trans, root); | 3429 | if (ret == 0) { |
| 3311 | btrfs_btree_balance_dirty(root, nr); | 3430 | ret = btrfs_orphan_del(trans, inode); |
| 3312 | return; | 3431 | BUG_ON(ret); |
| 3432 | } | ||
| 3313 | 3433 | ||
| 3314 | no_delete_lock: | ||
| 3315 | nr = trans->blocks_used; | 3434 | nr = trans->blocks_used; |
| 3316 | btrfs_end_transaction(trans, root); | 3435 | btrfs_end_transaction(trans, root); |
| 3317 | btrfs_btree_balance_dirty(root, nr); | 3436 | btrfs_btree_balance_dirty(root, nr); |
| 3318 | no_delete: | 3437 | no_delete: |
| 3319 | clear_inode(inode); | 3438 | clear_inode(inode); |
| 3439 | return; | ||
| 3320 | } | 3440 | } |
| 3321 | 3441 | ||
| 3322 | /* | 3442 | /* |
| @@ -3569,7 +3689,6 @@ static noinline void init_btrfs_i(struct inode *inode) | |||
| 3569 | INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations); | 3689 | INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations); |
| 3570 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); | 3690 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); |
| 3571 | btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree); | 3691 | btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree); |
| 3572 | mutex_init(&BTRFS_I(inode)->extent_mutex); | ||
| 3573 | mutex_init(&BTRFS_I(inode)->log_mutex); | 3692 | mutex_init(&BTRFS_I(inode)->log_mutex); |
| 3574 | } | 3693 | } |
| 3575 | 3694 | ||
| @@ -3695,6 +3814,13 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) | |||
| 3695 | } | 3814 | } |
| 3696 | srcu_read_unlock(&root->fs_info->subvol_srcu, index); | 3815 | srcu_read_unlock(&root->fs_info->subvol_srcu, index); |
| 3697 | 3816 | ||
| 3817 | if (root != sub_root) { | ||
| 3818 | down_read(&root->fs_info->cleanup_work_sem); | ||
| 3819 | if (!(inode->i_sb->s_flags & MS_RDONLY)) | ||
| 3820 | btrfs_orphan_cleanup(sub_root); | ||
| 3821 | up_read(&root->fs_info->cleanup_work_sem); | ||
| 3822 | } | ||
| 3823 | |||
| 3698 | return inode; | 3824 | return inode; |
| 3699 | } | 3825 | } |
| 3700 | 3826 | ||
| @@ -4219,7 +4345,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, | |||
| 4219 | if (IS_ERR(inode)) | 4345 | if (IS_ERR(inode)) |
| 4220 | goto out_unlock; | 4346 | goto out_unlock; |
| 4221 | 4347 | ||
| 4222 | err = btrfs_init_inode_security(inode, dir); | 4348 | err = btrfs_init_inode_security(trans, inode, dir); |
| 4223 | if (err) { | 4349 | if (err) { |
| 4224 | drop_inode = 1; | 4350 | drop_inode = 1; |
| 4225 | goto out_unlock; | 4351 | goto out_unlock; |
| @@ -4290,7 +4416,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, | |||
| 4290 | if (IS_ERR(inode)) | 4416 | if (IS_ERR(inode)) |
| 4291 | goto out_unlock; | 4417 | goto out_unlock; |
| 4292 | 4418 | ||
| 4293 | err = btrfs_init_inode_security(inode, dir); | 4419 | err = btrfs_init_inode_security(trans, inode, dir); |
| 4294 | if (err) { | 4420 | if (err) { |
| 4295 | drop_inode = 1; | 4421 | drop_inode = 1; |
| 4296 | goto out_unlock; | 4422 | goto out_unlock; |
| @@ -4336,6 +4462,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |||
| 4336 | if (inode->i_nlink == 0) | 4462 | if (inode->i_nlink == 0) |
| 4337 | return -ENOENT; | 4463 | return -ENOENT; |
| 4338 | 4464 | ||
| 4465 | /* do not allow sys_link's with other subvols of the same device */ | ||
| 4466 | if (root->objectid != BTRFS_I(inode)->root->objectid) | ||
| 4467 | return -EPERM; | ||
| 4468 | |||
| 4339 | /* | 4469 | /* |
| 4340 | * 1 item for inode ref | 4470 | * 1 item for inode ref |
| 4341 | * 2 items for dir items | 4471 | * 2 items for dir items |
| @@ -4423,7 +4553,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 4423 | 4553 | ||
| 4424 | drop_on_err = 1; | 4554 | drop_on_err = 1; |
| 4425 | 4555 | ||
| 4426 | err = btrfs_init_inode_security(inode, dir); | 4556 | err = btrfs_init_inode_security(trans, inode, dir); |
| 4427 | if (err) | 4557 | if (err) |
| 4428 | goto out_fail; | 4558 | goto out_fail; |
| 4429 | 4559 | ||
| @@ -5074,17 +5204,20 @@ static void btrfs_truncate(struct inode *inode) | |||
| 5074 | unsigned long nr; | 5204 | unsigned long nr; |
| 5075 | u64 mask = root->sectorsize - 1; | 5205 | u64 mask = root->sectorsize - 1; |
| 5076 | 5206 | ||
| 5077 | if (!S_ISREG(inode->i_mode)) | 5207 | if (!S_ISREG(inode->i_mode)) { |
| 5078 | return; | 5208 | WARN_ON(1); |
| 5079 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) | ||
| 5080 | return; | 5209 | return; |
| 5210 | } | ||
| 5081 | 5211 | ||
| 5082 | ret = btrfs_truncate_page(inode->i_mapping, inode->i_size); | 5212 | ret = btrfs_truncate_page(inode->i_mapping, inode->i_size); |
| 5083 | if (ret) | 5213 | if (ret) |
| 5084 | return; | 5214 | return; |
| 5215 | |||
| 5085 | btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); | 5216 | btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); |
| 5217 | btrfs_ordered_update_i_size(inode, inode->i_size, NULL); | ||
| 5086 | 5218 | ||
| 5087 | trans = btrfs_start_transaction(root, 1); | 5219 | trans = btrfs_start_transaction(root, 1); |
| 5220 | btrfs_set_trans_block_group(trans, inode); | ||
| 5088 | 5221 | ||
| 5089 | /* | 5222 | /* |
| 5090 | * setattr is responsible for setting the ordered_data_close flag, | 5223 | * setattr is responsible for setting the ordered_data_close flag, |
| @@ -5106,21 +5239,32 @@ static void btrfs_truncate(struct inode *inode) | |||
| 5106 | if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close) | 5239 | if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close) |
| 5107 | btrfs_add_ordered_operation(trans, root, inode); | 5240 | btrfs_add_ordered_operation(trans, root, inode); |
| 5108 | 5241 | ||
| 5109 | btrfs_set_trans_block_group(trans, inode); | 5242 | while (1) { |
| 5110 | btrfs_i_size_write(inode, inode->i_size); | 5243 | ret = btrfs_truncate_inode_items(trans, root, inode, |
| 5244 | inode->i_size, | ||
| 5245 | BTRFS_EXTENT_DATA_KEY); | ||
| 5246 | if (ret != -EAGAIN) | ||
| 5247 | break; | ||
| 5111 | 5248 | ||
| 5112 | ret = btrfs_orphan_add(trans, inode); | 5249 | ret = btrfs_update_inode(trans, root, inode); |
| 5113 | if (ret) | 5250 | BUG_ON(ret); |
| 5114 | goto out; | 5251 | |
| 5115 | /* FIXME, add redo link to tree so we don't leak on crash */ | 5252 | nr = trans->blocks_used; |
| 5116 | ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, | 5253 | btrfs_end_transaction(trans, root); |
| 5117 | BTRFS_EXTENT_DATA_KEY); | 5254 | btrfs_btree_balance_dirty(root, nr); |
| 5118 | btrfs_update_inode(trans, root, inode); | 5255 | |
| 5256 | trans = btrfs_start_transaction(root, 1); | ||
| 5257 | btrfs_set_trans_block_group(trans, inode); | ||
| 5258 | } | ||
| 5119 | 5259 | ||
| 5120 | ret = btrfs_orphan_del(trans, inode); | 5260 | if (ret == 0 && inode->i_nlink > 0) { |
| 5261 | ret = btrfs_orphan_del(trans, inode); | ||
| 5262 | BUG_ON(ret); | ||
| 5263 | } | ||
| 5264 | |||
| 5265 | ret = btrfs_update_inode(trans, root, inode); | ||
| 5121 | BUG_ON(ret); | 5266 | BUG_ON(ret); |
| 5122 | 5267 | ||
| 5123 | out: | ||
| 5124 | nr = trans->blocks_used; | 5268 | nr = trans->blocks_used; |
| 5125 | ret = btrfs_end_transaction_throttle(trans, root); | 5269 | ret = btrfs_end_transaction_throttle(trans, root); |
| 5126 | BUG_ON(ret); | 5270 | BUG_ON(ret); |
| @@ -5217,9 +5361,9 @@ void btrfs_destroy_inode(struct inode *inode) | |||
| 5217 | 5361 | ||
| 5218 | spin_lock(&root->list_lock); | 5362 | spin_lock(&root->list_lock); |
| 5219 | if (!list_empty(&BTRFS_I(inode)->i_orphan)) { | 5363 | if (!list_empty(&BTRFS_I(inode)->i_orphan)) { |
| 5220 | printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan" | 5364 | printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", |
| 5221 | " list\n", inode->i_ino); | 5365 | inode->i_ino); |
| 5222 | dump_stack(); | 5366 | list_del_init(&BTRFS_I(inode)->i_orphan); |
| 5223 | } | 5367 | } |
| 5224 | spin_unlock(&root->list_lock); | 5368 | spin_unlock(&root->list_lock); |
| 5225 | 5369 | ||
| @@ -5476,7 +5620,7 @@ out_fail: | |||
| 5476 | * some fairly slow code that needs optimization. This walks the list | 5620 | * some fairly slow code that needs optimization. This walks the list |
| 5477 | * of all the inodes with pending delalloc and forces them to disk. | 5621 | * of all the inodes with pending delalloc and forces them to disk. |
| 5478 | */ | 5622 | */ |
| 5479 | int btrfs_start_delalloc_inodes(struct btrfs_root *root) | 5623 | int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) |
| 5480 | { | 5624 | { |
| 5481 | struct list_head *head = &root->fs_info->delalloc_inodes; | 5625 | struct list_head *head = &root->fs_info->delalloc_inodes; |
| 5482 | struct btrfs_inode *binode; | 5626 | struct btrfs_inode *binode; |
| @@ -5495,7 +5639,10 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root) | |||
| 5495 | spin_unlock(&root->fs_info->delalloc_lock); | 5639 | spin_unlock(&root->fs_info->delalloc_lock); |
| 5496 | if (inode) { | 5640 | if (inode) { |
| 5497 | filemap_flush(inode->i_mapping); | 5641 | filemap_flush(inode->i_mapping); |
| 5498 | iput(inode); | 5642 | if (delay_iput) |
| 5643 | btrfs_add_delayed_iput(inode); | ||
| 5644 | else | ||
| 5645 | iput(inode); | ||
| 5499 | } | 5646 | } |
| 5500 | cond_resched(); | 5647 | cond_resched(); |
| 5501 | spin_lock(&root->fs_info->delalloc_lock); | 5648 | spin_lock(&root->fs_info->delalloc_lock); |
| @@ -5569,7 +5716,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
| 5569 | if (IS_ERR(inode)) | 5716 | if (IS_ERR(inode)) |
| 5570 | goto out_unlock; | 5717 | goto out_unlock; |
| 5571 | 5718 | ||
| 5572 | err = btrfs_init_inode_security(inode, dir); | 5719 | err = btrfs_init_inode_security(trans, inode, dir); |
| 5573 | if (err) { | 5720 | if (err) { |
| 5574 | drop_inode = 1; | 5721 | drop_inode = 1; |
| 5575 | goto out_unlock; | 5722 | goto out_unlock; |
| @@ -5641,10 +5788,10 @@ out_fail: | |||
| 5641 | return err; | 5788 | return err; |
| 5642 | } | 5789 | } |
| 5643 | 5790 | ||
| 5644 | static int prealloc_file_range(struct btrfs_trans_handle *trans, | 5791 | static int prealloc_file_range(struct inode *inode, u64 start, u64 end, |
| 5645 | struct inode *inode, u64 start, u64 end, | 5792 | u64 alloc_hint, int mode) |
| 5646 | u64 locked_end, u64 alloc_hint, int mode) | ||
| 5647 | { | 5793 | { |
| 5794 | struct btrfs_trans_handle *trans; | ||
| 5648 | struct btrfs_root *root = BTRFS_I(inode)->root; | 5795 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 5649 | struct btrfs_key ins; | 5796 | struct btrfs_key ins; |
| 5650 | u64 alloc_size; | 5797 | u64 alloc_size; |
| @@ -5655,43 +5802,56 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans, | |||
| 5655 | while (num_bytes > 0) { | 5802 | while (num_bytes > 0) { |
| 5656 | alloc_size = min(num_bytes, root->fs_info->max_extent); | 5803 | alloc_size = min(num_bytes, root->fs_info->max_extent); |
| 5657 | 5804 | ||
| 5658 | ret = btrfs_reserve_metadata_space(root, 1); | 5805 | trans = btrfs_start_transaction(root, 1); |
| 5659 | if (ret) | ||
| 5660 | goto out; | ||
| 5661 | 5806 | ||
| 5662 | ret = btrfs_reserve_extent(trans, root, alloc_size, | 5807 | ret = btrfs_reserve_extent(trans, root, alloc_size, |
| 5663 | root->sectorsize, 0, alloc_hint, | 5808 | root->sectorsize, 0, alloc_hint, |
| 5664 | (u64)-1, &ins, 1); | 5809 | (u64)-1, &ins, 1); |
| 5665 | if (ret) { | 5810 | if (ret) { |
| 5666 | WARN_ON(1); | 5811 | WARN_ON(1); |
| 5667 | goto out; | 5812 | goto stop_trans; |
| 5813 | } | ||
| 5814 | |||
| 5815 | ret = btrfs_reserve_metadata_space(root, 3); | ||
| 5816 | if (ret) { | ||
| 5817 | btrfs_free_reserved_extent(root, ins.objectid, | ||
| 5818 | ins.offset); | ||
| 5819 | goto stop_trans; | ||
| 5668 | } | 5820 | } |
| 5821 | |||
| 5669 | ret = insert_reserved_file_extent(trans, inode, | 5822 | ret = insert_reserved_file_extent(trans, inode, |
| 5670 | cur_offset, ins.objectid, | 5823 | cur_offset, ins.objectid, |
| 5671 | ins.offset, ins.offset, | 5824 | ins.offset, ins.offset, |
| 5672 | ins.offset, locked_end, | 5825 | ins.offset, 0, 0, 0, |
| 5673 | 0, 0, 0, | ||
| 5674 | BTRFS_FILE_EXTENT_PREALLOC); | 5826 | BTRFS_FILE_EXTENT_PREALLOC); |
| 5675 | BUG_ON(ret); | 5827 | BUG_ON(ret); |
| 5676 | btrfs_drop_extent_cache(inode, cur_offset, | 5828 | btrfs_drop_extent_cache(inode, cur_offset, |
| 5677 | cur_offset + ins.offset -1, 0); | 5829 | cur_offset + ins.offset -1, 0); |
| 5830 | |||
| 5678 | num_bytes -= ins.offset; | 5831 | num_bytes -= ins.offset; |
| 5679 | cur_offset += ins.offset; | 5832 | cur_offset += ins.offset; |
| 5680 | alloc_hint = ins.objectid + ins.offset; | 5833 | alloc_hint = ins.objectid + ins.offset; |
| 5681 | btrfs_unreserve_metadata_space(root, 1); | 5834 | |
| 5682 | } | ||
| 5683 | out: | ||
| 5684 | if (cur_offset > start) { | ||
| 5685 | inode->i_ctime = CURRENT_TIME; | 5835 | inode->i_ctime = CURRENT_TIME; |
| 5686 | BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; | 5836 | BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; |
| 5687 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | 5837 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
| 5688 | cur_offset > i_size_read(inode)) | 5838 | cur_offset > inode->i_size) { |
| 5689 | btrfs_i_size_write(inode, cur_offset); | 5839 | i_size_write(inode, cur_offset); |
| 5840 | btrfs_ordered_update_i_size(inode, cur_offset, NULL); | ||
| 5841 | } | ||
| 5842 | |||
| 5690 | ret = btrfs_update_inode(trans, root, inode); | 5843 | ret = btrfs_update_inode(trans, root, inode); |
| 5691 | BUG_ON(ret); | 5844 | BUG_ON(ret); |
| 5845 | |||
| 5846 | btrfs_end_transaction(trans, root); | ||
| 5847 | btrfs_unreserve_metadata_space(root, 3); | ||
| 5692 | } | 5848 | } |
| 5849 | return ret; | ||
| 5693 | 5850 | ||
| 5851 | stop_trans: | ||
| 5852 | btrfs_end_transaction(trans, root); | ||
| 5694 | return ret; | 5853 | return ret; |
| 5854 | |||
| 5695 | } | 5855 | } |
| 5696 | 5856 | ||
| 5697 | static long btrfs_fallocate(struct inode *inode, int mode, | 5857 | static long btrfs_fallocate(struct inode *inode, int mode, |
| @@ -5705,8 +5865,6 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
| 5705 | u64 locked_end; | 5865 | u64 locked_end; |
| 5706 | u64 mask = BTRFS_I(inode)->root->sectorsize - 1; | 5866 | u64 mask = BTRFS_I(inode)->root->sectorsize - 1; |
| 5707 | struct extent_map *em; | 5867 | struct extent_map *em; |
| 5708 | struct btrfs_trans_handle *trans; | ||
| 5709 | struct btrfs_root *root; | ||
| 5710 | int ret; | 5868 | int ret; |
| 5711 | 5869 | ||
| 5712 | alloc_start = offset & ~mask; | 5870 | alloc_start = offset & ~mask; |
| @@ -5725,9 +5883,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
| 5725 | goto out; | 5883 | goto out; |
| 5726 | } | 5884 | } |
| 5727 | 5885 | ||
| 5728 | root = BTRFS_I(inode)->root; | 5886 | ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode, |
| 5729 | |||
| 5730 | ret = btrfs_check_data_free_space(root, inode, | ||
| 5731 | alloc_end - alloc_start); | 5887 | alloc_end - alloc_start); |
| 5732 | if (ret) | 5888 | if (ret) |
| 5733 | goto out; | 5889 | goto out; |
| @@ -5736,12 +5892,6 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
| 5736 | while (1) { | 5892 | while (1) { |
| 5737 | struct btrfs_ordered_extent *ordered; | 5893 | struct btrfs_ordered_extent *ordered; |
| 5738 | 5894 | ||
| 5739 | trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1); | ||
| 5740 | if (!trans) { | ||
| 5741 | ret = -EIO; | ||
| 5742 | goto out_free; | ||
| 5743 | } | ||
| 5744 | |||
| 5745 | /* the extent lock is ordered inside the running | 5895 | /* the extent lock is ordered inside the running |
| 5746 | * transaction | 5896 | * transaction |
| 5747 | */ | 5897 | */ |
| @@ -5755,8 +5905,6 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
| 5755 | btrfs_put_ordered_extent(ordered); | 5905 | btrfs_put_ordered_extent(ordered); |
| 5756 | unlock_extent(&BTRFS_I(inode)->io_tree, | 5906 | unlock_extent(&BTRFS_I(inode)->io_tree, |
| 5757 | alloc_start, locked_end, GFP_NOFS); | 5907 | alloc_start, locked_end, GFP_NOFS); |
| 5758 | btrfs_end_transaction(trans, BTRFS_I(inode)->root); | ||
| 5759 | |||
| 5760 | /* | 5908 | /* |
| 5761 | * we can't wait on the range with the transaction | 5909 | * we can't wait on the range with the transaction |
| 5762 | * running or with the extent lock held | 5910 | * running or with the extent lock held |
| @@ -5777,10 +5925,12 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
| 5777 | BUG_ON(IS_ERR(em) || !em); | 5925 | BUG_ON(IS_ERR(em) || !em); |
| 5778 | last_byte = min(extent_map_end(em), alloc_end); | 5926 | last_byte = min(extent_map_end(em), alloc_end); |
| 5779 | last_byte = (last_byte + mask) & ~mask; | 5927 | last_byte = (last_byte + mask) & ~mask; |
| 5780 | if (em->block_start == EXTENT_MAP_HOLE) { | 5928 | if (em->block_start == EXTENT_MAP_HOLE || |
| 5781 | ret = prealloc_file_range(trans, inode, cur_offset, | 5929 | (cur_offset >= inode->i_size && |
| 5782 | last_byte, locked_end + 1, | 5930 | !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { |
| 5783 | alloc_hint, mode); | 5931 | ret = prealloc_file_range(inode, |
| 5932 | cur_offset, last_byte, | ||
| 5933 | alloc_hint, mode); | ||
| 5784 | if (ret < 0) { | 5934 | if (ret < 0) { |
| 5785 | free_extent_map(em); | 5935 | free_extent_map(em); |
| 5786 | break; | 5936 | break; |
| @@ -5799,9 +5949,8 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
| 5799 | unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, | 5949 | unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, |
| 5800 | GFP_NOFS); | 5950 | GFP_NOFS); |
| 5801 | 5951 | ||
| 5802 | btrfs_end_transaction(trans, BTRFS_I(inode)->root); | 5952 | btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode, |
| 5803 | out_free: | 5953 | alloc_end - alloc_start); |
| 5804 | btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start); | ||
| 5805 | out: | 5954 | out: |
| 5806 | mutex_unlock(&inode->i_mutex); | 5955 | mutex_unlock(&inode->i_mutex); |
| 5807 | return ret; | 5956 | return ret; |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index cdbb054102b9..645a17927a8f 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -237,7 +237,6 @@ static noinline int create_subvol(struct btrfs_root *root, | |||
| 237 | u64 objectid; | 237 | u64 objectid; |
| 238 | u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; | 238 | u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; |
| 239 | u64 index = 0; | 239 | u64 index = 0; |
| 240 | unsigned long nr = 1; | ||
| 241 | 240 | ||
| 242 | /* | 241 | /* |
| 243 | * 1 - inode item | 242 | * 1 - inode item |
| @@ -290,7 +289,7 @@ static noinline int create_subvol(struct btrfs_root *root, | |||
| 290 | btrfs_set_root_generation(&root_item, trans->transid); | 289 | btrfs_set_root_generation(&root_item, trans->transid); |
| 291 | btrfs_set_root_level(&root_item, 0); | 290 | btrfs_set_root_level(&root_item, 0); |
| 292 | btrfs_set_root_refs(&root_item, 1); | 291 | btrfs_set_root_refs(&root_item, 1); |
| 293 | btrfs_set_root_used(&root_item, 0); | 292 | btrfs_set_root_used(&root_item, leaf->len); |
| 294 | btrfs_set_root_last_snapshot(&root_item, 0); | 293 | btrfs_set_root_last_snapshot(&root_item, 0); |
| 295 | 294 | ||
| 296 | memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress)); | 295 | memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress)); |
| @@ -342,24 +341,21 @@ static noinline int create_subvol(struct btrfs_root *root, | |||
| 342 | 341 | ||
| 343 | d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); | 342 | d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); |
| 344 | fail: | 343 | fail: |
| 345 | nr = trans->blocks_used; | ||
| 346 | err = btrfs_commit_transaction(trans, root); | 344 | err = btrfs_commit_transaction(trans, root); |
| 347 | if (err && !ret) | 345 | if (err && !ret) |
| 348 | ret = err; | 346 | ret = err; |
| 349 | 347 | ||
| 350 | btrfs_unreserve_metadata_space(root, 6); | 348 | btrfs_unreserve_metadata_space(root, 6); |
| 351 | btrfs_btree_balance_dirty(root, nr); | ||
| 352 | return ret; | 349 | return ret; |
| 353 | } | 350 | } |
| 354 | 351 | ||
| 355 | static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, | 352 | static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, |
| 356 | char *name, int namelen) | 353 | char *name, int namelen) |
| 357 | { | 354 | { |
| 355 | struct inode *inode; | ||
| 358 | struct btrfs_pending_snapshot *pending_snapshot; | 356 | struct btrfs_pending_snapshot *pending_snapshot; |
| 359 | struct btrfs_trans_handle *trans; | 357 | struct btrfs_trans_handle *trans; |
| 360 | int ret = 0; | 358 | int ret; |
| 361 | int err; | ||
| 362 | unsigned long nr = 0; | ||
| 363 | 359 | ||
| 364 | if (!root->ref_cows) | 360 | if (!root->ref_cows) |
| 365 | return -EINVAL; | 361 | return -EINVAL; |
| @@ -372,20 +368,20 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, | |||
| 372 | */ | 368 | */ |
| 373 | ret = btrfs_reserve_metadata_space(root, 6); | 369 | ret = btrfs_reserve_metadata_space(root, 6); |
| 374 | if (ret) | 370 | if (ret) |
| 375 | goto fail_unlock; | 371 | goto fail; |
| 376 | 372 | ||
| 377 | pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS); | 373 | pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS); |
| 378 | if (!pending_snapshot) { | 374 | if (!pending_snapshot) { |
| 379 | ret = -ENOMEM; | 375 | ret = -ENOMEM; |
| 380 | btrfs_unreserve_metadata_space(root, 6); | 376 | btrfs_unreserve_metadata_space(root, 6); |
| 381 | goto fail_unlock; | 377 | goto fail; |
| 382 | } | 378 | } |
| 383 | pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS); | 379 | pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS); |
| 384 | if (!pending_snapshot->name) { | 380 | if (!pending_snapshot->name) { |
| 385 | ret = -ENOMEM; | 381 | ret = -ENOMEM; |
| 386 | kfree(pending_snapshot); | 382 | kfree(pending_snapshot); |
| 387 | btrfs_unreserve_metadata_space(root, 6); | 383 | btrfs_unreserve_metadata_space(root, 6); |
| 388 | goto fail_unlock; | 384 | goto fail; |
| 389 | } | 385 | } |
| 390 | memcpy(pending_snapshot->name, name, namelen); | 386 | memcpy(pending_snapshot->name, name, namelen); |
| 391 | pending_snapshot->name[namelen] = '\0'; | 387 | pending_snapshot->name[namelen] = '\0'; |
| @@ -395,10 +391,19 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, | |||
| 395 | pending_snapshot->root = root; | 391 | pending_snapshot->root = root; |
| 396 | list_add(&pending_snapshot->list, | 392 | list_add(&pending_snapshot->list, |
| 397 | &trans->transaction->pending_snapshots); | 393 | &trans->transaction->pending_snapshots); |
| 398 | err = btrfs_commit_transaction(trans, root); | 394 | ret = btrfs_commit_transaction(trans, root); |
| 395 | BUG_ON(ret); | ||
| 396 | btrfs_unreserve_metadata_space(root, 6); | ||
| 399 | 397 | ||
| 400 | fail_unlock: | 398 | inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); |
| 401 | btrfs_btree_balance_dirty(root, nr); | 399 | if (IS_ERR(inode)) { |
| 400 | ret = PTR_ERR(inode); | ||
| 401 | goto fail; | ||
| 402 | } | ||
| 403 | BUG_ON(!inode); | ||
| 404 | d_instantiate(dentry, inode); | ||
| 405 | ret = 0; | ||
| 406 | fail: | ||
| 402 | return ret; | 407 | return ret; |
| 403 | } | 408 | } |
| 404 | 409 | ||
| @@ -1027,8 +1032,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
| 1027 | BUG_ON(!trans); | 1032 | BUG_ON(!trans); |
| 1028 | 1033 | ||
| 1029 | /* punch hole in destination first */ | 1034 | /* punch hole in destination first */ |
| 1030 | btrfs_drop_extents(trans, root, inode, off, off + len, | 1035 | btrfs_drop_extents(trans, inode, off, off + len, &hint_byte, 1); |
| 1031 | off + len, 0, &hint_byte, 1); | ||
| 1032 | 1036 | ||
| 1033 | /* clone data */ | 1037 | /* clone data */ |
| 1034 | key.objectid = src->i_ino; | 1038 | key.objectid = src->i_ino; |
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 5799bc46a309..b10a49d4bc6a 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
| @@ -291,16 +291,16 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) | |||
| 291 | 291 | ||
| 292 | /* | 292 | /* |
| 293 | * remove an ordered extent from the tree. No references are dropped | 293 | * remove an ordered extent from the tree. No references are dropped |
| 294 | * but, anyone waiting on this extent is woken up. | 294 | * and you must wake_up entry->wait. You must hold the tree mutex |
| 295 | * while you call this function. | ||
| 295 | */ | 296 | */ |
| 296 | int btrfs_remove_ordered_extent(struct inode *inode, | 297 | static int __btrfs_remove_ordered_extent(struct inode *inode, |
| 297 | struct btrfs_ordered_extent *entry) | 298 | struct btrfs_ordered_extent *entry) |
| 298 | { | 299 | { |
| 299 | struct btrfs_ordered_inode_tree *tree; | 300 | struct btrfs_ordered_inode_tree *tree; |
| 300 | struct rb_node *node; | 301 | struct rb_node *node; |
| 301 | 302 | ||
| 302 | tree = &BTRFS_I(inode)->ordered_tree; | 303 | tree = &BTRFS_I(inode)->ordered_tree; |
| 303 | mutex_lock(&tree->mutex); | ||
| 304 | node = &entry->rb_node; | 304 | node = &entry->rb_node; |
| 305 | rb_erase(node, &tree->tree); | 305 | rb_erase(node, &tree->tree); |
| 306 | tree->last = NULL; | 306 | tree->last = NULL; |
| @@ -326,16 +326,34 @@ int btrfs_remove_ordered_extent(struct inode *inode, | |||
| 326 | } | 326 | } |
| 327 | spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); | 327 | spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); |
| 328 | 328 | ||
| 329 | return 0; | ||
| 330 | } | ||
| 331 | |||
| 332 | /* | ||
| 333 | * remove an ordered extent from the tree. No references are dropped | ||
| 334 | * but any waiters are woken. | ||
| 335 | */ | ||
| 336 | int btrfs_remove_ordered_extent(struct inode *inode, | ||
| 337 | struct btrfs_ordered_extent *entry) | ||
| 338 | { | ||
| 339 | struct btrfs_ordered_inode_tree *tree; | ||
| 340 | int ret; | ||
| 341 | |||
| 342 | tree = &BTRFS_I(inode)->ordered_tree; | ||
| 343 | mutex_lock(&tree->mutex); | ||
| 344 | ret = __btrfs_remove_ordered_extent(inode, entry); | ||
| 329 | mutex_unlock(&tree->mutex); | 345 | mutex_unlock(&tree->mutex); |
| 330 | wake_up(&entry->wait); | 346 | wake_up(&entry->wait); |
| 331 | return 0; | 347 | |
| 348 | return ret; | ||
| 332 | } | 349 | } |
| 333 | 350 | ||
| 334 | /* | 351 | /* |
| 335 | * wait for all the ordered extents in a root. This is done when balancing | 352 | * wait for all the ordered extents in a root. This is done when balancing |
| 336 | * space between drives. | 353 | * space between drives. |
| 337 | */ | 354 | */ |
| 338 | int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) | 355 | int btrfs_wait_ordered_extents(struct btrfs_root *root, |
| 356 | int nocow_only, int delay_iput) | ||
| 339 | { | 357 | { |
| 340 | struct list_head splice; | 358 | struct list_head splice; |
| 341 | struct list_head *cur; | 359 | struct list_head *cur; |
| @@ -372,7 +390,10 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) | |||
| 372 | if (inode) { | 390 | if (inode) { |
| 373 | btrfs_start_ordered_extent(inode, ordered, 1); | 391 | btrfs_start_ordered_extent(inode, ordered, 1); |
| 374 | btrfs_put_ordered_extent(ordered); | 392 | btrfs_put_ordered_extent(ordered); |
| 375 | iput(inode); | 393 | if (delay_iput) |
| 394 | btrfs_add_delayed_iput(inode); | ||
| 395 | else | ||
| 396 | iput(inode); | ||
| 376 | } else { | 397 | } else { |
| 377 | btrfs_put_ordered_extent(ordered); | 398 | btrfs_put_ordered_extent(ordered); |
| 378 | } | 399 | } |
| @@ -430,7 +451,7 @@ again: | |||
| 430 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | 451 | btrfs_wait_ordered_range(inode, 0, (u64)-1); |
| 431 | else | 452 | else |
| 432 | filemap_flush(inode->i_mapping); | 453 | filemap_flush(inode->i_mapping); |
| 433 | iput(inode); | 454 | btrfs_add_delayed_iput(inode); |
| 434 | } | 455 | } |
| 435 | 456 | ||
| 436 | cond_resched(); | 457 | cond_resched(); |
| @@ -589,7 +610,7 @@ out: | |||
| 589 | * After an extent is done, call this to conditionally update the on disk | 610 | * After an extent is done, call this to conditionally update the on disk |
| 590 | * i_size. i_size is updated to cover any fully written part of the file. | 611 | * i_size. i_size is updated to cover any fully written part of the file. |
| 591 | */ | 612 | */ |
| 592 | int btrfs_ordered_update_i_size(struct inode *inode, | 613 | int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, |
| 593 | struct btrfs_ordered_extent *ordered) | 614 | struct btrfs_ordered_extent *ordered) |
| 594 | { | 615 | { |
| 595 | struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; | 616 | struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; |
| @@ -597,18 +618,30 @@ int btrfs_ordered_update_i_size(struct inode *inode, | |||
| 597 | u64 disk_i_size; | 618 | u64 disk_i_size; |
| 598 | u64 new_i_size; | 619 | u64 new_i_size; |
| 599 | u64 i_size_test; | 620 | u64 i_size_test; |
| 621 | u64 i_size = i_size_read(inode); | ||
| 600 | struct rb_node *node; | 622 | struct rb_node *node; |
| 623 | struct rb_node *prev = NULL; | ||
| 601 | struct btrfs_ordered_extent *test; | 624 | struct btrfs_ordered_extent *test; |
| 625 | int ret = 1; | ||
| 626 | |||
| 627 | if (ordered) | ||
| 628 | offset = entry_end(ordered); | ||
| 602 | 629 | ||
| 603 | mutex_lock(&tree->mutex); | 630 | mutex_lock(&tree->mutex); |
| 604 | disk_i_size = BTRFS_I(inode)->disk_i_size; | 631 | disk_i_size = BTRFS_I(inode)->disk_i_size; |
| 605 | 632 | ||
| 633 | /* truncate file */ | ||
| 634 | if (disk_i_size > i_size) { | ||
| 635 | BTRFS_I(inode)->disk_i_size = i_size; | ||
| 636 | ret = 0; | ||
| 637 | goto out; | ||
| 638 | } | ||
| 639 | |||
| 606 | /* | 640 | /* |
| 607 | * if the disk i_size is already at the inode->i_size, or | 641 | * if the disk i_size is already at the inode->i_size, or |
| 608 | * this ordered extent is inside the disk i_size, we're done | 642 | * this ordered extent is inside the disk i_size, we're done |
| 609 | */ | 643 | */ |
| 610 | if (disk_i_size >= inode->i_size || | 644 | if (disk_i_size == i_size || offset <= disk_i_size) { |
| 611 | ordered->file_offset + ordered->len <= disk_i_size) { | ||
| 612 | goto out; | 645 | goto out; |
| 613 | } | 646 | } |
| 614 | 647 | ||
| @@ -616,8 +649,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, | |||
| 616 | * we can't update the disk_isize if there are delalloc bytes | 649 | * we can't update the disk_isize if there are delalloc bytes |
| 617 | * between disk_i_size and this ordered extent | 650 | * between disk_i_size and this ordered extent |
| 618 | */ | 651 | */ |
| 619 | if (test_range_bit(io_tree, disk_i_size, | 652 | if (test_range_bit(io_tree, disk_i_size, offset - 1, |
| 620 | ordered->file_offset + ordered->len - 1, | ||
| 621 | EXTENT_DELALLOC, 0, NULL)) { | 653 | EXTENT_DELALLOC, 0, NULL)) { |
| 622 | goto out; | 654 | goto out; |
| 623 | } | 655 | } |
| @@ -626,20 +658,32 @@ int btrfs_ordered_update_i_size(struct inode *inode, | |||
| 626 | * if we find an ordered extent then we can't update disk i_size | 658 | * if we find an ordered extent then we can't update disk i_size |
| 627 | * yet | 659 | * yet |
| 628 | */ | 660 | */ |
| 629 | node = &ordered->rb_node; | 661 | if (ordered) { |
| 630 | while (1) { | 662 | node = rb_prev(&ordered->rb_node); |
| 631 | node = rb_prev(node); | 663 | } else { |
| 632 | if (!node) | 664 | prev = tree_search(tree, offset); |
| 633 | break; | 665 | /* |
| 666 | * we insert file extents without involving ordered struct, | ||
| 667 | * so there should be no ordered struct cover this offset | ||
| 668 | */ | ||
| 669 | if (prev) { | ||
| 670 | test = rb_entry(prev, struct btrfs_ordered_extent, | ||
| 671 | rb_node); | ||
| 672 | BUG_ON(offset_in_entry(test, offset)); | ||
| 673 | } | ||
| 674 | node = prev; | ||
| 675 | } | ||
| 676 | while (node) { | ||
| 634 | test = rb_entry(node, struct btrfs_ordered_extent, rb_node); | 677 | test = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
| 635 | if (test->file_offset + test->len <= disk_i_size) | 678 | if (test->file_offset + test->len <= disk_i_size) |
| 636 | break; | 679 | break; |
| 637 | if (test->file_offset >= inode->i_size) | 680 | if (test->file_offset >= i_size) |
| 638 | break; | 681 | break; |
| 639 | if (test->file_offset >= disk_i_size) | 682 | if (test->file_offset >= disk_i_size) |
| 640 | goto out; | 683 | goto out; |
| 684 | node = rb_prev(node); | ||
| 641 | } | 685 | } |
| 642 | new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode)); | 686 | new_i_size = min_t(u64, offset, i_size); |
| 643 | 687 | ||
| 644 | /* | 688 | /* |
| 645 | * at this point, we know we can safely update i_size to at least | 689 | * at this point, we know we can safely update i_size to at least |
| @@ -647,7 +691,14 @@ int btrfs_ordered_update_i_size(struct inode *inode, | |||
| 647 | * walk forward and see if ios from higher up in the file have | 691 | * walk forward and see if ios from higher up in the file have |
| 648 | * finished. | 692 | * finished. |
| 649 | */ | 693 | */ |
| 650 | node = rb_next(&ordered->rb_node); | 694 | if (ordered) { |
| 695 | node = rb_next(&ordered->rb_node); | ||
| 696 | } else { | ||
| 697 | if (prev) | ||
| 698 | node = rb_next(prev); | ||
| 699 | else | ||
| 700 | node = rb_first(&tree->tree); | ||
| 701 | } | ||
| 651 | i_size_test = 0; | 702 | i_size_test = 0; |
| 652 | if (node) { | 703 | if (node) { |
| 653 | /* | 704 | /* |
| @@ -655,10 +706,10 @@ int btrfs_ordered_update_i_size(struct inode *inode, | |||
| 655 | * between our ordered extent and the next one. | 706 | * between our ordered extent and the next one. |
| 656 | */ | 707 | */ |
| 657 | test = rb_entry(node, struct btrfs_ordered_extent, rb_node); | 708 | test = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
| 658 | if (test->file_offset > entry_end(ordered)) | 709 | if (test->file_offset > offset) |
| 659 | i_size_test = test->file_offset; | 710 | i_size_test = test->file_offset; |
| 660 | } else { | 711 | } else { |
| 661 | i_size_test = i_size_read(inode); | 712 | i_size_test = i_size; |
| 662 | } | 713 | } |
| 663 | 714 | ||
| 664 | /* | 715 | /* |
| @@ -667,15 +718,25 @@ int btrfs_ordered_update_i_size(struct inode *inode, | |||
| 667 | * are no delalloc bytes in this area, it is safe to update | 718 | * are no delalloc bytes in this area, it is safe to update |
| 668 | * disk_i_size to the end of the region. | 719 | * disk_i_size to the end of the region. |
| 669 | */ | 720 | */ |
| 670 | if (i_size_test > entry_end(ordered) && | 721 | if (i_size_test > offset && |
| 671 | !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1, | 722 | !test_range_bit(io_tree, offset, i_size_test - 1, |
| 672 | EXTENT_DELALLOC, 0, NULL)) { | 723 | EXTENT_DELALLOC, 0, NULL)) { |
| 673 | new_i_size = min_t(u64, i_size_test, i_size_read(inode)); | 724 | new_i_size = min_t(u64, i_size_test, i_size); |
| 674 | } | 725 | } |
| 675 | BTRFS_I(inode)->disk_i_size = new_i_size; | 726 | BTRFS_I(inode)->disk_i_size = new_i_size; |
| 727 | ret = 0; | ||
| 676 | out: | 728 | out: |
| 729 | /* | ||
| 730 | * we need to remove the ordered extent with the tree lock held | ||
| 731 | * so that other people calling this function don't find our fully | ||
| 732 | * processed ordered entry and skip updating the i_size | ||
| 733 | */ | ||
| 734 | if (ordered) | ||
| 735 | __btrfs_remove_ordered_extent(inode, ordered); | ||
| 677 | mutex_unlock(&tree->mutex); | 736 | mutex_unlock(&tree->mutex); |
| 678 | return 0; | 737 | if (ordered) |
| 738 | wake_up(&ordered->wait); | ||
| 739 | return ret; | ||
| 679 | } | 740 | } |
| 680 | 741 | ||
| 681 | /* | 742 | /* |
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index f82e87488ca8..1fe1282ef47c 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h | |||
| @@ -150,12 +150,13 @@ void btrfs_start_ordered_extent(struct inode *inode, | |||
| 150 | int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); | 150 | int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); |
| 151 | struct btrfs_ordered_extent * | 151 | struct btrfs_ordered_extent * |
| 152 | btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); | 152 | btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); |
| 153 | int btrfs_ordered_update_i_size(struct inode *inode, | 153 | int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, |
| 154 | struct btrfs_ordered_extent *ordered); | 154 | struct btrfs_ordered_extent *ordered); |
| 155 | int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); | 155 | int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); |
| 156 | int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only); | ||
| 157 | int btrfs_run_ordered_operations(struct btrfs_root *root, int wait); | 156 | int btrfs_run_ordered_operations(struct btrfs_root *root, int wait); |
| 158 | int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, | 157 | int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, |
| 159 | struct btrfs_root *root, | 158 | struct btrfs_root *root, |
| 160 | struct inode *inode); | 159 | struct inode *inode); |
| 160 | int btrfs_wait_ordered_extents(struct btrfs_root *root, | ||
| 161 | int nocow_only, int delay_iput); | ||
| 161 | #endif | 162 | #endif |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index cfcc93c93a7b..a9728680eca8 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
| @@ -1561,6 +1561,20 @@ static int invalidate_extent_cache(struct btrfs_root *root, | |||
| 1561 | return 0; | 1561 | return 0; |
| 1562 | } | 1562 | } |
| 1563 | 1563 | ||
| 1564 | static void put_inodes(struct list_head *list) | ||
| 1565 | { | ||
| 1566 | struct inodevec *ivec; | ||
| 1567 | while (!list_empty(list)) { | ||
| 1568 | ivec = list_entry(list->next, struct inodevec, list); | ||
| 1569 | list_del(&ivec->list); | ||
| 1570 | while (ivec->nr > 0) { | ||
| 1571 | ivec->nr--; | ||
| 1572 | iput(ivec->inode[ivec->nr]); | ||
| 1573 | } | ||
| 1574 | kfree(ivec); | ||
| 1575 | } | ||
| 1576 | } | ||
| 1577 | |||
| 1564 | static int find_next_key(struct btrfs_path *path, int level, | 1578 | static int find_next_key(struct btrfs_path *path, int level, |
| 1565 | struct btrfs_key *key) | 1579 | struct btrfs_key *key) |
| 1566 | 1580 | ||
| @@ -1723,6 +1737,11 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, | |||
| 1723 | 1737 | ||
| 1724 | btrfs_btree_balance_dirty(root, nr); | 1738 | btrfs_btree_balance_dirty(root, nr); |
| 1725 | 1739 | ||
| 1740 | /* | ||
| 1741 | * put inodes outside transaction, otherwise we may deadlock. | ||
| 1742 | */ | ||
| 1743 | put_inodes(&inode_list); | ||
| 1744 | |||
| 1726 | if (replaced && rc->stage == UPDATE_DATA_PTRS) | 1745 | if (replaced && rc->stage == UPDATE_DATA_PTRS) |
| 1727 | invalidate_extent_cache(root, &key, &next_key); | 1746 | invalidate_extent_cache(root, &key, &next_key); |
| 1728 | } | 1747 | } |
| @@ -1752,19 +1771,7 @@ out: | |||
| 1752 | 1771 | ||
| 1753 | btrfs_btree_balance_dirty(root, nr); | 1772 | btrfs_btree_balance_dirty(root, nr); |
| 1754 | 1773 | ||
| 1755 | /* | 1774 | put_inodes(&inode_list); |
| 1756 | * put inodes while we aren't holding the tree locks | ||
| 1757 | */ | ||
| 1758 | while (!list_empty(&inode_list)) { | ||
| 1759 | struct inodevec *ivec; | ||
| 1760 | ivec = list_entry(inode_list.next, struct inodevec, list); | ||
| 1761 | list_del(&ivec->list); | ||
| 1762 | while (ivec->nr > 0) { | ||
| 1763 | ivec->nr--; | ||
| 1764 | iput(ivec->inode[ivec->nr]); | ||
| 1765 | } | ||
| 1766 | kfree(ivec); | ||
| 1767 | } | ||
| 1768 | 1775 | ||
| 1769 | if (replaced && rc->stage == UPDATE_DATA_PTRS) | 1776 | if (replaced && rc->stage == UPDATE_DATA_PTRS) |
| 1770 | invalidate_extent_cache(root, &key, &next_key); | 1777 | invalidate_extent_cache(root, &key, &next_key); |
| @@ -3534,8 +3541,8 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) | |||
| 3534 | (unsigned long long)rc->block_group->key.objectid, | 3541 | (unsigned long long)rc->block_group->key.objectid, |
| 3535 | (unsigned long long)rc->block_group->flags); | 3542 | (unsigned long long)rc->block_group->flags); |
| 3536 | 3543 | ||
| 3537 | btrfs_start_delalloc_inodes(fs_info->tree_root); | 3544 | btrfs_start_delalloc_inodes(fs_info->tree_root, 0); |
| 3538 | btrfs_wait_ordered_extents(fs_info->tree_root, 0); | 3545 | btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0); |
| 3539 | 3546 | ||
| 3540 | while (1) { | 3547 | while (1) { |
| 3541 | rc->extents_found = 0; | 3548 | rc->extents_found = 0; |
| @@ -3755,6 +3762,7 @@ out: | |||
| 3755 | BTRFS_DATA_RELOC_TREE_OBJECTID); | 3762 | BTRFS_DATA_RELOC_TREE_OBJECTID); |
| 3756 | if (IS_ERR(fs_root)) | 3763 | if (IS_ERR(fs_root)) |
| 3757 | err = PTR_ERR(fs_root); | 3764 | err = PTR_ERR(fs_root); |
| 3765 | btrfs_orphan_cleanup(fs_root); | ||
| 3758 | } | 3766 | } |
| 3759 | return err; | 3767 | return err; |
| 3760 | } | 3768 | } |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 752a5463bf53..3f9b45704fcd 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
| @@ -128,6 +128,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
| 128 | substring_t args[MAX_OPT_ARGS]; | 128 | substring_t args[MAX_OPT_ARGS]; |
| 129 | char *p, *num; | 129 | char *p, *num; |
| 130 | int intarg; | 130 | int intarg; |
| 131 | int ret = 0; | ||
| 131 | 132 | ||
| 132 | if (!options) | 133 | if (!options) |
| 133 | return 0; | 134 | return 0; |
| @@ -262,12 +263,18 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
| 262 | case Opt_discard: | 263 | case Opt_discard: |
| 263 | btrfs_set_opt(info->mount_opt, DISCARD); | 264 | btrfs_set_opt(info->mount_opt, DISCARD); |
| 264 | break; | 265 | break; |
| 266 | case Opt_err: | ||
| 267 | printk(KERN_INFO "btrfs: unrecognized mount option " | ||
| 268 | "'%s'\n", p); | ||
| 269 | ret = -EINVAL; | ||
| 270 | goto out; | ||
| 265 | default: | 271 | default: |
| 266 | break; | 272 | break; |
| 267 | } | 273 | } |
| 268 | } | 274 | } |
| 275 | out: | ||
| 269 | kfree(options); | 276 | kfree(options); |
| 270 | return 0; | 277 | return ret; |
| 271 | } | 278 | } |
| 272 | 279 | ||
| 273 | /* | 280 | /* |
| @@ -405,8 +412,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait) | |||
| 405 | return 0; | 412 | return 0; |
| 406 | } | 413 | } |
| 407 | 414 | ||
| 408 | btrfs_start_delalloc_inodes(root); | 415 | btrfs_start_delalloc_inodes(root, 0); |
| 409 | btrfs_wait_ordered_extents(root, 0); | 416 | btrfs_wait_ordered_extents(root, 0, 0); |
| 410 | 417 | ||
| 411 | trans = btrfs_start_transaction(root, 1); | 418 | trans = btrfs_start_transaction(root, 1); |
| 412 | ret = btrfs_commit_transaction(trans, root); | 419 | ret = btrfs_commit_transaction(trans, root); |
| @@ -450,6 +457,8 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
| 450 | seq_puts(seq, ",notreelog"); | 457 | seq_puts(seq, ",notreelog"); |
| 451 | if (btrfs_test_opt(root, FLUSHONCOMMIT)) | 458 | if (btrfs_test_opt(root, FLUSHONCOMMIT)) |
| 452 | seq_puts(seq, ",flushoncommit"); | 459 | seq_puts(seq, ",flushoncommit"); |
| 460 | if (btrfs_test_opt(root, DISCARD)) | ||
| 461 | seq_puts(seq, ",discard"); | ||
| 453 | if (!(root->fs_info->sb->s_flags & MS_POSIXACL)) | 462 | if (!(root->fs_info->sb->s_flags & MS_POSIXACL)) |
| 454 | seq_puts(seq, ",noacl"); | 463 | seq_puts(seq, ",noacl"); |
| 455 | return 0; | 464 | return 0; |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c207e8c32c9b..b2acc79f1b34 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
| @@ -333,6 +333,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
| 333 | memset(trans, 0, sizeof(*trans)); | 333 | memset(trans, 0, sizeof(*trans)); |
| 334 | kmem_cache_free(btrfs_trans_handle_cachep, trans); | 334 | kmem_cache_free(btrfs_trans_handle_cachep, trans); |
| 335 | 335 | ||
| 336 | if (throttle) | ||
| 337 | btrfs_run_delayed_iputs(root); | ||
| 338 | |||
| 336 | return 0; | 339 | return 0; |
| 337 | } | 340 | } |
| 338 | 341 | ||
| @@ -354,7 +357,7 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, | |||
| 354 | * those extents are sent to disk but does not wait on them | 357 | * those extents are sent to disk but does not wait on them |
| 355 | */ | 358 | */ |
| 356 | int btrfs_write_marked_extents(struct btrfs_root *root, | 359 | int btrfs_write_marked_extents(struct btrfs_root *root, |
| 357 | struct extent_io_tree *dirty_pages) | 360 | struct extent_io_tree *dirty_pages, int mark) |
| 358 | { | 361 | { |
| 359 | int ret; | 362 | int ret; |
| 360 | int err = 0; | 363 | int err = 0; |
| @@ -367,7 +370,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root, | |||
| 367 | 370 | ||
| 368 | while (1) { | 371 | while (1) { |
| 369 | ret = find_first_extent_bit(dirty_pages, start, &start, &end, | 372 | ret = find_first_extent_bit(dirty_pages, start, &start, &end, |
| 370 | EXTENT_DIRTY); | 373 | mark); |
| 371 | if (ret) | 374 | if (ret) |
| 372 | break; | 375 | break; |
| 373 | while (start <= end) { | 376 | while (start <= end) { |
| @@ -413,7 +416,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root, | |||
| 413 | * on all the pages and clear them from the dirty pages state tree | 416 | * on all the pages and clear them from the dirty pages state tree |
| 414 | */ | 417 | */ |
| 415 | int btrfs_wait_marked_extents(struct btrfs_root *root, | 418 | int btrfs_wait_marked_extents(struct btrfs_root *root, |
| 416 | struct extent_io_tree *dirty_pages) | 419 | struct extent_io_tree *dirty_pages, int mark) |
| 417 | { | 420 | { |
| 418 | int ret; | 421 | int ret; |
| 419 | int err = 0; | 422 | int err = 0; |
| @@ -425,12 +428,12 @@ int btrfs_wait_marked_extents(struct btrfs_root *root, | |||
| 425 | unsigned long index; | 428 | unsigned long index; |
| 426 | 429 | ||
| 427 | while (1) { | 430 | while (1) { |
| 428 | ret = find_first_extent_bit(dirty_pages, 0, &start, &end, | 431 | ret = find_first_extent_bit(dirty_pages, start, &start, &end, |
| 429 | EXTENT_DIRTY); | 432 | mark); |
| 430 | if (ret) | 433 | if (ret) |
| 431 | break; | 434 | break; |
| 432 | 435 | ||
| 433 | clear_extent_dirty(dirty_pages, start, end, GFP_NOFS); | 436 | clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); |
| 434 | while (start <= end) { | 437 | while (start <= end) { |
| 435 | index = start >> PAGE_CACHE_SHIFT; | 438 | index = start >> PAGE_CACHE_SHIFT; |
| 436 | start = (u64)(index + 1) << PAGE_CACHE_SHIFT; | 439 | start = (u64)(index + 1) << PAGE_CACHE_SHIFT; |
| @@ -460,13 +463,13 @@ int btrfs_wait_marked_extents(struct btrfs_root *root, | |||
| 460 | * those extents are on disk for transaction or log commit | 463 | * those extents are on disk for transaction or log commit |
| 461 | */ | 464 | */ |
| 462 | int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, | 465 | int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, |
| 463 | struct extent_io_tree *dirty_pages) | 466 | struct extent_io_tree *dirty_pages, int mark) |
| 464 | { | 467 | { |
| 465 | int ret; | 468 | int ret; |
| 466 | int ret2; | 469 | int ret2; |
| 467 | 470 | ||
| 468 | ret = btrfs_write_marked_extents(root, dirty_pages); | 471 | ret = btrfs_write_marked_extents(root, dirty_pages, mark); |
| 469 | ret2 = btrfs_wait_marked_extents(root, dirty_pages); | 472 | ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark); |
| 470 | return ret || ret2; | 473 | return ret || ret2; |
| 471 | } | 474 | } |
| 472 | 475 | ||
| @@ -479,7 +482,8 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, | |||
| 479 | return filemap_write_and_wait(btree_inode->i_mapping); | 482 | return filemap_write_and_wait(btree_inode->i_mapping); |
| 480 | } | 483 | } |
| 481 | return btrfs_write_and_wait_marked_extents(root, | 484 | return btrfs_write_and_wait_marked_extents(root, |
| 482 | &trans->transaction->dirty_pages); | 485 | &trans->transaction->dirty_pages, |
| 486 | EXTENT_DIRTY); | ||
| 483 | } | 487 | } |
| 484 | 488 | ||
| 485 | /* | 489 | /* |
| @@ -497,13 +501,16 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
| 497 | { | 501 | { |
| 498 | int ret; | 502 | int ret; |
| 499 | u64 old_root_bytenr; | 503 | u64 old_root_bytenr; |
| 504 | u64 old_root_used; | ||
| 500 | struct btrfs_root *tree_root = root->fs_info->tree_root; | 505 | struct btrfs_root *tree_root = root->fs_info->tree_root; |
| 501 | 506 | ||
| 507 | old_root_used = btrfs_root_used(&root->root_item); | ||
| 502 | btrfs_write_dirty_block_groups(trans, root); | 508 | btrfs_write_dirty_block_groups(trans, root); |
| 503 | 509 | ||
| 504 | while (1) { | 510 | while (1) { |
| 505 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); | 511 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); |
| 506 | if (old_root_bytenr == root->node->start) | 512 | if (old_root_bytenr == root->node->start && |
| 513 | old_root_used == btrfs_root_used(&root->root_item)) | ||
| 507 | break; | 514 | break; |
| 508 | 515 | ||
| 509 | btrfs_set_root_node(&root->root_item, root->node); | 516 | btrfs_set_root_node(&root->root_item, root->node); |
| @@ -512,6 +519,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
| 512 | &root->root_item); | 519 | &root->root_item); |
| 513 | BUG_ON(ret); | 520 | BUG_ON(ret); |
| 514 | 521 | ||
| 522 | old_root_used = btrfs_root_used(&root->root_item); | ||
| 515 | ret = btrfs_write_dirty_block_groups(trans, root); | 523 | ret = btrfs_write_dirty_block_groups(trans, root); |
| 516 | BUG_ON(ret); | 524 | BUG_ON(ret); |
| 517 | } | 525 | } |
| @@ -795,7 +803,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
| 795 | memcpy(&pending->root_key, &key, sizeof(key)); | 803 | memcpy(&pending->root_key, &key, sizeof(key)); |
| 796 | fail: | 804 | fail: |
| 797 | kfree(new_root_item); | 805 | kfree(new_root_item); |
| 798 | btrfs_unreserve_metadata_space(root, 6); | ||
| 799 | return ret; | 806 | return ret; |
| 800 | } | 807 | } |
| 801 | 808 | ||
| @@ -807,7 +814,6 @@ static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info, | |||
| 807 | u64 index = 0; | 814 | u64 index = 0; |
| 808 | struct btrfs_trans_handle *trans; | 815 | struct btrfs_trans_handle *trans; |
| 809 | struct inode *parent_inode; | 816 | struct inode *parent_inode; |
| 810 | struct inode *inode; | ||
| 811 | struct btrfs_root *parent_root; | 817 | struct btrfs_root *parent_root; |
| 812 | 818 | ||
| 813 | parent_inode = pending->dentry->d_parent->d_inode; | 819 | parent_inode = pending->dentry->d_parent->d_inode; |
| @@ -839,8 +845,6 @@ static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info, | |||
| 839 | 845 | ||
| 840 | BUG_ON(ret); | 846 | BUG_ON(ret); |
| 841 | 847 | ||
| 842 | inode = btrfs_lookup_dentry(parent_inode, pending->dentry); | ||
| 843 | d_instantiate(pending->dentry, inode); | ||
| 844 | fail: | 848 | fail: |
| 845 | btrfs_end_transaction(trans, fs_info->fs_root); | 849 | btrfs_end_transaction(trans, fs_info->fs_root); |
| 846 | return ret; | 850 | return ret; |
| @@ -994,11 +998,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
| 994 | mutex_unlock(&root->fs_info->trans_mutex); | 998 | mutex_unlock(&root->fs_info->trans_mutex); |
| 995 | 999 | ||
| 996 | if (flush_on_commit) { | 1000 | if (flush_on_commit) { |
| 997 | btrfs_start_delalloc_inodes(root); | 1001 | btrfs_start_delalloc_inodes(root, 1); |
| 998 | ret = btrfs_wait_ordered_extents(root, 0); | 1002 | ret = btrfs_wait_ordered_extents(root, 0, 1); |
| 999 | BUG_ON(ret); | 1003 | BUG_ON(ret); |
| 1000 | } else if (snap_pending) { | 1004 | } else if (snap_pending) { |
| 1001 | ret = btrfs_wait_ordered_extents(root, 1); | 1005 | ret = btrfs_wait_ordered_extents(root, 0, 1); |
| 1002 | BUG_ON(ret); | 1006 | BUG_ON(ret); |
| 1003 | } | 1007 | } |
| 1004 | 1008 | ||
| @@ -1116,6 +1120,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
| 1116 | current->journal_info = NULL; | 1120 | current->journal_info = NULL; |
| 1117 | 1121 | ||
| 1118 | kmem_cache_free(btrfs_trans_handle_cachep, trans); | 1122 | kmem_cache_free(btrfs_trans_handle_cachep, trans); |
| 1123 | |||
| 1124 | if (current != root->fs_info->transaction_kthread) | ||
| 1125 | btrfs_run_delayed_iputs(root); | ||
| 1126 | |||
| 1119 | return ret; | 1127 | return ret; |
| 1120 | } | 1128 | } |
| 1121 | 1129 | ||
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index d4e3e7a6938c..93c7ccb33118 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
| @@ -107,10 +107,10 @@ void btrfs_throttle(struct btrfs_root *root); | |||
| 107 | int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, | 107 | int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, |
| 108 | struct btrfs_root *root); | 108 | struct btrfs_root *root); |
| 109 | int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, | 109 | int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, |
| 110 | struct extent_io_tree *dirty_pages); | 110 | struct extent_io_tree *dirty_pages, int mark); |
| 111 | int btrfs_write_marked_extents(struct btrfs_root *root, | 111 | int btrfs_write_marked_extents(struct btrfs_root *root, |
| 112 | struct extent_io_tree *dirty_pages); | 112 | struct extent_io_tree *dirty_pages, int mark); |
| 113 | int btrfs_wait_marked_extents(struct btrfs_root *root, | 113 | int btrfs_wait_marked_extents(struct btrfs_root *root, |
| 114 | struct extent_io_tree *dirty_pages); | 114 | struct extent_io_tree *dirty_pages, int mark); |
| 115 | int btrfs_transaction_in_commit(struct btrfs_fs_info *info); | 115 | int btrfs_transaction_in_commit(struct btrfs_fs_info *info); |
| 116 | #endif | 116 | #endif |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 741666a7676a..4a9434b622ec 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
| @@ -542,8 +542,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
| 542 | 542 | ||
| 543 | saved_nbytes = inode_get_bytes(inode); | 543 | saved_nbytes = inode_get_bytes(inode); |
| 544 | /* drop any overlapping extents */ | 544 | /* drop any overlapping extents */ |
| 545 | ret = btrfs_drop_extents(trans, root, inode, | 545 | ret = btrfs_drop_extents(trans, inode, start, extent_end, |
| 546 | start, extent_end, extent_end, start, &alloc_hint, 1); | 546 | &alloc_hint, 1); |
| 547 | BUG_ON(ret); | 547 | BUG_ON(ret); |
| 548 | 548 | ||
| 549 | if (found_type == BTRFS_FILE_EXTENT_REG || | 549 | if (found_type == BTRFS_FILE_EXTENT_REG || |
| @@ -930,6 +930,17 @@ out_nowrite: | |||
| 930 | return 0; | 930 | return 0; |
| 931 | } | 931 | } |
| 932 | 932 | ||
| 933 | static int insert_orphan_item(struct btrfs_trans_handle *trans, | ||
| 934 | struct btrfs_root *root, u64 offset) | ||
| 935 | { | ||
| 936 | int ret; | ||
| 937 | ret = btrfs_find_orphan_item(root, offset); | ||
| 938 | if (ret > 0) | ||
| 939 | ret = btrfs_insert_orphan_item(trans, root, offset); | ||
| 940 | return ret; | ||
| 941 | } | ||
| 942 | |||
| 943 | |||
| 933 | /* | 944 | /* |
| 934 | * There are a few corners where the link count of the file can't | 945 | * There are a few corners where the link count of the file can't |
| 935 | * be properly maintained during replay. So, instead of adding | 946 | * be properly maintained during replay. So, instead of adding |
| @@ -997,9 +1008,13 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |||
| 997 | } | 1008 | } |
| 998 | BTRFS_I(inode)->index_cnt = (u64)-1; | 1009 | BTRFS_I(inode)->index_cnt = (u64)-1; |
| 999 | 1010 | ||
| 1000 | if (inode->i_nlink == 0 && S_ISDIR(inode->i_mode)) { | 1011 | if (inode->i_nlink == 0) { |
| 1001 | ret = replay_dir_deletes(trans, root, NULL, path, | 1012 | if (S_ISDIR(inode->i_mode)) { |
| 1002 | inode->i_ino, 1); | 1013 | ret = replay_dir_deletes(trans, root, NULL, path, |
| 1014 | inode->i_ino, 1); | ||
| 1015 | BUG_ON(ret); | ||
| 1016 | } | ||
| 1017 | ret = insert_orphan_item(trans, root, inode->i_ino); | ||
| 1003 | BUG_ON(ret); | 1018 | BUG_ON(ret); |
| 1004 | } | 1019 | } |
| 1005 | btrfs_free_path(path); | 1020 | btrfs_free_path(path); |
| @@ -1587,7 +1602,6 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, | |||
| 1587 | /* inode keys are done during the first stage */ | 1602 | /* inode keys are done during the first stage */ |
| 1588 | if (key.type == BTRFS_INODE_ITEM_KEY && | 1603 | if (key.type == BTRFS_INODE_ITEM_KEY && |
| 1589 | wc->stage == LOG_WALK_REPLAY_INODES) { | 1604 | wc->stage == LOG_WALK_REPLAY_INODES) { |
| 1590 | struct inode *inode; | ||
| 1591 | struct btrfs_inode_item *inode_item; | 1605 | struct btrfs_inode_item *inode_item; |
| 1592 | u32 mode; | 1606 | u32 mode; |
| 1593 | 1607 | ||
| @@ -1603,31 +1617,16 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, | |||
| 1603 | eb, i, &key); | 1617 | eb, i, &key); |
| 1604 | BUG_ON(ret); | 1618 | BUG_ON(ret); |
| 1605 | 1619 | ||
| 1606 | /* for regular files, truncate away | 1620 | /* for regular files, make sure corresponding |
| 1607 | * extents past the new EOF | 1621 | * orhpan item exist. extents past the new EOF |
| 1622 | * will be truncated later by orphan cleanup. | ||
| 1608 | */ | 1623 | */ |
| 1609 | if (S_ISREG(mode)) { | 1624 | if (S_ISREG(mode)) { |
| 1610 | inode = read_one_inode(root, | 1625 | ret = insert_orphan_item(wc->trans, root, |
| 1611 | key.objectid); | 1626 | key.objectid); |
| 1612 | BUG_ON(!inode); | ||
| 1613 | |||
| 1614 | ret = btrfs_truncate_inode_items(wc->trans, | ||
| 1615 | root, inode, inode->i_size, | ||
| 1616 | BTRFS_EXTENT_DATA_KEY); | ||
| 1617 | BUG_ON(ret); | 1627 | BUG_ON(ret); |
| 1618 | |||
| 1619 | /* if the nlink count is zero here, the iput | ||
| 1620 | * will free the inode. We bump it to make | ||
| 1621 | * sure it doesn't get freed until the link | ||
| 1622 | * count fixup is done | ||
| 1623 | */ | ||
| 1624 | if (inode->i_nlink == 0) { | ||
| 1625 | btrfs_inc_nlink(inode); | ||
| 1626 | btrfs_update_inode(wc->trans, | ||
| 1627 | root, inode); | ||
| 1628 | } | ||
| 1629 | iput(inode); | ||
| 1630 | } | 1628 | } |
| 1629 | |||
| 1631 | ret = link_to_fixup_dir(wc->trans, root, | 1630 | ret = link_to_fixup_dir(wc->trans, root, |
| 1632 | path, key.objectid); | 1631 | path, key.objectid); |
| 1633 | BUG_ON(ret); | 1632 | BUG_ON(ret); |
| @@ -1977,10 +1976,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
| 1977 | { | 1976 | { |
| 1978 | int index1; | 1977 | int index1; |
| 1979 | int index2; | 1978 | int index2; |
| 1979 | int mark; | ||
| 1980 | int ret; | 1980 | int ret; |
| 1981 | struct btrfs_root *log = root->log_root; | 1981 | struct btrfs_root *log = root->log_root; |
| 1982 | struct btrfs_root *log_root_tree = root->fs_info->log_root_tree; | 1982 | struct btrfs_root *log_root_tree = root->fs_info->log_root_tree; |
| 1983 | u64 log_transid = 0; | 1983 | unsigned long log_transid = 0; |
| 1984 | 1984 | ||
| 1985 | mutex_lock(&root->log_mutex); | 1985 | mutex_lock(&root->log_mutex); |
| 1986 | index1 = root->log_transid % 2; | 1986 | index1 = root->log_transid % 2; |
| @@ -2014,24 +2014,29 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
| 2014 | goto out; | 2014 | goto out; |
| 2015 | } | 2015 | } |
| 2016 | 2016 | ||
| 2017 | log_transid = root->log_transid; | ||
| 2018 | if (log_transid % 2 == 0) | ||
| 2019 | mark = EXTENT_DIRTY; | ||
| 2020 | else | ||
| 2021 | mark = EXTENT_NEW; | ||
| 2022 | |||
| 2017 | /* we start IO on all the marked extents here, but we don't actually | 2023 | /* we start IO on all the marked extents here, but we don't actually |
| 2018 | * wait for them until later. | 2024 | * wait for them until later. |
| 2019 | */ | 2025 | */ |
| 2020 | ret = btrfs_write_marked_extents(log, &log->dirty_log_pages); | 2026 | ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark); |
| 2021 | BUG_ON(ret); | 2027 | BUG_ON(ret); |
| 2022 | 2028 | ||
| 2023 | btrfs_set_root_node(&log->root_item, log->node); | 2029 | btrfs_set_root_node(&log->root_item, log->node); |
| 2024 | 2030 | ||
| 2025 | root->log_batch = 0; | 2031 | root->log_batch = 0; |
| 2026 | log_transid = root->log_transid; | ||
| 2027 | root->log_transid++; | 2032 | root->log_transid++; |
| 2028 | log->log_transid = root->log_transid; | 2033 | log->log_transid = root->log_transid; |
| 2029 | root->log_start_pid = 0; | 2034 | root->log_start_pid = 0; |
| 2030 | smp_mb(); | 2035 | smp_mb(); |
| 2031 | /* | 2036 | /* |
| 2032 | * log tree has been flushed to disk, new modifications of | 2037 | * IO has been started, blocks of the log tree have WRITTEN flag set |
| 2033 | * the log will be written to new positions. so it's safe to | 2038 | * in their headers. new modifications of the log will be written to |
| 2034 | * allow log writers to go in. | 2039 | * new positions. so it's safe to allow log writers to go in. |
| 2035 | */ | 2040 | */ |
| 2036 | mutex_unlock(&root->log_mutex); | 2041 | mutex_unlock(&root->log_mutex); |
| 2037 | 2042 | ||
| @@ -2052,7 +2057,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
| 2052 | 2057 | ||
| 2053 | index2 = log_root_tree->log_transid % 2; | 2058 | index2 = log_root_tree->log_transid % 2; |
| 2054 | if (atomic_read(&log_root_tree->log_commit[index2])) { | 2059 | if (atomic_read(&log_root_tree->log_commit[index2])) { |
| 2055 | btrfs_wait_marked_extents(log, &log->dirty_log_pages); | 2060 | btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); |
| 2056 | wait_log_commit(trans, log_root_tree, | 2061 | wait_log_commit(trans, log_root_tree, |
| 2057 | log_root_tree->log_transid); | 2062 | log_root_tree->log_transid); |
| 2058 | mutex_unlock(&log_root_tree->log_mutex); | 2063 | mutex_unlock(&log_root_tree->log_mutex); |
| @@ -2072,16 +2077,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
| 2072 | * check the full commit flag again | 2077 | * check the full commit flag again |
| 2073 | */ | 2078 | */ |
| 2074 | if (root->fs_info->last_trans_log_full_commit == trans->transid) { | 2079 | if (root->fs_info->last_trans_log_full_commit == trans->transid) { |
| 2075 | btrfs_wait_marked_extents(log, &log->dirty_log_pages); | 2080 | btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); |
| 2076 | mutex_unlock(&log_root_tree->log_mutex); | 2081 | mutex_unlock(&log_root_tree->log_mutex); |
| 2077 | ret = -EAGAIN; | 2082 | ret = -EAGAIN; |
| 2078 | goto out_wake_log_root; | 2083 | goto out_wake_log_root; |
| 2079 | } | 2084 | } |
| 2080 | 2085 | ||
| 2081 | ret = btrfs_write_and_wait_marked_extents(log_root_tree, | 2086 | ret = btrfs_write_and_wait_marked_extents(log_root_tree, |
| 2082 | &log_root_tree->dirty_log_pages); | 2087 | &log_root_tree->dirty_log_pages, |
| 2088 | EXTENT_DIRTY | EXTENT_NEW); | ||
| 2083 | BUG_ON(ret); | 2089 | BUG_ON(ret); |
| 2084 | btrfs_wait_marked_extents(log, &log->dirty_log_pages); | 2090 | btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); |
| 2085 | 2091 | ||
| 2086 | btrfs_set_super_log_root(&root->fs_info->super_for_commit, | 2092 | btrfs_set_super_log_root(&root->fs_info->super_for_commit, |
| 2087 | log_root_tree->node->start); | 2093 | log_root_tree->node->start); |
| @@ -2147,12 +2153,12 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) | |||
| 2147 | 2153 | ||
| 2148 | while (1) { | 2154 | while (1) { |
| 2149 | ret = find_first_extent_bit(&log->dirty_log_pages, | 2155 | ret = find_first_extent_bit(&log->dirty_log_pages, |
| 2150 | 0, &start, &end, EXTENT_DIRTY); | 2156 | 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW); |
| 2151 | if (ret) | 2157 | if (ret) |
| 2152 | break; | 2158 | break; |
| 2153 | 2159 | ||
| 2154 | clear_extent_dirty(&log->dirty_log_pages, | 2160 | clear_extent_bits(&log->dirty_log_pages, start, end, |
| 2155 | start, end, GFP_NOFS); | 2161 | EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); |
| 2156 | } | 2162 | } |
| 2157 | 2163 | ||
| 2158 | if (log->log_transid > 0) { | 2164 | if (log->log_transid > 0) { |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 7eda483d7b5a..198cff28766d 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -2209,7 +2209,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
| 2209 | max_chunk_size = 10 * calc_size; | 2209 | max_chunk_size = 10 * calc_size; |
| 2210 | min_stripe_size = 64 * 1024 * 1024; | 2210 | min_stripe_size = 64 * 1024 * 1024; |
| 2211 | } else if (type & BTRFS_BLOCK_GROUP_METADATA) { | 2211 | } else if (type & BTRFS_BLOCK_GROUP_METADATA) { |
| 2212 | max_chunk_size = 4 * calc_size; | 2212 | max_chunk_size = 256 * 1024 * 1024; |
| 2213 | min_stripe_size = 32 * 1024 * 1024; | 2213 | min_stripe_size = 32 * 1024 * 1024; |
| 2214 | } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { | 2214 | } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { |
| 2215 | calc_size = 8 * 1024 * 1024; | 2215 | calc_size = 8 * 1024 * 1024; |
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index b6dd5967c48a..193b58f7d3f3 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c | |||
| @@ -85,22 +85,23 @@ out: | |||
| 85 | return ret; | 85 | return ret; |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | int __btrfs_setxattr(struct inode *inode, const char *name, | 88 | static int do_setxattr(struct btrfs_trans_handle *trans, |
| 89 | const void *value, size_t size, int flags) | 89 | struct inode *inode, const char *name, |
| 90 | const void *value, size_t size, int flags) | ||
| 90 | { | 91 | { |
| 91 | struct btrfs_dir_item *di; | 92 | struct btrfs_dir_item *di; |
| 92 | struct btrfs_root *root = BTRFS_I(inode)->root; | 93 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 93 | struct btrfs_trans_handle *trans; | ||
| 94 | struct btrfs_path *path; | 94 | struct btrfs_path *path; |
| 95 | int ret = 0, mod = 0; | 95 | size_t name_len = strlen(name); |
| 96 | int ret = 0; | ||
| 97 | |||
| 98 | if (name_len + size > BTRFS_MAX_XATTR_SIZE(root)) | ||
| 99 | return -ENOSPC; | ||
| 96 | 100 | ||
| 97 | path = btrfs_alloc_path(); | 101 | path = btrfs_alloc_path(); |
| 98 | if (!path) | 102 | if (!path) |
| 99 | return -ENOMEM; | 103 | return -ENOMEM; |
| 100 | 104 | ||
| 101 | trans = btrfs_join_transaction(root, 1); | ||
| 102 | btrfs_set_trans_block_group(trans, inode); | ||
| 103 | |||
| 104 | /* first lets see if we already have this xattr */ | 105 | /* first lets see if we already have this xattr */ |
| 105 | di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name, | 106 | di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name, |
| 106 | strlen(name), -1); | 107 | strlen(name), -1); |
| @@ -118,15 +119,12 @@ int __btrfs_setxattr(struct inode *inode, const char *name, | |||
| 118 | } | 119 | } |
| 119 | 120 | ||
| 120 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | 121 | ret = btrfs_delete_one_dir_name(trans, root, path, di); |
| 121 | if (ret) | 122 | BUG_ON(ret); |
| 122 | goto out; | ||
| 123 | btrfs_release_path(root, path); | 123 | btrfs_release_path(root, path); |
| 124 | 124 | ||
| 125 | /* if we don't have a value then we are removing the xattr */ | 125 | /* if we don't have a value then we are removing the xattr */ |
| 126 | if (!value) { | 126 | if (!value) |
| 127 | mod = 1; | ||
| 128 | goto out; | 127 | goto out; |
| 129 | } | ||
| 130 | } else { | 128 | } else { |
| 131 | btrfs_release_path(root, path); | 129 | btrfs_release_path(root, path); |
| 132 | 130 | ||
| @@ -138,20 +136,45 @@ int __btrfs_setxattr(struct inode *inode, const char *name, | |||
| 138 | } | 136 | } |
| 139 | 137 | ||
| 140 | /* ok we have to create a completely new xattr */ | 138 | /* ok we have to create a completely new xattr */ |
| 141 | ret = btrfs_insert_xattr_item(trans, root, name, strlen(name), | 139 | ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino, |
| 142 | value, size, inode->i_ino); | 140 | name, name_len, value, size); |
| 141 | BUG_ON(ret); | ||
| 142 | out: | ||
| 143 | btrfs_free_path(path); | ||
| 144 | return ret; | ||
| 145 | } | ||
| 146 | |||
| 147 | int __btrfs_setxattr(struct btrfs_trans_handle *trans, | ||
| 148 | struct inode *inode, const char *name, | ||
| 149 | const void *value, size_t size, int flags) | ||
| 150 | { | ||
| 151 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
| 152 | int ret; | ||
| 153 | |||
| 154 | if (trans) | ||
| 155 | return do_setxattr(trans, inode, name, value, size, flags); | ||
| 156 | |||
| 157 | ret = btrfs_reserve_metadata_space(root, 2); | ||
| 143 | if (ret) | 158 | if (ret) |
| 144 | goto out; | 159 | return ret; |
| 145 | mod = 1; | ||
| 146 | 160 | ||
| 147 | out: | 161 | trans = btrfs_start_transaction(root, 1); |
| 148 | if (mod) { | 162 | if (!trans) { |
| 149 | inode->i_ctime = CURRENT_TIME; | 163 | ret = -ENOMEM; |
| 150 | ret = btrfs_update_inode(trans, root, inode); | 164 | goto out; |
| 151 | } | 165 | } |
| 166 | btrfs_set_trans_block_group(trans, inode); | ||
| 152 | 167 | ||
| 153 | btrfs_end_transaction(trans, root); | 168 | ret = do_setxattr(trans, inode, name, value, size, flags); |
| 154 | btrfs_free_path(path); | 169 | if (ret) |
| 170 | goto out; | ||
| 171 | |||
| 172 | inode->i_ctime = CURRENT_TIME; | ||
| 173 | ret = btrfs_update_inode(trans, root, inode); | ||
| 174 | BUG_ON(ret); | ||
| 175 | out: | ||
| 176 | btrfs_end_transaction_throttle(trans, root); | ||
| 177 | btrfs_unreserve_metadata_space(root, 2); | ||
| 155 | return ret; | 178 | return ret; |
| 156 | } | 179 | } |
| 157 | 180 | ||
| @@ -314,7 +337,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, | |||
| 314 | 337 | ||
| 315 | if (size == 0) | 338 | if (size == 0) |
| 316 | value = ""; /* empty EA, do not remove */ | 339 | value = ""; /* empty EA, do not remove */ |
| 317 | return __btrfs_setxattr(dentry->d_inode, name, value, size, flags); | 340 | |
| 341 | return __btrfs_setxattr(NULL, dentry->d_inode, name, value, size, | ||
| 342 | flags); | ||
| 318 | } | 343 | } |
| 319 | 344 | ||
| 320 | int btrfs_removexattr(struct dentry *dentry, const char *name) | 345 | int btrfs_removexattr(struct dentry *dentry, const char *name) |
| @@ -329,10 +354,13 @@ int btrfs_removexattr(struct dentry *dentry, const char *name) | |||
| 329 | 354 | ||
| 330 | if (!btrfs_is_valid_xattr(name)) | 355 | if (!btrfs_is_valid_xattr(name)) |
| 331 | return -EOPNOTSUPP; | 356 | return -EOPNOTSUPP; |
| 332 | return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE); | 357 | |
| 358 | return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0, | ||
| 359 | XATTR_REPLACE); | ||
| 333 | } | 360 | } |
| 334 | 361 | ||
| 335 | int btrfs_xattr_security_init(struct inode *inode, struct inode *dir) | 362 | int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, |
| 363 | struct inode *inode, struct inode *dir) | ||
| 336 | { | 364 | { |
| 337 | int err; | 365 | int err; |
| 338 | size_t len; | 366 | size_t len; |
| @@ -354,7 +382,7 @@ int btrfs_xattr_security_init(struct inode *inode, struct inode *dir) | |||
| 354 | } else { | 382 | } else { |
| 355 | strcpy(name, XATTR_SECURITY_PREFIX); | 383 | strcpy(name, XATTR_SECURITY_PREFIX); |
| 356 | strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix); | 384 | strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix); |
| 357 | err = __btrfs_setxattr(inode, name, value, len, 0); | 385 | err = __btrfs_setxattr(trans, inode, name, value, len, 0); |
| 358 | kfree(name); | 386 | kfree(name); |
| 359 | } | 387 | } |
| 360 | 388 | ||
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h index c71e9c3cf3f7..721efa0346e0 100644 --- a/fs/btrfs/xattr.h +++ b/fs/btrfs/xattr.h | |||
| @@ -27,15 +27,16 @@ extern struct xattr_handler *btrfs_xattr_handlers[]; | |||
| 27 | 27 | ||
| 28 | extern ssize_t __btrfs_getxattr(struct inode *inode, const char *name, | 28 | extern ssize_t __btrfs_getxattr(struct inode *inode, const char *name, |
| 29 | void *buffer, size_t size); | 29 | void *buffer, size_t size); |
| 30 | extern int __btrfs_setxattr(struct inode *inode, const char *name, | 30 | extern int __btrfs_setxattr(struct btrfs_trans_handle *trans, |
| 31 | const void *value, size_t size, int flags); | 31 | struct inode *inode, const char *name, |
| 32 | 32 | const void *value, size_t size, int flags); | |
| 33 | extern ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, | 33 | extern ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, |
| 34 | void *buffer, size_t size); | 34 | void *buffer, size_t size); |
| 35 | extern int btrfs_setxattr(struct dentry *dentry, const char *name, | 35 | extern int btrfs_setxattr(struct dentry *dentry, const char *name, |
| 36 | const void *value, size_t size, int flags); | 36 | const void *value, size_t size, int flags); |
| 37 | extern int btrfs_removexattr(struct dentry *dentry, const char *name); | 37 | extern int btrfs_removexattr(struct dentry *dentry, const char *name); |
| 38 | 38 | ||
| 39 | extern int btrfs_xattr_security_init(struct inode *inode, struct inode *dir); | 39 | extern int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, |
| 40 | struct inode *inode, struct inode *dir); | ||
| 40 | 41 | ||
| 41 | #endif /* __XATTR__ */ | 42 | #endif /* __XATTR__ */ |
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 14cbc831422a..332dd00f0894 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c | |||
| @@ -1600,8 +1600,6 @@ static long do_ioctl_trans(int fd, unsigned int cmd, | |||
| 1600 | case KDSKBMETA: | 1600 | case KDSKBMETA: |
| 1601 | case KDSKBLED: | 1601 | case KDSKBLED: |
| 1602 | case KDSETLED: | 1602 | case KDSETLED: |
| 1603 | /* SG stuff */ | ||
| 1604 | case SG_SET_TRANSFORM: | ||
| 1605 | /* AUTOFS */ | 1603 | /* AUTOFS */ |
| 1606 | case AUTOFS_IOC_READY: | 1604 | case AUTOFS_IOC_READY: |
| 1607 | case AUTOFS_IOC_FAIL: | 1605 | case AUTOFS_IOC_FAIL: |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 4012885d027f..e82adc2debb7 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
| @@ -1206,7 +1206,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
| 1206 | * NOTE: filesystems with their own locking have to handle this | 1206 | * NOTE: filesystems with their own locking have to handle this |
| 1207 | * on their own. | 1207 | * on their own. |
| 1208 | */ | 1208 | */ |
| 1209 | if (dio->flags & DIO_LOCKING) { | 1209 | if (flags & DIO_LOCKING) { |
| 1210 | if (unlikely((rw & WRITE) && retval < 0)) { | 1210 | if (unlikely((rw & WRITE) && retval < 0)) { |
| 1211 | loff_t isize = i_size_read(inode); | 1211 | loff_t isize = i_size_read(inode); |
| 1212 | if (end > isize) | 1212 | if (end > isize) |
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index 2dda5ade75bc..8f006a0d6076 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c | |||
| @@ -62,7 +62,7 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
| 62 | struct inode *lower_inode = | 62 | struct inode *lower_inode = |
| 63 | ecryptfs_inode_to_lower(dentry->d_inode); | 63 | ecryptfs_inode_to_lower(dentry->d_inode); |
| 64 | 64 | ||
| 65 | fsstack_copy_attr_all(dentry->d_inode, lower_inode, NULL); | 65 | fsstack_copy_attr_all(dentry->d_inode, lower_inode); |
| 66 | } | 66 | } |
| 67 | out: | 67 | out: |
| 68 | return rc; | 68 | return rc; |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 056fed62d0de..429ca0b3ba08 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
| @@ -626,9 +626,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 626 | lower_new_dir_dentry->d_inode, lower_new_dentry); | 626 | lower_new_dir_dentry->d_inode, lower_new_dentry); |
| 627 | if (rc) | 627 | if (rc) |
| 628 | goto out_lock; | 628 | goto out_lock; |
| 629 | fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode, NULL); | 629 | fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode); |
| 630 | if (new_dir != old_dir) | 630 | if (new_dir != old_dir) |
| 631 | fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode, NULL); | 631 | fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode); |
| 632 | out_lock: | 632 | out_lock: |
| 633 | unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); | 633 | unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); |
| 634 | dput(lower_new_dentry->d_parent); | 634 | dput(lower_new_dentry->d_parent); |
| @@ -967,7 +967,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) | |||
| 967 | rc = notify_change(lower_dentry, ia); | 967 | rc = notify_change(lower_dentry, ia); |
| 968 | mutex_unlock(&lower_dentry->d_inode->i_mutex); | 968 | mutex_unlock(&lower_dentry->d_inode->i_mutex); |
| 969 | out: | 969 | out: |
| 970 | fsstack_copy_attr_all(inode, lower_inode, NULL); | 970 | fsstack_copy_attr_all(inode, lower_inode); |
| 971 | return rc; | 971 | return rc; |
| 972 | } | 972 | } |
| 973 | 973 | ||
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 101fe4c7b1ee..567bc4b9f70a 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c | |||
| @@ -189,7 +189,7 @@ int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, | |||
| 189 | init_special_inode(inode, lower_inode->i_mode, | 189 | init_special_inode(inode, lower_inode->i_mode, |
| 190 | lower_inode->i_rdev); | 190 | lower_inode->i_rdev); |
| 191 | dentry->d_op = &ecryptfs_dops; | 191 | dentry->d_op = &ecryptfs_dops; |
| 192 | fsstack_copy_attr_all(inode, lower_inode, NULL); | 192 | fsstack_copy_attr_all(inode, lower_inode); |
| 193 | /* This size will be overwritten for real files w/ headers and | 193 | /* This size will be overwritten for real files w/ headers and |
| 194 | * other metadata */ | 194 | * other metadata */ |
| 195 | fsstack_copy_inode_size(inode, lower_inode); | 195 | fsstack_copy_inode_size(inode, lower_inode); |
diff --git a/fs/eventfd.c b/fs/eventfd.c index 8b47e4200e65..d26402ff06ea 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c | |||
| @@ -339,7 +339,7 @@ struct file *eventfd_file_create(unsigned int count, int flags) | |||
| 339 | ctx->flags = flags; | 339 | ctx->flags = flags; |
| 340 | 340 | ||
| 341 | file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, | 341 | file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, |
| 342 | flags & EFD_SHARED_FCNTL_FLAGS); | 342 | O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS)); |
| 343 | if (IS_ERR(file)) | 343 | if (IS_ERR(file)) |
| 344 | eventfd_free_ctx(ctx); | 344 | eventfd_free_ctx(ctx); |
| 345 | 345 | ||
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 366c503f9657..bd056a5b4efc 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
| @@ -1206,7 +1206,7 @@ SYSCALL_DEFINE1(epoll_create1, int, flags) | |||
| 1206 | * a file structure and a free file descriptor. | 1206 | * a file structure and a free file descriptor. |
| 1207 | */ | 1207 | */ |
| 1208 | error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, | 1208 | error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, |
| 1209 | flags & O_CLOEXEC); | 1209 | O_RDWR | (flags & O_CLOEXEC)); |
| 1210 | if (error < 0) | 1210 | if (error < 0) |
| 1211 | ep_free(ep); | 1211 | ep_free(ep); |
| 1212 | 1212 | ||
| @@ -826,7 +826,9 @@ static int de_thread(struct task_struct *tsk) | |||
| 826 | attach_pid(tsk, PIDTYPE_PID, task_pid(leader)); | 826 | attach_pid(tsk, PIDTYPE_PID, task_pid(leader)); |
| 827 | transfer_pid(leader, tsk, PIDTYPE_PGID); | 827 | transfer_pid(leader, tsk, PIDTYPE_PGID); |
| 828 | transfer_pid(leader, tsk, PIDTYPE_SID); | 828 | transfer_pid(leader, tsk, PIDTYPE_SID); |
| 829 | |||
| 829 | list_replace_rcu(&leader->tasks, &tsk->tasks); | 830 | list_replace_rcu(&leader->tasks, &tsk->tasks); |
| 831 | list_replace_init(&leader->sibling, &tsk->sibling); | ||
| 830 | 832 | ||
| 831 | tsk->group_leader = tsk; | 833 | tsk->group_leader = tsk; |
| 832 | leader->group_leader = tsk; | 834 | leader->group_leader = tsk; |
| @@ -1761,17 +1763,20 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) | |||
| 1761 | struct mm_struct *mm = current->mm; | 1763 | struct mm_struct *mm = current->mm; |
| 1762 | struct linux_binfmt * binfmt; | 1764 | struct linux_binfmt * binfmt; |
| 1763 | struct inode * inode; | 1765 | struct inode * inode; |
| 1764 | struct file * file; | ||
| 1765 | const struct cred *old_cred; | 1766 | const struct cred *old_cred; |
| 1766 | struct cred *cred; | 1767 | struct cred *cred; |
| 1767 | int retval = 0; | 1768 | int retval = 0; |
| 1768 | int flag = 0; | 1769 | int flag = 0; |
| 1769 | int ispipe = 0; | 1770 | int ispipe = 0; |
| 1770 | unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; | ||
| 1771 | char **helper_argv = NULL; | 1771 | char **helper_argv = NULL; |
| 1772 | int helper_argc = 0; | 1772 | int helper_argc = 0; |
| 1773 | int dump_count = 0; | 1773 | int dump_count = 0; |
| 1774 | static atomic_t core_dump_count = ATOMIC_INIT(0); | 1774 | static atomic_t core_dump_count = ATOMIC_INIT(0); |
| 1775 | struct coredump_params cprm = { | ||
| 1776 | .signr = signr, | ||
| 1777 | .regs = regs, | ||
| 1778 | .limit = current->signal->rlim[RLIMIT_CORE].rlim_cur, | ||
| 1779 | }; | ||
| 1775 | 1780 | ||
| 1776 | audit_core_dumps(signr); | 1781 | audit_core_dumps(signr); |
| 1777 | 1782 | ||
| @@ -1827,15 +1832,15 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) | |||
| 1827 | ispipe = format_corename(corename, signr); | 1832 | ispipe = format_corename(corename, signr); |
| 1828 | unlock_kernel(); | 1833 | unlock_kernel(); |
| 1829 | 1834 | ||
| 1830 | if ((!ispipe) && (core_limit < binfmt->min_coredump)) | 1835 | if ((!ispipe) && (cprm.limit < binfmt->min_coredump)) |
| 1831 | goto fail_unlock; | 1836 | goto fail_unlock; |
| 1832 | 1837 | ||
| 1833 | if (ispipe) { | 1838 | if (ispipe) { |
| 1834 | if (core_limit == 0) { | 1839 | if (cprm.limit == 0) { |
| 1835 | /* | 1840 | /* |
| 1836 | * Normally core limits are irrelevant to pipes, since | 1841 | * Normally core limits are irrelevant to pipes, since |
| 1837 | * we're not writing to the file system, but we use | 1842 | * we're not writing to the file system, but we use |
| 1838 | * core_limit of 0 here as a speacial value. Any | 1843 | * cprm.limit of 0 here as a speacial value. Any |
| 1839 | * non-zero limit gets set to RLIM_INFINITY below, but | 1844 | * non-zero limit gets set to RLIM_INFINITY below, but |
| 1840 | * a limit of 0 skips the dump. This is a consistent | 1845 | * a limit of 0 skips the dump. This is a consistent |
| 1841 | * way to catch recursive crashes. We can still crash | 1846 | * way to catch recursive crashes. We can still crash |
| @@ -1868,25 +1873,25 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) | |||
| 1868 | goto fail_dropcount; | 1873 | goto fail_dropcount; |
| 1869 | } | 1874 | } |
| 1870 | 1875 | ||
| 1871 | core_limit = RLIM_INFINITY; | 1876 | cprm.limit = RLIM_INFINITY; |
| 1872 | 1877 | ||
| 1873 | /* SIGPIPE can happen, but it's just never processed */ | 1878 | /* SIGPIPE can happen, but it's just never processed */ |
| 1874 | if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL, | 1879 | if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL, |
| 1875 | &file)) { | 1880 | &cprm.file)) { |
| 1876 | printk(KERN_INFO "Core dump to %s pipe failed\n", | 1881 | printk(KERN_INFO "Core dump to %s pipe failed\n", |
| 1877 | corename); | 1882 | corename); |
| 1878 | goto fail_dropcount; | 1883 | goto fail_dropcount; |
| 1879 | } | 1884 | } |
| 1880 | } else | 1885 | } else |
| 1881 | file = filp_open(corename, | 1886 | cprm.file = filp_open(corename, |
| 1882 | O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, | 1887 | O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, |
| 1883 | 0600); | 1888 | 0600); |
| 1884 | if (IS_ERR(file)) | 1889 | if (IS_ERR(cprm.file)) |
| 1885 | goto fail_dropcount; | 1890 | goto fail_dropcount; |
| 1886 | inode = file->f_path.dentry->d_inode; | 1891 | inode = cprm.file->f_path.dentry->d_inode; |
| 1887 | if (inode->i_nlink > 1) | 1892 | if (inode->i_nlink > 1) |
| 1888 | goto close_fail; /* multiple links - don't dump */ | 1893 | goto close_fail; /* multiple links - don't dump */ |
| 1889 | if (!ispipe && d_unhashed(file->f_path.dentry)) | 1894 | if (!ispipe && d_unhashed(cprm.file->f_path.dentry)) |
| 1890 | goto close_fail; | 1895 | goto close_fail; |
| 1891 | 1896 | ||
| 1892 | /* AK: actually i see no reason to not allow this for named pipes etc., | 1897 | /* AK: actually i see no reason to not allow this for named pipes etc., |
| @@ -1899,21 +1904,22 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) | |||
| 1899 | */ | 1904 | */ |
| 1900 | if (inode->i_uid != current_fsuid()) | 1905 | if (inode->i_uid != current_fsuid()) |
| 1901 | goto close_fail; | 1906 | goto close_fail; |
| 1902 | if (!file->f_op) | 1907 | if (!cprm.file->f_op) |
| 1903 | goto close_fail; | 1908 | goto close_fail; |
| 1904 | if (!file->f_op->write) | 1909 | if (!cprm.file->f_op->write) |
| 1905 | goto close_fail; | 1910 | goto close_fail; |
| 1906 | if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0) | 1911 | if (!ispipe && |
| 1912 | do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0) | ||
| 1907 | goto close_fail; | 1913 | goto close_fail; |
| 1908 | 1914 | ||
| 1909 | retval = binfmt->core_dump(signr, regs, file, core_limit); | 1915 | retval = binfmt->core_dump(&cprm); |
| 1910 | 1916 | ||
| 1911 | if (retval) | 1917 | if (retval) |
| 1912 | current->signal->group_exit_code |= 0x80; | 1918 | current->signal->group_exit_code |= 0x80; |
| 1913 | close_fail: | 1919 | close_fail: |
| 1914 | if (ispipe && core_pipe_limit) | 1920 | if (ispipe && core_pipe_limit) |
| 1915 | wait_for_dump_helpers(file); | 1921 | wait_for_dump_helpers(cprm.file); |
| 1916 | filp_close(file, NULL); | 1922 | filp_close(cprm.file, NULL); |
| 1917 | fail_dropcount: | 1923 | fail_dropcount: |
| 1918 | if (dump_count) | 1924 | if (dump_count) |
| 1919 | atomic_dec(&core_dump_count); | 1925 | atomic_dec(&core_dump_count); |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index ad14227f509e..455e6e6e5cb9 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
| @@ -970,7 +970,7 @@ static int ext3_get_block(struct inode *inode, sector_t iblock, | |||
| 970 | if (max_blocks > DIO_MAX_BLOCKS) | 970 | if (max_blocks > DIO_MAX_BLOCKS) |
| 971 | max_blocks = DIO_MAX_BLOCKS; | 971 | max_blocks = DIO_MAX_BLOCKS; |
| 972 | handle = ext3_journal_start(inode, DIO_CREDITS + | 972 | handle = ext3_journal_start(inode, DIO_CREDITS + |
| 973 | 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb)); | 973 | EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb)); |
| 974 | if (IS_ERR(handle)) { | 974 | if (IS_ERR(handle)) { |
| 975 | ret = PTR_ERR(handle); | 975 | ret = PTR_ERR(handle); |
| 976 | goto out; | 976 | goto out; |
| @@ -3146,8 +3146,8 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 3146 | 3146 | ||
| 3147 | /* (user+group)*(old+new) structure, inode write (sb, | 3147 | /* (user+group)*(old+new) structure, inode write (sb, |
| 3148 | * inode block, ? - but truncate inode update has it) */ | 3148 | * inode block, ? - but truncate inode update has it) */ |
| 3149 | handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+ | 3149 | handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ |
| 3150 | EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3); | 3150 | EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3); |
| 3151 | if (IS_ERR(handle)) { | 3151 | if (IS_ERR(handle)) { |
| 3152 | error = PTR_ERR(handle); | 3152 | error = PTR_ERR(handle); |
| 3153 | goto err_out; | 3153 | goto err_out; |
| @@ -3239,7 +3239,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode) | |||
| 3239 | #ifdef CONFIG_QUOTA | 3239 | #ifdef CONFIG_QUOTA |
| 3240 | /* We know that structure was already allocated during vfs_dq_init so | 3240 | /* We know that structure was already allocated during vfs_dq_init so |
| 3241 | * we will be updating only the data blocks + inodes */ | 3241 | * we will be updating only the data blocks + inodes */ |
| 3242 | ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); | 3242 | ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); |
| 3243 | #endif | 3243 | #endif |
| 3244 | 3244 | ||
| 3245 | return ret; | 3245 | return ret; |
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index aad6400c9b77..7b0e44f7d66f 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c | |||
| @@ -1699,7 +1699,7 @@ static int ext3_create (struct inode * dir, struct dentry * dentry, int mode, | |||
| 1699 | retry: | 1699 | retry: |
| 1700 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 1700 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 1701 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1701 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
| 1702 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 1702 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
| 1703 | if (IS_ERR(handle)) | 1703 | if (IS_ERR(handle)) |
| 1704 | return PTR_ERR(handle); | 1704 | return PTR_ERR(handle); |
| 1705 | 1705 | ||
| @@ -1733,7 +1733,7 @@ static int ext3_mknod (struct inode * dir, struct dentry *dentry, | |||
| 1733 | retry: | 1733 | retry: |
| 1734 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 1734 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 1735 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1735 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
| 1736 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 1736 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
| 1737 | if (IS_ERR(handle)) | 1737 | if (IS_ERR(handle)) |
| 1738 | return PTR_ERR(handle); | 1738 | return PTR_ERR(handle); |
| 1739 | 1739 | ||
| @@ -1769,7 +1769,7 @@ static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode) | |||
| 1769 | retry: | 1769 | retry: |
| 1770 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 1770 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 1771 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1771 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
| 1772 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 1772 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
| 1773 | if (IS_ERR(handle)) | 1773 | if (IS_ERR(handle)) |
| 1774 | return PTR_ERR(handle); | 1774 | return PTR_ERR(handle); |
| 1775 | 1775 | ||
| @@ -1920,7 +1920,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
| 1920 | struct ext3_iloc iloc; | 1920 | struct ext3_iloc iloc; |
| 1921 | int err = 0, rc; | 1921 | int err = 0, rc; |
| 1922 | 1922 | ||
| 1923 | lock_super(sb); | 1923 | mutex_lock(&EXT3_SB(sb)->s_orphan_lock); |
| 1924 | if (!list_empty(&EXT3_I(inode)->i_orphan)) | 1924 | if (!list_empty(&EXT3_I(inode)->i_orphan)) |
| 1925 | goto out_unlock; | 1925 | goto out_unlock; |
| 1926 | 1926 | ||
| @@ -1929,9 +1929,13 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
| 1929 | 1929 | ||
| 1930 | /* @@@ FIXME: Observation from aviro: | 1930 | /* @@@ FIXME: Observation from aviro: |
| 1931 | * I think I can trigger J_ASSERT in ext3_orphan_add(). We block | 1931 | * I think I can trigger J_ASSERT in ext3_orphan_add(). We block |
| 1932 | * here (on lock_super()), so race with ext3_link() which might bump | 1932 | * here (on s_orphan_lock), so race with ext3_link() which might bump |
| 1933 | * ->i_nlink. For, say it, character device. Not a regular file, | 1933 | * ->i_nlink. For, say it, character device. Not a regular file, |
| 1934 | * not a directory, not a symlink and ->i_nlink > 0. | 1934 | * not a directory, not a symlink and ->i_nlink > 0. |
| 1935 | * | ||
| 1936 | * tytso, 4/25/2009: I'm not sure how that could happen; | ||
| 1937 | * shouldn't the fs core protect us from these sort of | ||
| 1938 | * unlink()/link() races? | ||
| 1935 | */ | 1939 | */ |
| 1936 | J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || | 1940 | J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
| 1937 | S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); | 1941 | S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); |
| @@ -1968,7 +1972,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
| 1968 | jbd_debug(4, "orphan inode %lu will point to %d\n", | 1972 | jbd_debug(4, "orphan inode %lu will point to %d\n", |
| 1969 | inode->i_ino, NEXT_ORPHAN(inode)); | 1973 | inode->i_ino, NEXT_ORPHAN(inode)); |
| 1970 | out_unlock: | 1974 | out_unlock: |
| 1971 | unlock_super(sb); | 1975 | mutex_unlock(&EXT3_SB(sb)->s_orphan_lock); |
| 1972 | ext3_std_error(inode->i_sb, err); | 1976 | ext3_std_error(inode->i_sb, err); |
| 1973 | return err; | 1977 | return err; |
| 1974 | } | 1978 | } |
| @@ -1986,11 +1990,9 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode) | |||
| 1986 | struct ext3_iloc iloc; | 1990 | struct ext3_iloc iloc; |
| 1987 | int err = 0; | 1991 | int err = 0; |
| 1988 | 1992 | ||
| 1989 | lock_super(inode->i_sb); | 1993 | mutex_lock(&EXT3_SB(inode->i_sb)->s_orphan_lock); |
| 1990 | if (list_empty(&ei->i_orphan)) { | 1994 | if (list_empty(&ei->i_orphan)) |
| 1991 | unlock_super(inode->i_sb); | 1995 | goto out; |
| 1992 | return 0; | ||
| 1993 | } | ||
| 1994 | 1996 | ||
| 1995 | ino_next = NEXT_ORPHAN(inode); | 1997 | ino_next = NEXT_ORPHAN(inode); |
| 1996 | prev = ei->i_orphan.prev; | 1998 | prev = ei->i_orphan.prev; |
| @@ -2040,7 +2042,7 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode) | |||
| 2040 | out_err: | 2042 | out_err: |
| 2041 | ext3_std_error(inode->i_sb, err); | 2043 | ext3_std_error(inode->i_sb, err); |
| 2042 | out: | 2044 | out: |
| 2043 | unlock_super(inode->i_sb); | 2045 | mutex_unlock(&EXT3_SB(inode->i_sb)->s_orphan_lock); |
| 2044 | return err; | 2046 | return err; |
| 2045 | 2047 | ||
| 2046 | out_brelse: | 2048 | out_brelse: |
| @@ -2175,7 +2177,7 @@ static int ext3_symlink (struct inode * dir, | |||
| 2175 | retry: | 2177 | retry: |
| 2176 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 2178 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 2177 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 + | 2179 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 + |
| 2178 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 2180 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
| 2179 | if (IS_ERR(handle)) | 2181 | if (IS_ERR(handle)) |
| 2180 | return PTR_ERR(handle); | 2182 | return PTR_ERR(handle); |
| 2181 | 2183 | ||
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c index 5f83b6179178..54351ac7cef9 100644 --- a/fs/ext3/resize.c +++ b/fs/ext3/resize.c | |||
| @@ -209,7 +209,7 @@ static int setup_new_group_blocks(struct super_block *sb, | |||
| 209 | if (IS_ERR(handle)) | 209 | if (IS_ERR(handle)) |
| 210 | return PTR_ERR(handle); | 210 | return PTR_ERR(handle); |
| 211 | 211 | ||
| 212 | lock_super(sb); | 212 | mutex_lock(&sbi->s_resize_lock); |
| 213 | if (input->group != sbi->s_groups_count) { | 213 | if (input->group != sbi->s_groups_count) { |
| 214 | err = -EBUSY; | 214 | err = -EBUSY; |
| 215 | goto exit_journal; | 215 | goto exit_journal; |
| @@ -324,7 +324,7 @@ exit_bh: | |||
| 324 | brelse(bh); | 324 | brelse(bh); |
| 325 | 325 | ||
| 326 | exit_journal: | 326 | exit_journal: |
| 327 | unlock_super(sb); | 327 | mutex_unlock(&sbi->s_resize_lock); |
| 328 | if ((err2 = ext3_journal_stop(handle)) && !err) | 328 | if ((err2 = ext3_journal_stop(handle)) && !err) |
| 329 | err = err2; | 329 | err = err2; |
| 330 | 330 | ||
| @@ -662,11 +662,12 @@ exit_free: | |||
| 662 | * important part is that the new block and inode counts are in the backup | 662 | * important part is that the new block and inode counts are in the backup |
| 663 | * superblocks, and the location of the new group metadata in the GDT backups. | 663 | * superblocks, and the location of the new group metadata in the GDT backups. |
| 664 | * | 664 | * |
| 665 | * We do not need lock_super() for this, because these blocks are not | 665 | * We do not need to take the s_resize_lock for this, because these |
| 666 | * otherwise touched by the filesystem code when it is mounted. We don't | 666 | * blocks are not otherwise touched by the filesystem code when it is |
| 667 | * need to worry about last changing from sbi->s_groups_count, because the | 667 | * mounted. We don't need to worry about last changing from |
| 668 | * worst that can happen is that we do not copy the full number of backups | 668 | * sbi->s_groups_count, because the worst that can happen is that we |
| 669 | * at this time. The resize which changed s_groups_count will backup again. | 669 | * do not copy the full number of backups at this time. The resize |
| 670 | * which changed s_groups_count will backup again. | ||
| 670 | */ | 671 | */ |
| 671 | static void update_backups(struct super_block *sb, | 672 | static void update_backups(struct super_block *sb, |
| 672 | int blk_off, char *data, int size) | 673 | int blk_off, char *data, int size) |
| @@ -825,7 +826,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
| 825 | goto exit_put; | 826 | goto exit_put; |
| 826 | } | 827 | } |
| 827 | 828 | ||
| 828 | lock_super(sb); | 829 | mutex_lock(&sbi->s_resize_lock); |
| 829 | if (input->group != sbi->s_groups_count) { | 830 | if (input->group != sbi->s_groups_count) { |
| 830 | ext3_warning(sb, __func__, | 831 | ext3_warning(sb, __func__, |
| 831 | "multiple resizers run on filesystem!"); | 832 | "multiple resizers run on filesystem!"); |
| @@ -856,7 +857,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
| 856 | /* | 857 | /* |
| 857 | * OK, now we've set up the new group. Time to make it active. | 858 | * OK, now we've set up the new group. Time to make it active. |
| 858 | * | 859 | * |
| 859 | * Current kernels don't lock all allocations via lock_super(), | 860 | * We do not lock all allocations via s_resize_lock |
| 860 | * so we have to be safe wrt. concurrent accesses the group | 861 | * so we have to be safe wrt. concurrent accesses the group |
| 861 | * data. So we need to be careful to set all of the relevant | 862 | * data. So we need to be careful to set all of the relevant |
| 862 | * group descriptor data etc. *before* we enable the group. | 863 | * group descriptor data etc. *before* we enable the group. |
| @@ -900,12 +901,12 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
| 900 | * | 901 | * |
| 901 | * The precise rules we use are: | 902 | * The precise rules we use are: |
| 902 | * | 903 | * |
| 903 | * * Writers of s_groups_count *must* hold lock_super | 904 | * * Writers of s_groups_count *must* hold s_resize_lock |
| 904 | * AND | 905 | * AND |
| 905 | * * Writers must perform a smp_wmb() after updating all dependent | 906 | * * Writers must perform a smp_wmb() after updating all dependent |
| 906 | * data and before modifying the groups count | 907 | * data and before modifying the groups count |
| 907 | * | 908 | * |
| 908 | * * Readers must hold lock_super() over the access | 909 | * * Readers must hold s_resize_lock over the access |
| 909 | * OR | 910 | * OR |
| 910 | * * Readers must perform an smp_rmb() after reading the groups count | 911 | * * Readers must perform an smp_rmb() after reading the groups count |
| 911 | * and before reading any dependent data. | 912 | * and before reading any dependent data. |
| @@ -936,7 +937,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
| 936 | ext3_journal_dirty_metadata(handle, sbi->s_sbh); | 937 | ext3_journal_dirty_metadata(handle, sbi->s_sbh); |
| 937 | 938 | ||
| 938 | exit_journal: | 939 | exit_journal: |
| 939 | unlock_super(sb); | 940 | mutex_unlock(&sbi->s_resize_lock); |
| 940 | if ((err2 = ext3_journal_stop(handle)) && !err) | 941 | if ((err2 = ext3_journal_stop(handle)) && !err) |
| 941 | err = err2; | 942 | err = err2; |
| 942 | if (!err) { | 943 | if (!err) { |
| @@ -973,7 +974,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, | |||
| 973 | 974 | ||
| 974 | /* We don't need to worry about locking wrt other resizers just | 975 | /* We don't need to worry about locking wrt other resizers just |
| 975 | * yet: we're going to revalidate es->s_blocks_count after | 976 | * yet: we're going to revalidate es->s_blocks_count after |
| 976 | * taking lock_super() below. */ | 977 | * taking the s_resize_lock below. */ |
| 977 | o_blocks_count = le32_to_cpu(es->s_blocks_count); | 978 | o_blocks_count = le32_to_cpu(es->s_blocks_count); |
| 978 | o_groups_count = EXT3_SB(sb)->s_groups_count; | 979 | o_groups_count = EXT3_SB(sb)->s_groups_count; |
| 979 | 980 | ||
| @@ -1045,11 +1046,11 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, | |||
| 1045 | goto exit_put; | 1046 | goto exit_put; |
| 1046 | } | 1047 | } |
| 1047 | 1048 | ||
| 1048 | lock_super(sb); | 1049 | mutex_lock(&EXT3_SB(sb)->s_resize_lock); |
| 1049 | if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) { | 1050 | if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) { |
| 1050 | ext3_warning(sb, __func__, | 1051 | ext3_warning(sb, __func__, |
| 1051 | "multiple resizers run on filesystem!"); | 1052 | "multiple resizers run on filesystem!"); |
| 1052 | unlock_super(sb); | 1053 | mutex_unlock(&EXT3_SB(sb)->s_resize_lock); |
| 1053 | ext3_journal_stop(handle); | 1054 | ext3_journal_stop(handle); |
| 1054 | err = -EBUSY; | 1055 | err = -EBUSY; |
| 1055 | goto exit_put; | 1056 | goto exit_put; |
| @@ -1059,13 +1060,13 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, | |||
| 1059 | EXT3_SB(sb)->s_sbh))) { | 1060 | EXT3_SB(sb)->s_sbh))) { |
| 1060 | ext3_warning(sb, __func__, | 1061 | ext3_warning(sb, __func__, |
| 1061 | "error %d on journal write access", err); | 1062 | "error %d on journal write access", err); |
| 1062 | unlock_super(sb); | 1063 | mutex_unlock(&EXT3_SB(sb)->s_resize_lock); |
| 1063 | ext3_journal_stop(handle); | 1064 | ext3_journal_stop(handle); |
| 1064 | goto exit_put; | 1065 | goto exit_put; |
| 1065 | } | 1066 | } |
| 1066 | es->s_blocks_count = cpu_to_le32(o_blocks_count + add); | 1067 | es->s_blocks_count = cpu_to_le32(o_blocks_count + add); |
| 1067 | ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); | 1068 | ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); |
| 1068 | unlock_super(sb); | 1069 | mutex_unlock(&EXT3_SB(sb)->s_resize_lock); |
| 1069 | ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count, | 1070 | ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count, |
| 1070 | o_blocks_count + add); | 1071 | o_blocks_count + add); |
| 1071 | ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); | 1072 | ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); |
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 7ad1e8c30bd0..afa2b569da10 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
| @@ -1928,6 +1928,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
| 1928 | sb->dq_op = &ext3_quota_operations; | 1928 | sb->dq_op = &ext3_quota_operations; |
| 1929 | #endif | 1929 | #endif |
| 1930 | INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ | 1930 | INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ |
| 1931 | mutex_init(&sbi->s_orphan_lock); | ||
| 1932 | mutex_init(&sbi->s_resize_lock); | ||
| 1931 | 1933 | ||
| 1932 | sb->s_root = NULL; | 1934 | sb->s_root = NULL; |
| 1933 | 1935 | ||
| @@ -2014,14 +2016,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
| 2014 | } | 2016 | } |
| 2015 | 2017 | ||
| 2016 | ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); | 2018 | ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); |
| 2017 | /* | 2019 | |
| 2018 | * akpm: core read_super() calls in here with the superblock locked. | ||
| 2019 | * That deadlocks, because orphan cleanup needs to lock the superblock | ||
| 2020 | * in numerous places. Here we just pop the lock - it's relatively | ||
| 2021 | * harmless, because we are now ready to accept write_super() requests, | ||
| 2022 | * and aviro says that's the only reason for hanging onto the | ||
| 2023 | * superblock lock. | ||
| 2024 | */ | ||
| 2025 | EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS; | 2020 | EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS; |
| 2026 | ext3_orphan_cleanup(sb, es); | 2021 | ext3_orphan_cleanup(sb, es); |
| 2027 | EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; | 2022 | EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; |
| @@ -2403,13 +2398,11 @@ static void ext3_mark_recovery_complete(struct super_block * sb, | |||
| 2403 | if (journal_flush(journal) < 0) | 2398 | if (journal_flush(journal) < 0) |
| 2404 | goto out; | 2399 | goto out; |
| 2405 | 2400 | ||
| 2406 | lock_super(sb); | ||
| 2407 | if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && | 2401 | if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && |
| 2408 | sb->s_flags & MS_RDONLY) { | 2402 | sb->s_flags & MS_RDONLY) { |
| 2409 | EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); | 2403 | EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); |
| 2410 | ext3_commit_super(sb, es, 1); | 2404 | ext3_commit_super(sb, es, 1); |
| 2411 | } | 2405 | } |
| 2412 | unlock_super(sb); | ||
| 2413 | 2406 | ||
| 2414 | out: | 2407 | out: |
| 2415 | journal_unlock_updates(journal); | 2408 | journal_unlock_updates(journal); |
| @@ -2601,13 +2594,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) | |||
| 2601 | (sbi->s_mount_state & EXT3_VALID_FS)) | 2594 | (sbi->s_mount_state & EXT3_VALID_FS)) |
| 2602 | es->s_state = cpu_to_le16(sbi->s_mount_state); | 2595 | es->s_state = cpu_to_le16(sbi->s_mount_state); |
| 2603 | 2596 | ||
| 2604 | /* | ||
| 2605 | * We have to unlock super so that we can wait for | ||
| 2606 | * transactions. | ||
| 2607 | */ | ||
| 2608 | unlock_super(sb); | ||
| 2609 | ext3_mark_recovery_complete(sb, es); | 2597 | ext3_mark_recovery_complete(sb, es); |
| 2610 | lock_super(sb); | ||
| 2611 | } else { | 2598 | } else { |
| 2612 | __le32 ret; | 2599 | __le32 ret; |
| 2613 | if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, | 2600 | if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, |
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index e5f6774846e4..9acf7e808139 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig | |||
| @@ -2,7 +2,6 @@ config EXT4_FS | |||
| 2 | tristate "The Extended 4 (ext4) filesystem" | 2 | tristate "The Extended 4 (ext4) filesystem" |
| 3 | select JBD2 | 3 | select JBD2 |
| 4 | select CRC16 | 4 | select CRC16 |
| 5 | select FS_JOURNAL_INFO | ||
| 6 | help | 5 | help |
| 7 | This is the next generation of the ext3 filesystem. | 6 | This is the next generation of the ext3 filesystem. |
| 8 | 7 | ||
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ab31e65d46d0..56f9271ee8cc 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
| @@ -704,6 +704,10 @@ struct ext4_inode_info { | |||
| 704 | __u16 i_extra_isize; | 704 | __u16 i_extra_isize; |
| 705 | 705 | ||
| 706 | spinlock_t i_block_reservation_lock; | 706 | spinlock_t i_block_reservation_lock; |
| 707 | #ifdef CONFIG_QUOTA | ||
| 708 | /* quota space reservation, managed internally by quota code */ | ||
| 709 | qsize_t i_reserved_quota; | ||
| 710 | #endif | ||
| 707 | 711 | ||
| 708 | /* completed async DIOs that might need unwritten extents handling */ | 712 | /* completed async DIOs that might need unwritten extents handling */ |
| 709 | struct list_head i_aio_dio_complete_list; | 713 | struct list_head i_aio_dio_complete_list; |
| @@ -1435,7 +1439,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); | |||
| 1435 | extern int ext4_block_truncate_page(handle_t *handle, | 1439 | extern int ext4_block_truncate_page(handle_t *handle, |
| 1436 | struct address_space *mapping, loff_t from); | 1440 | struct address_space *mapping, loff_t from); |
| 1437 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 1441 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 1438 | extern qsize_t ext4_get_reserved_space(struct inode *inode); | 1442 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); |
| 1439 | extern int flush_aio_dio_completed_IO(struct inode *inode); | 1443 | extern int flush_aio_dio_completed_IO(struct inode *inode); |
| 1440 | /* ioctl.c */ | 1444 | /* ioctl.c */ |
| 1441 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); | 1445 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5352db1a3086..ab807963a614 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -1003,17 +1003,12 @@ out: | |||
| 1003 | return err; | 1003 | return err; |
| 1004 | } | 1004 | } |
| 1005 | 1005 | ||
| 1006 | qsize_t ext4_get_reserved_space(struct inode *inode) | 1006 | #ifdef CONFIG_QUOTA |
| 1007 | qsize_t *ext4_get_reserved_space(struct inode *inode) | ||
| 1007 | { | 1008 | { |
| 1008 | unsigned long long total; | 1009 | return &EXT4_I(inode)->i_reserved_quota; |
| 1009 | |||
| 1010 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1011 | total = EXT4_I(inode)->i_reserved_data_blocks + | ||
| 1012 | EXT4_I(inode)->i_reserved_meta_blocks; | ||
| 1013 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1014 | |||
| 1015 | return (total << inode->i_blkbits); | ||
| 1016 | } | 1010 | } |
| 1011 | #endif | ||
| 1017 | /* | 1012 | /* |
| 1018 | * Calculate the number of metadata blocks need to reserve | 1013 | * Calculate the number of metadata blocks need to reserve |
| 1019 | * to allocate @blocks for non extent file based file | 1014 | * to allocate @blocks for non extent file based file |
| @@ -1051,7 +1046,7 @@ static int ext4_calc_metadata_amount(struct inode *inode, int blocks) | |||
| 1051 | static void ext4_da_update_reserve_space(struct inode *inode, int used) | 1046 | static void ext4_da_update_reserve_space(struct inode *inode, int used) |
| 1052 | { | 1047 | { |
| 1053 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1048 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 1054 | int total, mdb, mdb_free; | 1049 | int total, mdb, mdb_free, mdb_claim = 0; |
| 1055 | 1050 | ||
| 1056 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | 1051 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); |
| 1057 | /* recalculate the number of metablocks still need to be reserved */ | 1052 | /* recalculate the number of metablocks still need to be reserved */ |
| @@ -1064,7 +1059,9 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) | |||
| 1064 | 1059 | ||
| 1065 | if (mdb_free) { | 1060 | if (mdb_free) { |
| 1066 | /* Account for allocated meta_blocks */ | 1061 | /* Account for allocated meta_blocks */ |
| 1067 | mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks; | 1062 | mdb_claim = EXT4_I(inode)->i_allocated_meta_blocks; |
| 1063 | BUG_ON(mdb_free < mdb_claim); | ||
| 1064 | mdb_free -= mdb_claim; | ||
| 1068 | 1065 | ||
| 1069 | /* update fs dirty blocks counter */ | 1066 | /* update fs dirty blocks counter */ |
| 1070 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free); | 1067 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free); |
| @@ -1075,8 +1072,11 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) | |||
| 1075 | /* update per-inode reservations */ | 1072 | /* update per-inode reservations */ |
| 1076 | BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); | 1073 | BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); |
| 1077 | EXT4_I(inode)->i_reserved_data_blocks -= used; | 1074 | EXT4_I(inode)->i_reserved_data_blocks -= used; |
| 1075 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, used + mdb_claim); | ||
| 1078 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 1076 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
| 1079 | 1077 | ||
| 1078 | vfs_dq_claim_block(inode, used + mdb_claim); | ||
| 1079 | |||
| 1080 | /* | 1080 | /* |
| 1081 | * free those over-booking quota for metadata blocks | 1081 | * free those over-booking quota for metadata blocks |
| 1082 | */ | 1082 | */ |
| @@ -1816,19 +1816,17 @@ repeat: | |||
| 1816 | 1816 | ||
| 1817 | md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; | 1817 | md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; |
| 1818 | total = md_needed + nrblocks; | 1818 | total = md_needed + nrblocks; |
| 1819 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1819 | 1820 | ||
| 1820 | /* | 1821 | /* |
| 1821 | * Make quota reservation here to prevent quota overflow | 1822 | * Make quota reservation here to prevent quota overflow |
| 1822 | * later. Real quota accounting is done at pages writeout | 1823 | * later. Real quota accounting is done at pages writeout |
| 1823 | * time. | 1824 | * time. |
| 1824 | */ | 1825 | */ |
| 1825 | if (vfs_dq_reserve_block(inode, total)) { | 1826 | if (vfs_dq_reserve_block(inode, total)) |
| 1826 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1827 | return -EDQUOT; | 1827 | return -EDQUOT; |
| 1828 | } | ||
| 1829 | 1828 | ||
| 1830 | if (ext4_claim_free_blocks(sbi, total)) { | 1829 | if (ext4_claim_free_blocks(sbi, total)) { |
| 1831 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1832 | vfs_dq_release_reservation_block(inode, total); | 1830 | vfs_dq_release_reservation_block(inode, total); |
| 1833 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | 1831 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { |
| 1834 | yield(); | 1832 | yield(); |
| @@ -1836,10 +1834,11 @@ repeat: | |||
| 1836 | } | 1834 | } |
| 1837 | return -ENOSPC; | 1835 | return -ENOSPC; |
| 1838 | } | 1836 | } |
| 1837 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1839 | EXT4_I(inode)->i_reserved_data_blocks += nrblocks; | 1838 | EXT4_I(inode)->i_reserved_data_blocks += nrblocks; |
| 1840 | EXT4_I(inode)->i_reserved_meta_blocks = mdblocks; | 1839 | EXT4_I(inode)->i_reserved_meta_blocks += md_needed; |
| 1841 | |||
| 1842 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 1840 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
| 1841 | |||
| 1843 | return 0; /* success */ | 1842 | return 0; /* success */ |
| 1844 | } | 1843 | } |
| 1845 | 1844 | ||
| @@ -4794,6 +4793,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
| 4794 | ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; | 4793 | ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; |
| 4795 | inode->i_size = ext4_isize(raw_inode); | 4794 | inode->i_size = ext4_isize(raw_inode); |
| 4796 | ei->i_disksize = inode->i_size; | 4795 | ei->i_disksize = inode->i_size; |
| 4796 | #ifdef CONFIG_QUOTA | ||
| 4797 | ei->i_reserved_quota = 0; | ||
| 4798 | #endif | ||
| 4797 | inode->i_generation = le32_to_cpu(raw_inode->i_generation); | 4799 | inode->i_generation = le32_to_cpu(raw_inode->i_generation); |
| 4798 | ei->i_block_group = iloc.block_group; | 4800 | ei->i_block_group = iloc.block_group; |
| 4799 | ei->i_last_alloc_group = ~0; | 4801 | ei->i_last_alloc_group = ~0; |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index b1fd3daadc9c..d34afad3e137 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
| @@ -2755,12 +2755,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |||
| 2755 | if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) | 2755 | if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) |
| 2756 | /* release all the reserved blocks if non delalloc */ | 2756 | /* release all the reserved blocks if non delalloc */ |
| 2757 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); | 2757 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); |
| 2758 | else { | ||
| 2759 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, | ||
| 2760 | ac->ac_b_ex.fe_len); | ||
| 2761 | /* convert reserved quota blocks to real quota blocks */ | ||
| 2762 | vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len); | ||
| 2763 | } | ||
| 2764 | 2758 | ||
| 2765 | if (sbi->s_log_groups_per_flex) { | 2759 | if (sbi->s_log_groups_per_flex) { |
| 2766 | ext4_group_t flex_group = ext4_flex_group(sbi, | 2760 | ext4_group_t flex_group = ext4_flex_group(sbi, |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 827bde1f2594..6ed9aa91f27d 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -704,6 +704,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | |||
| 704 | ei->i_allocated_meta_blocks = 0; | 704 | ei->i_allocated_meta_blocks = 0; |
| 705 | ei->i_delalloc_reserved_flag = 0; | 705 | ei->i_delalloc_reserved_flag = 0; |
| 706 | spin_lock_init(&(ei->i_block_reservation_lock)); | 706 | spin_lock_init(&(ei->i_block_reservation_lock)); |
| 707 | #ifdef CONFIG_QUOTA | ||
| 708 | ei->i_reserved_quota = 0; | ||
| 709 | #endif | ||
| 707 | INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); | 710 | INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); |
| 708 | ei->cur_aio_dio = NULL; | 711 | ei->cur_aio_dio = NULL; |
| 709 | ei->i_sync_tid = 0; | 712 | ei->i_sync_tid = 0; |
| @@ -1014,7 +1017,9 @@ static const struct dquot_operations ext4_quota_operations = { | |||
| 1014 | .reserve_space = dquot_reserve_space, | 1017 | .reserve_space = dquot_reserve_space, |
| 1015 | .claim_space = dquot_claim_space, | 1018 | .claim_space = dquot_claim_space, |
| 1016 | .release_rsv = dquot_release_reserved_space, | 1019 | .release_rsv = dquot_release_reserved_space, |
| 1020 | #ifdef CONFIG_QUOTA | ||
| 1017 | .get_reserved_space = ext4_get_reserved_space, | 1021 | .get_reserved_space = ext4_get_reserved_space, |
| 1022 | #endif | ||
| 1018 | .alloc_inode = dquot_alloc_inode, | 1023 | .alloc_inode = dquot_alloc_inode, |
| 1019 | .free_space = dquot_free_space, | 1024 | .free_space = dquot_free_space, |
| 1020 | .free_inode = dquot_free_inode, | 1025 | .free_inode = dquot_free_inode, |
diff --git a/fs/file_table.c b/fs/file_table.c index 0afacf654398..69652c5bd5f0 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
| @@ -186,10 +186,8 @@ struct file *alloc_file(struct path *path, fmode_t mode, | |||
| 186 | * that we can do debugging checks at __fput() | 186 | * that we can do debugging checks at __fput() |
| 187 | */ | 187 | */ |
| 188 | if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) { | 188 | if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) { |
| 189 | int error = 0; | ||
| 190 | file_take_write(file); | 189 | file_take_write(file); |
| 191 | error = mnt_clone_write(path->mnt); | 190 | WARN_ON(mnt_clone_write(path->mnt)); |
| 192 | WARN_ON(error); | ||
| 193 | } | 191 | } |
| 194 | ima_counts_get(file); | 192 | ima_counts_get(file); |
| 195 | return file; | 193 | return file; |
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig index b192c661caa6..4dcddf83326f 100644 --- a/fs/gfs2/Kconfig +++ b/fs/gfs2/Kconfig | |||
| @@ -10,7 +10,6 @@ config GFS2_FS | |||
| 10 | select SLOW_WORK | 10 | select SLOW_WORK |
| 11 | select QUOTA | 11 | select QUOTA |
| 12 | select QUOTACTL | 12 | select QUOTACTL |
| 13 | select FS_JOURNAL_INFO | ||
| 14 | help | 13 | help |
| 15 | A cluster filesystem. | 14 | A cluster filesystem. |
| 16 | 15 | ||
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 3ff32fa793da..6e220f4eee7d 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
| @@ -125,7 +125,7 @@ static struct inode *gfs2_iget_skip(struct super_block *sb, | |||
| 125 | * directory entry when gfs2_inode_lookup() is invoked. Part of the code | 125 | * directory entry when gfs2_inode_lookup() is invoked. Part of the code |
| 126 | * segment inside gfs2_inode_lookup code needs to get moved around. | 126 | * segment inside gfs2_inode_lookup code needs to get moved around. |
| 127 | * | 127 | * |
| 128 | * Clean up I_LOCK and I_NEW as well. | 128 | * Clears I_NEW as well. |
| 129 | **/ | 129 | **/ |
| 130 | 130 | ||
| 131 | void gfs2_set_iop(struct inode *inode) | 131 | void gfs2_set_iop(struct inode *inode) |
diff --git a/fs/inode.c b/fs/inode.c index 06c1f02de611..03dfeb2e3928 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
| @@ -113,7 +113,7 @@ static void wake_up_inode(struct inode *inode) | |||
| 113 | * Prevent speculative execution through spin_unlock(&inode_lock); | 113 | * Prevent speculative execution through spin_unlock(&inode_lock); |
| 114 | */ | 114 | */ |
| 115 | smp_mb(); | 115 | smp_mb(); |
| 116 | wake_up_bit(&inode->i_state, __I_LOCK); | 116 | wake_up_bit(&inode->i_state, __I_NEW); |
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | /** | 119 | /** |
| @@ -690,17 +690,17 @@ void unlock_new_inode(struct inode *inode) | |||
| 690 | } | 690 | } |
| 691 | #endif | 691 | #endif |
| 692 | /* | 692 | /* |
| 693 | * This is special! We do not need the spinlock when clearing I_LOCK, | 693 | * This is special! We do not need the spinlock when clearing I_NEW, |
| 694 | * because we're guaranteed that nobody else tries to do anything about | 694 | * because we're guaranteed that nobody else tries to do anything about |
| 695 | * the state of the inode when it is locked, as we just created it (so | 695 | * the state of the inode when it is locked, as we just created it (so |
| 696 | * there can be no old holders that haven't tested I_LOCK). | 696 | * there can be no old holders that haven't tested I_NEW). |
| 697 | * However we must emit the memory barrier so that other CPUs reliably | 697 | * However we must emit the memory barrier so that other CPUs reliably |
| 698 | * see the clearing of I_LOCK after the other inode initialisation has | 698 | * see the clearing of I_NEW after the other inode initialisation has |
| 699 | * completed. | 699 | * completed. |
| 700 | */ | 700 | */ |
| 701 | smp_mb(); | 701 | smp_mb(); |
| 702 | WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW)); | 702 | WARN_ON(!(inode->i_state & I_NEW)); |
| 703 | inode->i_state &= ~(I_LOCK|I_NEW); | 703 | inode->i_state &= ~I_NEW; |
| 704 | wake_up_inode(inode); | 704 | wake_up_inode(inode); |
| 705 | } | 705 | } |
| 706 | EXPORT_SYMBOL(unlock_new_inode); | 706 | EXPORT_SYMBOL(unlock_new_inode); |
| @@ -731,7 +731,7 @@ static struct inode *get_new_inode(struct super_block *sb, | |||
| 731 | goto set_failed; | 731 | goto set_failed; |
| 732 | 732 | ||
| 733 | __inode_add_to_lists(sb, head, inode); | 733 | __inode_add_to_lists(sb, head, inode); |
| 734 | inode->i_state = I_LOCK|I_NEW; | 734 | inode->i_state = I_NEW; |
| 735 | spin_unlock(&inode_lock); | 735 | spin_unlock(&inode_lock); |
| 736 | 736 | ||
| 737 | /* Return the locked inode with I_NEW set, the | 737 | /* Return the locked inode with I_NEW set, the |
| @@ -778,7 +778,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb, | |||
| 778 | if (!old) { | 778 | if (!old) { |
| 779 | inode->i_ino = ino; | 779 | inode->i_ino = ino; |
| 780 | __inode_add_to_lists(sb, head, inode); | 780 | __inode_add_to_lists(sb, head, inode); |
| 781 | inode->i_state = I_LOCK|I_NEW; | 781 | inode->i_state = I_NEW; |
| 782 | spin_unlock(&inode_lock); | 782 | spin_unlock(&inode_lock); |
| 783 | 783 | ||
| 784 | /* Return the locked inode with I_NEW set, the | 784 | /* Return the locked inode with I_NEW set, the |
| @@ -1083,7 +1083,7 @@ int insert_inode_locked(struct inode *inode) | |||
| 1083 | ino_t ino = inode->i_ino; | 1083 | ino_t ino = inode->i_ino; |
| 1084 | struct hlist_head *head = inode_hashtable + hash(sb, ino); | 1084 | struct hlist_head *head = inode_hashtable + hash(sb, ino); |
| 1085 | 1085 | ||
| 1086 | inode->i_state |= I_LOCK|I_NEW; | 1086 | inode->i_state |= I_NEW; |
| 1087 | while (1) { | 1087 | while (1) { |
| 1088 | struct hlist_node *node; | 1088 | struct hlist_node *node; |
| 1089 | struct inode *old = NULL; | 1089 | struct inode *old = NULL; |
| @@ -1120,7 +1120,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval, | |||
| 1120 | struct super_block *sb = inode->i_sb; | 1120 | struct super_block *sb = inode->i_sb; |
| 1121 | struct hlist_head *head = inode_hashtable + hash(sb, hashval); | 1121 | struct hlist_head *head = inode_hashtable + hash(sb, hashval); |
| 1122 | 1122 | ||
| 1123 | inode->i_state |= I_LOCK|I_NEW; | 1123 | inode->i_state |= I_NEW; |
| 1124 | 1124 | ||
| 1125 | while (1) { | 1125 | while (1) { |
| 1126 | struct hlist_node *node; | 1126 | struct hlist_node *node; |
| @@ -1510,7 +1510,7 @@ EXPORT_SYMBOL(inode_wait); | |||
| 1510 | * until the deletion _might_ have completed. Callers are responsible | 1510 | * until the deletion _might_ have completed. Callers are responsible |
| 1511 | * to recheck inode state. | 1511 | * to recheck inode state. |
| 1512 | * | 1512 | * |
| 1513 | * It doesn't matter if I_LOCK is not set initially, a call to | 1513 | * It doesn't matter if I_NEW is not set initially, a call to |
| 1514 | * wake_up_inode() after removing from the hash list will DTRT. | 1514 | * wake_up_inode() after removing from the hash list will DTRT. |
| 1515 | * | 1515 | * |
| 1516 | * This is called with inode_lock held. | 1516 | * This is called with inode_lock held. |
| @@ -1518,8 +1518,8 @@ EXPORT_SYMBOL(inode_wait); | |||
| 1518 | static void __wait_on_freeing_inode(struct inode *inode) | 1518 | static void __wait_on_freeing_inode(struct inode *inode) |
| 1519 | { | 1519 | { |
| 1520 | wait_queue_head_t *wq; | 1520 | wait_queue_head_t *wq; |
| 1521 | DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK); | 1521 | DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW); |
| 1522 | wq = bit_waitqueue(&inode->i_state, __I_LOCK); | 1522 | wq = bit_waitqueue(&inode->i_state, __I_NEW); |
| 1523 | prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); | 1523 | prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); |
| 1524 | spin_unlock(&inode_lock); | 1524 | spin_unlock(&inode_lock); |
| 1525 | schedule(); | 1525 | schedule(); |
diff --git a/fs/internal.h b/fs/internal.h index f67cd141d9a8..e96a1667d749 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
| @@ -85,3 +85,10 @@ extern struct file *get_empty_filp(void); | |||
| 85 | * super.c | 85 | * super.c |
| 86 | */ | 86 | */ |
| 87 | extern int do_remount_sb(struct super_block *, int, void *, int); | 87 | extern int do_remount_sb(struct super_block *, int, void *, int); |
| 88 | |||
| 89 | /* | ||
| 90 | * open.c | ||
| 91 | */ | ||
| 92 | struct nameidata; | ||
| 93 | extern struct file *nameidata_to_filp(struct nameidata *); | ||
| 94 | extern void release_open_intent(struct nameidata *); | ||
diff --git a/fs/jbd/Kconfig b/fs/jbd/Kconfig index a8408983abd4..4e28beeed157 100644 --- a/fs/jbd/Kconfig +++ b/fs/jbd/Kconfig | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | config JBD | 1 | config JBD |
| 2 | tristate | 2 | tristate |
| 3 | select FS_JOURNAL_INFO | ||
| 4 | help | 3 | help |
| 5 | This is a generic journalling layer for block devices. It is | 4 | This is a generic journalling layer for block devices. It is |
| 6 | currently used by the ext3 file system, but it could also be | 5 | currently used by the ext3 file system, but it could also be |
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 4160afad6d00..bd224eec9b07 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
| @@ -1913,7 +1913,7 @@ static void __init jbd_create_debugfs_entry(void) | |||
| 1913 | { | 1913 | { |
| 1914 | jbd_debugfs_dir = debugfs_create_dir("jbd", NULL); | 1914 | jbd_debugfs_dir = debugfs_create_dir("jbd", NULL); |
| 1915 | if (jbd_debugfs_dir) | 1915 | if (jbd_debugfs_dir) |
| 1916 | jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO, | 1916 | jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR, |
| 1917 | jbd_debugfs_dir, | 1917 | jbd_debugfs_dir, |
| 1918 | &journal_enable_debug); | 1918 | &journal_enable_debug); |
| 1919 | } | 1919 | } |
diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig index 0f7d1ceafdfd..f32f346f4b0a 100644 --- a/fs/jbd2/Kconfig +++ b/fs/jbd2/Kconfig | |||
| @@ -1,7 +1,6 @@ | |||
| 1 | config JBD2 | 1 | config JBD2 |
| 2 | tristate | 2 | tristate |
| 3 | select CRC32 | 3 | select CRC32 |
| 4 | select FS_JOURNAL_INFO | ||
| 5 | help | 4 | help |
| 6 | This is a generic journaling layer for block devices that support | 5 | This is a generic journaling layer for block devices that support |
| 7 | both 32-bit and 64-bit block numbers. It is currently used by | 6 | both 32-bit and 64-bit block numbers. It is currently used by |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index b7ca3a92a4db..17af879e6e9e 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
| @@ -2115,7 +2115,8 @@ static void __init jbd2_create_debugfs_entry(void) | |||
| 2115 | { | 2115 | { |
| 2116 | jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL); | 2116 | jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL); |
| 2117 | if (jbd2_debugfs_dir) | 2117 | if (jbd2_debugfs_dir) |
| 2118 | jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, S_IRUGO, | 2118 | jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, |
| 2119 | S_IRUGO | S_IWUSR, | ||
| 2119 | jbd2_debugfs_dir, | 2120 | jbd2_debugfs_dir, |
| 2120 | &jbd2_journal_enable_debug); | 2121 | &jbd2_journal_enable_debug); |
| 2121 | } | 2122 | } |
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index f26e4d03ada5..d945ea76b445 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c | |||
| @@ -1292,7 +1292,7 @@ int txCommit(tid_t tid, /* transaction identifier */ | |||
| 1292 | */ | 1292 | */ |
| 1293 | /* | 1293 | /* |
| 1294 | * I believe this code is no longer needed. Splitting I_LOCK | 1294 | * I believe this code is no longer needed. Splitting I_LOCK |
| 1295 | * into two bits, I_LOCK and I_SYNC should prevent this | 1295 | * into two bits, I_NEW and I_SYNC should prevent this |
| 1296 | * deadlock as well. But since I don't have a JFS testload | 1296 | * deadlock as well. But since I don't have a JFS testload |
| 1297 | * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done. | 1297 | * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done. |
| 1298 | * Joern | 1298 | * Joern |
diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 2234c73fc577..d929a822a74e 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c | |||
| @@ -524,7 +524,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 524 | * Page cache is indexed by long. | 524 | * Page cache is indexed by long. |
| 525 | * I would use MAX_LFS_FILESIZE, but it's only half as big | 525 | * I would use MAX_LFS_FILESIZE, but it's only half as big |
| 526 | */ | 526 | */ |
| 527 | sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes); | 527 | sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, (u64)sb->s_maxbytes); |
| 528 | #endif | 528 | #endif |
| 529 | sb->s_time_gran = 1; | 529 | sb->s_time_gran = 1; |
| 530 | return 0; | 530 | return 0; |
diff --git a/fs/namei.c b/fs/namei.c index d2783c8a770b..68921d9b5302 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -37,8 +37,6 @@ | |||
| 37 | 37 | ||
| 38 | #include "internal.h" | 38 | #include "internal.h" |
| 39 | 39 | ||
| 40 | #define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE]) | ||
| 41 | |||
| 42 | /* [Feb-1997 T. Schoebel-Theuer] | 40 | /* [Feb-1997 T. Schoebel-Theuer] |
| 43 | * Fundamental changes in the pathname lookup mechanisms (namei) | 41 | * Fundamental changes in the pathname lookup mechanisms (namei) |
| 44 | * were necessary because of omirr. The reason is that omirr needs | 42 | * were necessary because of omirr. The reason is that omirr needs |
| @@ -1640,6 +1638,7 @@ struct file *do_filp_open(int dfd, const char *pathname, | |||
| 1640 | if (filp == NULL) | 1638 | if (filp == NULL) |
| 1641 | return ERR_PTR(-ENFILE); | 1639 | return ERR_PTR(-ENFILE); |
| 1642 | nd.intent.open.file = filp; | 1640 | nd.intent.open.file = filp; |
| 1641 | filp->f_flags = open_flag; | ||
| 1643 | nd.intent.open.flags = flag; | 1642 | nd.intent.open.flags = flag; |
| 1644 | nd.intent.open.create_mode = 0; | 1643 | nd.intent.open.create_mode = 0; |
| 1645 | error = do_path_lookup(dfd, pathname, | 1644 | error = do_path_lookup(dfd, pathname, |
| @@ -1685,6 +1684,7 @@ struct file *do_filp_open(int dfd, const char *pathname, | |||
| 1685 | if (filp == NULL) | 1684 | if (filp == NULL) |
| 1686 | goto exit_parent; | 1685 | goto exit_parent; |
| 1687 | nd.intent.open.file = filp; | 1686 | nd.intent.open.file = filp; |
| 1687 | filp->f_flags = open_flag; | ||
| 1688 | nd.intent.open.flags = flag; | 1688 | nd.intent.open.flags = flag; |
| 1689 | nd.intent.open.create_mode = mode; | 1689 | nd.intent.open.create_mode = mode; |
| 1690 | dir = nd.path.dentry; | 1690 | dir = nd.path.dentry; |
| @@ -1725,7 +1725,7 @@ do_last: | |||
| 1725 | mnt_drop_write(nd.path.mnt); | 1725 | mnt_drop_write(nd.path.mnt); |
| 1726 | goto exit; | 1726 | goto exit; |
| 1727 | } | 1727 | } |
| 1728 | filp = nameidata_to_filp(&nd, open_flag); | 1728 | filp = nameidata_to_filp(&nd); |
| 1729 | mnt_drop_write(nd.path.mnt); | 1729 | mnt_drop_write(nd.path.mnt); |
| 1730 | if (nd.root.mnt) | 1730 | if (nd.root.mnt) |
| 1731 | path_put(&nd.root); | 1731 | path_put(&nd.root); |
| @@ -1764,7 +1764,7 @@ do_last: | |||
| 1764 | 1764 | ||
| 1765 | path_to_nameidata(&path, &nd); | 1765 | path_to_nameidata(&path, &nd); |
| 1766 | error = -EISDIR; | 1766 | error = -EISDIR; |
| 1767 | if (path.dentry->d_inode && S_ISDIR(path.dentry->d_inode->i_mode)) | 1767 | if (S_ISDIR(path.dentry->d_inode->i_mode)) |
| 1768 | goto exit; | 1768 | goto exit; |
| 1769 | ok: | 1769 | ok: |
| 1770 | /* | 1770 | /* |
| @@ -1789,7 +1789,7 @@ ok: | |||
| 1789 | mnt_drop_write(nd.path.mnt); | 1789 | mnt_drop_write(nd.path.mnt); |
| 1790 | goto exit; | 1790 | goto exit; |
| 1791 | } | 1791 | } |
| 1792 | filp = nameidata_to_filp(&nd, open_flag); | 1792 | filp = nameidata_to_filp(&nd); |
| 1793 | if (!IS_ERR(filp)) { | 1793 | if (!IS_ERR(filp)) { |
| 1794 | error = ima_path_check(&filp->f_path, filp->f_mode & | 1794 | error = ima_path_check(&filp->f_path, filp->f_mode & |
| 1795 | (MAY_READ | MAY_WRITE | MAY_EXEC)); | 1795 | (MAY_READ | MAY_WRITE | MAY_EXEC)); |
diff --git a/fs/namespace.c b/fs/namespace.c index faab1273281e..7d70d63ceb29 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -2068,7 +2068,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, | |||
| 2068 | * create_mnt_ns - creates a private namespace and adds a root filesystem | 2068 | * create_mnt_ns - creates a private namespace and adds a root filesystem |
| 2069 | * @mnt: pointer to the new root filesystem mountpoint | 2069 | * @mnt: pointer to the new root filesystem mountpoint |
| 2070 | */ | 2070 | */ |
| 2071 | static struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt) | 2071 | struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt) |
| 2072 | { | 2072 | { |
| 2073 | struct mnt_namespace *new_ns; | 2073 | struct mnt_namespace *new_ns; |
| 2074 | 2074 | ||
| @@ -2080,6 +2080,7 @@ static struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt) | |||
| 2080 | } | 2080 | } |
| 2081 | return new_ns; | 2081 | return new_ns; |
| 2082 | } | 2082 | } |
| 2083 | EXPORT_SYMBOL(create_mnt_ns); | ||
| 2083 | 2084 | ||
| 2084 | SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, | 2085 | SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, |
| 2085 | char __user *, type, unsigned long, flags, void __user *, data) | 2086 | char __user *, type, unsigned long, flags, void __user *, data) |
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index d5b112bcf3de..ce907efc5508 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
| @@ -2648,13 +2648,21 @@ out_freepage: | |||
| 2648 | static int nfs_follow_remote_path(struct vfsmount *root_mnt, | 2648 | static int nfs_follow_remote_path(struct vfsmount *root_mnt, |
| 2649 | const char *export_path, struct vfsmount *mnt_target) | 2649 | const char *export_path, struct vfsmount *mnt_target) |
| 2650 | { | 2650 | { |
| 2651 | struct mnt_namespace *ns_private; | ||
| 2651 | struct nameidata nd; | 2652 | struct nameidata nd; |
| 2652 | struct super_block *s; | 2653 | struct super_block *s; |
| 2653 | int ret; | 2654 | int ret; |
| 2654 | 2655 | ||
| 2656 | ns_private = create_mnt_ns(root_mnt); | ||
| 2657 | ret = PTR_ERR(ns_private); | ||
| 2658 | if (IS_ERR(ns_private)) | ||
| 2659 | goto out_mntput; | ||
| 2660 | |||
| 2655 | ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt, | 2661 | ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt, |
| 2656 | export_path, LOOKUP_FOLLOW, &nd); | 2662 | export_path, LOOKUP_FOLLOW, &nd); |
| 2657 | 2663 | ||
| 2664 | put_mnt_ns(ns_private); | ||
| 2665 | |||
| 2658 | if (ret != 0) | 2666 | if (ret != 0) |
| 2659 | goto out_err; | 2667 | goto out_err; |
| 2660 | 2668 | ||
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 1c12177b908c..55c8e63af0be 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c | |||
| @@ -89,7 +89,7 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp, | |||
| 89 | int flags = nfsexp_flags(rqstp, exp); | 89 | int flags = nfsexp_flags(rqstp, exp); |
| 90 | 90 | ||
| 91 | /* Check if the request originated from a secure port. */ | 91 | /* Check if the request originated from a secure port. */ |
| 92 | if (!rqstp->rq_secure && (flags & NFSEXP_INSECURE_PORT)) { | 92 | if (!rqstp->rq_secure && !(flags & NFSEXP_INSECURE_PORT)) { |
| 93 | RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); | 93 | RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); |
| 94 | dprintk(KERN_WARNING | 94 | dprintk(KERN_WARNING |
| 95 | "nfsd: request from insecure port %s!\n", | 95 | "nfsd: request from insecure port %s!\n", |
diff --git a/fs/nilfs2/Kconfig b/fs/nilfs2/Kconfig index 1225af7b2166..251da07b2a1d 100644 --- a/fs/nilfs2/Kconfig +++ b/fs/nilfs2/Kconfig | |||
| @@ -2,7 +2,6 @@ config NILFS2_FS | |||
| 2 | tristate "NILFS2 file system support (EXPERIMENTAL)" | 2 | tristate "NILFS2 file system support (EXPERIMENTAL)" |
| 3 | depends on EXPERIMENTAL | 3 | depends on EXPERIMENTAL |
| 4 | select CRC32 | 4 | select CRC32 |
| 5 | select FS_JOURNAL_INFO | ||
| 6 | help | 5 | help |
| 7 | NILFS2 is a log-structured file system (LFS) supporting continuous | 6 | NILFS2 is a log-structured file system (LFS) supporting continuous |
| 8 | snapshotting. In addition to versioning capability of the entire | 7 | snapshotting. In addition to versioning capability of the entire |
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index 9938034762cc..dc2505abb6d7 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c | |||
| @@ -530,7 +530,7 @@ err_corrupt_attr: | |||
| 530 | * the ntfs inode. | 530 | * the ntfs inode. |
| 531 | * | 531 | * |
| 532 | * Q: What locks are held when the function is called? | 532 | * Q: What locks are held when the function is called? |
| 533 | * A: i_state has I_LOCK set, hence the inode is locked, also | 533 | * A: i_state has I_NEW set, hence the inode is locked, also |
| 534 | * i_count is set to 1, so it is not going to go away | 534 | * i_count is set to 1, so it is not going to go away |
| 535 | * i_flags is set to 0 and we have no business touching it. Only an ioctl() | 535 | * i_flags is set to 0 and we have no business touching it. Only an ioctl() |
| 536 | * is allowed to write to them. We should of course be honouring them but | 536 | * is allowed to write to them. We should of course be honouring them but |
| @@ -1207,7 +1207,7 @@ err_out: | |||
| 1207 | * necessary fields in @vi as well as initializing the ntfs inode. | 1207 | * necessary fields in @vi as well as initializing the ntfs inode. |
| 1208 | * | 1208 | * |
| 1209 | * Q: What locks are held when the function is called? | 1209 | * Q: What locks are held when the function is called? |
| 1210 | * A: i_state has I_LOCK set, hence the inode is locked, also | 1210 | * A: i_state has I_NEW set, hence the inode is locked, also |
| 1211 | * i_count is set to 1, so it is not going to go away | 1211 | * i_count is set to 1, so it is not going to go away |
| 1212 | * | 1212 | * |
| 1213 | * Return 0 on success and -errno on error. In the error case, the inode will | 1213 | * Return 0 on success and -errno on error. In the error case, the inode will |
| @@ -1474,7 +1474,7 @@ err_out: | |||
| 1474 | * normal directory inodes. | 1474 | * normal directory inodes. |
| 1475 | * | 1475 | * |
| 1476 | * Q: What locks are held when the function is called? | 1476 | * Q: What locks are held when the function is called? |
| 1477 | * A: i_state has I_LOCK set, hence the inode is locked, also | 1477 | * A: i_state has I_NEW set, hence the inode is locked, also |
| 1478 | * i_count is set to 1, so it is not going to go away | 1478 | * i_count is set to 1, so it is not going to go away |
| 1479 | * | 1479 | * |
| 1480 | * Return 0 on success and -errno on error. In the error case, the inode will | 1480 | * Return 0 on success and -errno on error. In the error case, the inode will |
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig index 701b7a3a872e..0d840669698e 100644 --- a/fs/ocfs2/Kconfig +++ b/fs/ocfs2/Kconfig | |||
| @@ -6,6 +6,7 @@ config OCFS2_FS | |||
| 6 | select CRC32 | 6 | select CRC32 |
| 7 | select QUOTA | 7 | select QUOTA |
| 8 | select QUOTA_TREE | 8 | select QUOTA_TREE |
| 9 | select FS_POSIX_ACL | ||
| 9 | help | 10 | help |
| 10 | OCFS2 is a general purpose extent based shared disk cluster file | 11 | OCFS2 is a general purpose extent based shared disk cluster file |
| 11 | system with many similarities to ext3. It supports 64 bit inode | 12 | system with many similarities to ext3. It supports 64 bit inode |
| @@ -74,12 +75,3 @@ config OCFS2_DEBUG_FS | |||
| 74 | This option will enable expensive consistency checks. Enable | 75 | This option will enable expensive consistency checks. Enable |
| 75 | this option for debugging only as it is likely to decrease | 76 | this option for debugging only as it is likely to decrease |
| 76 | performance of the filesystem. | 77 | performance of the filesystem. |
| 77 | |||
| 78 | config OCFS2_FS_POSIX_ACL | ||
| 79 | bool "OCFS2 POSIX Access Control Lists" | ||
| 80 | depends on OCFS2_FS | ||
| 81 | select FS_POSIX_ACL | ||
| 82 | default n | ||
| 83 | help | ||
| 84 | Posix Access Control Lists (ACLs) support permissions for users and | ||
| 85 | groups beyond the owner/group/world scheme. | ||
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index 31f25ce32c97..600d2d2ade11 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile | |||
| @@ -39,11 +39,8 @@ ocfs2-objs := \ | |||
| 39 | ver.o \ | 39 | ver.o \ |
| 40 | quota_local.o \ | 40 | quota_local.o \ |
| 41 | quota_global.o \ | 41 | quota_global.o \ |
| 42 | xattr.o | 42 | xattr.o \ |
| 43 | 43 | acl.o | |
| 44 | ifeq ($(CONFIG_OCFS2_FS_POSIX_ACL),y) | ||
| 45 | ocfs2-objs += acl.o | ||
| 46 | endif | ||
| 47 | 44 | ||
| 48 | ocfs2_stackglue-objs := stackglue.o | 45 | ocfs2_stackglue-objs := stackglue.o |
| 49 | ocfs2_stack_o2cb-objs := stack_o2cb.o | 46 | ocfs2_stack_o2cb-objs := stack_o2cb.o |
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index e3e47415d851..0501974bedd0 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c | |||
| @@ -98,15 +98,11 @@ static struct posix_acl *ocfs2_get_acl_nolock(struct inode *inode, | |||
| 98 | int type, | 98 | int type, |
| 99 | struct buffer_head *di_bh) | 99 | struct buffer_head *di_bh) |
| 100 | { | 100 | { |
| 101 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
| 102 | int name_index; | 101 | int name_index; |
| 103 | char *value = NULL; | 102 | char *value = NULL; |
| 104 | struct posix_acl *acl; | 103 | struct posix_acl *acl; |
| 105 | int retval; | 104 | int retval; |
| 106 | 105 | ||
| 107 | if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) | ||
| 108 | return NULL; | ||
| 109 | |||
| 110 | switch (type) { | 106 | switch (type) { |
| 111 | case ACL_TYPE_ACCESS: | 107 | case ACL_TYPE_ACCESS: |
| 112 | name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; | 108 | name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; |
diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h index 8f6389ed4da5..5c5d31f05853 100644 --- a/fs/ocfs2/acl.h +++ b/fs/ocfs2/acl.h | |||
| @@ -26,8 +26,6 @@ struct ocfs2_acl_entry { | |||
| 26 | __le32 e_id; | 26 | __le32 e_id; |
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| 29 | #ifdef CONFIG_OCFS2_FS_POSIX_ACL | ||
| 30 | |||
| 31 | extern int ocfs2_check_acl(struct inode *, int); | 29 | extern int ocfs2_check_acl(struct inode *, int); |
| 32 | extern int ocfs2_acl_chmod(struct inode *); | 30 | extern int ocfs2_acl_chmod(struct inode *); |
| 33 | extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *, | 31 | extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *, |
| @@ -35,24 +33,4 @@ extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *, | |||
| 35 | struct ocfs2_alloc_context *, | 33 | struct ocfs2_alloc_context *, |
| 36 | struct ocfs2_alloc_context *); | 34 | struct ocfs2_alloc_context *); |
| 37 | 35 | ||
| 38 | #else /* CONFIG_OCFS2_FS_POSIX_ACL*/ | ||
| 39 | |||
| 40 | #define ocfs2_check_acl NULL | ||
| 41 | static inline int ocfs2_acl_chmod(struct inode *inode) | ||
| 42 | { | ||
| 43 | return 0; | ||
| 44 | } | ||
| 45 | static inline int ocfs2_init_acl(handle_t *handle, | ||
| 46 | struct inode *inode, | ||
| 47 | struct inode *dir, | ||
| 48 | struct buffer_head *di_bh, | ||
| 49 | struct buffer_head *dir_bh, | ||
| 50 | struct ocfs2_alloc_context *meta_ac, | ||
| 51 | struct ocfs2_alloc_context *data_ac) | ||
| 52 | { | ||
| 53 | return 0; | ||
| 54 | } | ||
| 55 | |||
| 56 | #endif /* CONFIG_OCFS2_FS_POSIX_ACL*/ | ||
| 57 | |||
| 58 | #endif /* OCFS2_ACL_H */ | 36 | #endif /* OCFS2_ACL_H */ |
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index fb4e672579b8..d17bdc718f74 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
| @@ -1765,9 +1765,9 @@ set_and_inc: | |||
| 1765 | * | 1765 | * |
| 1766 | * The array index of the subtree root is passed back. | 1766 | * The array index of the subtree root is passed back. |
| 1767 | */ | 1767 | */ |
| 1768 | static int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et, | 1768 | int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et, |
| 1769 | struct ocfs2_path *left, | 1769 | struct ocfs2_path *left, |
| 1770 | struct ocfs2_path *right) | 1770 | struct ocfs2_path *right) |
| 1771 | { | 1771 | { |
| 1772 | int i = 0; | 1772 | int i = 0; |
| 1773 | 1773 | ||
| @@ -2872,8 +2872,8 @@ out: | |||
| 2872 | * This looks similar, but is subtly different to | 2872 | * This looks similar, but is subtly different to |
| 2873 | * ocfs2_find_cpos_for_left_leaf(). | 2873 | * ocfs2_find_cpos_for_left_leaf(). |
| 2874 | */ | 2874 | */ |
| 2875 | static int ocfs2_find_cpos_for_right_leaf(struct super_block *sb, | 2875 | int ocfs2_find_cpos_for_right_leaf(struct super_block *sb, |
| 2876 | struct ocfs2_path *path, u32 *cpos) | 2876 | struct ocfs2_path *path, u32 *cpos) |
| 2877 | { | 2877 | { |
| 2878 | int i, j, ret = 0; | 2878 | int i, j, ret = 0; |
| 2879 | u64 blkno; | 2879 | u64 blkno; |
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 9c122d574464..1db4359ccb90 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h | |||
| @@ -317,4 +317,9 @@ int ocfs2_path_bh_journal_access(handle_t *handle, | |||
| 317 | int ocfs2_journal_access_path(struct ocfs2_caching_info *ci, | 317 | int ocfs2_journal_access_path(struct ocfs2_caching_info *ci, |
| 318 | handle_t *handle, | 318 | handle_t *handle, |
| 319 | struct ocfs2_path *path); | 319 | struct ocfs2_path *path); |
| 320 | int ocfs2_find_cpos_for_right_leaf(struct super_block *sb, | ||
| 321 | struct ocfs2_path *path, u32 *cpos); | ||
| 322 | int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et, | ||
| 323 | struct ocfs2_path *left, | ||
| 324 | struct ocfs2_path *right); | ||
| 320 | #endif /* OCFS2_ALLOC_H */ | 325 | #endif /* OCFS2_ALLOC_H */ |
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index c452d116b892..eda5b8bcddd5 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
| @@ -176,7 +176,8 @@ static void o2hb_write_timeout(struct work_struct *work) | |||
| 176 | 176 | ||
| 177 | static void o2hb_arm_write_timeout(struct o2hb_region *reg) | 177 | static void o2hb_arm_write_timeout(struct o2hb_region *reg) |
| 178 | { | 178 | { |
| 179 | mlog(0, "Queue write timeout for %u ms\n", O2HB_MAX_WRITE_TIMEOUT_MS); | 179 | mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", |
| 180 | O2HB_MAX_WRITE_TIMEOUT_MS); | ||
| 180 | 181 | ||
| 181 | cancel_delayed_work(®->hr_write_timeout_work); | 182 | cancel_delayed_work(®->hr_write_timeout_work); |
| 182 | reg->hr_last_timeout_start = jiffies; | 183 | reg->hr_last_timeout_start = jiffies; |
| @@ -874,7 +875,8 @@ static int o2hb_thread(void *data) | |||
| 874 | do_gettimeofday(&after_hb); | 875 | do_gettimeofday(&after_hb); |
| 875 | elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb); | 876 | elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb); |
| 876 | 877 | ||
| 877 | mlog(0, "start = %lu.%lu, end = %lu.%lu, msec = %u\n", | 878 | mlog(ML_HEARTBEAT, |
| 879 | "start = %lu.%lu, end = %lu.%lu, msec = %u\n", | ||
| 878 | before_hb.tv_sec, (unsigned long) before_hb.tv_usec, | 880 | before_hb.tv_sec, (unsigned long) before_hb.tv_usec, |
| 879 | after_hb.tv_sec, (unsigned long) after_hb.tv_usec, | 881 | after_hb.tv_sec, (unsigned long) after_hb.tv_usec, |
| 880 | elapsed_msec); | 882 | elapsed_msec); |
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c index 7ee6188bc79a..c81142e3ef84 100644 --- a/fs/ocfs2/cluster/nodemanager.c +++ b/fs/ocfs2/cluster/nodemanager.c | |||
| @@ -35,6 +35,10 @@ | |||
| 35 | * cluster references throughout where nodes are looked up */ | 35 | * cluster references throughout where nodes are looked up */ |
| 36 | struct o2nm_cluster *o2nm_single_cluster = NULL; | 36 | struct o2nm_cluster *o2nm_single_cluster = NULL; |
| 37 | 37 | ||
| 38 | char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { | ||
| 39 | "reset", /* O2NM_FENCE_RESET */ | ||
| 40 | "panic", /* O2NM_FENCE_PANIC */ | ||
| 41 | }; | ||
| 38 | 42 | ||
| 39 | struct o2nm_node *o2nm_get_node_by_num(u8 node_num) | 43 | struct o2nm_node *o2nm_get_node_by_num(u8 node_num) |
| 40 | { | 44 | { |
| @@ -579,6 +583,43 @@ static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write( | |||
| 579 | return o2nm_cluster_attr_write(page, count, | 583 | return o2nm_cluster_attr_write(page, count, |
| 580 | &cluster->cl_reconnect_delay_ms); | 584 | &cluster->cl_reconnect_delay_ms); |
| 581 | } | 585 | } |
| 586 | |||
| 587 | static ssize_t o2nm_cluster_attr_fence_method_read( | ||
| 588 | struct o2nm_cluster *cluster, char *page) | ||
| 589 | { | ||
| 590 | ssize_t ret = 0; | ||
| 591 | |||
| 592 | if (cluster) | ||
| 593 | ret = sprintf(page, "%s\n", | ||
| 594 | o2nm_fence_method_desc[cluster->cl_fence_method]); | ||
| 595 | return ret; | ||
| 596 | } | ||
| 597 | |||
| 598 | static ssize_t o2nm_cluster_attr_fence_method_write( | ||
| 599 | struct o2nm_cluster *cluster, const char *page, size_t count) | ||
| 600 | { | ||
| 601 | unsigned int i; | ||
| 602 | |||
| 603 | if (page[count - 1] != '\n') | ||
| 604 | goto bail; | ||
| 605 | |||
| 606 | for (i = 0; i < O2NM_FENCE_METHODS; ++i) { | ||
| 607 | if (count != strlen(o2nm_fence_method_desc[i]) + 1) | ||
| 608 | continue; | ||
| 609 | if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) | ||
| 610 | continue; | ||
| 611 | if (cluster->cl_fence_method != i) { | ||
| 612 | printk(KERN_INFO "ocfs2: Changing fence method to %s\n", | ||
| 613 | o2nm_fence_method_desc[i]); | ||
| 614 | cluster->cl_fence_method = i; | ||
| 615 | } | ||
| 616 | return count; | ||
| 617 | } | ||
| 618 | |||
| 619 | bail: | ||
| 620 | return -EINVAL; | ||
| 621 | } | ||
| 622 | |||
| 582 | static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = { | 623 | static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = { |
| 583 | .attr = { .ca_owner = THIS_MODULE, | 624 | .attr = { .ca_owner = THIS_MODULE, |
| 584 | .ca_name = "idle_timeout_ms", | 625 | .ca_name = "idle_timeout_ms", |
| @@ -603,10 +644,19 @@ static struct o2nm_cluster_attribute o2nm_cluster_attr_reconnect_delay_ms = { | |||
| 603 | .store = o2nm_cluster_attr_reconnect_delay_ms_write, | 644 | .store = o2nm_cluster_attr_reconnect_delay_ms_write, |
| 604 | }; | 645 | }; |
| 605 | 646 | ||
| 647 | static struct o2nm_cluster_attribute o2nm_cluster_attr_fence_method = { | ||
| 648 | .attr = { .ca_owner = THIS_MODULE, | ||
| 649 | .ca_name = "fence_method", | ||
| 650 | .ca_mode = S_IRUGO | S_IWUSR }, | ||
| 651 | .show = o2nm_cluster_attr_fence_method_read, | ||
| 652 | .store = o2nm_cluster_attr_fence_method_write, | ||
| 653 | }; | ||
| 654 | |||
| 606 | static struct configfs_attribute *o2nm_cluster_attrs[] = { | 655 | static struct configfs_attribute *o2nm_cluster_attrs[] = { |
| 607 | &o2nm_cluster_attr_idle_timeout_ms.attr, | 656 | &o2nm_cluster_attr_idle_timeout_ms.attr, |
| 608 | &o2nm_cluster_attr_keepalive_delay_ms.attr, | 657 | &o2nm_cluster_attr_keepalive_delay_ms.attr, |
| 609 | &o2nm_cluster_attr_reconnect_delay_ms.attr, | 658 | &o2nm_cluster_attr_reconnect_delay_ms.attr, |
| 659 | &o2nm_cluster_attr_fence_method.attr, | ||
| 610 | NULL, | 660 | NULL, |
| 611 | }; | 661 | }; |
| 612 | static ssize_t o2nm_cluster_show(struct config_item *item, | 662 | static ssize_t o2nm_cluster_show(struct config_item *item, |
| @@ -778,6 +828,7 @@ static struct config_group *o2nm_cluster_group_make_group(struct config_group *g | |||
| 778 | cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; | 828 | cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; |
| 779 | cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; | 829 | cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; |
| 780 | cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; | 830 | cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; |
| 831 | cluster->cl_fence_method = O2NM_FENCE_RESET; | ||
| 781 | 832 | ||
| 782 | ret = &cluster->cl_group; | 833 | ret = &cluster->cl_group; |
| 783 | o2nm_single_cluster = cluster; | 834 | o2nm_single_cluster = cluster; |
diff --git a/fs/ocfs2/cluster/nodemanager.h b/fs/ocfs2/cluster/nodemanager.h index c992ea0da4ad..09ea2d388bbb 100644 --- a/fs/ocfs2/cluster/nodemanager.h +++ b/fs/ocfs2/cluster/nodemanager.h | |||
| @@ -33,6 +33,12 @@ | |||
| 33 | #include <linux/configfs.h> | 33 | #include <linux/configfs.h> |
| 34 | #include <linux/rbtree.h> | 34 | #include <linux/rbtree.h> |
| 35 | 35 | ||
| 36 | enum o2nm_fence_method { | ||
| 37 | O2NM_FENCE_RESET = 0, | ||
| 38 | O2NM_FENCE_PANIC, | ||
| 39 | O2NM_FENCE_METHODS, /* Number of fence methods */ | ||
| 40 | }; | ||
| 41 | |||
| 36 | struct o2nm_node { | 42 | struct o2nm_node { |
| 37 | spinlock_t nd_lock; | 43 | spinlock_t nd_lock; |
| 38 | struct config_item nd_item; | 44 | struct config_item nd_item; |
| @@ -58,6 +64,7 @@ struct o2nm_cluster { | |||
| 58 | unsigned int cl_idle_timeout_ms; | 64 | unsigned int cl_idle_timeout_ms; |
| 59 | unsigned int cl_keepalive_delay_ms; | 65 | unsigned int cl_keepalive_delay_ms; |
| 60 | unsigned int cl_reconnect_delay_ms; | 66 | unsigned int cl_reconnect_delay_ms; |
| 67 | enum o2nm_fence_method cl_fence_method; | ||
| 61 | 68 | ||
| 62 | /* this bitmap is part of a hack for disk bitmap.. will go eventually. - zab */ | 69 | /* this bitmap is part of a hack for disk bitmap.. will go eventually. - zab */ |
| 63 | unsigned long cl_nodes_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 70 | unsigned long cl_nodes_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index bbacf7da48a4..639024033fce 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c | |||
| @@ -74,8 +74,20 @@ static void o2quo_fence_self(void) | |||
| 74 | * threads can still schedule, etc, etc */ | 74 | * threads can still schedule, etc, etc */ |
| 75 | o2hb_stop_all_regions(); | 75 | o2hb_stop_all_regions(); |
| 76 | 76 | ||
| 77 | printk("ocfs2 is very sorry to be fencing this system by restarting\n"); | 77 | switch (o2nm_single_cluster->cl_fence_method) { |
| 78 | emergency_restart(); | 78 | case O2NM_FENCE_PANIC: |
| 79 | panic("*** ocfs2 is very sorry to be fencing this system by " | ||
| 80 | "panicing ***\n"); | ||
| 81 | break; | ||
| 82 | default: | ||
| 83 | WARN_ON(o2nm_single_cluster->cl_fence_method >= | ||
| 84 | O2NM_FENCE_METHODS); | ||
| 85 | case O2NM_FENCE_RESET: | ||
| 86 | printk(KERN_ERR "*** ocfs2 is very sorry to be fencing this " | ||
| 87 | "system by restarting ***\n"); | ||
| 88 | emergency_restart(); | ||
| 89 | break; | ||
| 90 | }; | ||
| 79 | } | 91 | } |
| 80 | 92 | ||
| 81 | /* Indicate that a timeout occured on a hearbeat region write. The | 93 | /* Indicate that a timeout occured on a hearbeat region write. The |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index d9fa3d22e17c..2f9e4e19a4f2 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
| @@ -2589,6 +2589,14 @@ retry: | |||
| 2589 | "begin reco msg (%d)\n", dlm->name, nodenum, ret); | 2589 | "begin reco msg (%d)\n", dlm->name, nodenum, ret); |
| 2590 | ret = 0; | 2590 | ret = 0; |
| 2591 | } | 2591 | } |
| 2592 | if (ret == -EAGAIN) { | ||
| 2593 | mlog(0, "%s: trying to start recovery of node " | ||
| 2594 | "%u, but node %u is waiting for last recovery " | ||
| 2595 | "to complete, backoff for a bit\n", dlm->name, | ||
| 2596 | dead_node, nodenum); | ||
| 2597 | msleep(100); | ||
| 2598 | goto retry; | ||
| 2599 | } | ||
| 2592 | if (ret < 0) { | 2600 | if (ret < 0) { |
| 2593 | struct dlm_lock_resource *res; | 2601 | struct dlm_lock_resource *res; |
| 2594 | /* this is now a serious problem, possibly ENOMEM | 2602 | /* this is now a serious problem, possibly ENOMEM |
| @@ -2608,14 +2616,6 @@ retry: | |||
| 2608 | * another ENOMEM */ | 2616 | * another ENOMEM */ |
| 2609 | msleep(100); | 2617 | msleep(100); |
| 2610 | goto retry; | 2618 | goto retry; |
| 2611 | } else if (ret == EAGAIN) { | ||
| 2612 | mlog(0, "%s: trying to start recovery of node " | ||
| 2613 | "%u, but node %u is waiting for last recovery " | ||
| 2614 | "to complete, backoff for a bit\n", dlm->name, | ||
| 2615 | dead_node, nodenum); | ||
| 2616 | /* TODO Look into replacing msleep with cond_resched() */ | ||
| 2617 | msleep(100); | ||
| 2618 | goto retry; | ||
| 2619 | } | 2619 | } |
| 2620 | } | 2620 | } |
| 2621 | 2621 | ||
| @@ -2639,7 +2639,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, | |||
| 2639 | dlm->name, br->node_idx, br->dead_node, | 2639 | dlm->name, br->node_idx, br->dead_node, |
| 2640 | dlm->reco.dead_node, dlm->reco.new_master); | 2640 | dlm->reco.dead_node, dlm->reco.new_master); |
| 2641 | spin_unlock(&dlm->spinlock); | 2641 | spin_unlock(&dlm->spinlock); |
| 2642 | return EAGAIN; | 2642 | return -EAGAIN; |
| 2643 | } | 2643 | } |
| 2644 | spin_unlock(&dlm->spinlock); | 2644 | spin_unlock(&dlm->spinlock); |
| 2645 | 2645 | ||
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 843db64e9d4a..d35a27f4523e 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include "extent_map.h" | 37 | #include "extent_map.h" |
| 38 | #include "inode.h" | 38 | #include "inode.h" |
| 39 | #include "super.h" | 39 | #include "super.h" |
| 40 | #include "symlink.h" | ||
| 40 | 41 | ||
| 41 | #include "buffer_head_io.h" | 42 | #include "buffer_head_io.h" |
| 42 | 43 | ||
| @@ -703,6 +704,12 @@ out: | |||
| 703 | return ret; | 704 | return ret; |
| 704 | } | 705 | } |
| 705 | 706 | ||
| 707 | /* | ||
| 708 | * The ocfs2_fiemap_inline() may be a little bit misleading, since | ||
| 709 | * it not only handles the fiemap for inlined files, but also deals | ||
| 710 | * with the fast symlink, cause they have no difference for extent | ||
| 711 | * mapping per se. | ||
| 712 | */ | ||
| 706 | static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh, | 713 | static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh, |
| 707 | struct fiemap_extent_info *fieinfo, | 714 | struct fiemap_extent_info *fieinfo, |
| 708 | u64 map_start) | 715 | u64 map_start) |
| @@ -715,11 +722,18 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh, | |||
| 715 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 722 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
| 716 | 723 | ||
| 717 | di = (struct ocfs2_dinode *)di_bh->b_data; | 724 | di = (struct ocfs2_dinode *)di_bh->b_data; |
| 718 | id_count = le16_to_cpu(di->id2.i_data.id_count); | 725 | if (ocfs2_inode_is_fast_symlink(inode)) |
| 726 | id_count = ocfs2_fast_symlink_chars(inode->i_sb); | ||
| 727 | else | ||
| 728 | id_count = le16_to_cpu(di->id2.i_data.id_count); | ||
| 719 | 729 | ||
| 720 | if (map_start < id_count) { | 730 | if (map_start < id_count) { |
| 721 | phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits; | 731 | phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits; |
| 722 | phys += offsetof(struct ocfs2_dinode, id2.i_data.id_data); | 732 | if (ocfs2_inode_is_fast_symlink(inode)) |
| 733 | phys += offsetof(struct ocfs2_dinode, id2.i_symlink); | ||
| 734 | else | ||
| 735 | phys += offsetof(struct ocfs2_dinode, | ||
| 736 | id2.i_data.id_data); | ||
| 723 | 737 | ||
| 724 | ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count, | 738 | ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count, |
| 725 | flags); | 739 | flags); |
| @@ -756,9 +770,10 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
| 756 | down_read(&OCFS2_I(inode)->ip_alloc_sem); | 770 | down_read(&OCFS2_I(inode)->ip_alloc_sem); |
| 757 | 771 | ||
| 758 | /* | 772 | /* |
| 759 | * Handle inline-data separately. | 773 | * Handle inline-data and fast symlink separately. |
| 760 | */ | 774 | */ |
| 761 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { | 775 | if ((OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) || |
| 776 | ocfs2_inode_is_fast_symlink(inode)) { | ||
| 762 | ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start); | 777 | ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start); |
| 763 | goto out_unlock; | 778 | goto out_unlock; |
| 764 | } | 779 | } |
| @@ -786,6 +801,8 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
| 786 | fe_flags = 0; | 801 | fe_flags = 0; |
| 787 | if (rec.e_flags & OCFS2_EXT_UNWRITTEN) | 802 | if (rec.e_flags & OCFS2_EXT_UNWRITTEN) |
| 788 | fe_flags |= FIEMAP_EXTENT_UNWRITTEN; | 803 | fe_flags |= FIEMAP_EXTENT_UNWRITTEN; |
| 804 | if (rec.e_flags & OCFS2_EXT_REFCOUNTED) | ||
| 805 | fe_flags |= FIEMAP_EXTENT_SHARED; | ||
| 789 | if (is_last) | 806 | if (is_last) |
| 790 | fe_flags |= FIEMAP_EXTENT_LAST; | 807 | fe_flags |= FIEMAP_EXTENT_LAST; |
| 791 | len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits; | 808 | len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits; |
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index f010b22b1c44..50fb26a6a5f5 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
| @@ -2108,6 +2108,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
| 2108 | } | 2108 | } |
| 2109 | did_quota_inode = 1; | 2109 | did_quota_inode = 1; |
| 2110 | 2110 | ||
| 2111 | inode->i_nlink = 0; | ||
| 2111 | /* do the real work now. */ | 2112 | /* do the real work now. */ |
| 2112 | status = ocfs2_mknod_locked(osb, dir, inode, | 2113 | status = ocfs2_mknod_locked(osb, dir, inode, |
| 2113 | 0, &new_di_bh, parent_di_bh, handle, | 2114 | 0, &new_di_bh, parent_di_bh, handle, |
| @@ -2136,6 +2137,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, | |||
| 2136 | if (status < 0) | 2137 | if (status < 0) |
| 2137 | mlog_errno(status); | 2138 | mlog_errno(status); |
| 2138 | 2139 | ||
| 2140 | insert_inode_hash(inode); | ||
| 2139 | leave: | 2141 | leave: |
| 2140 | if (status < 0 && did_quota_inode) | 2142 | if (status < 0 && did_quota_inode) |
| 2141 | vfs_dq_free_inode(inode); | 2143 | vfs_dq_free_inode(inode); |
| @@ -2267,6 +2269,8 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, | |||
| 2267 | di = (struct ocfs2_dinode *)di_bh->b_data; | 2269 | di = (struct ocfs2_dinode *)di_bh->b_data; |
| 2268 | le32_add_cpu(&di->i_flags, -OCFS2_ORPHANED_FL); | 2270 | le32_add_cpu(&di->i_flags, -OCFS2_ORPHANED_FL); |
| 2269 | di->i_orphaned_slot = 0; | 2271 | di->i_orphaned_slot = 0; |
| 2272 | inode->i_nlink = 1; | ||
| 2273 | ocfs2_set_links_count(di, inode->i_nlink); | ||
| 2270 | ocfs2_journal_dirty(handle, di_bh); | 2274 | ocfs2_journal_dirty(handle, di_bh); |
| 2271 | 2275 | ||
| 2272 | status = ocfs2_add_entry(handle, dentry, inode, | 2276 | status = ocfs2_add_entry(handle, dentry, inode, |
| @@ -2284,7 +2288,6 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, | |||
| 2284 | goto out_commit; | 2288 | goto out_commit; |
| 2285 | } | 2289 | } |
| 2286 | 2290 | ||
| 2287 | insert_inode_hash(inode); | ||
| 2288 | dentry->d_op = &ocfs2_dentry_ops; | 2291 | dentry->d_op = &ocfs2_dentry_ops; |
| 2289 | d_instantiate(dentry, inode); | 2292 | d_instantiate(dentry, inode); |
| 2290 | status = 0; | 2293 | status = 0; |
| @@ -2326,4 +2329,5 @@ const struct inode_operations ocfs2_dir_iops = { | |||
| 2326 | .getxattr = generic_getxattr, | 2329 | .getxattr = generic_getxattr, |
| 2327 | .listxattr = ocfs2_listxattr, | 2330 | .listxattr = ocfs2_listxattr, |
| 2328 | .removexattr = generic_removexattr, | 2331 | .removexattr = generic_removexattr, |
| 2332 | .fiemap = ocfs2_fiemap, | ||
| 2329 | }; | 2333 | }; |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index d963d8638709..9362eea7424b 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
| @@ -245,9 +245,11 @@ enum ocfs2_mount_options | |||
| 245 | OCFS2_MOUNT_LOCALFLOCKS = 1 << 5, /* No cluster aware user file locks */ | 245 | OCFS2_MOUNT_LOCALFLOCKS = 1 << 5, /* No cluster aware user file locks */ |
| 246 | OCFS2_MOUNT_NOUSERXATTR = 1 << 6, /* No user xattr */ | 246 | OCFS2_MOUNT_NOUSERXATTR = 1 << 6, /* No user xattr */ |
| 247 | OCFS2_MOUNT_INODE64 = 1 << 7, /* Allow inode numbers > 2^32 */ | 247 | OCFS2_MOUNT_INODE64 = 1 << 7, /* Allow inode numbers > 2^32 */ |
| 248 | OCFS2_MOUNT_POSIX_ACL = 1 << 8, /* POSIX access control lists */ | 248 | OCFS2_MOUNT_POSIX_ACL = 1 << 8, /* Force POSIX access control lists */ |
| 249 | OCFS2_MOUNT_USRQUOTA = 1 << 9, /* We support user quotas */ | 249 | OCFS2_MOUNT_NO_POSIX_ACL = 1 << 9, /* Disable POSIX access |
| 250 | OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */ | 250 | control lists */ |
| 251 | OCFS2_MOUNT_USRQUOTA = 1 << 10, /* We support user quotas */ | ||
| 252 | OCFS2_MOUNT_GRPQUOTA = 1 << 11, /* We support group quotas */ | ||
| 251 | }; | 253 | }; |
| 252 | 254 | ||
| 253 | #define OCFS2_OSB_SOFT_RO 0x0001 | 255 | #define OCFS2_OSB_SOFT_RO 0x0001 |
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index e9431e4a5e7c..1a1a679e51b5 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h | |||
| @@ -1202,7 +1202,7 @@ struct ocfs2_local_disk_dqinfo { | |||
| 1202 | /* Header of one chunk of a quota file */ | 1202 | /* Header of one chunk of a quota file */ |
| 1203 | struct ocfs2_local_disk_chunk { | 1203 | struct ocfs2_local_disk_chunk { |
| 1204 | __le32 dqc_free; /* Number of free entries in the bitmap */ | 1204 | __le32 dqc_free; /* Number of free entries in the bitmap */ |
| 1205 | u8 dqc_bitmap[0]; /* Bitmap of entries in the corresponding | 1205 | __u8 dqc_bitmap[0]; /* Bitmap of entries in the corresponding |
| 1206 | * chunk of quota file */ | 1206 | * chunk of quota file */ |
| 1207 | }; | 1207 | }; |
| 1208 | 1208 | ||
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 30967e3f5e43..74db2be75dd6 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
| @@ -276,7 +276,7 @@ static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb, | |||
| 276 | spin_unlock(&osb->osb_lock); | 276 | spin_unlock(&osb->osb_lock); |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | void ocfs2_kref_remove_refcount_tree(struct kref *kref) | 279 | static void ocfs2_kref_remove_refcount_tree(struct kref *kref) |
| 280 | { | 280 | { |
| 281 | struct ocfs2_refcount_tree *tree = | 281 | struct ocfs2_refcount_tree *tree = |
| 282 | container_of(kref, struct ocfs2_refcount_tree, rf_getcnt); | 282 | container_of(kref, struct ocfs2_refcount_tree, rf_getcnt); |
| @@ -524,23 +524,6 @@ out: | |||
| 524 | return ret; | 524 | return ret; |
| 525 | } | 525 | } |
| 526 | 526 | ||
| 527 | int ocfs2_lock_refcount_tree_by_inode(struct inode *inode, int rw, | ||
| 528 | struct ocfs2_refcount_tree **ret_tree, | ||
| 529 | struct buffer_head **ref_bh) | ||
| 530 | { | ||
| 531 | int ret; | ||
| 532 | u64 ref_blkno; | ||
| 533 | |||
| 534 | ret = ocfs2_get_refcount_block(inode, &ref_blkno); | ||
| 535 | if (ret) { | ||
| 536 | mlog_errno(ret); | ||
| 537 | return ret; | ||
| 538 | } | ||
| 539 | |||
| 540 | return ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, | ||
| 541 | rw, ret_tree, ref_bh); | ||
| 542 | } | ||
| 543 | |||
| 544 | void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb, | 527 | void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb, |
| 545 | struct ocfs2_refcount_tree *tree, int rw) | 528 | struct ocfs2_refcount_tree *tree, int rw) |
| 546 | { | 529 | { |
| @@ -969,6 +952,103 @@ out: | |||
| 969 | } | 952 | } |
| 970 | 953 | ||
| 971 | /* | 954 | /* |
| 955 | * Find the end range for a leaf refcount block indicated by | ||
| 956 | * el->l_recs[index].e_blkno. | ||
| 957 | */ | ||
| 958 | static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci, | ||
| 959 | struct buffer_head *ref_root_bh, | ||
| 960 | struct ocfs2_extent_block *eb, | ||
| 961 | struct ocfs2_extent_list *el, | ||
| 962 | int index, u32 *cpos_end) | ||
| 963 | { | ||
| 964 | int ret, i, subtree_root; | ||
| 965 | u32 cpos; | ||
| 966 | u64 blkno; | ||
| 967 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); | ||
| 968 | struct ocfs2_path *left_path = NULL, *right_path = NULL; | ||
| 969 | struct ocfs2_extent_tree et; | ||
| 970 | struct ocfs2_extent_list *tmp_el; | ||
| 971 | |||
| 972 | if (index < le16_to_cpu(el->l_next_free_rec) - 1) { | ||
| 973 | /* | ||
| 974 | * We have a extent rec after index, so just use the e_cpos | ||
| 975 | * of the next extent rec. | ||
| 976 | */ | ||
| 977 | *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos); | ||
| 978 | return 0; | ||
| 979 | } | ||
| 980 | |||
| 981 | if (!eb || (eb && !eb->h_next_leaf_blk)) { | ||
| 982 | /* | ||
| 983 | * We are the last extent rec, so any high cpos should | ||
| 984 | * be stored in this leaf refcount block. | ||
| 985 | */ | ||
| 986 | *cpos_end = UINT_MAX; | ||
| 987 | return 0; | ||
| 988 | } | ||
| 989 | |||
| 990 | /* | ||
| 991 | * If the extent block isn't the last one, we have to find | ||
| 992 | * the subtree root between this extent block and the next | ||
| 993 | * leaf extent block and get the corresponding e_cpos from | ||
| 994 | * the subroot. Otherwise we may corrupt the b-tree. | ||
| 995 | */ | ||
| 996 | ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); | ||
| 997 | |||
| 998 | left_path = ocfs2_new_path_from_et(&et); | ||
| 999 | if (!left_path) { | ||
| 1000 | ret = -ENOMEM; | ||
| 1001 | mlog_errno(ret); | ||
| 1002 | goto out; | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos); | ||
| 1006 | ret = ocfs2_find_path(ci, left_path, cpos); | ||
| 1007 | if (ret) { | ||
| 1008 | mlog_errno(ret); | ||
| 1009 | goto out; | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | right_path = ocfs2_new_path_from_path(left_path); | ||
| 1013 | if (!right_path) { | ||
| 1014 | ret = -ENOMEM; | ||
| 1015 | mlog_errno(ret); | ||
| 1016 | goto out; | ||
| 1017 | } | ||
| 1018 | |||
| 1019 | ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos); | ||
| 1020 | if (ret) { | ||
| 1021 | mlog_errno(ret); | ||
| 1022 | goto out; | ||
| 1023 | } | ||
| 1024 | |||
| 1025 | ret = ocfs2_find_path(ci, right_path, cpos); | ||
| 1026 | if (ret) { | ||
| 1027 | mlog_errno(ret); | ||
| 1028 | goto out; | ||
| 1029 | } | ||
| 1030 | |||
| 1031 | subtree_root = ocfs2_find_subtree_root(&et, left_path, | ||
| 1032 | right_path); | ||
| 1033 | |||
| 1034 | tmp_el = left_path->p_node[subtree_root].el; | ||
| 1035 | blkno = left_path->p_node[subtree_root+1].bh->b_blocknr; | ||
| 1036 | for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) { | ||
| 1037 | if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) { | ||
| 1038 | *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos); | ||
| 1039 | break; | ||
| 1040 | } | ||
| 1041 | } | ||
| 1042 | |||
| 1043 | BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec)); | ||
| 1044 | |||
| 1045 | out: | ||
| 1046 | ocfs2_free_path(left_path); | ||
| 1047 | ocfs2_free_path(right_path); | ||
| 1048 | return ret; | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | /* | ||
| 972 | * Given a cpos and len, try to find the refcount record which contains cpos. | 1052 | * Given a cpos and len, try to find the refcount record which contains cpos. |
| 973 | * 1. If cpos can be found in one refcount record, return the record. | 1053 | * 1. If cpos can be found in one refcount record, return the record. |
| 974 | * 2. If cpos can't be found, return a fake record which start from cpos | 1054 | * 2. If cpos can't be found, return a fake record which start from cpos |
| @@ -983,10 +1063,10 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci, | |||
| 983 | struct buffer_head **ret_bh) | 1063 | struct buffer_head **ret_bh) |
| 984 | { | 1064 | { |
| 985 | int ret = 0, i, found; | 1065 | int ret = 0, i, found; |
| 986 | u32 low_cpos; | 1066 | u32 low_cpos, uninitialized_var(cpos_end); |
| 987 | struct ocfs2_extent_list *el; | 1067 | struct ocfs2_extent_list *el; |
| 988 | struct ocfs2_extent_rec *tmp, *rec = NULL; | 1068 | struct ocfs2_extent_rec *rec = NULL; |
| 989 | struct ocfs2_extent_block *eb; | 1069 | struct ocfs2_extent_block *eb = NULL; |
| 990 | struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL; | 1070 | struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL; |
| 991 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); | 1071 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); |
| 992 | struct ocfs2_refcount_block *rb = | 1072 | struct ocfs2_refcount_block *rb = |
| @@ -1034,12 +1114,16 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci, | |||
| 1034 | } | 1114 | } |
| 1035 | } | 1115 | } |
| 1036 | 1116 | ||
| 1037 | /* adjust len when we have ocfs2_extent_rec after it. */ | 1117 | if (found) { |
| 1038 | if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) { | 1118 | ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh, |
| 1039 | tmp = &el->l_recs[i+1]; | 1119 | eb, el, i, &cpos_end); |
| 1120 | if (ret) { | ||
| 1121 | mlog_errno(ret); | ||
| 1122 | goto out; | ||
| 1123 | } | ||
| 1040 | 1124 | ||
| 1041 | if (le32_to_cpu(tmp->e_cpos) < cpos + len) | 1125 | if (cpos_end < low_cpos + len) |
| 1042 | len = le32_to_cpu(tmp->e_cpos) - cpos; | 1126 | len = cpos_end - low_cpos; |
| 1043 | } | 1127 | } |
| 1044 | 1128 | ||
| 1045 | ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno), | 1129 | ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno), |
| @@ -1418,7 +1502,7 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh, | |||
| 1418 | 1502 | ||
| 1419 | /* change old and new rl_used accordingly. */ | 1503 | /* change old and new rl_used accordingly. */ |
| 1420 | le16_add_cpu(&rl->rl_used, -num_moved); | 1504 | le16_add_cpu(&rl->rl_used, -num_moved); |
| 1421 | new_rl->rl_used = cpu_to_le32(num_moved); | 1505 | new_rl->rl_used = cpu_to_le16(num_moved); |
| 1422 | 1506 | ||
| 1423 | sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), | 1507 | sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), |
| 1424 | sizeof(struct ocfs2_refcount_rec), | 1508 | sizeof(struct ocfs2_refcount_rec), |
| @@ -1797,7 +1881,8 @@ static int ocfs2_split_refcount_rec(handle_t *handle, | |||
| 1797 | recs_need++; | 1881 | recs_need++; |
| 1798 | 1882 | ||
| 1799 | /* If the leaf block don't have enough record, expand it. */ | 1883 | /* If the leaf block don't have enough record, expand it. */ |
| 1800 | if (le16_to_cpu(rf_list->rl_used) + recs_need > rf_list->rl_count) { | 1884 | if (le16_to_cpu(rf_list->rl_used) + recs_need > |
| 1885 | le16_to_cpu(rf_list->rl_count)) { | ||
| 1801 | struct ocfs2_refcount_rec tmp_rec; | 1886 | struct ocfs2_refcount_rec tmp_rec; |
| 1802 | u64 cpos = le64_to_cpu(orig_rec->r_cpos); | 1887 | u64 cpos = le64_to_cpu(orig_rec->r_cpos); |
| 1803 | len = le32_to_cpu(orig_rec->r_clusters); | 1888 | len = le32_to_cpu(orig_rec->r_clusters); |
| @@ -1859,7 +1944,7 @@ static int ocfs2_split_refcount_rec(handle_t *handle, | |||
| 1859 | memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec)); | 1944 | memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec)); |
| 1860 | le64_add_cpu(&tail_rec->r_cpos, | 1945 | le64_add_cpu(&tail_rec->r_cpos, |
| 1861 | le32_to_cpu(tail_rec->r_clusters) - len); | 1946 | le32_to_cpu(tail_rec->r_clusters) - len); |
| 1862 | tail_rec->r_clusters = le32_to_cpu(len); | 1947 | tail_rec->r_clusters = cpu_to_le32(len); |
| 1863 | } | 1948 | } |
| 1864 | 1949 | ||
| 1865 | /* | 1950 | /* |
| @@ -3840,8 +3925,7 @@ static int ocfs2_add_refcounted_extent(struct inode *inode, | |||
| 3840 | } | 3925 | } |
| 3841 | 3926 | ||
| 3842 | ret = ocfs2_insert_extent(handle, et, cpos, | 3927 | ret = ocfs2_insert_extent(handle, et, cpos, |
| 3843 | cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb, | 3928 | ocfs2_clusters_to_blocks(inode->i_sb, p_cluster), |
| 3844 | p_cluster)), | ||
| 3845 | num_clusters, ext_flags, meta_ac); | 3929 | num_clusters, ext_flags, meta_ac); |
| 3846 | if (ret) { | 3930 | if (ret) { |
| 3847 | mlog_errno(ret); | 3931 | mlog_errno(ret); |
| @@ -4253,8 +4337,8 @@ static int ocfs2_user_path_parent(const char __user *path, | |||
| 4253 | * @new_dentry: target dentry | 4337 | * @new_dentry: target dentry |
| 4254 | * @preserve: if true, preserve all file attributes | 4338 | * @preserve: if true, preserve all file attributes |
| 4255 | */ | 4339 | */ |
| 4256 | int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir, | 4340 | static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir, |
| 4257 | struct dentry *new_dentry, bool preserve) | 4341 | struct dentry *new_dentry, bool preserve) |
| 4258 | { | 4342 | { |
| 4259 | struct inode *inode = old_dentry->d_inode; | 4343 | struct inode *inode = old_dentry->d_inode; |
| 4260 | int error; | 4344 | int error; |
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index ff4c798a5635..da78a2a334fd 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c | |||
| @@ -814,7 +814,7 @@ static int fs_protocol_compare(struct ocfs2_protocol_version *existing, | |||
| 814 | static int user_cluster_connect(struct ocfs2_cluster_connection *conn) | 814 | static int user_cluster_connect(struct ocfs2_cluster_connection *conn) |
| 815 | { | 815 | { |
| 816 | dlm_lockspace_t *fsdlm; | 816 | dlm_lockspace_t *fsdlm; |
| 817 | struct ocfs2_live_connection *control; | 817 | struct ocfs2_live_connection *uninitialized_var(control); |
| 818 | int rc = 0; | 818 | int rc = 0; |
| 819 | 819 | ||
| 820 | BUG_ON(conn == NULL); | 820 | BUG_ON(conn == NULL); |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 14f47d2bfe02..26069917a9f5 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
| @@ -100,6 +100,8 @@ struct mount_options | |||
| 100 | static int ocfs2_parse_options(struct super_block *sb, char *options, | 100 | static int ocfs2_parse_options(struct super_block *sb, char *options, |
| 101 | struct mount_options *mopt, | 101 | struct mount_options *mopt, |
| 102 | int is_remount); | 102 | int is_remount); |
| 103 | static int ocfs2_check_set_options(struct super_block *sb, | ||
| 104 | struct mount_options *options); | ||
| 103 | static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt); | 105 | static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt); |
| 104 | static void ocfs2_put_super(struct super_block *sb); | 106 | static void ocfs2_put_super(struct super_block *sb); |
| 105 | static int ocfs2_mount_volume(struct super_block *sb); | 107 | static int ocfs2_mount_volume(struct super_block *sb); |
| @@ -600,7 +602,8 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) | |||
| 600 | 602 | ||
| 601 | lock_kernel(); | 603 | lock_kernel(); |
| 602 | 604 | ||
| 603 | if (!ocfs2_parse_options(sb, data, &parsed_options, 1)) { | 605 | if (!ocfs2_parse_options(sb, data, &parsed_options, 1) || |
| 606 | !ocfs2_check_set_options(sb, &parsed_options)) { | ||
| 604 | ret = -EINVAL; | 607 | ret = -EINVAL; |
| 605 | goto out; | 608 | goto out; |
| 606 | } | 609 | } |
| @@ -691,8 +694,6 @@ unlock_osb: | |||
| 691 | if (!ret) { | 694 | if (!ret) { |
| 692 | /* Only save off the new mount options in case of a successful | 695 | /* Only save off the new mount options in case of a successful |
| 693 | * remount. */ | 696 | * remount. */ |
| 694 | if (!(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_XATTR)) | ||
| 695 | parsed_options.mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; | ||
| 696 | osb->s_mount_opt = parsed_options.mount_opt; | 697 | osb->s_mount_opt = parsed_options.mount_opt; |
| 697 | osb->s_atime_quantum = parsed_options.atime_quantum; | 698 | osb->s_atime_quantum = parsed_options.atime_quantum; |
| 698 | osb->preferred_slot = parsed_options.slot; | 699 | osb->preferred_slot = parsed_options.slot; |
| @@ -701,6 +702,10 @@ unlock_osb: | |||
| 701 | 702 | ||
| 702 | if (!ocfs2_is_hard_readonly(osb)) | 703 | if (!ocfs2_is_hard_readonly(osb)) |
| 703 | ocfs2_set_journal_params(osb); | 704 | ocfs2_set_journal_params(osb); |
| 705 | |||
| 706 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | | ||
| 707 | ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? | ||
| 708 | MS_POSIXACL : 0); | ||
| 704 | } | 709 | } |
| 705 | out: | 710 | out: |
| 706 | unlock_kernel(); | 711 | unlock_kernel(); |
| @@ -1011,31 +1016,16 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) | |||
| 1011 | brelse(bh); | 1016 | brelse(bh); |
| 1012 | bh = NULL; | 1017 | bh = NULL; |
| 1013 | 1018 | ||
| 1014 | if (!(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_XATTR)) | 1019 | if (!ocfs2_check_set_options(sb, &parsed_options)) { |
| 1015 | parsed_options.mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; | 1020 | status = -EINVAL; |
| 1016 | 1021 | goto read_super_error; | |
| 1022 | } | ||
| 1017 | osb->s_mount_opt = parsed_options.mount_opt; | 1023 | osb->s_mount_opt = parsed_options.mount_opt; |
| 1018 | osb->s_atime_quantum = parsed_options.atime_quantum; | 1024 | osb->s_atime_quantum = parsed_options.atime_quantum; |
| 1019 | osb->preferred_slot = parsed_options.slot; | 1025 | osb->preferred_slot = parsed_options.slot; |
| 1020 | osb->osb_commit_interval = parsed_options.commit_interval; | 1026 | osb->osb_commit_interval = parsed_options.commit_interval; |
| 1021 | osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, parsed_options.localalloc_opt); | 1027 | osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, parsed_options.localalloc_opt); |
| 1022 | osb->local_alloc_bits = osb->local_alloc_default_bits; | 1028 | osb->local_alloc_bits = osb->local_alloc_default_bits; |
| 1023 | if (osb->s_mount_opt & OCFS2_MOUNT_USRQUOTA && | ||
| 1024 | !OCFS2_HAS_RO_COMPAT_FEATURE(sb, | ||
| 1025 | OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { | ||
| 1026 | status = -EINVAL; | ||
| 1027 | mlog(ML_ERROR, "User quotas were requested, but this " | ||
| 1028 | "filesystem does not have the feature enabled.\n"); | ||
| 1029 | goto read_super_error; | ||
| 1030 | } | ||
| 1031 | if (osb->s_mount_opt & OCFS2_MOUNT_GRPQUOTA && | ||
| 1032 | !OCFS2_HAS_RO_COMPAT_FEATURE(sb, | ||
| 1033 | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { | ||
| 1034 | status = -EINVAL; | ||
| 1035 | mlog(ML_ERROR, "Group quotas were requested, but this " | ||
| 1036 | "filesystem does not have the feature enabled.\n"); | ||
| 1037 | goto read_super_error; | ||
| 1038 | } | ||
| 1039 | 1029 | ||
| 1040 | status = ocfs2_verify_userspace_stack(osb, &parsed_options); | 1030 | status = ocfs2_verify_userspace_stack(osb, &parsed_options); |
| 1041 | if (status) | 1031 | if (status) |
| @@ -1245,6 +1235,40 @@ static struct file_system_type ocfs2_fs_type = { | |||
| 1245 | .next = NULL | 1235 | .next = NULL |
| 1246 | }; | 1236 | }; |
| 1247 | 1237 | ||
| 1238 | static int ocfs2_check_set_options(struct super_block *sb, | ||
| 1239 | struct mount_options *options) | ||
| 1240 | { | ||
| 1241 | if (options->mount_opt & OCFS2_MOUNT_USRQUOTA && | ||
| 1242 | !OCFS2_HAS_RO_COMPAT_FEATURE(sb, | ||
| 1243 | OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { | ||
| 1244 | mlog(ML_ERROR, "User quotas were requested, but this " | ||
| 1245 | "filesystem does not have the feature enabled.\n"); | ||
| 1246 | return 0; | ||
| 1247 | } | ||
| 1248 | if (options->mount_opt & OCFS2_MOUNT_GRPQUOTA && | ||
| 1249 | !OCFS2_HAS_RO_COMPAT_FEATURE(sb, | ||
| 1250 | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { | ||
| 1251 | mlog(ML_ERROR, "Group quotas were requested, but this " | ||
| 1252 | "filesystem does not have the feature enabled.\n"); | ||
| 1253 | return 0; | ||
| 1254 | } | ||
| 1255 | if (options->mount_opt & OCFS2_MOUNT_POSIX_ACL && | ||
| 1256 | !OCFS2_HAS_INCOMPAT_FEATURE(sb, OCFS2_FEATURE_INCOMPAT_XATTR)) { | ||
| 1257 | mlog(ML_ERROR, "ACL support requested but extended attributes " | ||
| 1258 | "feature is not enabled\n"); | ||
| 1259 | return 0; | ||
| 1260 | } | ||
| 1261 | /* No ACL setting specified? Use XATTR feature... */ | ||
| 1262 | if (!(options->mount_opt & (OCFS2_MOUNT_POSIX_ACL | | ||
| 1263 | OCFS2_MOUNT_NO_POSIX_ACL))) { | ||
| 1264 | if (OCFS2_HAS_INCOMPAT_FEATURE(sb, OCFS2_FEATURE_INCOMPAT_XATTR)) | ||
| 1265 | options->mount_opt |= OCFS2_MOUNT_POSIX_ACL; | ||
| 1266 | else | ||
| 1267 | options->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL; | ||
| 1268 | } | ||
| 1269 | return 1; | ||
| 1270 | } | ||
| 1271 | |||
| 1248 | static int ocfs2_parse_options(struct super_block *sb, | 1272 | static int ocfs2_parse_options(struct super_block *sb, |
| 1249 | char *options, | 1273 | char *options, |
| 1250 | struct mount_options *mopt, | 1274 | struct mount_options *mopt, |
| @@ -1392,40 +1416,19 @@ static int ocfs2_parse_options(struct super_block *sb, | |||
| 1392 | mopt->mount_opt |= OCFS2_MOUNT_INODE64; | 1416 | mopt->mount_opt |= OCFS2_MOUNT_INODE64; |
| 1393 | break; | 1417 | break; |
| 1394 | case Opt_usrquota: | 1418 | case Opt_usrquota: |
| 1395 | /* We check only on remount, otherwise features | ||
| 1396 | * aren't yet initialized. */ | ||
| 1397 | if (is_remount && !OCFS2_HAS_RO_COMPAT_FEATURE(sb, | ||
| 1398 | OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { | ||
| 1399 | mlog(ML_ERROR, "User quota requested but " | ||
| 1400 | "filesystem feature is not set\n"); | ||
| 1401 | status = 0; | ||
| 1402 | goto bail; | ||
| 1403 | } | ||
| 1404 | mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA; | 1419 | mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA; |
| 1405 | break; | 1420 | break; |
| 1406 | case Opt_grpquota: | 1421 | case Opt_grpquota: |
| 1407 | if (is_remount && !OCFS2_HAS_RO_COMPAT_FEATURE(sb, | ||
| 1408 | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { | ||
| 1409 | mlog(ML_ERROR, "Group quota requested but " | ||
| 1410 | "filesystem feature is not set\n"); | ||
| 1411 | status = 0; | ||
| 1412 | goto bail; | ||
| 1413 | } | ||
| 1414 | mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA; | 1422 | mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA; |
| 1415 | break; | 1423 | break; |
| 1416 | #ifdef CONFIG_OCFS2_FS_POSIX_ACL | ||
| 1417 | case Opt_acl: | 1424 | case Opt_acl: |
| 1418 | mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; | 1425 | mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; |
| 1426 | mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL; | ||
| 1419 | break; | 1427 | break; |
| 1420 | case Opt_noacl: | 1428 | case Opt_noacl: |
| 1429 | mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL; | ||
| 1421 | mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; | 1430 | mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; |
| 1422 | break; | 1431 | break; |
| 1423 | #else | ||
| 1424 | case Opt_acl: | ||
| 1425 | case Opt_noacl: | ||
| 1426 | printk(KERN_INFO "ocfs2 (no)acl options not supported\n"); | ||
| 1427 | break; | ||
| 1428 | #endif | ||
| 1429 | default: | 1432 | default: |
| 1430 | mlog(ML_ERROR, | 1433 | mlog(ML_ERROR, |
| 1431 | "Unrecognized mount option \"%s\" " | 1434 | "Unrecognized mount option \"%s\" " |
| @@ -1502,12 +1505,10 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt) | |||
| 1502 | if (opts & OCFS2_MOUNT_INODE64) | 1505 | if (opts & OCFS2_MOUNT_INODE64) |
| 1503 | seq_printf(s, ",inode64"); | 1506 | seq_printf(s, ",inode64"); |
| 1504 | 1507 | ||
| 1505 | #ifdef CONFIG_OCFS2_FS_POSIX_ACL | ||
| 1506 | if (opts & OCFS2_MOUNT_POSIX_ACL) | 1508 | if (opts & OCFS2_MOUNT_POSIX_ACL) |
| 1507 | seq_printf(s, ",acl"); | 1509 | seq_printf(s, ",acl"); |
| 1508 | else | 1510 | else |
| 1509 | seq_printf(s, ",noacl"); | 1511 | seq_printf(s, ",noacl"); |
| 1510 | #endif | ||
| 1511 | 1512 | ||
| 1512 | return 0; | 1513 | return 0; |
| 1513 | } | 1514 | } |
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index e3421030a69f..49b133ccbf11 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c | |||
| @@ -163,6 +163,7 @@ const struct inode_operations ocfs2_symlink_inode_operations = { | |||
| 163 | .getxattr = generic_getxattr, | 163 | .getxattr = generic_getxattr, |
| 164 | .listxattr = ocfs2_listxattr, | 164 | .listxattr = ocfs2_listxattr, |
| 165 | .removexattr = generic_removexattr, | 165 | .removexattr = generic_removexattr, |
| 166 | .fiemap = ocfs2_fiemap, | ||
| 166 | }; | 167 | }; |
| 167 | const struct inode_operations ocfs2_fast_symlink_inode_operations = { | 168 | const struct inode_operations ocfs2_fast_symlink_inode_operations = { |
| 168 | .readlink = ocfs2_readlink, | 169 | .readlink = ocfs2_readlink, |
| @@ -174,4 +175,5 @@ const struct inode_operations ocfs2_fast_symlink_inode_operations = { | |||
| 174 | .getxattr = generic_getxattr, | 175 | .getxattr = generic_getxattr, |
| 175 | .listxattr = ocfs2_listxattr, | 176 | .listxattr = ocfs2_listxattr, |
| 176 | .removexattr = generic_removexattr, | 177 | .removexattr = generic_removexattr, |
| 178 | .fiemap = ocfs2_fiemap, | ||
| 177 | }; | 179 | }; |
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 43c114831c0d..8fc6fb071c6d 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
| @@ -98,10 +98,8 @@ static struct ocfs2_xattr_def_value_root def_xv = { | |||
| 98 | 98 | ||
| 99 | struct xattr_handler *ocfs2_xattr_handlers[] = { | 99 | struct xattr_handler *ocfs2_xattr_handlers[] = { |
| 100 | &ocfs2_xattr_user_handler, | 100 | &ocfs2_xattr_user_handler, |
| 101 | #ifdef CONFIG_OCFS2_FS_POSIX_ACL | ||
| 102 | &ocfs2_xattr_acl_access_handler, | 101 | &ocfs2_xattr_acl_access_handler, |
| 103 | &ocfs2_xattr_acl_default_handler, | 102 | &ocfs2_xattr_acl_default_handler, |
| 104 | #endif | ||
| 105 | &ocfs2_xattr_trusted_handler, | 103 | &ocfs2_xattr_trusted_handler, |
| 106 | &ocfs2_xattr_security_handler, | 104 | &ocfs2_xattr_security_handler, |
| 107 | NULL | 105 | NULL |
| @@ -109,12 +107,10 @@ struct xattr_handler *ocfs2_xattr_handlers[] = { | |||
| 109 | 107 | ||
| 110 | static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = { | 108 | static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = { |
| 111 | [OCFS2_XATTR_INDEX_USER] = &ocfs2_xattr_user_handler, | 109 | [OCFS2_XATTR_INDEX_USER] = &ocfs2_xattr_user_handler, |
| 112 | #ifdef CONFIG_OCFS2_FS_POSIX_ACL | ||
| 113 | [OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS] | 110 | [OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS] |
| 114 | = &ocfs2_xattr_acl_access_handler, | 111 | = &ocfs2_xattr_acl_access_handler, |
| 115 | [OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT] | 112 | [OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT] |
| 116 | = &ocfs2_xattr_acl_default_handler, | 113 | = &ocfs2_xattr_acl_default_handler, |
| 117 | #endif | ||
| 118 | [OCFS2_XATTR_INDEX_TRUSTED] = &ocfs2_xattr_trusted_handler, | 114 | [OCFS2_XATTR_INDEX_TRUSTED] = &ocfs2_xattr_trusted_handler, |
| 119 | [OCFS2_XATTR_INDEX_SECURITY] = &ocfs2_xattr_security_handler, | 115 | [OCFS2_XATTR_INDEX_SECURITY] = &ocfs2_xattr_security_handler, |
| 120 | }; | 116 | }; |
| @@ -6064,7 +6060,7 @@ static int ocfs2_value_metas_in_xattr_header(struct super_block *sb, | |||
| 6064 | * to the extent block, so just calculate a maximum record num. | 6060 | * to the extent block, so just calculate a maximum record num. |
| 6065 | */ | 6061 | */ |
| 6066 | if (!xv->xr_list.l_tree_depth) | 6062 | if (!xv->xr_list.l_tree_depth) |
| 6067 | *num_recs += xv->xr_list.l_next_free_rec; | 6063 | *num_recs += le16_to_cpu(xv->xr_list.l_next_free_rec); |
| 6068 | else | 6064 | else |
| 6069 | *num_recs += ocfs2_clusters_for_bytes(sb, | 6065 | *num_recs += ocfs2_clusters_for_bytes(sb, |
| 6070 | XATTR_SIZE_MAX); | 6066 | XATTR_SIZE_MAX); |
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index 08e36389f56d..abd72a47f520 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h | |||
| @@ -40,10 +40,8 @@ struct ocfs2_security_xattr_info { | |||
| 40 | extern struct xattr_handler ocfs2_xattr_user_handler; | 40 | extern struct xattr_handler ocfs2_xattr_user_handler; |
| 41 | extern struct xattr_handler ocfs2_xattr_trusted_handler; | 41 | extern struct xattr_handler ocfs2_xattr_trusted_handler; |
| 42 | extern struct xattr_handler ocfs2_xattr_security_handler; | 42 | extern struct xattr_handler ocfs2_xattr_security_handler; |
| 43 | #ifdef CONFIG_OCFS2_FS_POSIX_ACL | ||
| 44 | extern struct xattr_handler ocfs2_xattr_acl_access_handler; | 43 | extern struct xattr_handler ocfs2_xattr_acl_access_handler; |
| 45 | extern struct xattr_handler ocfs2_xattr_acl_default_handler; | 44 | extern struct xattr_handler ocfs2_xattr_acl_default_handler; |
| 46 | #endif | ||
| 47 | extern struct xattr_handler *ocfs2_xattr_handlers[]; | 45 | extern struct xattr_handler *ocfs2_xattr_handlers[]; |
| 48 | 46 | ||
| 49 | ssize_t ocfs2_listxattr(struct dentry *, char *, size_t); | 47 | ssize_t ocfs2_listxattr(struct dentry *, char *, size_t); |
| @@ -821,15 +821,14 @@ static inline int __get_file_write_access(struct inode *inode, | |||
| 821 | } | 821 | } |
| 822 | 822 | ||
| 823 | static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, | 823 | static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, |
| 824 | int flags, struct file *f, | 824 | struct file *f, |
| 825 | int (*open)(struct inode *, struct file *), | 825 | int (*open)(struct inode *, struct file *), |
| 826 | const struct cred *cred) | 826 | const struct cred *cred) |
| 827 | { | 827 | { |
| 828 | struct inode *inode; | 828 | struct inode *inode; |
| 829 | int error; | 829 | int error; |
| 830 | 830 | ||
| 831 | f->f_flags = flags; | 831 | f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK | |
| 832 | f->f_mode = (__force fmode_t)((flags+1) & O_ACCMODE) | FMODE_LSEEK | | ||
| 833 | FMODE_PREAD | FMODE_PWRITE; | 832 | FMODE_PREAD | FMODE_PWRITE; |
| 834 | inode = dentry->d_inode; | 833 | inode = dentry->d_inode; |
| 835 | if (f->f_mode & FMODE_WRITE) { | 834 | if (f->f_mode & FMODE_WRITE) { |
| @@ -930,7 +929,6 @@ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry | |||
| 930 | if (IS_ERR(dentry)) | 929 | if (IS_ERR(dentry)) |
| 931 | goto out_err; | 930 | goto out_err; |
| 932 | nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt), | 931 | nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt), |
| 933 | nd->intent.open.flags - 1, | ||
| 934 | nd->intent.open.file, | 932 | nd->intent.open.file, |
| 935 | open, cred); | 933 | open, cred); |
| 936 | out: | 934 | out: |
| @@ -949,7 +947,7 @@ EXPORT_SYMBOL_GPL(lookup_instantiate_filp); | |||
| 949 | * | 947 | * |
| 950 | * Note that this function destroys the original nameidata | 948 | * Note that this function destroys the original nameidata |
| 951 | */ | 949 | */ |
| 952 | struct file *nameidata_to_filp(struct nameidata *nd, int flags) | 950 | struct file *nameidata_to_filp(struct nameidata *nd) |
| 953 | { | 951 | { |
| 954 | const struct cred *cred = current_cred(); | 952 | const struct cred *cred = current_cred(); |
| 955 | struct file *filp; | 953 | struct file *filp; |
| @@ -958,7 +956,7 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags) | |||
| 958 | filp = nd->intent.open.file; | 956 | filp = nd->intent.open.file; |
| 959 | /* Has the filesystem initialised the file for us? */ | 957 | /* Has the filesystem initialised the file for us? */ |
| 960 | if (filp->f_path.dentry == NULL) | 958 | if (filp->f_path.dentry == NULL) |
| 961 | filp = __dentry_open(nd->path.dentry, nd->path.mnt, flags, filp, | 959 | filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp, |
| 962 | NULL, cred); | 960 | NULL, cred); |
| 963 | else | 961 | else |
| 964 | path_put(&nd->path); | 962 | path_put(&nd->path); |
| @@ -997,7 +995,8 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags, | |||
| 997 | return ERR_PTR(error); | 995 | return ERR_PTR(error); |
| 998 | } | 996 | } |
| 999 | 997 | ||
| 1000 | return __dentry_open(dentry, mnt, flags, f, NULL, cred); | 998 | f->f_flags = flags; |
| 999 | return __dentry_open(dentry, mnt, f, NULL, cred); | ||
| 1001 | } | 1000 | } |
| 1002 | EXPORT_SYMBOL(dentry_open); | 1001 | EXPORT_SYMBOL(dentry_open); |
| 1003 | 1002 | ||
| @@ -906,17 +906,6 @@ void free_pipe_info(struct inode *inode) | |||
| 906 | } | 906 | } |
| 907 | 907 | ||
| 908 | static struct vfsmount *pipe_mnt __read_mostly; | 908 | static struct vfsmount *pipe_mnt __read_mostly; |
| 909 | static int pipefs_delete_dentry(struct dentry *dentry) | ||
| 910 | { | ||
| 911 | /* | ||
| 912 | * At creation time, we pretended this dentry was hashed | ||
| 913 | * (by clearing DCACHE_UNHASHED bit in d_flags) | ||
| 914 | * At delete time, we restore the truth : not hashed. | ||
| 915 | * (so that dput() can proceed correctly) | ||
| 916 | */ | ||
| 917 | dentry->d_flags |= DCACHE_UNHASHED; | ||
| 918 | return 0; | ||
| 919 | } | ||
| 920 | 909 | ||
| 921 | /* | 910 | /* |
| 922 | * pipefs_dname() is called from d_path(). | 911 | * pipefs_dname() is called from d_path(). |
| @@ -928,7 +917,6 @@ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen) | |||
| 928 | } | 917 | } |
| 929 | 918 | ||
| 930 | static const struct dentry_operations pipefs_dentry_operations = { | 919 | static const struct dentry_operations pipefs_dentry_operations = { |
| 931 | .d_delete = pipefs_delete_dentry, | ||
| 932 | .d_dname = pipefs_dname, | 920 | .d_dname = pipefs_dname, |
| 933 | }; | 921 | }; |
| 934 | 922 | ||
| @@ -989,12 +977,6 @@ struct file *create_write_pipe(int flags) | |||
| 989 | path.mnt = mntget(pipe_mnt); | 977 | path.mnt = mntget(pipe_mnt); |
| 990 | 978 | ||
| 991 | path.dentry->d_op = &pipefs_dentry_operations; | 979 | path.dentry->d_op = &pipefs_dentry_operations; |
| 992 | /* | ||
| 993 | * We dont want to publish this dentry into global dentry hash table. | ||
| 994 | * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED | ||
| 995 | * This permits a working /proc/$pid/fd/XXX on pipes | ||
| 996 | */ | ||
| 997 | path.dentry->d_flags &= ~DCACHE_UNHASHED; | ||
| 998 | d_instantiate(path.dentry, inode); | 980 | d_instantiate(path.dentry, inode); |
| 999 | 981 | ||
| 1000 | err = -ENFILE; | 982 | err = -ENFILE; |
diff --git a/fs/proc/array.c b/fs/proc/array.c index 4badde179b18..f560325c444f 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
| @@ -134,13 +134,16 @@ static inline void task_name(struct seq_file *m, struct task_struct *p) | |||
| 134 | * simple bit tests. | 134 | * simple bit tests. |
| 135 | */ | 135 | */ |
| 136 | static const char *task_state_array[] = { | 136 | static const char *task_state_array[] = { |
| 137 | "R (running)", /* 0 */ | 137 | "R (running)", /* 0 */ |
| 138 | "S (sleeping)", /* 1 */ | 138 | "S (sleeping)", /* 1 */ |
| 139 | "D (disk sleep)", /* 2 */ | 139 | "D (disk sleep)", /* 2 */ |
| 140 | "T (stopped)", /* 4 */ | 140 | "T (stopped)", /* 4 */ |
| 141 | "T (tracing stop)", /* 8 */ | 141 | "t (tracing stop)", /* 8 */ |
| 142 | "Z (zombie)", /* 16 */ | 142 | "Z (zombie)", /* 16 */ |
| 143 | "X (dead)" /* 32 */ | 143 | "X (dead)", /* 32 */ |
| 144 | "x (dead)", /* 64 */ | ||
| 145 | "K (wakekill)", /* 128 */ | ||
| 146 | "W (waking)", /* 256 */ | ||
| 144 | }; | 147 | }; |
| 145 | 148 | ||
| 146 | static inline const char *get_task_state(struct task_struct *tsk) | 149 | static inline const char *get_task_state(struct task_struct *tsk) |
| @@ -148,6 +151,8 @@ static inline const char *get_task_state(struct task_struct *tsk) | |||
| 148 | unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state; | 151 | unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state; |
| 149 | const char **p = &task_state_array[0]; | 152 | const char **p = &task_state_array[0]; |
| 150 | 153 | ||
| 154 | BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array)); | ||
| 155 | |||
| 151 | while (state) { | 156 | while (state) { |
| 152 | p++; | 157 | p++; |
| 153 | state >>= 1; | 158 | state >>= 1; |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index cd6bb9a33c13..dea86abdf2e7 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
| @@ -323,6 +323,30 @@ int dquot_mark_dquot_dirty(struct dquot *dquot) | |||
| 323 | } | 323 | } |
| 324 | EXPORT_SYMBOL(dquot_mark_dquot_dirty); | 324 | EXPORT_SYMBOL(dquot_mark_dquot_dirty); |
| 325 | 325 | ||
| 326 | /* Dirtify all the dquots - this can block when journalling */ | ||
| 327 | static inline int mark_all_dquot_dirty(struct dquot * const *dquot) | ||
| 328 | { | ||
| 329 | int ret, err, cnt; | ||
| 330 | |||
| 331 | ret = err = 0; | ||
| 332 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
| 333 | if (dquot[cnt]) | ||
| 334 | /* Even in case of error we have to continue */ | ||
| 335 | ret = mark_dquot_dirty(dquot[cnt]); | ||
| 336 | if (!err) | ||
| 337 | err = ret; | ||
| 338 | } | ||
| 339 | return err; | ||
| 340 | } | ||
| 341 | |||
| 342 | static inline void dqput_all(struct dquot **dquot) | ||
| 343 | { | ||
| 344 | unsigned int cnt; | ||
| 345 | |||
| 346 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 347 | dqput(dquot[cnt]); | ||
| 348 | } | ||
| 349 | |||
| 326 | /* This function needs dq_list_lock */ | 350 | /* This function needs dq_list_lock */ |
| 327 | static inline int clear_dquot_dirty(struct dquot *dquot) | 351 | static inline int clear_dquot_dirty(struct dquot *dquot) |
| 328 | { | 352 | { |
| @@ -1268,8 +1292,7 @@ int dquot_initialize(struct inode *inode, int type) | |||
| 1268 | out_err: | 1292 | out_err: |
| 1269 | up_write(&sb_dqopt(sb)->dqptr_sem); | 1293 | up_write(&sb_dqopt(sb)->dqptr_sem); |
| 1270 | /* Drop unused references */ | 1294 | /* Drop unused references */ |
| 1271 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1295 | dqput_all(got); |
| 1272 | dqput(got[cnt]); | ||
| 1273 | return ret; | 1296 | return ret; |
| 1274 | } | 1297 | } |
| 1275 | EXPORT_SYMBOL(dquot_initialize); | 1298 | EXPORT_SYMBOL(dquot_initialize); |
| @@ -1288,9 +1311,7 @@ int dquot_drop(struct inode *inode) | |||
| 1288 | inode->i_dquot[cnt] = NULL; | 1311 | inode->i_dquot[cnt] = NULL; |
| 1289 | } | 1312 | } |
| 1290 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1313 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1291 | 1314 | dqput_all(put); | |
| 1292 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 1293 | dqput(put[cnt]); | ||
| 1294 | return 0; | 1315 | return 0; |
| 1295 | } | 1316 | } |
| 1296 | EXPORT_SYMBOL(dquot_drop); | 1317 | EXPORT_SYMBOL(dquot_drop); |
| @@ -1319,6 +1340,67 @@ void vfs_dq_drop(struct inode *inode) | |||
| 1319 | EXPORT_SYMBOL(vfs_dq_drop); | 1340 | EXPORT_SYMBOL(vfs_dq_drop); |
| 1320 | 1341 | ||
| 1321 | /* | 1342 | /* |
| 1343 | * inode_reserved_space is managed internally by quota, and protected by | ||
| 1344 | * i_lock similar to i_blocks+i_bytes. | ||
| 1345 | */ | ||
| 1346 | static qsize_t *inode_reserved_space(struct inode * inode) | ||
| 1347 | { | ||
| 1348 | /* Filesystem must explicitly define it's own method in order to use | ||
| 1349 | * quota reservation interface */ | ||
| 1350 | BUG_ON(!inode->i_sb->dq_op->get_reserved_space); | ||
| 1351 | return inode->i_sb->dq_op->get_reserved_space(inode); | ||
| 1352 | } | ||
| 1353 | |||
| 1354 | static void inode_add_rsv_space(struct inode *inode, qsize_t number) | ||
| 1355 | { | ||
| 1356 | spin_lock(&inode->i_lock); | ||
| 1357 | *inode_reserved_space(inode) += number; | ||
| 1358 | spin_unlock(&inode->i_lock); | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | |||
| 1362 | static void inode_claim_rsv_space(struct inode *inode, qsize_t number) | ||
| 1363 | { | ||
| 1364 | spin_lock(&inode->i_lock); | ||
| 1365 | *inode_reserved_space(inode) -= number; | ||
| 1366 | __inode_add_bytes(inode, number); | ||
| 1367 | spin_unlock(&inode->i_lock); | ||
| 1368 | } | ||
| 1369 | |||
| 1370 | static void inode_sub_rsv_space(struct inode *inode, qsize_t number) | ||
| 1371 | { | ||
| 1372 | spin_lock(&inode->i_lock); | ||
| 1373 | *inode_reserved_space(inode) -= number; | ||
| 1374 | spin_unlock(&inode->i_lock); | ||
| 1375 | } | ||
| 1376 | |||
| 1377 | static qsize_t inode_get_rsv_space(struct inode *inode) | ||
| 1378 | { | ||
| 1379 | qsize_t ret; | ||
| 1380 | spin_lock(&inode->i_lock); | ||
| 1381 | ret = *inode_reserved_space(inode); | ||
| 1382 | spin_unlock(&inode->i_lock); | ||
| 1383 | return ret; | ||
| 1384 | } | ||
| 1385 | |||
| 1386 | static void inode_incr_space(struct inode *inode, qsize_t number, | ||
| 1387 | int reserve) | ||
| 1388 | { | ||
| 1389 | if (reserve) | ||
| 1390 | inode_add_rsv_space(inode, number); | ||
| 1391 | else | ||
| 1392 | inode_add_bytes(inode, number); | ||
| 1393 | } | ||
| 1394 | |||
| 1395 | static void inode_decr_space(struct inode *inode, qsize_t number, int reserve) | ||
| 1396 | { | ||
| 1397 | if (reserve) | ||
| 1398 | inode_sub_rsv_space(inode, number); | ||
| 1399 | else | ||
| 1400 | inode_sub_bytes(inode, number); | ||
| 1401 | } | ||
| 1402 | |||
| 1403 | /* | ||
| 1322 | * Following four functions update i_blocks+i_bytes fields and | 1404 | * Following four functions update i_blocks+i_bytes fields and |
| 1323 | * quota information (together with appropriate checks) | 1405 | * quota information (together with appropriate checks) |
| 1324 | * NOTE: We absolutely rely on the fact that caller dirties | 1406 | * NOTE: We absolutely rely on the fact that caller dirties |
| @@ -1336,6 +1418,21 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
| 1336 | int cnt, ret = QUOTA_OK; | 1418 | int cnt, ret = QUOTA_OK; |
| 1337 | char warntype[MAXQUOTAS]; | 1419 | char warntype[MAXQUOTAS]; |
| 1338 | 1420 | ||
| 1421 | /* | ||
| 1422 | * First test before acquiring mutex - solves deadlocks when we | ||
| 1423 | * re-enter the quota code and are already holding the mutex | ||
| 1424 | */ | ||
| 1425 | if (IS_NOQUOTA(inode)) { | ||
| 1426 | inode_incr_space(inode, number, reserve); | ||
| 1427 | goto out; | ||
| 1428 | } | ||
| 1429 | |||
| 1430 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1431 | if (IS_NOQUOTA(inode)) { | ||
| 1432 | inode_incr_space(inode, number, reserve); | ||
| 1433 | goto out_unlock; | ||
| 1434 | } | ||
| 1435 | |||
| 1339 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1436 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
| 1340 | warntype[cnt] = QUOTA_NL_NOWARN; | 1437 | warntype[cnt] = QUOTA_NL_NOWARN; |
| 1341 | 1438 | ||
| @@ -1346,7 +1443,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
| 1346 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) | 1443 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) |
| 1347 | == NO_QUOTA) { | 1444 | == NO_QUOTA) { |
| 1348 | ret = NO_QUOTA; | 1445 | ret = NO_QUOTA; |
| 1349 | goto out_unlock; | 1446 | spin_unlock(&dq_data_lock); |
| 1447 | goto out_flush_warn; | ||
| 1350 | } | 1448 | } |
| 1351 | } | 1449 | } |
| 1352 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1450 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| @@ -1357,64 +1455,29 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
| 1357 | else | 1455 | else |
| 1358 | dquot_incr_space(inode->i_dquot[cnt], number); | 1456 | dquot_incr_space(inode->i_dquot[cnt], number); |
| 1359 | } | 1457 | } |
| 1360 | if (!reserve) | 1458 | inode_incr_space(inode, number, reserve); |
| 1361 | inode_add_bytes(inode, number); | ||
| 1362 | out_unlock: | ||
| 1363 | spin_unlock(&dq_data_lock); | 1459 | spin_unlock(&dq_data_lock); |
| 1460 | |||
| 1461 | if (reserve) | ||
| 1462 | goto out_flush_warn; | ||
| 1463 | mark_all_dquot_dirty(inode->i_dquot); | ||
| 1464 | out_flush_warn: | ||
| 1364 | flush_warnings(inode->i_dquot, warntype); | 1465 | flush_warnings(inode->i_dquot, warntype); |
| 1466 | out_unlock: | ||
| 1467 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1468 | out: | ||
| 1365 | return ret; | 1469 | return ret; |
| 1366 | } | 1470 | } |
| 1367 | 1471 | ||
| 1368 | int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) | 1472 | int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) |
| 1369 | { | 1473 | { |
| 1370 | int cnt, ret = QUOTA_OK; | 1474 | return __dquot_alloc_space(inode, number, warn, 0); |
| 1371 | |||
| 1372 | /* | ||
| 1373 | * First test before acquiring mutex - solves deadlocks when we | ||
| 1374 | * re-enter the quota code and are already holding the mutex | ||
| 1375 | */ | ||
| 1376 | if (IS_NOQUOTA(inode)) { | ||
| 1377 | inode_add_bytes(inode, number); | ||
| 1378 | goto out; | ||
| 1379 | } | ||
| 1380 | |||
| 1381 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1382 | if (IS_NOQUOTA(inode)) { | ||
| 1383 | inode_add_bytes(inode, number); | ||
| 1384 | goto out_unlock; | ||
| 1385 | } | ||
| 1386 | |||
| 1387 | ret = __dquot_alloc_space(inode, number, warn, 0); | ||
| 1388 | if (ret == NO_QUOTA) | ||
| 1389 | goto out_unlock; | ||
| 1390 | |||
| 1391 | /* Dirtify all the dquots - this can block when journalling */ | ||
| 1392 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 1393 | if (inode->i_dquot[cnt]) | ||
| 1394 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
| 1395 | out_unlock: | ||
| 1396 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1397 | out: | ||
| 1398 | return ret; | ||
| 1399 | } | 1475 | } |
| 1400 | EXPORT_SYMBOL(dquot_alloc_space); | 1476 | EXPORT_SYMBOL(dquot_alloc_space); |
| 1401 | 1477 | ||
| 1402 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) | 1478 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) |
| 1403 | { | 1479 | { |
| 1404 | int ret = QUOTA_OK; | 1480 | return __dquot_alloc_space(inode, number, warn, 1); |
| 1405 | |||
| 1406 | if (IS_NOQUOTA(inode)) | ||
| 1407 | goto out; | ||
| 1408 | |||
| 1409 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1410 | if (IS_NOQUOTA(inode)) | ||
| 1411 | goto out_unlock; | ||
| 1412 | |||
| 1413 | ret = __dquot_alloc_space(inode, number, warn, 1); | ||
| 1414 | out_unlock: | ||
| 1415 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1416 | out: | ||
| 1417 | return ret; | ||
| 1418 | } | 1481 | } |
| 1419 | EXPORT_SYMBOL(dquot_reserve_space); | 1482 | EXPORT_SYMBOL(dquot_reserve_space); |
| 1420 | 1483 | ||
| @@ -1455,10 +1518,7 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number) | |||
| 1455 | warn_put_all: | 1518 | warn_put_all: |
| 1456 | spin_unlock(&dq_data_lock); | 1519 | spin_unlock(&dq_data_lock); |
| 1457 | if (ret == QUOTA_OK) | 1520 | if (ret == QUOTA_OK) |
| 1458 | /* Dirtify all the dquots - this can block when journalling */ | 1521 | mark_all_dquot_dirty(inode->i_dquot); |
| 1459 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 1460 | if (inode->i_dquot[cnt]) | ||
| 1461 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
| 1462 | flush_warnings(inode->i_dquot, warntype); | 1522 | flush_warnings(inode->i_dquot, warntype); |
| 1463 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1523 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1464 | return ret; | 1524 | return ret; |
| @@ -1471,14 +1531,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number) | |||
| 1471 | int ret = QUOTA_OK; | 1531 | int ret = QUOTA_OK; |
| 1472 | 1532 | ||
| 1473 | if (IS_NOQUOTA(inode)) { | 1533 | if (IS_NOQUOTA(inode)) { |
| 1474 | inode_add_bytes(inode, number); | 1534 | inode_claim_rsv_space(inode, number); |
| 1475 | goto out; | 1535 | goto out; |
| 1476 | } | 1536 | } |
| 1477 | 1537 | ||
| 1478 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1538 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1479 | if (IS_NOQUOTA(inode)) { | 1539 | if (IS_NOQUOTA(inode)) { |
| 1480 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1540 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1481 | inode_add_bytes(inode, number); | 1541 | inode_claim_rsv_space(inode, number); |
| 1482 | goto out; | 1542 | goto out; |
| 1483 | } | 1543 | } |
| 1484 | 1544 | ||
| @@ -1490,12 +1550,9 @@ int dquot_claim_space(struct inode *inode, qsize_t number) | |||
| 1490 | number); | 1550 | number); |
| 1491 | } | 1551 | } |
| 1492 | /* Update inode bytes */ | 1552 | /* Update inode bytes */ |
| 1493 | inode_add_bytes(inode, number); | 1553 | inode_claim_rsv_space(inode, number); |
| 1494 | spin_unlock(&dq_data_lock); | 1554 | spin_unlock(&dq_data_lock); |
| 1495 | /* Dirtify all the dquots - this can block when journalling */ | 1555 | mark_all_dquot_dirty(inode->i_dquot); |
| 1496 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 1497 | if (inode->i_dquot[cnt]) | ||
| 1498 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
| 1499 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1556 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1500 | out: | 1557 | out: |
| 1501 | return ret; | 1558 | return ret; |
| @@ -1503,38 +1560,9 @@ out: | |||
| 1503 | EXPORT_SYMBOL(dquot_claim_space); | 1560 | EXPORT_SYMBOL(dquot_claim_space); |
| 1504 | 1561 | ||
| 1505 | /* | 1562 | /* |
| 1506 | * Release reserved quota space | ||
| 1507 | */ | ||
| 1508 | void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
| 1509 | { | ||
| 1510 | int cnt; | ||
| 1511 | |||
| 1512 | if (IS_NOQUOTA(inode)) | ||
| 1513 | goto out; | ||
| 1514 | |||
| 1515 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1516 | if (IS_NOQUOTA(inode)) | ||
| 1517 | goto out_unlock; | ||
| 1518 | |||
| 1519 | spin_lock(&dq_data_lock); | ||
| 1520 | /* Release reserved dquots */ | ||
| 1521 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
| 1522 | if (inode->i_dquot[cnt]) | ||
| 1523 | dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
| 1524 | } | ||
| 1525 | spin_unlock(&dq_data_lock); | ||
| 1526 | |||
| 1527 | out_unlock: | ||
| 1528 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1529 | out: | ||
| 1530 | return; | ||
| 1531 | } | ||
| 1532 | EXPORT_SYMBOL(dquot_release_reserved_space); | ||
| 1533 | |||
| 1534 | /* | ||
| 1535 | * This operation can block, but only after everything is updated | 1563 | * This operation can block, but only after everything is updated |
| 1536 | */ | 1564 | */ |
| 1537 | int dquot_free_space(struct inode *inode, qsize_t number) | 1565 | int __dquot_free_space(struct inode *inode, qsize_t number, int reserve) |
| 1538 | { | 1566 | { |
| 1539 | unsigned int cnt; | 1567 | unsigned int cnt; |
| 1540 | char warntype[MAXQUOTAS]; | 1568 | char warntype[MAXQUOTAS]; |
| @@ -1543,7 +1571,7 @@ int dquot_free_space(struct inode *inode, qsize_t number) | |||
| 1543 | * re-enter the quota code and are already holding the mutex */ | 1571 | * re-enter the quota code and are already holding the mutex */ |
| 1544 | if (IS_NOQUOTA(inode)) { | 1572 | if (IS_NOQUOTA(inode)) { |
| 1545 | out_sub: | 1573 | out_sub: |
| 1546 | inode_sub_bytes(inode, number); | 1574 | inode_decr_space(inode, number, reserve); |
| 1547 | return QUOTA_OK; | 1575 | return QUOTA_OK; |
| 1548 | } | 1576 | } |
| 1549 | 1577 | ||
| @@ -1558,21 +1586,40 @@ out_sub: | |||
| 1558 | if (!inode->i_dquot[cnt]) | 1586 | if (!inode->i_dquot[cnt]) |
| 1559 | continue; | 1587 | continue; |
| 1560 | warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); | 1588 | warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); |
| 1561 | dquot_decr_space(inode->i_dquot[cnt], number); | 1589 | if (reserve) |
| 1590 | dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
| 1591 | else | ||
| 1592 | dquot_decr_space(inode->i_dquot[cnt], number); | ||
| 1562 | } | 1593 | } |
| 1563 | inode_sub_bytes(inode, number); | 1594 | inode_decr_space(inode, number, reserve); |
| 1564 | spin_unlock(&dq_data_lock); | 1595 | spin_unlock(&dq_data_lock); |
| 1565 | /* Dirtify all the dquots - this can block when journalling */ | 1596 | |
| 1566 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1597 | if (reserve) |
| 1567 | if (inode->i_dquot[cnt]) | 1598 | goto out_unlock; |
| 1568 | mark_dquot_dirty(inode->i_dquot[cnt]); | 1599 | mark_all_dquot_dirty(inode->i_dquot); |
| 1600 | out_unlock: | ||
| 1569 | flush_warnings(inode->i_dquot, warntype); | 1601 | flush_warnings(inode->i_dquot, warntype); |
| 1570 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1602 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1571 | return QUOTA_OK; | 1603 | return QUOTA_OK; |
| 1572 | } | 1604 | } |
| 1605 | |||
| 1606 | int dquot_free_space(struct inode *inode, qsize_t number) | ||
| 1607 | { | ||
| 1608 | return __dquot_free_space(inode, number, 0); | ||
| 1609 | } | ||
| 1573 | EXPORT_SYMBOL(dquot_free_space); | 1610 | EXPORT_SYMBOL(dquot_free_space); |
| 1574 | 1611 | ||
| 1575 | /* | 1612 | /* |
| 1613 | * Release reserved quota space | ||
| 1614 | */ | ||
| 1615 | void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
| 1616 | { | ||
| 1617 | __dquot_free_space(inode, number, 1); | ||
| 1618 | |||
| 1619 | } | ||
| 1620 | EXPORT_SYMBOL(dquot_release_reserved_space); | ||
| 1621 | |||
| 1622 | /* | ||
| 1576 | * This operation can block, but only after everything is updated | 1623 | * This operation can block, but only after everything is updated |
| 1577 | */ | 1624 | */ |
| 1578 | int dquot_free_inode(const struct inode *inode, qsize_t number) | 1625 | int dquot_free_inode(const struct inode *inode, qsize_t number) |
| @@ -1599,10 +1646,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) | |||
| 1599 | dquot_decr_inodes(inode->i_dquot[cnt], number); | 1646 | dquot_decr_inodes(inode->i_dquot[cnt], number); |
| 1600 | } | 1647 | } |
| 1601 | spin_unlock(&dq_data_lock); | 1648 | spin_unlock(&dq_data_lock); |
| 1602 | /* Dirtify all the dquots - this can block when journalling */ | 1649 | mark_all_dquot_dirty(inode->i_dquot); |
| 1603 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 1604 | if (inode->i_dquot[cnt]) | ||
| 1605 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
| 1606 | flush_warnings(inode->i_dquot, warntype); | 1650 | flush_warnings(inode->i_dquot, warntype); |
| 1607 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1651 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1608 | return QUOTA_OK; | 1652 | return QUOTA_OK; |
| @@ -1610,19 +1654,6 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) | |||
| 1610 | EXPORT_SYMBOL(dquot_free_inode); | 1654 | EXPORT_SYMBOL(dquot_free_inode); |
| 1611 | 1655 | ||
| 1612 | /* | 1656 | /* |
| 1613 | * call back function, get reserved quota space from underlying fs | ||
| 1614 | */ | ||
| 1615 | qsize_t dquot_get_reserved_space(struct inode *inode) | ||
| 1616 | { | ||
| 1617 | qsize_t reserved_space = 0; | ||
| 1618 | |||
| 1619 | if (sb_any_quota_active(inode->i_sb) && | ||
| 1620 | inode->i_sb->dq_op->get_reserved_space) | ||
| 1621 | reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); | ||
| 1622 | return reserved_space; | ||
| 1623 | } | ||
| 1624 | |||
| 1625 | /* | ||
| 1626 | * Transfer the number of inode and blocks from one diskquota to an other. | 1657 | * Transfer the number of inode and blocks from one diskquota to an other. |
| 1627 | * | 1658 | * |
| 1628 | * This operation can block, but only after everything is updated | 1659 | * This operation can block, but only after everything is updated |
| @@ -1665,7 +1696,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) | |||
| 1665 | } | 1696 | } |
| 1666 | spin_lock(&dq_data_lock); | 1697 | spin_lock(&dq_data_lock); |
| 1667 | cur_space = inode_get_bytes(inode); | 1698 | cur_space = inode_get_bytes(inode); |
| 1668 | rsv_space = dquot_get_reserved_space(inode); | 1699 | rsv_space = inode_get_rsv_space(inode); |
| 1669 | space = cur_space + rsv_space; | 1700 | space = cur_space + rsv_space; |
| 1670 | /* Build the transfer_from list and check the limits */ | 1701 | /* Build the transfer_from list and check the limits */ |
| 1671 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1702 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| @@ -1709,25 +1740,18 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) | |||
| 1709 | spin_unlock(&dq_data_lock); | 1740 | spin_unlock(&dq_data_lock); |
| 1710 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1741 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1711 | 1742 | ||
| 1712 | /* Dirtify all the dquots - this can block when journalling */ | 1743 | mark_all_dquot_dirty(transfer_from); |
| 1713 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1744 | mark_all_dquot_dirty(transfer_to); |
| 1714 | if (transfer_from[cnt]) | 1745 | /* The reference we got is transferred to the inode */ |
| 1715 | mark_dquot_dirty(transfer_from[cnt]); | 1746 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
| 1716 | if (transfer_to[cnt]) { | 1747 | transfer_to[cnt] = NULL; |
| 1717 | mark_dquot_dirty(transfer_to[cnt]); | ||
| 1718 | /* The reference we got is transferred to the inode */ | ||
| 1719 | transfer_to[cnt] = NULL; | ||
| 1720 | } | ||
| 1721 | } | ||
| 1722 | warn_put_all: | 1748 | warn_put_all: |
| 1723 | flush_warnings(transfer_to, warntype_to); | 1749 | flush_warnings(transfer_to, warntype_to); |
| 1724 | flush_warnings(transfer_from, warntype_from_inodes); | 1750 | flush_warnings(transfer_from, warntype_from_inodes); |
| 1725 | flush_warnings(transfer_from, warntype_from_space); | 1751 | flush_warnings(transfer_from, warntype_from_space); |
| 1726 | put_all: | 1752 | put_all: |
| 1727 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1753 | dqput_all(transfer_from); |
| 1728 | dqput(transfer_from[cnt]); | 1754 | dqput_all(transfer_to); |
| 1729 | dqput(transfer_to[cnt]); | ||
| 1730 | } | ||
| 1731 | return ret; | 1755 | return ret; |
| 1732 | over_quota: | 1756 | over_quota: |
| 1733 | spin_unlock(&dq_data_lock); | 1757 | spin_unlock(&dq_data_lock); |
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index 3dfc23e02135..e3da02f4986f 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c | |||
| @@ -97,8 +97,11 @@ static int v2_read_file_info(struct super_block *sb, int type) | |||
| 97 | unsigned int version; | 97 | unsigned int version; |
| 98 | 98 | ||
| 99 | if (!v2_read_header(sb, type, &dqhead)) | 99 | if (!v2_read_header(sb, type, &dqhead)) |
| 100 | return 0; | 100 | return -1; |
| 101 | version = le32_to_cpu(dqhead.dqh_version); | 101 | version = le32_to_cpu(dqhead.dqh_version); |
| 102 | if ((info->dqi_fmt_id == QFMT_VFS_V0 && version != 0) || | ||
| 103 | (info->dqi_fmt_id == QFMT_VFS_V1 && version != 1)) | ||
| 104 | return -1; | ||
| 102 | 105 | ||
| 103 | size = sb->s_op->quota_read(sb, type, (char *)&dinfo, | 106 | size = sb->s_op->quota_read(sb, type, (char *)&dinfo, |
| 104 | sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); | 107 | sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); |
| @@ -120,8 +123,8 @@ static int v2_read_file_info(struct super_block *sb, int type) | |||
| 120 | info->dqi_maxilimit = 0xffffffff; | 123 | info->dqi_maxilimit = 0xffffffff; |
| 121 | } else { | 124 | } else { |
| 122 | /* used space is stored as unsigned 64-bit value */ | 125 | /* used space is stored as unsigned 64-bit value */ |
| 123 | info->dqi_maxblimit = 0xffffffffffffffff; /* 2^64-1 */ | 126 | info->dqi_maxblimit = 0xffffffffffffffffULL; /* 2^64-1 */ |
| 124 | info->dqi_maxilimit = 0xffffffffffffffff; | 127 | info->dqi_maxilimit = 0xffffffffffffffffULL; |
| 125 | } | 128 | } |
| 126 | info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); | 129 | info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); |
| 127 | info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); | 130 | info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); |
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 32fae4040ebf..2efc57173fd7 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c | |||
| @@ -60,7 +60,7 @@ const struct inode_operations ramfs_file_inode_operations = { | |||
| 60 | */ | 60 | */ |
| 61 | int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) | 61 | int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) |
| 62 | { | 62 | { |
| 63 | unsigned long npages, xpages, loop, limit; | 63 | unsigned long npages, xpages, loop; |
| 64 | struct page *pages; | 64 | struct page *pages; |
| 65 | unsigned order; | 65 | unsigned order; |
| 66 | void *data; | 66 | void *data; |
diff --git a/fs/reiserfs/Kconfig b/fs/reiserfs/Kconfig index ac7cd75c86f8..513f431038f9 100644 --- a/fs/reiserfs/Kconfig +++ b/fs/reiserfs/Kconfig | |||
| @@ -1,7 +1,6 @@ | |||
| 1 | config REISERFS_FS | 1 | config REISERFS_FS |
| 2 | tristate "Reiserfs support" | 2 | tristate "Reiserfs support" |
| 3 | select CRC32 | 3 | select CRC32 |
| 4 | select FS_JOURNAL_INFO | ||
| 5 | help | 4 | help |
| 6 | Stores not just filenames but the files themselves in a balanced | 5 | Stores not just filenames but the files themselves in a balanced |
| 7 | tree. Uses journalling. | 6 | tree. Uses journalling. |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 3a28e7751b3c..290ae38fca8a 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
| @@ -2538,6 +2538,12 @@ static int reiserfs_writepage(struct page *page, struct writeback_control *wbc) | |||
| 2538 | return reiserfs_write_full_page(page, wbc); | 2538 | return reiserfs_write_full_page(page, wbc); |
| 2539 | } | 2539 | } |
| 2540 | 2540 | ||
| 2541 | static void reiserfs_truncate_failed_write(struct inode *inode) | ||
| 2542 | { | ||
| 2543 | truncate_inode_pages(inode->i_mapping, inode->i_size); | ||
| 2544 | reiserfs_truncate_file(inode, 0); | ||
| 2545 | } | ||
| 2546 | |||
| 2541 | static int reiserfs_write_begin(struct file *file, | 2547 | static int reiserfs_write_begin(struct file *file, |
| 2542 | struct address_space *mapping, | 2548 | struct address_space *mapping, |
| 2543 | loff_t pos, unsigned len, unsigned flags, | 2549 | loff_t pos, unsigned len, unsigned flags, |
| @@ -2604,6 +2610,8 @@ static int reiserfs_write_begin(struct file *file, | |||
| 2604 | if (ret) { | 2610 | if (ret) { |
| 2605 | unlock_page(page); | 2611 | unlock_page(page); |
| 2606 | page_cache_release(page); | 2612 | page_cache_release(page); |
| 2613 | /* Truncate allocated blocks */ | ||
| 2614 | reiserfs_truncate_failed_write(inode); | ||
| 2607 | } | 2615 | } |
| 2608 | return ret; | 2616 | return ret; |
| 2609 | } | 2617 | } |
| @@ -2701,9 +2709,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, | |||
| 2701 | ** transaction tracking stuff when the size changes. So, we have | 2709 | ** transaction tracking stuff when the size changes. So, we have |
| 2702 | ** to do the i_size updates here. | 2710 | ** to do the i_size updates here. |
| 2703 | */ | 2711 | */ |
| 2704 | pos += copied; | 2712 | if (pos + copied > inode->i_size) { |
| 2705 | |||
| 2706 | if (pos > inode->i_size) { | ||
| 2707 | struct reiserfs_transaction_handle myth; | 2713 | struct reiserfs_transaction_handle myth; |
| 2708 | lock_depth = reiserfs_write_lock_once(inode->i_sb); | 2714 | lock_depth = reiserfs_write_lock_once(inode->i_sb); |
| 2709 | locked = true; | 2715 | locked = true; |
| @@ -2721,7 +2727,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, | |||
| 2721 | goto journal_error; | 2727 | goto journal_error; |
| 2722 | 2728 | ||
| 2723 | reiserfs_update_inode_transaction(inode); | 2729 | reiserfs_update_inode_transaction(inode); |
| 2724 | inode->i_size = pos; | 2730 | inode->i_size = pos + copied; |
| 2725 | /* | 2731 | /* |
| 2726 | * this will just nest into our transaction. It's important | 2732 | * this will just nest into our transaction. It's important |
| 2727 | * to use mark_inode_dirty so the inode gets pushed around on the | 2733 | * to use mark_inode_dirty so the inode gets pushed around on the |
| @@ -2751,6 +2757,10 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, | |||
| 2751 | reiserfs_write_unlock_once(inode->i_sb, lock_depth); | 2757 | reiserfs_write_unlock_once(inode->i_sb, lock_depth); |
| 2752 | unlock_page(page); | 2758 | unlock_page(page); |
| 2753 | page_cache_release(page); | 2759 | page_cache_release(page); |
| 2760 | |||
| 2761 | if (pos + len > inode->i_size) | ||
| 2762 | reiserfs_truncate_failed_write(inode); | ||
| 2763 | |||
| 2754 | return ret == 0 ? copied : ret; | 2764 | return ret == 0 ? copied : ret; |
| 2755 | 2765 | ||
| 2756 | journal_error: | 2766 | journal_error: |
diff --git a/fs/signalfd.c b/fs/signalfd.c index b07565c94386..1dabe4ee02fe 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c | |||
| @@ -236,7 +236,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, | |||
| 236 | * anon_inode_getfd() will install the fd. | 236 | * anon_inode_getfd() will install the fd. |
| 237 | */ | 237 | */ |
| 238 | ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx, | 238 | ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx, |
| 239 | flags & (O_CLOEXEC | O_NONBLOCK)); | 239 | O_RDWR | (flags & (O_CLOEXEC | O_NONBLOCK))); |
| 240 | if (ufd < 0) | 240 | if (ufd < 0) |
| 241 | kfree(ctx); | 241 | kfree(ctx); |
| 242 | } else { | 242 | } else { |
diff --git a/fs/stack.c b/fs/stack.c index 67716f6a1a4a..4a6f7f440658 100644 --- a/fs/stack.c +++ b/fs/stack.c | |||
| @@ -7,18 +7,63 @@ | |||
| 7 | * This function cannot be inlined since i_size_{read,write} is rather | 7 | * This function cannot be inlined since i_size_{read,write} is rather |
| 8 | * heavy-weight on 32-bit systems | 8 | * heavy-weight on 32-bit systems |
| 9 | */ | 9 | */ |
| 10 | void fsstack_copy_inode_size(struct inode *dst, const struct inode *src) | 10 | void fsstack_copy_inode_size(struct inode *dst, struct inode *src) |
| 11 | { | 11 | { |
| 12 | i_size_write(dst, i_size_read((struct inode *)src)); | 12 | loff_t i_size; |
| 13 | dst->i_blocks = src->i_blocks; | 13 | blkcnt_t i_blocks; |
| 14 | |||
| 15 | /* | ||
| 16 | * i_size_read() includes its own seqlocking and protection from | ||
| 17 | * preemption (see include/linux/fs.h): we need nothing extra for | ||
| 18 | * that here, and prefer to avoid nesting locks than attempt to keep | ||
| 19 | * i_size and i_blocks in sync together. | ||
| 20 | */ | ||
| 21 | i_size = i_size_read(src); | ||
| 22 | |||
| 23 | /* | ||
| 24 | * But if CONFIG_LBDAF (on 32-bit), we ought to make an effort to | ||
| 25 | * keep the two halves of i_blocks in sync despite SMP or PREEMPT - | ||
| 26 | * though stat's generic_fillattr() doesn't bother, and we won't be | ||
| 27 | * applying quotas (where i_blocks does become important) at the | ||
| 28 | * upper level. | ||
| 29 | * | ||
| 30 | * We don't actually know what locking is used at the lower level; | ||
| 31 | * but if it's a filesystem that supports quotas, it will be using | ||
| 32 | * i_lock as in inode_add_bytes(). tmpfs uses other locking, and | ||
| 33 | * its 32-bit is (just) able to exceed 2TB i_size with the aid of | ||
| 34 | * holes; but its i_blocks cannot carry into the upper long without | ||
| 35 | * almost 2TB swap - let's ignore that case. | ||
| 36 | */ | ||
| 37 | if (sizeof(i_blocks) > sizeof(long)) | ||
| 38 | spin_lock(&src->i_lock); | ||
| 39 | i_blocks = src->i_blocks; | ||
| 40 | if (sizeof(i_blocks) > sizeof(long)) | ||
| 41 | spin_unlock(&src->i_lock); | ||
| 42 | |||
| 43 | /* | ||
| 44 | * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for | ||
| 45 | * fsstack_copy_inode_size() to hold some lock around | ||
| 46 | * i_size_write(), otherwise i_size_read() may spin forever (see | ||
| 47 | * include/linux/fs.h). We don't necessarily hold i_mutex when this | ||
| 48 | * is called, so take i_lock for that case. | ||
| 49 | * | ||
| 50 | * And if CONFIG_LBADF (on 32-bit), continue our effort to keep the | ||
| 51 | * two halves of i_blocks in sync despite SMP or PREEMPT: use i_lock | ||
| 52 | * for that case too, and do both at once by combining the tests. | ||
| 53 | * | ||
| 54 | * There is none of this locking overhead in the 64-bit case. | ||
| 55 | */ | ||
| 56 | if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long)) | ||
| 57 | spin_lock(&dst->i_lock); | ||
| 58 | i_size_write(dst, i_size); | ||
| 59 | dst->i_blocks = i_blocks; | ||
| 60 | if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long)) | ||
| 61 | spin_unlock(&dst->i_lock); | ||
| 14 | } | 62 | } |
| 15 | EXPORT_SYMBOL_GPL(fsstack_copy_inode_size); | 63 | EXPORT_SYMBOL_GPL(fsstack_copy_inode_size); |
| 16 | 64 | ||
| 17 | /* copy all attributes; get_nlinks is optional way to override the i_nlink | 65 | /* copy all attributes */ |
| 18 | * copying | 66 | void fsstack_copy_attr_all(struct inode *dest, const struct inode *src) |
| 19 | */ | ||
| 20 | void fsstack_copy_attr_all(struct inode *dest, const struct inode *src, | ||
| 21 | int (*get_nlinks)(struct inode *)) | ||
| 22 | { | 67 | { |
| 23 | dest->i_mode = src->i_mode; | 68 | dest->i_mode = src->i_mode; |
| 24 | dest->i_uid = src->i_uid; | 69 | dest->i_uid = src->i_uid; |
| @@ -29,14 +74,6 @@ void fsstack_copy_attr_all(struct inode *dest, const struct inode *src, | |||
| 29 | dest->i_ctime = src->i_ctime; | 74 | dest->i_ctime = src->i_ctime; |
| 30 | dest->i_blkbits = src->i_blkbits; | 75 | dest->i_blkbits = src->i_blkbits; |
| 31 | dest->i_flags = src->i_flags; | 76 | dest->i_flags = src->i_flags; |
| 32 | 77 | dest->i_nlink = src->i_nlink; | |
| 33 | /* | ||
| 34 | * Update the nlinks AFTER updating the above fields, because the | ||
| 35 | * get_links callback may depend on them. | ||
| 36 | */ | ||
| 37 | if (!get_nlinks) | ||
| 38 | dest->i_nlink = src->i_nlink; | ||
| 39 | else | ||
| 40 | dest->i_nlink = (*get_nlinks)(dest); | ||
| 41 | } | 78 | } |
| 42 | EXPORT_SYMBOL_GPL(fsstack_copy_attr_all); | 79 | EXPORT_SYMBOL_GPL(fsstack_copy_attr_all); |
| @@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename, | |||
| 401 | } | 401 | } |
| 402 | #endif /* __ARCH_WANT_STAT64 */ | 402 | #endif /* __ARCH_WANT_STAT64 */ |
| 403 | 403 | ||
| 404 | void inode_add_bytes(struct inode *inode, loff_t bytes) | 404 | /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */ |
| 405 | void __inode_add_bytes(struct inode *inode, loff_t bytes) | ||
| 405 | { | 406 | { |
| 406 | spin_lock(&inode->i_lock); | ||
| 407 | inode->i_blocks += bytes >> 9; | 407 | inode->i_blocks += bytes >> 9; |
| 408 | bytes &= 511; | 408 | bytes &= 511; |
| 409 | inode->i_bytes += bytes; | 409 | inode->i_bytes += bytes; |
| @@ -411,6 +411,12 @@ void inode_add_bytes(struct inode *inode, loff_t bytes) | |||
| 411 | inode->i_blocks++; | 411 | inode->i_blocks++; |
| 412 | inode->i_bytes -= 512; | 412 | inode->i_bytes -= 512; |
| 413 | } | 413 | } |
| 414 | } | ||
| 415 | |||
| 416 | void inode_add_bytes(struct inode *inode, loff_t bytes) | ||
| 417 | { | ||
| 418 | spin_lock(&inode->i_lock); | ||
| 419 | __inode_add_bytes(inode, bytes); | ||
| 414 | spin_unlock(&inode->i_lock); | 420 | spin_unlock(&inode->i_lock); |
| 415 | } | 421 | } |
| 416 | 422 | ||
diff --git a/fs/super.c b/fs/super.c index 19eb70b374bc..aff046b0fe78 100644 --- a/fs/super.c +++ b/fs/super.c | |||
| @@ -901,8 +901,9 @@ int get_sb_single(struct file_system_type *fs_type, | |||
| 901 | return error; | 901 | return error; |
| 902 | } | 902 | } |
| 903 | s->s_flags |= MS_ACTIVE; | 903 | s->s_flags |= MS_ACTIVE; |
| 904 | } else { | ||
| 905 | do_remount_sb(s, flags, data, 0); | ||
| 904 | } | 906 | } |
| 905 | do_remount_sb(s, flags, data, 0); | ||
| 906 | simple_set_mnt(mnt, s); | 907 | simple_set_mnt(mnt, s); |
| 907 | return 0; | 908 | return 0; |
| 908 | } | 909 | } |
| @@ -355,6 +355,7 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes, | |||
| 355 | { | 355 | { |
| 356 | int ret; | 356 | int ret; |
| 357 | struct file *file; | 357 | struct file *file; |
| 358 | struct address_space *mapping; | ||
| 358 | loff_t endbyte; /* inclusive */ | 359 | loff_t endbyte; /* inclusive */ |
| 359 | int fput_needed; | 360 | int fput_needed; |
| 360 | umode_t i_mode; | 361 | umode_t i_mode; |
| @@ -405,7 +406,28 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes, | |||
| 405 | !S_ISLNK(i_mode)) | 406 | !S_ISLNK(i_mode)) |
| 406 | goto out_put; | 407 | goto out_put; |
| 407 | 408 | ||
| 408 | ret = do_sync_mapping_range(file->f_mapping, offset, endbyte, flags); | 409 | mapping = file->f_mapping; |
| 410 | if (!mapping) { | ||
| 411 | ret = -EINVAL; | ||
| 412 | goto out_put; | ||
| 413 | } | ||
| 414 | |||
| 415 | ret = 0; | ||
| 416 | if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) { | ||
| 417 | ret = filemap_fdatawait_range(mapping, offset, endbyte); | ||
| 418 | if (ret < 0) | ||
| 419 | goto out_put; | ||
| 420 | } | ||
| 421 | |||
| 422 | if (flags & SYNC_FILE_RANGE_WRITE) { | ||
| 423 | ret = filemap_fdatawrite_range(mapping, offset, endbyte); | ||
| 424 | if (ret < 0) | ||
| 425 | goto out_put; | ||
| 426 | } | ||
| 427 | |||
| 428 | if (flags & SYNC_FILE_RANGE_WAIT_AFTER) | ||
| 429 | ret = filemap_fdatawait_range(mapping, offset, endbyte); | ||
| 430 | |||
| 409 | out_put: | 431 | out_put: |
| 410 | fput_light(file, fput_needed); | 432 | fput_light(file, fput_needed); |
| 411 | out: | 433 | out: |
| @@ -437,38 +459,3 @@ asmlinkage long SyS_sync_file_range2(long fd, long flags, | |||
| 437 | } | 459 | } |
| 438 | SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2); | 460 | SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2); |
| 439 | #endif | 461 | #endif |
| 440 | |||
| 441 | /* | ||
| 442 | * `endbyte' is inclusive | ||
| 443 | */ | ||
| 444 | int do_sync_mapping_range(struct address_space *mapping, loff_t offset, | ||
| 445 | loff_t endbyte, unsigned int flags) | ||
| 446 | { | ||
| 447 | int ret; | ||
| 448 | |||
| 449 | if (!mapping) { | ||
| 450 | ret = -EINVAL; | ||
| 451 | goto out; | ||
| 452 | } | ||
| 453 | |||
| 454 | ret = 0; | ||
| 455 | if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) { | ||
| 456 | ret = filemap_fdatawait_range(mapping, offset, endbyte); | ||
| 457 | if (ret < 0) | ||
| 458 | goto out; | ||
| 459 | } | ||
| 460 | |||
| 461 | if (flags & SYNC_FILE_RANGE_WRITE) { | ||
| 462 | ret = __filemap_fdatawrite_range(mapping, offset, endbyte, | ||
| 463 | WB_SYNC_ALL); | ||
| 464 | if (ret < 0) | ||
| 465 | goto out; | ||
| 466 | } | ||
| 467 | |||
| 468 | if (flags & SYNC_FILE_RANGE_WAIT_AFTER) { | ||
| 469 | ret = filemap_fdatawait_range(mapping, offset, endbyte); | ||
| 470 | } | ||
| 471 | out: | ||
| 472 | return ret; | ||
| 473 | } | ||
| 474 | EXPORT_SYMBOL_GPL(do_sync_mapping_range); | ||
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c index 60c702bc10ae..a0a500af24a1 100644 --- a/fs/sysfs/bin.c +++ b/fs/sysfs/bin.c | |||
| @@ -483,7 +483,8 @@ void unmap_bin_file(struct sysfs_dirent *attr_sd) | |||
| 483 | * @attr: attribute descriptor. | 483 | * @attr: attribute descriptor. |
| 484 | */ | 484 | */ |
| 485 | 485 | ||
| 486 | int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr) | 486 | int sysfs_create_bin_file(struct kobject *kobj, |
| 487 | const struct bin_attribute *attr) | ||
| 487 | { | 488 | { |
| 488 | BUG_ON(!kobj || !kobj->sd || !attr); | 489 | BUG_ON(!kobj || !kobj->sd || !attr); |
| 489 | 490 | ||
| @@ -497,7 +498,8 @@ int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr) | |||
| 497 | * @attr: attribute descriptor. | 498 | * @attr: attribute descriptor. |
| 498 | */ | 499 | */ |
| 499 | 500 | ||
| 500 | void sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr) | 501 | void sysfs_remove_bin_file(struct kobject *kobj, |
| 502 | const struct bin_attribute *attr) | ||
| 501 | { | 503 | { |
| 502 | sysfs_hash_and_remove(kobj->sd, attr->attr.name); | 504 | sysfs_hash_and_remove(kobj->sd, attr->attr.name); |
| 503 | } | 505 | } |
diff --git a/fs/timerfd.c b/fs/timerfd.c index b042bd7034b1..1bfc95ad5f71 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c | |||
| @@ -200,7 +200,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) | |||
| 200 | hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS); | 200 | hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS); |
| 201 | 201 | ||
| 202 | ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, | 202 | ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, |
| 203 | flags & TFD_SHARED_FCNTL_FLAGS); | 203 | O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS)); |
| 204 | if (ufd < 0) | 204 | if (ufd < 0) |
| 205 | kfree(ctx); | 205 | kfree(ctx); |
| 206 | 206 | ||
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 39849f887e72..16a6444330ec 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
| @@ -45,7 +45,7 @@ | |||
| 45 | * | 45 | * |
| 46 | * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the | 46 | * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the |
| 47 | * read-ahead path does not lock it ("sys_read -> generic_file_aio_read -> | 47 | * read-ahead path does not lock it ("sys_read -> generic_file_aio_read -> |
| 48 | * ondemand_readahead -> readpage"). In case of readahead, @I_LOCK flag is not | 48 | * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not |
| 49 | * set as well. However, UBIFS disables readahead. | 49 | * set as well. However, UBIFS disables readahead. |
| 50 | */ | 50 | */ |
| 51 | 51 | ||
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 1d5b298ba8b2..225946012d0b 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c | |||
| @@ -794,7 +794,7 @@ xfs_setup_inode( | |||
| 794 | struct inode *inode = &ip->i_vnode; | 794 | struct inode *inode = &ip->i_vnode; |
| 795 | 795 | ||
| 796 | inode->i_ino = ip->i_ino; | 796 | inode->i_ino = ip->i_ino; |
| 797 | inode->i_state = I_NEW|I_LOCK; | 797 | inode->i_state = I_NEW; |
| 798 | inode_add_to_lists(ip->i_mount->m_super, inode); | 798 | inode_add_to_lists(ip->i_mount->m_super, inode); |
| 799 | 799 | ||
| 800 | inode->i_mode = ip->i_d.di_mode; | 800 | inode->i_mode = ip->i_d.di_mode; |
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 0de36c2a46f1..fa402a6bbbcf 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
| @@ -91,7 +91,7 @@ xfs_inode_alloc( | |||
| 91 | ip->i_new_size = 0; | 91 | ip->i_new_size = 0; |
| 92 | 92 | ||
| 93 | /* prevent anyone from using this yet */ | 93 | /* prevent anyone from using this yet */ |
| 94 | VFS_I(ip)->i_state = I_NEW|I_LOCK; | 94 | VFS_I(ip)->i_state = I_NEW; |
| 95 | 95 | ||
| 96 | return ip; | 96 | return ip; |
| 97 | } | 97 | } |
| @@ -217,7 +217,7 @@ xfs_iget_cache_hit( | |||
| 217 | trace_xfs_iget_reclaim(ip); | 217 | trace_xfs_iget_reclaim(ip); |
| 218 | goto out_error; | 218 | goto out_error; |
| 219 | } | 219 | } |
| 220 | inode->i_state = I_LOCK|I_NEW; | 220 | inode->i_state = I_NEW; |
| 221 | } else { | 221 | } else { |
| 222 | /* If the VFS inode is being torn down, pause and try again. */ | 222 | /* If the VFS inode is being torn down, pause and try again. */ |
| 223 | if (!igrab(inode)) { | 223 | if (!igrab(inode)) { |
