author     Ingo Molnar <mingo@elte.hu>    2008-08-14 06:19:59 -0400
committer  Ingo Molnar <mingo@elte.hu>    2008-08-14 06:19:59 -0400
commit     8d7ccaa545490cdffdfaff0842436a8dd85cf47b
tree       8129b5907161bc6ae26deb3645ce1e280c5e1f51    /fs/hugetlbfs/inode.c
parent     b2139aa0eec330c711c5a279db361e5ef1178e78
parent     30a2f3c60a84092c8084dfe788b710f8d0768cd4
Merge commit 'v2.6.27-rc3' into x86/prototypes
Conflicts:
include/asm-x86/dma-mapping.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs/hugetlbfs/inode.c')
-rw-r--r--  fs/hugetlbfs/inode.c | 103
1 file changed, 69 insertions(+), 34 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index aeabf80f81a5..3f58923fb39b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -53,6 +53,7 @@ int sysctl_hugetlb_shm_group;
 enum {
 	Opt_size, Opt_nr_inodes,
 	Opt_mode, Opt_uid, Opt_gid,
+	Opt_pagesize,
 	Opt_err,
 };
 
@@ -62,6 +63,7 @@ static match_table_t tokens = {
 	{Opt_mode,	"mode=%o"},
 	{Opt_uid,	"uid=%u"},
 	{Opt_gid,	"gid=%u"},
+	{Opt_pagesize,	"pagesize=%s"},
 	{Opt_err,	NULL},
 };
 
@@ -80,6 +82,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	struct inode *inode = file->f_path.dentry->d_inode;
 	loff_t len, vma_len;
 	int ret;
+	struct hstate *h = hstate_file(file);
 
 	/*
 	 * vma address alignment (but not the pgoff alignment) has
@@ -92,7 +95,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
 	vma->vm_ops = &hugetlb_vm_ops;
 
-	if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT))
+	if (vma->vm_pgoff & ~(huge_page_mask(h) >> PAGE_SHIFT))
 		return -EINVAL;
 
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
@@ -103,9 +106,9 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	ret = -ENOMEM;
 	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
-	if (vma->vm_flags & VM_MAYSHARE &&
-	    hugetlb_reserve_pages(inode, vma->vm_pgoff >> (HPAGE_SHIFT-PAGE_SHIFT),
-				  len >> HPAGE_SHIFT))
+	if (hugetlb_reserve_pages(inode,
+				vma->vm_pgoff >> huge_page_order(h),
+				len >> huge_page_shift(h), vma))
 		goto out;
 
 	ret = 0;
@@ -130,20 +133,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
+	struct hstate *h = hstate_file(file);
 
-	if (len & ~HPAGE_MASK)
+	if (len & ~huge_page_mask(h))
 		return -EINVAL;
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len))
+		if (prepare_hugepage_range(file, addr, len))
 			return -EINVAL;
 		return addr;
 	}
 
 	if (addr) {
-		addr = ALIGN(addr, HPAGE_SIZE);
+		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
@@ -156,7 +160,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	start_addr = TASK_UNMAPPED_BASE;
 
 full_search:
-	addr = ALIGN(start_addr, HPAGE_SIZE);
+	addr = ALIGN(start_addr, huge_page_size(h));
 
 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
 		/* At this point:  (!vma || addr < vma->vm_end). */
@@ -174,7 +178,7 @@ full_search:
 
 		if (!vma || addr + len <= vma->vm_start)
 			return addr;
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
+		addr = ALIGN(vma->vm_end, huge_page_size(h));
 	}
 }
 #endif
@@ -225,10 +229,11 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
 static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
 			size_t len, loff_t *ppos)
 {
+	struct hstate *h = hstate_file(filp);
 	struct address_space *mapping = filp->f_mapping;
 	struct inode *inode = mapping->host;
-	unsigned long index = *ppos >> HPAGE_SHIFT;
-	unsigned long offset = *ppos & ~HPAGE_MASK;
+	unsigned long index = *ppos >> huge_page_shift(h);
+	unsigned long offset = *ppos & ~huge_page_mask(h);
 	unsigned long end_index;
 	loff_t isize;
 	ssize_t retval = 0;
@@ -243,17 +248,17 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
 	if (!isize)
 		goto out;
 
-	end_index = (isize - 1) >> HPAGE_SHIFT;
+	end_index = (isize - 1) >> huge_page_shift(h);
 	for (;;) {
 		struct page *page;
-		int nr, ret;
+		unsigned long nr, ret;
 
 		/* nr is the maximum number of bytes to copy from this page */
-		nr = HPAGE_SIZE;
+		nr = huge_page_size(h);
 		if (index >= end_index) {
 			if (index > end_index)
 				goto out;
-			nr = ((isize - 1) & ~HPAGE_MASK) + 1;
+			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
 			if (nr <= offset) {
 				goto out;
 			}
@@ -287,8 +292,8 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
 		offset += ret;
 		retval += ret;
 		len -= ret;
-		index += offset >> HPAGE_SHIFT;
-		offset &= ~HPAGE_MASK;
+		index += offset >> huge_page_shift(h);
+		offset &= ~huge_page_mask(h);
 
 		if (page)
 			page_cache_release(page);
@@ -298,7 +303,7 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
 			break;
 	}
 out:
-	*ppos = ((loff_t)index << HPAGE_SHIFT) + offset;
+	*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
 	mutex_unlock(&inode->i_mutex);
 	return retval;
 }
@@ -339,8 +344,9 @@ static void truncate_huge_page(struct page *page)
 
 static void truncate_hugepages(struct inode *inode, loff_t lstart)
 {
+	struct hstate *h = hstate_inode(inode);
 	struct address_space *mapping = &inode->i_data;
-	const pgoff_t start = lstart >> HPAGE_SHIFT;
+	const pgoff_t start = lstart >> huge_page_shift(h);
 	struct pagevec pvec;
 	pgoff_t next;
 	int i, freed = 0;
@@ -441,7 +447,7 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
 			v_offset = 0;
 
 		__unmap_hugepage_range(vma,
-				vma->vm_start + v_offset, vma->vm_end);
+				vma->vm_start + v_offset, vma->vm_end, NULL);
 	}
 }
 
@@ -449,8 +455,9 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 {
 	pgoff_t pgoff;
 	struct address_space *mapping = inode->i_mapping;
+	struct hstate *h = hstate_inode(inode);
 
-	BUG_ON(offset & ~HPAGE_MASK);
+	BUG_ON(offset & ~huge_page_mask(h));
 	pgoff = offset >> PAGE_SHIFT;
 
 	i_size_write(inode, offset);
@@ -465,6 +472,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
+	struct hstate *h = hstate_inode(inode);
 	int error;
 	unsigned int ia_valid = attr->ia_valid;
 
@@ -476,7 +484,7 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
 
 	if (ia_valid & ATTR_SIZE) {
 		error = -EINVAL;
-		if (!(attr->ia_size & ~HPAGE_MASK))
+		if (!(attr->ia_size & ~huge_page_mask(h)))
 			error = hugetlb_vmtruncate(inode, attr->ia_size);
 		if (error)
 			goto out;
@@ -610,9 +618,10 @@ static int hugetlbfs_set_page_dirty(struct page *page)
 static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
+	struct hstate *h = hstate_inode(dentry->d_inode);
 
 	buf->f_type = HUGETLBFS_MAGIC;
-	buf->f_bsize = HPAGE_SIZE;
+	buf->f_bsize = huge_page_size(h);
 	if (sbinfo) {
 		spin_lock(&sbinfo->stat_lock);
 		/* If no limits set, just report 0 for max/free/used
@@ -696,7 +705,7 @@ static const struct address_space_operations hugetlbfs_aops = {
 };
 
 
-static void init_once(struct kmem_cache *cachep, void *foo)
+static void init_once(void *foo)
 {
 	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
 
@@ -743,6 +752,8 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
 	char *p, *rest;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
+	unsigned long long size = 0;
+	enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;
 
 	if (!options)
 		return 0;
@@ -773,17 +784,13 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
 			break;
 
 		case Opt_size: {
-			unsigned long long size;
 			/* memparse() will accept a K/M/G without a digit */
 			if (!isdigit(*args[0].from))
 				goto bad_val;
 			size = memparse(args[0].from, &rest);
-			if (*rest == '%') {
-				size <<= HPAGE_SHIFT;
-				size *= max_huge_pages;
-				do_div(size, 100);
-			}
-			pconfig->nr_blocks = (size >> HPAGE_SHIFT);
+			setsize = SIZE_STD;
+			if (*rest == '%')
+				setsize = SIZE_PERCENT;
 			break;
 		}
 
@@ -794,6 +801,19 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
 			pconfig->nr_inodes = memparse(args[0].from, &rest);
 			break;
 
+		case Opt_pagesize: {
+			unsigned long ps;
+			ps = memparse(args[0].from, &rest);
+			pconfig->hstate = size_to_hstate(ps);
+			if (!pconfig->hstate) {
+				printk(KERN_ERR
+					"hugetlbfs: Unsupported page size %lu MB\n",
+					ps >> 20);
+				return -EINVAL;
+			}
+			break;
+		}
+
 		default:
 			printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
 				p);
@@ -801,6 +821,18 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
 			break;
 		}
 	}
+
+	/* Do size after hstate is set up */
+	if (setsize > NO_SIZE) {
+		struct hstate *h = pconfig->hstate;
+		if (setsize == SIZE_PERCENT) {
+			size <<= huge_page_shift(h);
+			size *= h->max_huge_pages;
+			do_div(size, 100);
+		}
+		pconfig->nr_blocks = (size >> huge_page_shift(h));
+	}
+
 	return 0;
 
 bad_val:
@@ -825,6 +857,7 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
 	config.uid = current->fsuid;
 	config.gid = current->fsgid;
 	config.mode = 0755;
+	config.hstate = &default_hstate;
 	ret = hugetlbfs_parse_options(data, &config);
 	if (ret)
 		return ret;
@@ -833,14 +866,15 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!sbinfo)
 		return -ENOMEM;
 	sb->s_fs_info = sbinfo;
+	sbinfo->hstate = config.hstate;
 	spin_lock_init(&sbinfo->stat_lock);
 	sbinfo->max_blocks = config.nr_blocks;
 	sbinfo->free_blocks = config.nr_blocks;
 	sbinfo->max_inodes = config.nr_inodes;
 	sbinfo->free_inodes = config.nr_inodes;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	sb->s_blocksize = HPAGE_SIZE;
-	sb->s_blocksize_bits = HPAGE_SHIFT;
+	sb->s_blocksize = huge_page_size(config.hstate);
+	sb->s_blocksize_bits = huge_page_shift(config.hstate);
 	sb->s_magic = HUGETLBFS_MAGIC;
 	sb->s_op = &hugetlbfs_ops;
 	sb->s_time_gran = 1;
@@ -942,7 +976,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size)
 		goto out_dentry;
 
 	error = -ENOMEM;
-	if (hugetlb_reserve_pages(inode, 0, size >> HPAGE_SHIFT))
+	if (hugetlb_reserve_pages(inode, 0,
+			size >> huge_page_shift(hstate_inode(inode)), NULL))
 		goto out_inode;
 
 	d_instantiate(dentry, inode);
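
The hugetlbfs hunks above replace the single-size HPAGE_SIZE/HPAGE_MASK/HPAGE_SHIFT macros with per-mount hstate accessors and add a "pagesize=" mount option parsed via size_to_hstate(). A minimal usage sketch, not part of the commit itself; the mount point and the accepted page sizes are illustrative and depend on what the running kernel and hardware support:

    # Request 2 MB hugepages for this mount; if size_to_hstate() finds no
    # matching hstate, the new Opt_pagesize handler logs
    # "hugetlbfs: Unsupported page size ... MB" and the mount fails with EINVAL.
    mount -t hugetlbfs -o pagesize=2M,size=512M none /mnt/huge

Without "pagesize=", the mount uses default_hstate (set in hugetlbfs_fill_super before option parsing), which preserves the previous single-size behaviour; the "size=" option is now converted to blocks only after the hstate is known, using huge_page_shift() of the selected page size.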
