Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	|  1
-rw-r--r--	mm/mmap.c	|  6
-rw-r--r--	mm/shmem.c	| 45
-rw-r--r--	mm/slub.c	| 33
-rw-r--r--	mm/swap.c	|  1
-rw-r--r--	mm/truncate.c	| 10
6 files changed, 46 insertions(+), 50 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 45a2d18df849..20e5642e9f9f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -151,6 +151,7 @@ void remove_from_page_cache(struct page *page)
 	spin_unlock_irq(&mapping->tree_lock);
 	mem_cgroup_uncharge_cache_page(page);
 }
+EXPORT_SYMBOL(remove_from_page_cache);
 
 static int sync_page(void *word)
 {
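The filemap.c change is purely an export: remove_from_page_cache() becomes visible to modules. As a rough, hypothetical sketch of a module-side caller (the helper name is made up; the function expects the page locked, and dropping the reference the page cache held is the caller's job):

	#include <linux/pagemap.h>

	/* Hypothetical module helper using the newly exported symbol.
	 * remove_from_page_cache() takes mapping->tree_lock itself, as
	 * the context lines above show; the caller supplies the page
	 * lock and afterwards releases the page cache's reference. */
	static void my_drop_from_cache(struct page *page)
	{
		lock_page(page);
		if (page->mapping)
			remove_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}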
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1734,8 +1734,10 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
 		error = acct_stack_growth(vma, size, grow);
-		if (!error)
+		if (!error) {
 			vma->vm_end = address;
+			perf_event_mmap(vma);
+		}
 	}
 	anon_vma_unlock(vma);
 	return error;
@@ -1781,6 +1783,7 @@ static int expand_downwards(struct vm_area_struct *vma,
 		if (!error) {
 			vma->vm_start = address;
 			vma->vm_pgoff -= grow;
+			perf_event_mmap(vma);
 		}
 	}
 	anon_vma_unlock(vma);
@@ -2208,6 +2211,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	vma->vm_page_prot = vm_get_page_prot(flags);
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
+	perf_event_mmap(vma);
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (flags & VM_LOCKED) {
 		if (!mlock_vma_pages_range(vma, addr, addr + len))
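With these three hunks, perf_event_mmap() fires not just for mmap() proper but also when a stack VMA is grown and when brk() extends the heap, so profilers see those address-range changes as well. A trivial userspace exerciser (plain libc, nothing kernel-specific; run it under a profiler to observe the extra mmap events):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		void *before = sbrk(0);

		/* brk()-based heap growth now reaches do_brk() and thus
		 * the new perf_event_mmap() call */
		if (sbrk(1 << 20) == (void *)-1) {
			perror("sbrk");
			return 1;
		}
		printf("heap: %p -> %p\n", before, sbrk(0));
		return 0;
	}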
diff --git a/mm/shmem.c b/mm/shmem.c
index 855eaf5b8d5b..7e5030ae18ff 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -727,10 +727,11 @@ done2:
 	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
 		/*
 		 * Call truncate_inode_pages again: racing shmem_unuse_inode
-		 * may have swizzled a page in from swap since vmtruncate or
-		 * generic_delete_inode did it, before we lowered next_index.
-		 * Also, though shmem_getpage checks i_size before adding to
-		 * cache, no recheck after: so fix the narrow window there too.
+		 * may have swizzled a page in from swap since
+		 * truncate_pagecache or generic_delete_inode did it, before we
+		 * lowered next_index. Also, though shmem_getpage checks
+		 * i_size before adding to cache, no recheck after: so fix the
+		 * narrow window there too.
 		 *
 		 * Recalling truncate_inode_pages_range and unmap_mapping_range
 		 * every time for punch_hole (which never got a chance to clear
@@ -760,19 +761,16 @@ done2:
 		}
 	}
 
-static void shmem_truncate(struct inode *inode)
-{
-	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
-}
-
 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
-	struct page *page = NULL;
 	int error;
 
 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
-		if (attr->ia_size < inode->i_size) {
+		loff_t newsize = attr->ia_size;
+		struct page *page = NULL;
+
+		if (newsize < inode->i_size) {
 			/*
 			 * If truncating down to a partial page, then
 			 * if that page is already allocated, hold it
@@ -780,9 +778,9 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 			 * truncate_partial_page cannnot miss it were
 			 * it assigned to swap.
 			 */
-			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
+			if (newsize & (PAGE_CACHE_SIZE-1)) {
 				(void) shmem_getpage(inode,
-					attr->ia_size>>PAGE_CACHE_SHIFT,
+						newsize >> PAGE_CACHE_SHIFT,
 					&page, SGP_READ, NULL);
 				if (page)
 					unlock_page(page);
@@ -794,24 +792,29 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 			 * if it's being fully truncated to zero-length: the
 			 * nrpages check is efficient enough in that case.
 			 */
-			if (attr->ia_size) {
+			if (newsize) {
 				struct shmem_inode_info *info = SHMEM_I(inode);
 				spin_lock(&info->lock);
 				info->flags &= ~SHMEM_PAGEIN;
 				spin_unlock(&info->lock);
 			}
 		}
+
+		error = simple_setsize(inode, newsize);
+		if (page)
+			page_cache_release(page);
+		if (error)
+			return error;
+		shmem_truncate_range(inode, newsize, (loff_t)-1);
 	}
 
 	error = inode_change_ok(inode, attr);
 	if (!error)
-		error = inode_setattr(inode, attr);
+		generic_setattr(inode, attr);
 #ifdef CONFIG_TMPFS_POSIX_ACL
 	if (!error && (attr->ia_valid & ATTR_MODE))
 		error = generic_acl_chmod(inode);
 #endif
-	if (page)
-		page_cache_release(page);
 	return error;
 }
 
@@ -819,11 +822,11 @@ static void shmem_delete_inode(struct inode *inode)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 
-	if (inode->i_op->truncate == shmem_truncate) {
+	if (inode->i_mapping->a_ops == &shmem_aops) {
 		truncate_inode_pages(inode->i_mapping, 0);
 		shmem_unacct_size(info->flags, inode->i_size);
 		inode->i_size = 0;
-		shmem_truncate(inode);
+		shmem_truncate_range(inode, 0, (loff_t)-1);
 		if (!list_empty(&info->swaplist)) {
 			mutex_lock(&shmem_swaplist_mutex);
 			list_del_init(&info->swaplist);
@@ -2022,7 +2025,6 @@ static const struct inode_operations shmem_symlink_inline_operations = {
 };
 
 static const struct inode_operations shmem_symlink_inode_operations = {
-	.truncate	= shmem_truncate,
 	.readlink	= generic_readlink,
 	.follow_link	= shmem_follow_link,
 	.put_link	= shmem_put_link,
@@ -2433,14 +2435,13 @@ static const struct file_operations shmem_file_operations = {
 	.write		= do_sync_write,
 	.aio_read	= shmem_file_aio_read,
 	.aio_write	= generic_file_aio_write,
-	.fsync		= simple_sync_file,
+	.fsync		= noop_fsync,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
 #endif
 };
 
 static const struct inode_operations shmem_inode_operations = {
-	.truncate	= shmem_truncate,
 	.setattr	= shmem_notify_change,
 	.truncate_range	= shmem_truncate_range,
 #ifdef CONFIG_TMPFS_POSIX_ACL
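Taken together, the shmem hunks move tmpfs off the legacy ->truncate path: shmem_notify_change() now changes i_size itself via simple_setsize() and then calls shmem_truncate_range() directly, which is what lets the .truncate entries be dropped from both inode_operations tables. Reduced to its skeleton, the sequence a converted filesystem follows looks like the sketch below (the myfs_* names are hypothetical and the error handling is condensed):

	static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
	{
		struct inode *inode = dentry->d_inode;
		int error;

		if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
			/* checks limits, updates i_size, trims page cache */
			error = simple_setsize(inode, attr->ia_size);
			if (error)
				return error;
			/* free fs-private state explicitly instead of
			 * relying on the VFS invoking ->truncate */
			myfs_truncate_blocks(inode, attr->ia_size);
		}

		error = inode_change_ok(inode, attr);
		if (!error)
			generic_setattr(inode, attr);
		return error;
	}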
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2136,7 +2136,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = s->node[node];
-		if (n && n != &s->local_node)
+		if (n)
 			kmem_cache_free(kmalloc_caches, n);
 		s->node[node] = NULL;
 	}
@@ -2145,33 +2145,22 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
 	int node;
-	int local_node;
-
-	if (slab_state >= UP && (s < kmalloc_caches ||
-			s >= kmalloc_caches + KMALLOC_CACHES))
-		local_node = page_to_nid(virt_to_page(s));
-	else
-		local_node = 0;
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n;
 
-		if (local_node == node)
-			n = &s->local_node;
-		else {
-			if (slab_state == DOWN) {
-				early_kmem_cache_node_alloc(gfpflags, node);
-				continue;
-			}
-			n = kmem_cache_alloc_node(kmalloc_caches,
-					gfpflags, node);
-
-			if (!n) {
-				free_kmem_cache_nodes(s);
-				return 0;
-			}
+		if (slab_state == DOWN) {
+			early_kmem_cache_node_alloc(gfpflags, node);
+			continue;
+		}
+		n = kmem_cache_alloc_node(kmalloc_caches,
+					gfpflags, node);
 
+		if (!n) {
+			free_kmem_cache_nodes(s);
+			return 0;
 		}
+
 		s->node[node] = n;
 		init_kmem_cache_node(n, s);
 	}
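With the embedded local_node special case gone, every kmem_cache_node is allocated the same way, which is what lets free_kmem_cache_nodes() above free unconditionally. For readability, here is the post-patch init_kmem_cache_nodes() reassembled from the new side of the hunk (only the trailing success return is supplied from surrounding context, where this function reports success as 1):

	static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
	{
		int node;

		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n;

			if (slab_state == DOWN) {
				/* too early to allocate from kmalloc_caches:
				 * bootstrap the node structure directly */
				early_kmem_cache_node_alloc(gfpflags, node);
				continue;
			}
			n = kmem_cache_alloc_node(kmalloc_caches,
						gfpflags, node);

			if (!n) {
				free_kmem_cache_nodes(s);
				return 0;
			}

			s->node[node] = n;
			init_kmem_cache_node(n, s);
		}
		return 1;
	}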
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -224,6 +224,7 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
 		____pagevec_lru_add(pvec, lru);
 	put_cpu_var(lru_add_pvecs);
 }
+EXPORT_SYMBOL(__lru_cache_add);
 
 /**
  * lru_cache_add_lru - add a page to a page list
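As with remove_from_page_cache() above, this hunk only exports a symbol. A hypothetical module that allocates its own pages could now place them on a chosen LRU list; __lru_cache_add() takes its own reference to the page, so the caller's reference survives the call:

	#include <linux/gfp.h>
	#include <linux/swap.h>

	/* Hypothetical helper: allocate a page and queue it for the
	 * inactive file LRU via the per-CPU pagevec. */
	static struct page *my_alloc_on_lru(void)
	{
		struct page *page = alloc_page(GFP_KERNEL);

		if (page)
			__lru_cache_add(page, LRU_INACTIVE_FILE);
		return page;
	}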
diff --git a/mm/truncate.c b/mm/truncate.c
index f42675a3615d..937571b8b233 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -548,18 +548,18 @@ EXPORT_SYMBOL(truncate_pagecache);
  * NOTE! We have to be ready to update the memory sharing
  * between the file and the memory map for a potential last
  * incomplete page. Ugly, but necessary.
+ *
+ * This function is deprecated and simple_setsize or truncate_pagecache
+ * should be used instead.
  */
 int vmtruncate(struct inode *inode, loff_t offset)
 {
-	loff_t oldsize;
 	int error;
 
-	error = inode_newsize_ok(inode, offset);
+	error = simple_setsize(inode, offset);
 	if (error)
 		return error;
-	oldsize = inode->i_size;
-	i_size_write(inode, offset);
-	truncate_pagecache(inode, oldsize, offset);
+
 	if (inode->i_op->truncate)
 		inode->i_op->truncate(inode);
 
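The three deleted statements are exactly the work that simple_setsize() now performs on vmtruncate()'s behalf. Reconstructed from the removed lines, the factored-out helper amounts to the sketch below (the in-tree definition, in fs/libfs.c in this series, may differ in detail); a filesystem with no ->truncate method gets identical behaviour from calling simple_setsize() directly, which is why the new comment calls vmtruncate() deprecated:

	int simple_setsize(struct inode *inode, loff_t newsize)
	{
		loff_t oldsize;
		int error;

		error = inode_newsize_ok(inode, newsize);
		if (error)
			return error;

		oldsize = inode->i_size;
		i_size_write(inode, newsize);
		truncate_pagecache(inode, oldsize, newsize);
		return error;
	}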