diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-15 22:42:40 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-15 22:42:40 -0500 |
commit | 7c225c69f86c934e3be9be63ecde754e286838d7 (patch) | |
tree | ff2df419b0c4886b37407235f7d21215e4cf45e4 /mm/shmem.c | |
parent | 6363b3f3ac5be096d08c8c504128befa0c033529 (diff) | |
parent | 1b7176aea0a924ac59c6a283129d3e8eb00aa915 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
- a few misc bits
- ocfs2 updates
- almost all of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (131 commits)
memory hotplug: fix comments when adding section
mm: make alloc_node_mem_map a void call if we don't have CONFIG_FLAT_NODE_MEM_MAP
mm: simplify nodemask printing
mm,oom_reaper: remove pointless kthread_run() error check
mm/page_ext.c: check if page_ext is not prepared
writeback: remove unused function parameter
mm: do not rely on preempt_count in print_vma_addr
mm, sparse: do not swamp log with huge vmemmap allocation failures
mm/hmm: remove redundant variable align_end
mm/list_lru.c: mark expected switch fall-through
mm/shmem.c: mark expected switch fall-through
mm/page_alloc.c: broken deferred calculation
mm: don't warn about allocations which stall for too long
fs: fuse: account fuse_inode slab memory as reclaimable
mm, page_alloc: fix potential false positive in __zone_watermark_ok
mm: mlock: remove lru_add_drain_all()
mm, sysctl: make NUMA stats configurable
shmem: convert shmem_init_inodecache() to void
Unify migrate_pages and move_pages access checks
mm, pagevec: rename pagevec drained field
...
Diffstat (limited to 'mm/shmem.c')
-rw-r--r-- | mm/shmem.c | 17 |
1 file changed, 7 insertions, 10 deletions
diff --git a/mm/shmem.c b/mm/shmem.c index 07a1d22807be..ab22eaa2412e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -338,7 +338,7 @@ static int shmem_radix_tree_replace(struct address_space *mapping, | |||
338 | if (item != expected) | 338 | if (item != expected) |
339 | return -ENOENT; | 339 | return -ENOENT; |
340 | __radix_tree_replace(&mapping->page_tree, node, pslot, | 340 | __radix_tree_replace(&mapping->page_tree, node, pslot, |
341 | replacement, NULL, NULL); | 341 | replacement, NULL); |
342 | return 0; | 342 | return 0; |
343 | } | 343 | } |
344 | 344 | ||
@@ -747,7 +747,7 @@ void shmem_unlock_mapping(struct address_space *mapping) | |||
747 | pgoff_t indices[PAGEVEC_SIZE]; | 747 | pgoff_t indices[PAGEVEC_SIZE]; |
748 | pgoff_t index = 0; | 748 | pgoff_t index = 0; |
749 | 749 | ||
750 | pagevec_init(&pvec, 0); | 750 | pagevec_init(&pvec); |
751 | /* | 751 | /* |
752 | * Minor point, but we might as well stop if someone else SHM_LOCKs it. | 752 | * Minor point, but we might as well stop if someone else SHM_LOCKs it. |
753 | */ | 753 | */ |
@@ -790,7 +790,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, | |||
790 | if (lend == -1) | 790 | if (lend == -1) |
791 | end = -1; /* unsigned, so actually very big */ | 791 | end = -1; /* unsigned, so actually very big */ |
792 | 792 | ||
793 | pagevec_init(&pvec, 0); | 793 | pagevec_init(&pvec); |
794 | index = start; | 794 | index = start; |
795 | while (index < end) { | 795 | while (index < end) { |
796 | pvec.nr = find_get_entries(mapping, index, | 796 | pvec.nr = find_get_entries(mapping, index, |
@@ -2528,7 +2528,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping, | |||
2528 | bool done = false; | 2528 | bool done = false; |
2529 | int i; | 2529 | int i; |
2530 | 2530 | ||
2531 | pagevec_init(&pvec, 0); | 2531 | pagevec_init(&pvec); |
2532 | pvec.nr = 1; /* start small: we may be there already */ | 2532 | pvec.nr = 1; /* start small: we may be there already */ |
2533 | while (!done) { | 2533 | while (!done) { |
2534 | pvec.nr = find_get_entries(mapping, index, | 2534 | pvec.nr = find_get_entries(mapping, index, |
@@ -3862,12 +3862,11 @@ static void shmem_init_inode(void *foo) | |||
3862 | inode_init_once(&info->vfs_inode); | 3862 | inode_init_once(&info->vfs_inode); |
3863 | } | 3863 | } |
3864 | 3864 | ||
3865 | static int shmem_init_inodecache(void) | 3865 | static void shmem_init_inodecache(void) |
3866 | { | 3866 | { |
3867 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", | 3867 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", |
3868 | sizeof(struct shmem_inode_info), | 3868 | sizeof(struct shmem_inode_info), |
3869 | 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); | 3869 | 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); |
3870 | return 0; | ||
3871 | } | 3870 | } |
3872 | 3871 | ||
3873 | static void shmem_destroy_inodecache(void) | 3872 | static void shmem_destroy_inodecache(void) |
@@ -3991,9 +3990,7 @@ int __init shmem_init(void) | |||
3991 | if (shmem_inode_cachep) | 3990 | if (shmem_inode_cachep) |
3992 | return 0; | 3991 | return 0; |
3993 | 3992 | ||
3994 | error = shmem_init_inodecache(); | 3993 | shmem_init_inodecache(); |
3995 | if (error) | ||
3996 | goto out3; | ||
3997 | 3994 | ||
3998 | error = register_filesystem(&shmem_fs_type); | 3995 | error = register_filesystem(&shmem_fs_type); |
3999 | if (error) { | 3996 | if (error) { |
@@ -4020,7 +4017,6 @@ out1: | |||
4020 | unregister_filesystem(&shmem_fs_type); | 4017 | unregister_filesystem(&shmem_fs_type); |
4021 | out2: | 4018 | out2: |
4022 | shmem_destroy_inodecache(); | 4019 | shmem_destroy_inodecache(); |
4023 | out3: | ||
4024 | shm_mnt = ERR_PTR(error); | 4020 | shm_mnt = ERR_PTR(error); |
4025 | return error; | 4021 | return error; |
4026 | } | 4022 | } |
@@ -4102,6 +4098,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma) | |||
4102 | if (i_size >= HPAGE_PMD_SIZE && | 4098 | if (i_size >= HPAGE_PMD_SIZE && |
4103 | i_size >> PAGE_SHIFT >= off) | 4099 | i_size >> PAGE_SHIFT >= off) |
4104 | return true; | 4100 | return true; |
4101 | /* fall through */ | ||
4105 | case SHMEM_HUGE_ADVISE: | 4102 | case SHMEM_HUGE_ADVISE: |
4106 | /* TODO: implement fadvise() hints */ | 4103 | /* TODO: implement fadvise() hints */ |
4107 | return (vma->vm_flags & VM_HUGEPAGE); | 4104 | return (vma->vm_flags & VM_HUGEPAGE); |