author    Linus Torvalds <torvalds@linux-foundation.org>  2015-12-12 13:44:49 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-12-12 13:44:49 -0500
commit    800f1ac4791fbb515009844d579d4c7bf4b762f6 (patch)
tree      d1377dfa6d140b6c14b55cce345246d8faeb16b7
parent    a971526e4db7992362e938e53e2deb57e847ecc9 (diff)
parent    9530d0fe129c0197d5df13319ccefd08a827383b (diff)

Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "17 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  MIPS: fix DMA contiguous allocation
  sh64: fix __NR_fgetxattr
  ocfs2: fix SGID not inherited issue
  mm/oom_kill.c: avoid attempting to kill init sharing same memory
  drivers/base/memory.c: prohibit offlining of memory blocks with missing sections
  tmpfs: fix shmem_evict_inode() warnings on i_blocks
  mm/hugetlb.c: fix resv map memory leak for placeholder entries
  mm: hugetlb: call huge_pte_alloc() only if ptep is null
  kernel: remove stop_machine() Kconfig dependency
  mm: kmemleak: mark kmemleak_init prototype as __init
  mm: fix kerneldoc on mem_cgroup_replace_page
  osd fs: __r4w_get_page rely on PageUptodate for uptodate
  MAINTAINERS: make Vladimir co-maintainer of the memory controller
  mm, vmstat: allow WQ concurrency to discover memory reclaim doesn't make any progress
  mm: fix swapped Movable and Reclaimable in /proc/pagetypeinfo
  memcg: fix memory.high target
  mm: hugetlb: fix hugepage memory leak caused by wrong reserve count
-rw-r--r--  MAINTAINERS                           |  1
-rw-r--r--  arch/mips/mm/dma-default.c            |  2
-rw-r--r--  arch/sh/include/uapi/asm/unistd_64.h  |  2
-rw-r--r--  drivers/base/memory.c                 |  4
-rw-r--r--  fs/exofs/inode.c                      |  5
-rw-r--r--  fs/nfs/objlayout/objio_osd.c          |  5
-rw-r--r--  fs/ocfs2/namei.c                      |  4
-rw-r--r--  include/linux/kmemleak.h              |  2
-rw-r--r--  include/linux/stop_machine.h          |  6
-rw-r--r--  init/Kconfig                          |  7
-rw-r--r--  kernel/stop_machine.c                 |  4
-rw-r--r--  mm/backing-dev.c                      | 19
-rw-r--r--  mm/hugetlb.c                          | 27
-rw-r--r--  mm/memcontrol.c                       |  4
-rw-r--r--  mm/oom_kill.c                         |  2
-rw-r--r--  mm/page_alloc.c                       |  3
-rw-r--r--  mm/shmem.c                            | 34
-rw-r--r--  mm/vmstat.c                           |  8
18 files changed, 77 insertions(+), 62 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 38df53f828e1..9bff63cf326e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2975,6 +2975,7 @@ F:	kernel/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:	Johannes Weiner <hannes@cmpxchg.org>
 M:	Michal Hocko <mhocko@kernel.org>
+M:	Vladimir Davydov <vdavydov@virtuozzo.com>
 L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Maintained
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index d8117be729a2..730d394ce5f0 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 
 	gfp = massage_gfp_flags(dev, gfp);
 
-	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
+	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
 		page = dma_alloc_from_contiguous(dev,
 					count, get_order(size));
 	if (!page)
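Note: the old test broke with the 4.4 gfp-flag rework, where GFP_ATOMIC grew the
__GFP_KSWAPD_RECLAIM bit that GFP_KERNEL also carries, so !(gfp & GFP_ATOMIC)
went false even for ordinary sleeping allocations and the CMA path was never
taken. gfpflags_allow_blocking() tests the one bit that actually matters,
__GFP_DIRECT_RECLAIM. A standalone sketch of the predicate logic follows; the
bit positions are illustrative, not the kernel's real gfp.h values.

/* Sketch of the predicate change; flag values are made up for illustration. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned gfp_t;

#define __GFP_HIGH            (1u << 0)
#define __GFP_ATOMIC          (1u << 1)
#define __GFP_DIRECT_RECLAIM  (1u << 2)
#define __GFP_KSWAPD_RECLAIM  (1u << 3)
#define __GFP_RECLAIM         (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

#define GFP_ATOMIC  (__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL  (__GFP_RECLAIM)   /* __GFP_IO and __GFP_FS omitted */

static bool gfpflags_allow_blocking(gfp_t gfp)
{
        /* the question the caller actually wants answered */
        return gfp & __GFP_DIRECT_RECLAIM;
}

int main(void)
{
        /* Old test: 0 for GFP_KERNEL because it shares __GFP_KSWAPD_RECLAIM
         * with GFP_ATOMIC, so CMA was skipped even when sleeping is fine. */
        printf("old test, GFP_KERNEL: %d\n", !(GFP_KERNEL & GFP_ATOMIC));
        /* New test: 1, so the contiguous allocator is tried again. */
        printf("new test, GFP_KERNEL: %d\n", gfpflags_allow_blocking(GFP_KERNEL));
        return 0;
}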
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
index e6820c86e8c7..47ebd5b5ed55 100644
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ b/arch/sh/include/uapi/asm/unistd_64.h
@@ -278,7 +278,7 @@
 #define __NR_fsetxattr		256
 #define __NR_getxattr		257
 #define __NR_lgetxattr		258
-#define __NR_fgetxattr		269
+#define __NR_fgetxattr		259
 #define __NR_listxattr		260
 #define __NR_llistxattr		261
 #define __NR_flistxattr		262
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2804aed3f416..25425d3f2575 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev)
 	if (mem->state == MEM_OFFLINE)
 		return 0;
 
+	/* Can't offline block with non-present sections */
+	if (mem->section_count != sections_per_block)
+		return -EINVAL;
+
 	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
 }
 
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 73c64daa0f55..60f03b78914e 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -592,10 +592,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
 			}
 			unlock_page(page);
 		}
-		if (PageDirty(page) || PageWriteback(page))
-			*uptodate = true;
-		else
-			*uptodate = PageUptodate(page);
+		*uptodate = PageUptodate(page);
 		EXOFS_DBGMSG2("index=0x%lx uptodate=%d\n", index, *uptodate);
 		return page;
 	} else {
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 5c0c6b58157f..9aebffb40505 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -476,10 +476,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
 			}
 			unlock_page(page);
 		}
-		if (PageDirty(page) || PageWriteback(page))
-			*uptodate = true;
-		else
-			*uptodate = PageUptodate(page);
+		*uptodate = PageUptodate(page);
 		dprintk("%s: index=0x%lx uptodate=%d\n", __func__, index, *uptodate);
 		return page;
 	} else {
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index a03f6f433075..3123408da935 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -367,13 +367,11 @@ static int ocfs2_mknod(struct inode *dir,
 		goto leave;
 	}
 
-	status = posix_acl_create(dir, &mode, &default_acl, &acl);
+	status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
 	if (status) {
 		mlog_errno(status);
 		goto leave;
 	}
-	/* update inode->i_mode after mask with "umask". */
-	inode->i_mode = mode;
 
 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
 						S_ISDIR(mode),
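Note: the SGID bug fixed here is a write-back clobber. ocfs2 populates
inode->i_mode early, inheriting S_ISGID from a setgid parent directory, but the
old code handed posix_acl_create() a local mode variable and then copied it
over i_mode, dropping the inherited bit. Passing &inode->i_mode lets the
ACL/umask adjustment operate on the already-correct mode. A minimal userspace
sketch of the pattern (not ocfs2 code; the masking is a stand-in):

/* Sketch of the clobber: SGID inherited into i_mode, then lost. */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        mode_t dir_mode = S_IFDIR | S_ISGID | 0775;  /* setgid parent */
        mode_t mode = S_IFDIR | 0777;                /* requested by mkdir */

        /* inode setup inherits SGID from the parent, as ocfs2 does: */
        mode_t i_mode = mode | (dir_mode & S_ISGID);
        printf("before ACL masking: SGID=%d\n", !!(i_mode & S_ISGID));

        /* Old flow: the *local* mode is umask/ACL-masked, then written
         * back over i_mode, and the inherited SGID bit is gone: */
        mode &= ~(mode_t)022;   /* stand-in for posix_acl_create()'s masking */
        i_mode = mode;          /* the bug */
        printf("after write-back:   SGID=%d\n", !!(i_mode & S_ISGID));
        return 0;
}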
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index d0a1f99e24e3..4894c6888bc6 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -25,7 +25,7 @@
 
 #ifdef CONFIG_DEBUG_KMEMLEAK
 
-extern void kmemleak_init(void) __ref;
+extern void kmemleak_init(void) __init;
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
 			   gfp_t gfp) __ref;
 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 0adedca24c5b..0e1b1540597a 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -99,7 +99,7 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
  * grabbing every spinlock (and more). So the "read" side to such a
  * lock is anything which disables preemption.
  */
-#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
 
 /**
  * stop_machine: freeze the machine on all CPUs and run this function
@@ -118,7 +118,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
 
 int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 				   const struct cpumask *cpus);
-#else	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#else	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
 
 static inline int stop_machine(cpu_stop_fn_t fn, void *data,
 			       const struct cpumask *cpus)
@@ -137,5 +137,5 @@ static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 	return stop_machine(fn, data, cpus);
 }
 
-#endif	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
 #endif	/* _LINUX_STOP_MACHINE */
diff --git a/init/Kconfig b/init/Kconfig
index c24b6f767bf0..235c7a2c0d20 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -2030,13 +2030,6 @@ config INIT_ALL_POSSIBLE
 	  it was better to provide this option than to break all the archs
 	  and have several arch maintainers pursuing me down dark alleys.
 
-config STOP_MACHINE
-	bool
-	default y
-	depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
-	help
-	  Need stop_machine() primitive.
-
 source "block/Kconfig"
 
 config PREEMPT_NOTIFIERS
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 867bc20e1ef1..a3bbaee77c58 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -531,7 +531,7 @@ static int __init cpu_stop_init(void)
 }
 early_initcall(cpu_stop_init);
 
-#ifdef CONFIG_STOP_MACHINE
+#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
 
 static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
 {
@@ -631,4 +631,4 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 	return ret ?: done.ret;
 }
 
-#endif	/* CONFIG_STOP_MACHINE */
+#endif	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8ed2ffd963c5..7340353f8aea 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -957,8 +957,9 @@ EXPORT_SYMBOL(congestion_wait);
  * jiffies for either a BDI to exit congestion of the given @sync queue
  * or a write to complete.
  *
- * In the absence of zone congestion, cond_resched() is called to yield
- * the processor if necessary but otherwise does not sleep.
+ * In the absence of zone congestion, a short sleep or a cond_resched is
+ * performed to yield the processor and to allow other subsystems to make
+ * a forward progress.
  *
  * The return value is 0 if the sleep is for the full timeout. Otherwise,
  * it is the number of jiffies that were still remaining when the function
@@ -978,7 +979,19 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
 	 */
 	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
 	    !test_bit(ZONE_CONGESTED, &zone->flags)) {
-		cond_resched();
+
+		/*
+		 * Memory allocation/reclaim might be called from a WQ
+		 * context and the current implementation of the WQ
+		 * concurrency control doesn't recognize that a particular
+		 * WQ is congested if the worker thread is looping without
+		 * ever sleeping. Therefore we have to do a short sleep
+		 * here rather than calling cond_resched().
+		 */
+		if (current->flags & PF_WQ_WORKER)
+			schedule_timeout(1);
+		else
+			cond_resched();
 
 		/* In case we scheduled, work out time remaining */
 		ret = timeout - (jiffies - start);
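Note: the comment added above captures the subtlety. Workqueue concurrency
management only considers starting another worker when the current one actually
goes to sleep, and cond_resched() never takes the task off the runqueue, so a
reclaiming worker could loop forever without letting other per-cpu work run. A
rough userspace analogy of the yield-versus-sleep distinction (an illustration,
not kernel behavior):

/* Analogy only: yielding keeps a busy task runnable; a tiny sleep blocks. */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec tick = { .tv_sec = 0, .tv_nsec = 1000000 }; /* ~1ms */

        sched_yield();           /* like cond_resched(): still on the runqueue */
        nanosleep(&tick, NULL);  /* like schedule_timeout(1): really blocks */

        puts("yield keeps us runnable; sleep releases the CPU to other work");
        return 0;
}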
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 827bb02a43a4..ef6963b577fd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -372,8 +372,10 @@ retry_locked:
 	spin_unlock(&resv->lock);
 
 	trg = kmalloc(sizeof(*trg), GFP_KERNEL);
-	if (!trg)
+	if (!trg) {
+		kfree(nrg);
 		return -ENOMEM;
+	}
 
 	spin_lock(&resv->lock);
 	list_add(&trg->link, &resv->region_cache);
@@ -483,8 +485,16 @@ static long region_del(struct resv_map *resv, long f, long t)
 retry:
 	spin_lock(&resv->lock);
 	list_for_each_entry_safe(rg, trg, head, link) {
-		if (rg->to <= f)
+		/*
+		 * Skip regions before the range to be deleted. file_region
+		 * ranges are normally of the form [from, to). However, there
+		 * may be a "placeholder" entry in the map which is of the form
+		 * (from, to) with from == to. Check for placeholder entries
+		 * at the beginning of the range to be deleted.
+		 */
+		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
 			continue;
+
 		if (rg->from >= t)
 			break;
 
@@ -1886,7 +1896,10 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
 		if (!page)
 			goto out_uncharge_cgroup;
-
+		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
+			SetPagePrivate(page);
+			h->resv_huge_pages--;
+		}
 		spin_lock(&hugetlb_lock);
 		list_move(&page->lru, &h->hugepage_activelist);
 		/* Fall through */
@@ -3693,12 +3706,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
 			return VM_FAULT_HWPOISON_LARGE |
 				VM_FAULT_SET_HINDEX(hstate_index(h));
+	} else {
+		ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+		if (!ptep)
+			return VM_FAULT_OOM;
 	}
 
-	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
-	if (!ptep)
-		return VM_FAULT_OOM;
-
 	mapping = vma->vm_file->f_mapping;
 	idx = vma_hugecache_offset(h, vma, address);
 
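Note: the region_del() predicate change is easy to check in isolation. A
placeholder entry has from == to, and the old rg->to <= f test skipped it
whenever it sat exactly at the start of the deleted range, leaking the entry.
A standalone sketch of both predicates, with file_region reduced to a plain
struct:

/* Sketch of the old vs. new skip test from region_del(). */
#include <stdbool.h>
#include <stdio.h>

struct file_region { long from, to; };

static bool old_skip(const struct file_region *rg, long f)
{
        return rg->to <= f;     /* also skips a placeholder at f: leak */
}

static bool new_skip(const struct file_region *rg, long f)
{
        /* keep zero-length placeholders that sit exactly at f */
        return rg->to <= f && (rg->to != rg->from || rg->to != f);
}

int main(void)
{
        struct file_region placeholder = { .from = 5, .to = 5 };
        long f = 5;     /* deleting the range starting at 5 */

        printf("old: skipped=%d (entry leaked)\n", old_skip(&placeholder, f));
        printf("new: skipped=%d (entry freed)\n",  new_skip(&placeholder, f));
        return 0;
}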
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c92a65b2b4ab..e234c21a5e6c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2128,7 +2128,7 @@ done_restock:
 	 */
 	do {
 		if (page_counter_read(&memcg->memory) > memcg->high) {
-			current->memcg_nr_pages_over_high += nr_pages;
+			current->memcg_nr_pages_over_high += batch;
 			set_notify_resume(current);
 			break;
 		}
@@ -5512,11 +5512,11 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
 * mem_cgroup_replace_page - migrate a charge to another page
 * @oldpage: currently charged page
 * @newpage: page to transfer the charge to
- * @lrucare: either or both pages might be on the LRU already
 *
 * Migrate the charge from @oldpage to @newpage.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
+ * Either or both pages might be on the LRU already.
 */
 void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
 {
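Note on the memcg_nr_pages_over_high change: the charge path may charge a whole
batch (the per-cpu stock refill) to the page counter even though the caller
asked for only nr_pages, so the overage that return-to-userspace reclaim must
undo is bounded by batch, not nr_pages. A toy calculation with assumed values:

/* Arithmetic sketch; the numbers are invented for illustration. */
#include <stdio.h>

int main(void)
{
        long high = 1000, usage = 990;
        long nr_pages = 1, batch = 32;  /* CHARGE_BATCH-style refill */

        usage += batch;                 /* what the counter actually grew by */
        long over = usage - high;       /* 22 pages over the high limit */

        printf("over high: %ld pages; crediting only nr_pages (%ld) "
               "would under-reclaim\n", over, nr_pages);
        return 0;
}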
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d13a33918fa2..c12680993ff3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -608,6 +608,8 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
 			continue;
 		if (unlikely(p->flags & PF_KTHREAD))
 			continue;
+		if (is_global_init(p))
+			continue;
 		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
 			continue;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 17a3c66639a9..9d666df5ef95 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3647,8 +3647,9 @@ static void show_migration_types(unsigned char type)
 {
 	static const char types[MIGRATE_TYPES] = {
 		[MIGRATE_UNMOVABLE]	= 'U',
-		[MIGRATE_RECLAIMABLE]	= 'E',
 		[MIGRATE_MOVABLE]	= 'M',
+		[MIGRATE_RECLAIMABLE]	= 'E',
+		[MIGRATE_HIGHATOMIC]	= 'H',
 #ifdef CONFIG_CMA
 		[MIGRATE_CMA]		= 'C',
 #endif
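Note: this hunk and the mm/vmstat.c one below restore both tables to enum
migratetype order. The vmstat table is positional, so a reordered enum silently
mislabels /proc/pagetypeinfo rows, which is the reported bug; the page_alloc
table was also missing an entry for MIGRATE_HIGHATOMIC. A standalone sketch of
why the pairing drifts (the enum layout here mimics 4.4 and is an assumption):

/* Designated vs. positional initializers against an enum that moved. */
#include <stdio.h>

enum migratetype {
        MIGRATE_UNMOVABLE,
        MIGRATE_MOVABLE,        /* moved ahead of RECLAIMABLE in 4.4 */
        MIGRATE_RECLAIMABLE,
        MIGRATE_HIGHATOMIC,
        MIGRATE_TYPES
};

/* Designated initializers cannot get the pairing wrong: */
static const char types[MIGRATE_TYPES] = {
        [MIGRATE_UNMOVABLE]   = 'U',
        [MIGRATE_MOVABLE]     = 'M',
        [MIGRATE_RECLAIMABLE] = 'E',
        [MIGRATE_HIGHATOMIC]  = 'H',
};

/* Positional tables (as in vmstat.c) mislabel if the enum is reordered: */
static const char *const names[MIGRATE_TYPES] = {
        "Unmovable", "Movable", "Reclaimable", "HighAtomic",
};

int main(void)
{
        for (int i = 0; i < MIGRATE_TYPES; i++)
                printf("%c = %s\n", types[i], names[i]);
        return 0;
}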
diff --git a/mm/shmem.c b/mm/shmem.c
index 9187eee4128b..2afcdbbdb685 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -843,14 +843,14 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 		list_add_tail(&info->swaplist, &shmem_swaplist);
 
 	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
-		swap_shmem_alloc(swap);
-		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
-
 		spin_lock(&info->lock);
-		info->swapped++;
 		shmem_recalc_inode(inode);
+		info->swapped++;
 		spin_unlock(&info->lock);
 
+		swap_shmem_alloc(swap);
+		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
+
 		mutex_unlock(&shmem_swaplist_mutex);
 		BUG_ON(page_mapped(page));
 		swap_writepage(page, wbc);
@@ -1078,7 +1078,7 @@ repeat:
 	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
 	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
 		error = -EINVAL;
-		goto failed;
+		goto unlock;
 	}
 
 	if (page && sgp == SGP_WRITE)
@@ -1246,11 +1246,15 @@ clear:
 	/* Perhaps the file has been truncated since we checked */
 	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
 	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+		if (alloced) {
+			ClearPageDirty(page);
+			delete_from_page_cache(page);
+			spin_lock(&info->lock);
+			shmem_recalc_inode(inode);
+			spin_unlock(&info->lock);
+		}
 		error = -EINVAL;
-		if (alloced)
-			goto trunc;
-		else
-			goto failed;
+		goto unlock;
 	}
 	*pagep = page;
 	return 0;
@@ -1258,23 +1262,13 @@ clear:
 	/*
 	 * Error recovery.
 	 */
-trunc:
-	info = SHMEM_I(inode);
-	ClearPageDirty(page);
-	delete_from_page_cache(page);
-	spin_lock(&info->lock);
-	info->alloced--;
-	inode->i_blocks -= BLOCKS_PER_PAGE;
-	spin_unlock(&info->lock);
 decused:
-	sbinfo = SHMEM_SB(inode->i_sb);
 	if (sbinfo->max_blocks)
 		percpu_counter_add(&sbinfo->used_blocks, -1);
 unacct:
 	shmem_unacct_blocks(info->flags, 1);
 failed:
-	if (swap.val && error != -EINVAL &&
-	    !shmem_confirm_swap(mapping, index, swap))
+	if (swap.val && !shmem_confirm_swap(mapping, index, swap))
 		error = -EEXIST;
 unlock:
 	if (page) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 879a2be23325..0d5712b0206c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -921,8 +921,8 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 #ifdef CONFIG_PROC_FS
 static char * const migratetype_names[MIGRATE_TYPES] = {
 	"Unmovable",
-	"Reclaimable",
 	"Movable",
+	"Reclaimable",
 	"HighAtomic",
 #ifdef CONFIG_CMA
 	"CMA",
@@ -1379,6 +1379,7 @@ static const struct file_operations proc_vmstat_file_operations = {
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SMP
+static struct workqueue_struct *vmstat_wq;
 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
 int sysctl_stat_interval __read_mostly = HZ;
 static cpumask_var_t cpu_stat_off;
@@ -1391,7 +1392,7 @@ static void vmstat_update(struct work_struct *w)
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
 		 */
-		schedule_delayed_work_on(smp_processor_id(),
+		queue_delayed_work_on(smp_processor_id(), vmstat_wq,
 			this_cpu_ptr(&vmstat_work),
 			round_jiffies_relative(sysctl_stat_interval));
 	} else {
@@ -1460,7 +1461,7 @@ static void vmstat_shepherd(struct work_struct *w)
 		if (need_update(cpu) &&
 			cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
 
-			schedule_delayed_work_on(cpu,
+			queue_delayed_work_on(cpu, vmstat_wq,
 				&per_cpu(vmstat_work, cpu), 0);
 
 	put_online_cpus();
@@ -1549,6 +1550,7 @@ static int __init setup_vmstat(void)
 
 	start_shepherd_timer();
 	cpu_notifier_register_done();
+	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
 #endif
 #ifdef CONFIG_PROC_FS
 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
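Note: the vmstat_wq change follows a standard pattern. Work that must make
progress during reclaim cannot share system_wq with workers that may themselves
be stuck in reclaim, so it gets a dedicated queue whose WQ_MEM_RECLAIM flag
guarantees a pre-allocated rescuer thread. A module-style sketch of the same
pattern (the "stats" queue and callback are hypothetical; assumes a kernel
build environment):

/* Sketch: reclaim-critical deferred work on its own rescuer-backed queue. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *stats_wq;
static struct delayed_work stats_work;

static void stats_fn(struct work_struct *w)
{
        /* refresh counters here, then re-arm on the dedicated queue,
         * never on the shared system_wq */
        queue_delayed_work(stats_wq, &stats_work,
                           round_jiffies_relative(HZ));
}

static int __init stats_init(void)
{
        /* WQ_MEM_RECLAIM keeps a rescuer thread around, so this work can
         * still run when the system is too starved to fork new workers. */
        stats_wq = alloc_workqueue("stats", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
        if (!stats_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&stats_work, stats_fn);
        queue_delayed_work(stats_wq, &stats_work, 0);
        return 0;
}

static void __exit stats_exit(void)
{
        cancel_delayed_work_sync(&stats_work);
        destroy_workqueue(stats_wq);
}

module_init(stats_init);
module_exit(stats_exit);
MODULE_LICENSE("GPL");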