author    Linus Torvalds <torvalds@linux-foundation.org>  2016-06-10 11:00:47 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-06-10 11:00:47 -0400
commit    9557c3cfdaa792c7db04b86f47b5dd1e6dc5ec4f (patch)
tree      7e3ec6a4ae1417c417156978f3bb792842ef04f0
parent    147d9e7bcad3b8d5465f6eea6292731e7f35dee8 (diff)
parent    18aba41cbfbcd138e9f6d8d446427d8b7691c194 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "7 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/fadvise.c: do not discard partial pages with POSIX_FADV_DONTNEED
  mm: introduce dedicated WQ_MEM_RECLAIM workqueue to do lru_add_drain_all
  kernel/relay.c: fix potential memory leak
  mm: thp: broken page count after commit aa88b68c3b1d
  revert "mm: memcontrol: fix possible css ref leak on oom"
  kasan: change memory hot-add error messages to info messages
  mm/hugetlb: fix huge page reserve accounting for private mappings
-rw-r--r--  kernel/relay.c   |  1
-rw-r--r--  mm/fadvise.c     | 11
-rw-r--r--  mm/hugetlb.c     | 42
-rw-r--r--  mm/kasan/kasan.c |  4
-rw-r--r--  mm/memcontrol.c  |  2
-rw-r--r--  mm/swap.c        | 20
-rw-r--r--  mm/swap_state.c  |  5
7 files changed, 78 insertions(+), 7 deletions(-)
diff --git a/kernel/relay.c b/kernel/relay.c
index 074994bcfa9b..04d7cf3ef8cf 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -614,6 +614,7 @@ free_bufs:
 
         kref_put(&chan->kref, relay_destroy_channel);
         mutex_unlock(&relay_channels_mutex);
+        kfree(chan);
         return NULL;
 }
 EXPORT_SYMBOL_GPL(relay_open);
diff --git a/mm/fadvise.c b/mm/fadvise.c
index b8024fa7101d..6c707bfe02fd 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -126,6 +126,17 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
                  */
                 start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
                 end_index = (endbyte >> PAGE_SHIFT);
+                if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+                        /* First page is tricky as 0 - 1 = -1, but pgoff_t
+                         * is unsigned, so the end_index >= start_index
+                         * check below would be true and we'll discard the whole
+                         * file cache which is not what was asked.
+                         */
+                        if (end_index == 0)
+                                break;
+
+                        end_index--;
+                }
 
                 if (end_index >= start_index) {
                         unsigned long count = invalidate_mapping_pages(mapping,
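
For context, a minimal userspace sketch of the case this hunk guards against; the file name and length below are hypothetical and 4 KiB pages are assumed. A POSIX_FADV_DONTNEED request whose end falls in the middle of a page should leave that partially covered last page cached, and a request confined to the first page must not wrap end_index around and flush the whole file:

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int fd = open("data.bin", O_RDONLY);    /* hypothetical file */
        if (fd < 0)
                return 1;

        /* Drop the cached range [0, 6000).  6000 is not page aligned, so
         * the page covering offsets 4096..8191 is only partially covered
         * by the request and should remain in the page cache. */
        int err = posix_fadvise(fd, 0, 6000, POSIX_FADV_DONTNEED);
        if (err)
                fprintf(stderr, "posix_fadvise: %s\n", strerror(err));

        close(fd);
        return 0;
}
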
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d26162e81fea..388c2bb9b55c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -832,8 +832,27 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
          * Only the process that called mmap() has reserves for
          * private mappings.
          */
-        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
-                return true;
+        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+                /*
+                 * Like the shared case above, a hole punch or truncate
+                 * could have been performed on the private mapping.
+                 * Examine the value of chg to determine if reserves
+                 * actually exist or were previously consumed.
+                 * Very Subtle - The value of chg comes from a previous
+                 * call to vma_needs_reserves(). The reserve map for
+                 * private mappings has different (opposite) semantics
+                 * than that of shared mappings. vma_needs_reserves()
+                 * has already taken this difference in semantics into
+                 * account. Therefore, the meaning of chg is the same
+                 * as in the shared case above. Code could easily be
+                 * combined, but keeping it separate draws attention to
+                 * subtle differences.
+                 */
+                if (chg)
+                        return false;
+                else
+                        return true;
+        }
 
         return false;
 }
@@ -1816,6 +1835,25 @@ static long __vma_reservation_common(struct hstate *h,
 
         if (vma->vm_flags & VM_MAYSHARE)
                 return ret;
+        else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
+                /*
+                 * In most cases, reserves always exist for private mappings.
+                 * However, a file associated with mapping could have been
+                 * hole punched or truncated after reserves were consumed.
+                 * As subsequent fault on such a range will not use reserves.
+                 * Subtle - The reserve map for private mappings has the
+                 * opposite meaning than that of shared mappings. If NO
+                 * entry is in the reserve map, it means a reservation exists.
+                 * If an entry exists in the reserve map, it means the
+                 * reservation has already been consumed. As a result, the
+                 * return value of this routine is the opposite of the
+                 * value returned from reserve map manipulation routines above.
+                 */
+                if (ret)
+                        return 0;
+                else
+                        return 1;
+        }
         else
                 return ret < 0 ? ret : 0;
 }
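
For context, a minimal userspace sketch of the kind of sequence the comments above describe; the hugetlbfs path, the 2 MiB huge page size, and the skipped error handling are assumptions. A private mapping sets up its reservation at mmap() time, a hole punch on the backing file can then free the page underneath it, and the next fault must check whether a reserve actually exists instead of assuming one:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        const size_t huge_sz = 2UL << 20;       /* assumed huge page size */
        int fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);
        if (fd < 0)
                return 1;

        /* Private mapping of a hugetlbfs file: a reservation for this
         * range is set aside for the owning process at mmap() time. */
        char *p = mmap(NULL, huge_sz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        p[0] = 1;       /* first fault consumes the reserve */

        /* Hole punch on the backing file: the huge page backing this
         * range can be freed behind the mapping's back. */
        fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, huge_sz);

        p[0] = 2;       /* this fault must not assume a reserve still exists */

        munmap(p, huge_sz);
        close(fd);
        return 0;
}
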
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 18b6a2b8d183..28439acda6ec 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -763,8 +763,8 @@ static int kasan_mem_notifier(struct notifier_block *nb,
 
 static int __init kasan_memhotplug_init(void)
 {
-        pr_err("WARNING: KASAN doesn't support memory hot-add\n");
-        pr_err("Memory hot-add will be disabled\n");
+        pr_info("WARNING: KASAN doesn't support memory hot-add\n");
+        pr_info("Memory hot-add will be disabled\n");
 
         hotplug_memory_notifier(kasan_mem_notifier, 0);
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 58c69c94402a..75e74408cc8f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1608,7 +1608,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-        if (!current->memcg_may_oom || current->memcg_in_oom)
+        if (!current->memcg_may_oom)
                 return;
         /*
          * We are in the middle of the charge context here, so we
diff --git a/mm/swap.c b/mm/swap.c
index 95916142fc46..59f5fafa6e1f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -667,6 +667,24 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
+/*
+ * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+ * workqueue, aiding in getting memory freed.
+ */
+static struct workqueue_struct *lru_add_drain_wq;
+
+static int __init lru_init(void)
+{
+        lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
+
+        if (WARN(!lru_add_drain_wq,
+                "Failed to create workqueue lru_add_drain_wq"))
+                return -ENOMEM;
+
+        return 0;
+}
+early_initcall(lru_init);
+
 void lru_add_drain_all(void)
 {
         static DEFINE_MUTEX(lock);
@@ -686,7 +704,7 @@ void lru_add_drain_all(void)
                     pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
                     need_activate_page_drain(cpu)) {
                         INIT_WORK(work, lru_add_drain_per_cpu);
-                        schedule_work_on(cpu, work);
+                        queue_work_on(cpu, lru_add_drain_wq, work);
                         cpumask_set_cpu(cpu, &has_work);
                 }
         }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 0d457e7db8d6..c99463ac02fb 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -252,7 +252,10 @@ static inline void free_swap_cache(struct page *page)
 void free_page_and_swap_cache(struct page *page)
 {
         free_swap_cache(page);
-        put_page(page);
+        if (is_huge_zero_page(page))
+                put_huge_zero_page();
+        else
+                put_page(page);
 }
 
 /*