author     Thomas Gleixner <tglx@linutronix.de>   2016-09-01 12:33:46 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2016-09-01 12:33:46 -0400
commit     0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (patch)
tree       41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /mm
parent     aa877175e7a9982233ed8f10cb4bfddd78d82741 (diff)
parent     3eab887a55424fc2c27553b7bfe32330df83f7b8 (diff)
Merge branch 'linus' into smp/hotplug
Apply upstream changes to avoid conflicts with pending patches.
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig             |  9
-rw-r--r--  mm/huge_memory.c       |  7
-rw-r--r--  mm/hugetlb.c           |  1
-rw-r--r--  mm/kasan/quarantine.c  |  7
-rw-r--r--  mm/memcontrol.c        | 68
-rw-r--r--  mm/memory_hotplug.c    |  2
-rw-r--r--  mm/oom_kill.c          |  2
-rw-r--r--  mm/page_alloc.c        | 52
-rw-r--r--  mm/readahead.c         |  9
-rw-r--r--  mm/rmap.c              |  7
-rw-r--r--  mm/shmem.c             |  4
-rw-r--r--  mm/slub.c              |  6
-rw-r--r--  mm/usercopy.c          |  4
13 files changed, 135 insertions, 43 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 78a23c5c302d..be0ee11fa0d9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -262,7 +262,14 @@ config COMPACTION
         select MIGRATION
         depends on MMU
         help
-          Allows the compaction of memory for the allocation of huge pages.
+          Compaction is the only memory management component to form
+          high order (larger physically contiguous) memory blocks
+          reliably. The page allocator relies on compaction heavily and
+          the lack of the feature can lead to unexpected OOM killer
+          invocations for high order memory requests. You shouldn't
+          disable this option unless there really is a strong reason for
+          it and then we would be really interested to hear about that at
+          linux-mm@kvack.org.
 
 #
 # support for page migration
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2373f0a7d340..2db2112aa31e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1512,7 +1512,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
         struct page *page;
         pgtable_t pgtable;
         pmd_t _pmd;
-        bool young, write, dirty;
+        bool young, write, dirty, soft_dirty;
         unsigned long addr;
         int i;
 
@@ -1546,6 +1546,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
         write = pmd_write(*pmd);
         young = pmd_young(*pmd);
         dirty = pmd_dirty(*pmd);
+        soft_dirty = pmd_soft_dirty(*pmd);
 
         pmdp_huge_split_prepare(vma, haddr, pmd);
         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1562,6 +1563,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                         swp_entry_t swp_entry;
                         swp_entry = make_migration_entry(page + i, write);
                         entry = swp_entry_to_pte(swp_entry);
+                        if (soft_dirty)
+                                entry = pte_swp_mksoft_dirty(entry);
                 } else {
                         entry = mk_pte(page + i, vma->vm_page_prot);
                         entry = maybe_mkwrite(entry, vma);
@@ -1569,6 +1572,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                                 entry = pte_wrprotect(entry);
                         if (!young)
                                 entry = pte_mkold(entry);
+                        if (soft_dirty)
+                                entry = pte_mksoft_dirty(entry);
                 }
                 if (dirty)
                         SetPageDirty(page + i);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b9aa1b0b38b0..87e11d8ad536 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1448,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page)
                 list_del(&page->lru);
                 h->free_huge_pages--;
                 h->free_huge_pages_node[nid]--;
+                h->max_huge_pages--;
                 update_and_free_page(h, page);
         }
         spin_unlock(&hugetlb_lock);
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index b6728a33a4ac..baabaad4a4aa 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -217,11 +217,8 @@ void quarantine_reduce(void)
         new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
                 QUARANTINE_FRACTION;
         percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
-        if (WARN_ONCE(new_quarantine_size < percpu_quarantines,
-                "Too little memory, disabling global KASAN quarantine.\n"))
-                new_quarantine_size = 0;
-        else
-                new_quarantine_size -= percpu_quarantines;
+        new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
+                0 : new_quarantine_size - percpu_quarantines;
         WRITE_ONCE(quarantine_size, new_quarantine_size);
 
         last = global_quarantine.head;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e74d7080ec9e..9a6a51a7c416 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4077,14 +4077,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
 
 static DEFINE_IDR(mem_cgroup_idr);
 
-static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
 {
-        atomic_inc(&memcg->id.ref);
+        atomic_add(n, &memcg->id.ref);
 }
 
-static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
 {
-        if (atomic_dec_and_test(&memcg->id.ref)) {
+        if (atomic_sub_and_test(n, &memcg->id.ref)) {
                 idr_remove(&mem_cgroup_idr, memcg->id.id);
                 memcg->id.id = 0;
 
@@ -4093,6 +4093,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg)
         }
 }
 
+static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+        mem_cgroup_id_get_many(memcg, 1);
+}
+
+static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+        mem_cgroup_id_put_many(memcg, 1);
+}
+
 /**
  * mem_cgroup_from_id - look up a memcg from a memcg id
  * @id: the memcg id to look up
@@ -4727,6 +4737,8 @@ static void __mem_cgroup_clear_mc(void)
                 if (!mem_cgroup_is_root(mc.from))
                         page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
 
+                mem_cgroup_id_put_many(mc.from, mc.moved_swap);
+
                 /*
                  * we charged both to->memory and to->memsw, so we
                  * should uncharge to->memory.
@@ -4734,9 +4746,9 @@ static void __mem_cgroup_clear_mc(void)
                 if (!mem_cgroup_is_root(mc.to))
                         page_counter_uncharge(&mc.to->memory, mc.moved_swap);
 
-                css_put_many(&mc.from->css, mc.moved_swap);
+                mem_cgroup_id_get_many(mc.to, mc.moved_swap);
+                css_put_many(&mc.to->css, mc.moved_swap);
 
-                /* we've already done css_get(mc.to) */
                 mc.moved_swap = 0;
         }
         memcg_oom_recover(from);
@@ -5791,6 +5803,24 @@ static int __init mem_cgroup_init(void)
 subsys_initcall(mem_cgroup_init);
 
 #ifdef CONFIG_MEMCG_SWAP
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
+{
+        while (!atomic_inc_not_zero(&memcg->id.ref)) {
+                /*
+                 * The root cgroup cannot be destroyed, so it's refcount must
+                 * always be >= 1.
+                 */
+                if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+                        VM_BUG_ON(1);
+                        break;
+                }
+                memcg = parent_mem_cgroup(memcg);
+                if (!memcg)
+                        memcg = root_mem_cgroup;
+        }
+        return memcg;
+}
+
 /**
  * mem_cgroup_swapout - transfer a memsw charge to swap
  * @page: page whose memsw charge to transfer
@@ -5800,7 +5830,7 @@ subsys_initcall(mem_cgroup_init);
  */
 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 {
-        struct mem_cgroup *memcg;
+        struct mem_cgroup *memcg, *swap_memcg;
         unsigned short oldid;
 
         VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5815,16 +5845,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
         if (!memcg)
                 return;
 
-        mem_cgroup_id_get(memcg);
-        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+        /*
+         * In case the memcg owning these pages has been offlined and doesn't
+         * have an ID allocated to it anymore, charge the closest online
+         * ancestor for the swap instead and transfer the memory+swap charge.
+         */
+        swap_memcg = mem_cgroup_id_get_online(memcg);
+        oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
         VM_BUG_ON_PAGE(oldid, page);
-        mem_cgroup_swap_statistics(memcg, true);
+        mem_cgroup_swap_statistics(swap_memcg, true);
 
         page->mem_cgroup = NULL;
 
         if (!mem_cgroup_is_root(memcg))
                 page_counter_uncharge(&memcg->memory, 1);
 
+        if (memcg != swap_memcg) {
+                if (!mem_cgroup_is_root(swap_memcg))
+                        page_counter_charge(&swap_memcg->memsw, 1);
+                page_counter_uncharge(&memcg->memsw, 1);
+        }
+
         /*
          * Interrupts should be disabled here because the caller holds the
          * mapping->tree_lock lock which is taken with interrupts-off. It is
@@ -5863,11 +5904,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
         if (!memcg)
                 return 0;
 
+        memcg = mem_cgroup_id_get_online(memcg);
+
         if (!mem_cgroup_is_root(memcg) &&
-            !page_counter_try_charge(&memcg->swap, 1, &counter))
+            !page_counter_try_charge(&memcg->swap, 1, &counter)) {
+                mem_cgroup_id_put(memcg);
                 return -ENOMEM;
+        }
 
-        mem_cgroup_id_get(memcg);
         oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
         VM_BUG_ON_PAGE(oldid, page);
         mem_cgroup_swap_statistics(memcg, true);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 3894b65b1555..41266dc29f33 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1219,6 +1219,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 
         /* init node's zones as empty zones, we don't have any present pages.*/
         free_area_init_node(nid, zones_size, start_pfn, zholes_size);
+        pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
 
         /*
          * The node we allocated has no zone fallback lists. For avoiding
@@ -1249,6 +1250,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 {
         arch_refresh_nodedata(nid, NULL);
+        free_percpu(pgdat->per_cpu_nodestats);
         arch_free_nodedata(pgdat);
         return;
 }
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 7d0a275df822..d53a9aa00977 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -764,7 +764,7 @@ bool task_will_free_mem(struct task_struct *task)
 {
         struct mm_struct *mm = task->mm;
         struct task_struct *p;
-        bool ret;
+        bool ret = true;
 
         /*
          * Skip tasks without mm because it might have passed its exit_mm and
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ee744fa3b93d..3fbe73a6fe4b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4060,7 +4060,7 @@ long si_mem_available(void)
         int lru;
 
         for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-                pages[lru] = global_page_state(NR_LRU_BASE + lru);
+                pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
         for_each_zone(zone)
                 wmark_low += zone->watermark[WMARK_LOW];
@@ -4757,6 +4757,8 @@ int local_memory_node(int node)
 }
 #endif
 
+static void setup_min_unmapped_ratio(void);
+static void setup_min_slab_ratio(void);
 #else   /* CONFIG_NUMA */
 
 static void set_zonelist_order(void)
@@ -5878,9 +5880,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
                 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 #ifdef CONFIG_NUMA
                 zone->node = nid;
-                pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
-                                                / 100;
-                pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
 #endif
                 zone->name = zone_names[j];
                 zone->zone_pgdat = pgdat;
@@ -6801,6 +6800,12 @@ int __meminit init_per_zone_wmark_min(void)
         setup_per_zone_wmarks();
         refresh_zone_stat_thresholds();
         setup_per_zone_lowmem_reserve();
+
+#ifdef CONFIG_NUMA
+        setup_min_unmapped_ratio();
+        setup_min_slab_ratio();
+#endif
+
         return 0;
 }
 core_initcall(init_per_zone_wmark_min)
@@ -6842,43 +6847,58 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_NUMA
+static void setup_min_unmapped_ratio(void)
+{
+        pg_data_t *pgdat;
+        struct zone *zone;
+
+        for_each_online_pgdat(pgdat)
+                pgdat->min_unmapped_pages = 0;
+
+        for_each_zone(zone)
+                zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
+                                sysctl_min_unmapped_ratio) / 100;
+}
+
+
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
         void __user *buffer, size_t *length, loff_t *ppos)
 {
-        struct pglist_data *pgdat;
-        struct zone *zone;
         int rc;
 
         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
         if (rc)
                 return rc;
 
+        setup_min_unmapped_ratio();
+
+        return 0;
+}
+
+static void setup_min_slab_ratio(void)
+{
+        pg_data_t *pgdat;
+        struct zone *zone;
+
         for_each_online_pgdat(pgdat)
                 pgdat->min_slab_pages = 0;
 
         for_each_zone(zone)
-                zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
-                                sysctl_min_unmapped_ratio) / 100;
-        return 0;
+                zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
+                                sysctl_min_slab_ratio) / 100;
 }
 
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
         void __user *buffer, size_t *length, loff_t *ppos)
 {
-        struct pglist_data *pgdat;
-        struct zone *zone;
         int rc;
 
         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
         if (rc)
                 return rc;
 
-        for_each_online_pgdat(pgdat)
-                pgdat->min_slab_pages = 0;
+        setup_min_slab_ratio();
 
-        for_each_zone(zone)
-                zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
-                                sysctl_min_slab_ratio) / 100;
         return 0;
 }
 #endif
diff --git a/mm/readahead.c b/mm/readahead.c
index 65ec288dc057..c8a955b1297e 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dax.h>
 #include <linux/gfp.h>
 #include <linux/export.h>
 #include <linux/blkdev.h>
@@ -544,6 +545,14 @@ do_readahead(struct address_space *mapping, struct file *filp,
         if (!mapping || !mapping->a_ops)
                 return -EINVAL;
 
+        /*
+         * Readahead doesn't make sense for DAX inodes, but we don't want it
+         * to report a failure either. Instead, we just return success and
+         * don't do any work.
+         */
+        if (dax_mapping(mapping))
+                return 0;
+
         return force_page_cache_readahead(mapping, filp, index, nr);
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 709bc83703b1..1ef36404e7b2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1284,8 +1284,9 @@ void page_add_file_rmap(struct page *page, bool compound)
                 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
                 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
         } else {
-                if (PageTransCompound(page)) {
-                        VM_BUG_ON_PAGE(!PageLocked(page), page);
+                if (PageTransCompound(page) && page_mapping(page)) {
+                        VM_WARN_ON_ONCE(!PageLocked(page));
+
                         SetPageDoubleMap(compound_head(page));
                         if (PageMlocked(page))
                                 clear_page_mlock(compound_head(page));
@@ -1303,7 +1304,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 {
         int i, nr = 1;
 
-        VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
+        VM_BUG_ON_PAGE(compound && !PageHead(page), page);
         lock_page_memcg(page);
 
         /* Hugepages are not counted in NR_FILE_MAPPED for now. */
diff --git a/mm/shmem.c b/mm/shmem.c
index 7f7748a0f9e1..fd8b2b5741b1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3975,7 +3975,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 
 struct kobj_attribute shmem_enabled_attr =
         __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
 
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
 bool shmem_huge_enabled(struct vm_area_struct *vma)
 {
         struct inode *inode = file_inode(vma->vm_file);
@@ -4006,7 +4008,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
                         return false;
         }
 }
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 
 #else /* !CONFIG_SHMEM */
 
diff --git a/mm/slub.c b/mm/slub.c
index cead06394e9e..9adae58462f8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3629,6 +3629,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
+        LIST_HEAD(discard);
         struct page *page, *h;
 
         BUG_ON(irqs_disabled());
@@ -3636,13 +3637,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
         list_for_each_entry_safe(page, h, &n->partial, lru) {
                 if (!page->inuse) {
                         remove_partial(n, page);
-                        discard_slab(s, page);
+                        list_add(&page->lru, &discard);
                 } else {
                         list_slab_objects(s, page,
                         "Objects remaining in %s on __kmem_cache_shutdown()");
                 }
         }
         spin_unlock_irq(&n->list_lock);
+
+        list_for_each_entry_safe(page, h, &discard, lru)
+                discard_slab(s, page);
 }
 
 /*
3647 3651
3648/* 3652/*
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 8ebae91a6b55..a3cc3052f830 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -83,7 +83,7 @@ static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
         unsigned long check_high = check_low + n;
 
         /* Does not overlap if entirely above or entirely below. */
-        if (check_low >= high || check_high < low)
+        if (check_low >= high || check_high <= low)
                 return false;
 
         return true;
@@ -124,7 +124,7 @@ static inline const char *check_kernel_text_object(const void *ptr,
 static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 {
         /* Reject if object wraps past end of memory. */
-        if (ptr + n < ptr)
+        if ((unsigned long)ptr + n < (unsigned long)ptr)
                 return "<wrapped address>";
 
         /* Reject if NULL or ZERO-allocation. */