Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c      2
-rw-r--r--  mm/ksm.c              2
-rw-r--r--  mm/memcontrol.c      14
-rw-r--r--  mm/memory-failure.c   2
-rw-r--r--  mm/page_alloc.c      30
-rw-r--r--  mm/swap.c             4
6 files changed, 41 insertions, 13 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4df39b1bde91..1546655a2d78 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1961,7 +1961,7 @@ out:
 	return ret;
 }
 
-#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
 
 int hugepage_madvise(struct vm_area_struct *vma,
 		     unsigned long *vm_flags, int advice)
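
For context, VM_NO_THP collects the vm_flags that disqualify a VMA from transparent hugepages, and hugepage_madvise() consults it when userspace asks for MADV_HUGEPAGE. A minimal sketch of that gating, assuming the 3.14-era flag names (illustrative, not a verbatim copy of mm/huge_memory.c):

/*
 * Sketch: how a VM_NO_THP-style mask gates madvise(MADV_HUGEPAGE).
 * VM_HUGEPAGE/VM_NOHUGEPAGE are the per-VMA THP hint flags.
 */
static int madvise_hugepage_sketch(unsigned long *vm_flags)
{
	/* Refuse if a disqualifying flag (or the hint itself) is set. */
	if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
		return -EINVAL;
	*vm_flags &= ~VM_NOHUGEPAGE;
	*vm_flags |= VM_HUGEPAGE;
	return 0;
}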
diff --git a/mm/ksm.c b/mm/ksm.c
index aa4c7c7250c1..68710e80994a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item)
 static struct page *page_trans_compound_anon(struct page *page)
 {
 	if (PageTransCompound(page)) {
-		struct page *head = compound_trans_head(page);
+		struct page *head = compound_head(page);
 		/*
 		 * head may actually be splitted and freed from under
 		 * us but it's ok here.
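
compound_trans_head() existed to cope with a THP being split while the lookup raced; once prep_compound_page() publishes first_page before the tail flag (see the mm/page_alloc.c hunk below), plain compound_head() is safe for these lockless callers. In the 3.14-era page layout it amounts to roughly this (a sketch, not the verbatim helper):

/*
 * Sketch of the era's compound_head(): a tail page points back at
 * its head page via first_page.
 */
static inline struct page *compound_head_sketch(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}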
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ce7a8cc7b404..5b6b0039f725 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1127,8 +1127,8 @@ skip_node:
 	 * skipping css reference should be safe.
 	 */
 	if (next_css) {
-		if ((next_css->flags & CSS_ONLINE) &&
-		    (next_css == &root->css || css_tryget(next_css)))
+		if ((next_css == &root->css) ||
+		    ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
 			return mem_cgroup_from_css(next_css);
 
 		prev_css = next_css;
@@ -6595,6 +6595,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	struct mem_cgroup_event *event, *tmp;
+	struct cgroup_subsys_state *iter;
 
 	/*
 	 * Unregister events and notify userspace.
@@ -6611,7 +6612,14 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 	kmem_cgroup_css_offline(memcg);
 
 	mem_cgroup_invalidate_reclaim_iterators(memcg);
-	mem_cgroup_reparent_charges(memcg);
+
+	/*
+	 * This requires that offlining is serialized. Right now that is
+	 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
+	 */
+	css_for_each_descendant_post(iter, css)
+		mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
+
 	mem_cgroup_destroy_all_caches(memcg);
 	vmpressure_cleanup(&memcg->vmpressure);
 }
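
css_for_each_descendant_post() (from <linux/cgroup.h>) walks the subtree bottom-up: every child is visited before its parent, and the root of the walk comes last. That ordering is what lets each descendant reparent its charges before its own parent is processed. A hedged usage sketch:

/*
 * Sketch: post-order traversal of a cgroup subtree.  @css itself
 * is visited last, after all of its descendants.
 */
struct cgroup_subsys_state *pos;

css_for_each_descendant_post(pos, css)
	pr_debug("reparenting charges of %p before its parent\n",
		 mem_cgroup_from_css(pos));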
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2f2f34a4e77d..90002ea43638 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1651,7 +1651,7 @@ int soft_offline_page(struct page *page, int flags)
 {
 	int ret;
 	unsigned long pfn = page_to_pfn(page);
-	struct page *hpage = compound_trans_head(page);
+	struct page *hpage = compound_head(page);
 
 	if (PageHWPoison(page)) {
 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3758a09a009..3bac76ae4b30 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -369,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order)
 	__SetPageHead(page);
 	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
-		__SetPageTail(p);
 		set_page_count(p, 0);
 		p->first_page = page;
+		/* Make sure p->first_page is always valid for PageTail() */
+		smp_wmb();
+		__SetPageTail(p);
 	}
 }
 
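The barrier matters because lockless readers test PageTail() and then dereference p->first_page without any lock held. Sketched pairing (a simplification; a weakly ordered reader would also want a matching read-side barrier or dependency):

/*
 * Writer (prep_compound_page)        Reader (compound_head caller)
 *
 *   p->first_page = page;              if (PageTail(p))
 *   smp_wmb();                                 head = p->first_page;
 *   __SetPageTail(p);
 *
 * If the reader observes PageTail set, the smp_wmb() ensures the
 * first_page store is visible too, so head cannot be a stale value
 * left over from the page's previous life.
 */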
@@ -1236,6 +1238,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	}
 	local_irq_restore(flags);
 }
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+	return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
+}
+#else
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+	return false;
+}
 #endif
 
 /*
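Two details are worth noting, under the assumption that the era defines GFP_THISNODE as the composite mask (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) on CONFIG_NUMA builds and as 0 otherwise: the full-mask comparison avoids false positives from allocations that merely share a bit, and the !NUMA stub must hard-code false because (gfp & 0) == 0 would be vacuously true for every request. A sketch of the difference:

/* Hedged sketch; see the GFP_THISNODE assumption stated above. */
gfp_t gfp = __GFP_NOWARN | __GFP_NORETRY;	/* not a THISNODE request */

bool loose  = (gfp & GFP_THISNODE) != 0;		/* true: false positive */
bool strict = (gfp & GFP_THISNODE) == GFP_THISNODE;	/* false: correct */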
@@ -1572,7 +1583,13 @@ again:
 					  get_pageblock_migratetype(page));
 	}
 
-	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+	/*
+	 * NOTE: GFP_THISNODE allocations do not partake in the kswapd
+	 * aging protocol, so they can't be fair.
+	 */
+	if (!gfp_thisnode_allocation(gfp_flags))
+		__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
@@ -1944,8 +1961,12 @@ zonelist_scan:
 		 * ultimately fall back to remote zones that do not
 		 * partake in the fairness round-robin cycle of this
 		 * zonelist.
+		 *
+		 * NOTE: GFP_THISNODE allocations do not partake in
+		 * the kswapd aging protocol, so they can't be fair.
 		 */
-		if (alloc_flags & ALLOC_WMARK_LOW) {
+		if ((alloc_flags & ALLOC_WMARK_LOW) &&
+		    !gfp_thisnode_allocation(gfp_mask)) {
 			if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
 				continue;
 			if (!zone_local(preferred_zone, zone))
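
This is the consumption side of the same exemption: a __GFP_THISNODE caller is pinned to one node and cannot rotate to the next zone, so it must not be skipped when a zone's fair-share batch is spent (just as it no longer drains the batch in the earlier hunk). Condensed sketch of the gate, assuming the surrounding zonelist scan:

/* Inside the zonelist scan, before trying a zone's free lists. */
if ((alloc_flags & ALLOC_WMARK_LOW) &&
    !gfp_thisnode_allocation(gfp_mask)) {
	if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
		continue;	/* fair share spent: round-robin onward */
}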
@@ -2501,8 +2522,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * allowed per node queues are empty and that nodes are
 	 * over allocated.
 	 */
-	if (IS_ENABLED(CONFIG_NUMA) &&
-	    (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+	if (gfp_thisnode_allocation(gfp_mask))
 		goto nopage;
 
 restart:
diff --git a/mm/swap.c b/mm/swap.c
index b31ba67d440a..0092097b3f4c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -98,7 +98,7 @@ static void put_compound_page(struct page *page)
 	}
 
 	/* __split_huge_page_refcount can run under us */
-	page_head = compound_trans_head(page);
+	page_head = compound_head(page);
 
 	/*
 	 * THP can not break up slab pages so avoid taking
@@ -253,7 +253,7 @@ bool __get_page_tail(struct page *page)
 	 */
 	unsigned long flags;
 	bool got;
-	struct page *page_head = compound_trans_head(page);
+	struct page *page_head = compound_head(page);
 
 	/* Ref to put_compound_page() comment. */
 	if (!__compound_tail_refcounted(page_head)) {