diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-21 17:59:00 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-21 17:59:00 -0500 |
commit | 23203e3f34c97f4ddd6e353adba45161880a52a4 (patch) | |
tree | 66c71acca99210b68b7c2e490d0639c57376c638 | |
parent | 6cafab50eea327e0d198cc9579a60440fc959756 (diff) | |
parent | 17e2e7d7e1b83fa324b3f099bfe426659aa3c2a4 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
"4 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
mm, page_alloc: fix has_unmovable_pages for HugePages
fork,memcg: fix crash in free_thread_stack on memcg charge fail
mm: thp: fix flags for pmd migration when split
mm, memory_hotplug: initialize struct pages for the full memory section
-rw-r--r-- | kernel/fork.c | 9
-rw-r--r-- | mm/huge_memory.c | 20
-rw-r--r-- | mm/page_alloc.c | 19
3 files changed, 35 insertions(+), 13 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 07cddff89c7b..e2a5156bc9c3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -240,8 +240,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) | |||
240 | * free_thread_stack() can be called in interrupt context, | 240 | * free_thread_stack() can be called in interrupt context, |
241 | * so cache the vm_struct. | 241 | * so cache the vm_struct. |
242 | */ | 242 | */ |
243 | if (stack) | 243 | if (stack) { |
244 | tsk->stack_vm_area = find_vm_area(stack); | 244 | tsk->stack_vm_area = find_vm_area(stack); |
245 | tsk->stack = stack; | ||
246 | } | ||
245 | return stack; | 247 | return stack; |
246 | #else | 248 | #else |
247 | struct page *page = alloc_pages_node(node, THREADINFO_GFP, | 249 | struct page *page = alloc_pages_node(node, THREADINFO_GFP, |
@@ -288,7 +290,10 @@ static struct kmem_cache *thread_stack_cache; | |||
288 | static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, | 290 | static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, |
289 | int node) | 291 | int node) |
290 | { | 292 | { |
291 | return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); | 293 | unsigned long *stack; |
294 | stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); | ||
295 | tsk->stack = stack; | ||
296 | return stack; | ||
292 | } | 297 | } |
293 | 298 | ||
294 | static void free_thread_stack(struct task_struct *tsk) | 299 | static void free_thread_stack(struct task_struct *tsk) |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5da55b38b1b7..e84a10b0d310 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2144,23 +2144,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, | |||
2144 | */ | 2144 | */ |
2145 | old_pmd = pmdp_invalidate(vma, haddr, pmd); | 2145 | old_pmd = pmdp_invalidate(vma, haddr, pmd); |
2146 | 2146 | ||
2147 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION | ||
2148 | pmd_migration = is_pmd_migration_entry(old_pmd); | 2147 | pmd_migration = is_pmd_migration_entry(old_pmd); |
2149 | if (pmd_migration) { | 2148 | if (unlikely(pmd_migration)) { |
2150 | swp_entry_t entry; | 2149 | swp_entry_t entry; |
2151 | 2150 | ||
2152 | entry = pmd_to_swp_entry(old_pmd); | 2151 | entry = pmd_to_swp_entry(old_pmd); |
2153 | page = pfn_to_page(swp_offset(entry)); | 2152 | page = pfn_to_page(swp_offset(entry)); |
2154 | } else | 2153 | write = is_write_migration_entry(entry); |
2155 | #endif | 2154 | young = false; |
2155 | soft_dirty = pmd_swp_soft_dirty(old_pmd); | ||
2156 | } else { | ||
2156 | page = pmd_page(old_pmd); | 2157 | page = pmd_page(old_pmd); |
2158 | if (pmd_dirty(old_pmd)) | ||
2159 | SetPageDirty(page); | ||
2160 | write = pmd_write(old_pmd); | ||
2161 | young = pmd_young(old_pmd); | ||
2162 | soft_dirty = pmd_soft_dirty(old_pmd); | ||
2163 | } | ||
2157 | VM_BUG_ON_PAGE(!page_count(page), page); | 2164 | VM_BUG_ON_PAGE(!page_count(page), page); |
2158 | page_ref_add(page, HPAGE_PMD_NR - 1); | 2165 | page_ref_add(page, HPAGE_PMD_NR - 1); |
2159 | if (pmd_dirty(old_pmd)) | ||
2160 | SetPageDirty(page); | ||
2161 | write = pmd_write(old_pmd); | ||
2162 | young = pmd_young(old_pmd); | ||
2163 | soft_dirty = pmd_soft_dirty(old_pmd); | ||
2164 | 2166 | ||
2165 | /* | 2167 | /* |
2166 | * Withdraw the table only after we mark the pmd entry invalid. | 2168 | * Withdraw the table only after we mark the pmd entry invalid. |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2ec9cc407216..e95b5b7c9c3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5542,6 +5542,18 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, | |||
5542 | cond_resched(); | 5542 | cond_resched(); |
5543 | } | 5543 | } |
5544 | } | 5544 | } |
5545 | #ifdef CONFIG_SPARSEMEM | ||
5546 | /* | ||
5547 | * If the zone does not span the rest of the section then | ||
5548 | * we should at least initialize those pages. Otherwise we | ||
5549 | * could blow up on a poisoned page in some paths which depend | ||
5550 | * on full sections being initialized (e.g. memory hotplug). | ||
5551 | */ | ||
5552 | while (end_pfn % PAGES_PER_SECTION) { | ||
5553 | __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid); | ||
5554 | end_pfn++; | ||
5555 | } | ||
5556 | #endif | ||
5545 | } | 5557 | } |
5546 | 5558 | ||
5547 | #ifdef CONFIG_ZONE_DEVICE | 5559 | #ifdef CONFIG_ZONE_DEVICE |
@@ -7802,11 +7814,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
7802 | * handle each tail page individually in migration. | 7814 | * handle each tail page individually in migration. |
7803 | */ | 7815 | */ |
7804 | if (PageHuge(page)) { | 7816 | if (PageHuge(page)) { |
7817 | struct page *head = compound_head(page); | ||
7818 | unsigned int skip_pages; | ||
7805 | 7819 | ||
7806 | if (!hugepage_migration_supported(page_hstate(page))) | 7820 | if (!hugepage_migration_supported(page_hstate(head))) |
7807 | goto unmovable; | 7821 | goto unmovable; |
7808 | 7822 | ||
7809 | iter = round_up(iter + 1, 1<<compound_order(page)) - 1; | 7823 | skip_pages = (1 << compound_order(head)) - (page - head); |
7824 | iter += skip_pages - 1; | ||
7810 | continue; | 7825 | continue; |
7811 | } | 7826 | } |
7812 | 7827 | ||