Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig           |  2 +-
-rw-r--r--   mm/bounce.c          |  2 ++
-rw-r--r--   mm/compaction.c      |  7 +++++++
-rw-r--r--   mm/hwpoison-inject.c |  5 +++--
-rw-r--r--   mm/madvise.c         |  5 +++--
-rw-r--r--   mm/memory-failure.c  |  8 +++++---
-rw-r--r--   mm/migrate.c         |  2 +-
-rw-r--r--   mm/mlock.c           |  8 ++++++--
-rw-r--r--   mm/page_alloc.c      |  4 ----
-rw-r--r--   mm/vmscan.c          |  4 +++-
10 files changed, 31 insertions, 16 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 026771a9b097..394838f489eb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -183,7 +183,7 @@ config MEMORY_HOTPLUG_SPARSE
 config MEMORY_HOTREMOVE
 	bool "Allow for memory hot remove"
 	select MEMORY_ISOLATION
-	select HAVE_BOOTMEM_INFO_NODE if X86_64
+	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
 	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
 	depends on MIGRATION
 
diff --git a/mm/bounce.c b/mm/bounce.c
index c9f0a4339a7d..5a7d58fb883b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -204,6 +204,8 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	struct bio_vec *to, *from;
 	unsigned i;
 
+	if (force)
+		goto bounce;
 	bio_for_each_segment(from, *bio_orig, i)
 		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
 			goto bounce;
diff --git a/mm/compaction.c b/mm/compaction.c
index c43789388cd8..b5326b141a25 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -677,6 +677,13 @@ static void isolate_freepages(struct zone *zone,
 				pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
 
+		/*
+		 * This can iterate a massively long zone without finding any
+		 * suitable migration targets, so periodically check if we need
+		 * to schedule.
+		 */
+		cond_resched();
+
 		if (!pfn_valid(pfn))
 			continue;
 
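The added cond_resched() is the usual pattern for long pfn scans: yield once per pageblock step rather than only when suitable pages are found. A loose user-space analogue (not kernel code; sched_yield() stands in for cond_resched(), and the block count is made up):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	const unsigned long nr_blocks = 1UL << 16;	/* hypothetical scan length */

	for (unsigned long i = 0; i < nr_blocks; i++) {
		/*
		 * Yield once per block so a long, fruitless scan cannot
		 * monopolize the CPU, mirroring the per-pageblock
		 * cond_resched() added in the hunk above.
		 */
		sched_yield();

		/* ... per-block work that may find no suitable pages ... */
	}

	printf("scanned %lu blocks\n", nr_blocks);
	return 0;
}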
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index afc2daa91c60..4c84678371eb 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -20,8 +20,6 @@ static int hwpoison_inject(void *data, u64 val)
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!hwpoison_filter_enable)
-		goto inject;
 	if (!pfn_valid(pfn))
 		return -ENXIO;
 
@@ -33,6 +31,9 @@ static int hwpoison_inject(void *data, u64 val)
 	if (!get_page_unless_zero(hpage))
 		return 0;
 
+	if (!hwpoison_filter_enable)
+		goto inject;
+
 	if (!PageLRU(p) && !PageHuge(p))
 		shake_page(p, 0);
 	/*
diff --git a/mm/madvise.c b/mm/madvise.c
index 6975bc812542..539eeb96b323 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -343,10 +343,11 @@ static long madvise_remove(struct vm_area_struct *vma,
  */
 static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
 {
+	struct page *p;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-	for (; start < end; start += PAGE_SIZE) {
-		struct page *p;
+	for (; start < end; start += PAGE_SIZE <<
+				compound_order(compound_head(p))) {
 		int ret;
 
 		ret = get_user_pages_fast(start, 1, 0, &p);
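The new loop stride advances by the size of the whole compound page backing `start` instead of one base page per iteration. A minimal sketch of that arithmetic, assuming 4 KiB base pages and an order-9 (2 MiB) huge page; PAGE_SIZE and the compound order are stand-in constants here, not taken from the kernel headers:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)	/* assumed 4 KiB base page */

int main(void)
{
	unsigned long start = 0x200000;	/* hypothetical range start */
	unsigned long end   = 0x600000;	/* hypothetical range end (4 MiB span) */
	unsigned int order  = 9;	/* assumed compound order of a 2 MiB THP */
	int passes = 0;

	/*
	 * Old stride: start += PAGE_SIZE -> 1024 iterations over 4 MiB.
	 * New stride: one whole compound page per pass.
	 */
	for (; start < end; start += PAGE_SIZE << order)
		passes++;

	printf("passes over the range: %d\n", passes);	/* prints 2 */
	return 0;
}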
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 947ed5413279..bf3351b5115e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1114,8 +1114,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 		 * shake_page could have turned it free.
 		 */
 		if (is_free_buddy_page(p)) {
-			action_result(pfn, "free buddy, 2nd try",
-					DELAYED);
+			if (flags & MF_COUNT_INCREASED)
+				action_result(pfn, "free buddy", DELAYED);
+			else
+				action_result(pfn, "free buddy, 2nd try", DELAYED);
 			return 0;
 		}
 		action_result(pfn, "non LRU", IGNORED);
@@ -1349,7 +1351,7 @@ int unpoison_memory(unsigned long pfn)
 	 * worked by memory_failure() and the page lock is not held yet.
 	 * In such case, we yield to memory_failure() and make unpoison fail.
 	 */
-	if (PageTransHuge(page)) {
+	if (!PageHuge(page) && PageTransHuge(page)) {
 		pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
 		return 0;
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index 9c8d5f59d30b..a26bccd44ccb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -107,7 +107,7 @@ void putback_movable_pages(struct list_head *l)
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
-		if (unlikely(balloon_page_movable(page)))
+		if (unlikely(isolated_balloon_page(page)))
 			balloon_page_putback(page);
 		else
 			putback_lru_page(page);
diff --git a/mm/mlock.c b/mm/mlock.c
index 67ba6da7d0e3..d480cd6fc475 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -379,10 +379,14 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 
 	/*
 	 * Initialize pte walk starting at the already pinned page where we
-	 * are sure that there is a pte.
+	 * are sure that there is a pte, as it was pinned under the same
+	 * mmap_sem write op.
 	 */
 	pte = get_locked_pte(vma->vm_mm, start, &ptl);
-	end = min(end, pmd_addr_end(start, end));
+	/* Make sure we do not cross the page table boundary */
+	end = pgd_addr_end(start, end);
+	end = pud_addr_end(start, end);
+	end = pmd_addr_end(start, end);
 
 	/* The page next to the pinned page is the first we will try to get */
 	start += PAGE_SIZE;
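The chained *_addr_end() calls clamp `end` so the pte walk never leaves the page-table unit that covers `start`. A simplified user-space model of that clamping, assuming x86-64-style geometry (4 KiB pages, 512 entries per level, so a PMD covers 2 MiB, a PUD 1 GiB, a PGD 512 GiB); the helper below only approximates the kernel's p*_addr_end() macros:

#include <stdio.h>

#define PMD_SIZE   (1UL << 21)	/* 2 MiB, assumed   */
#define PUD_SIZE   (1UL << 30)	/* 1 GiB, assumed   */
#define PGDIR_SIZE (1UL << 39)	/* 512 GiB, assumed */

/* Next boundary of the given size above addr, capped at end. */
static unsigned long addr_end(unsigned long addr, unsigned long end,
			      unsigned long size)
{
	unsigned long boundary = (addr + size) & ~(size - 1);
	return boundary < end ? boundary : end;
}

int main(void)
{
	unsigned long start = 0x1ff000;	/* just below a 2 MiB boundary */
	unsigned long end   = 0x400000;	/* caller-supplied range end   */

	end = addr_end(start, end, PGDIR_SIZE);
	end = addr_end(start, end, PUD_SIZE);
	end = addr_end(start, end, PMD_SIZE);

	printf("walk stops at %#lx\n", end);	/* 0x200000: the PMD boundary */
	return 0;
}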
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ee638f76ebe..dd886fac451a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6366,10 +6366,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 		list_del(&page->lru);
 		rmv_page_order(page);
 		zone->free_area[order].nr_free--;
-#ifdef CONFIG_HIGHMEM
-		if (PageHighMem(page))
-			totalhigh_pages -= 1 << order;
-#endif
 		for (i = 0; i < (1 << order); i++)
 			SetPageReserved((page+i));
 		pfn += (1 << order);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index beb35778c69f..53f2f82f83ae 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -48,6 +48,7 @@
 #include <asm/div64.h>
 
 #include <linux/swapops.h>
+#include <linux/balloon_compaction.h>
 
 #include "internal.h"
 
@@ -1113,7 +1114,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	LIST_HEAD(clean_pages);
 
 	list_for_each_entry_safe(page, next, page_list, lru) {
-		if (page_is_file_cache(page) && !PageDirty(page)) {
+		if (page_is_file_cache(page) && !PageDirty(page) &&
+		    !isolated_balloon_page(page)) {
 			ClearPageActive(page);
 			list_move(&page->lru, &clean_pages);
 		}