-rw-r--r--  drivers/base/Kconfig            |  1
-rw-r--r--  include/linux/page-isolation.h  | 13
-rw-r--r--  mm/Kconfig                      |  5
-rw-r--r--  mm/Makefile                     |  5
-rw-r--r--  mm/page_alloc.c                 | 80
-rw-r--r--  mm/page_isolation.c             | 71
6 files changed, 93 insertions(+), 82 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9b21469482ae..08b4c5209384 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -196,6 +196,7 @@ config CMA
196 bool "Contiguous Memory Allocator (EXPERIMENTAL)" 196 bool "Contiguous Memory Allocator (EXPERIMENTAL)"
197 depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL 197 depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
198 select MIGRATION 198 select MIGRATION
199 select MEMORY_ISOLATION
199 help 200 help
200 This enables the Contiguous Memory Allocator which allows drivers 201 This enables the Contiguous Memory Allocator which allows drivers
201 to allocate big physically-contiguous blocks of memory for use with 202 to allocate big physically-contiguous blocks of memory for use with
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3bdcab30ca41..105077aa7685 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -1,6 +1,11 @@
 #ifndef __LINUX_PAGEISOLATION_H
 #define __LINUX_PAGEISOLATION_H
 
+
+bool has_unmovable_pages(struct zone *zone, struct page *page, int count);
+void set_pageblock_migratetype(struct page *page, int migratetype);
+int move_freepages_block(struct zone *zone, struct page *page,
+				int migratetype);
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
  * If specified range includes migrate types other than MOVABLE or CMA,
@@ -10,7 +15,7 @@
  * free all pages in the range. test_page_isolated() can be used for
  * test it.
  */
-extern int
+int
 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			 unsigned migratetype);
 
@@ -18,7 +23,7 @@ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
  * target range is [start_pfn, end_pfn)
  */
-extern int
+int
 undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			unsigned migratetype);
 
@@ -30,8 +35,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
 /*
  * Internal functions. Changes pageblock's migrate type.
  */
-extern int set_migratetype_isolate(struct page *page);
-extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
+int set_migratetype_isolate(struct page *page);
+void unset_migratetype_isolate(struct page *page, unsigned migratetype);
 
 
 #endif
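
For orientation, the functions declared above are driven in a fixed order by range allocators and the memory-offline path: isolate the pfn range, migrate or reclaim whatever is still in use, verify the range really is free, then undo the isolation. A minimal caller sketch follows; the function name is hypothetical and error handling is trimmed, assuming the usual convention that start_isolate_page_range() and test_pages_isolated() return 0 on success:

#include <linux/mm.h>
#include <linux/page-isolation.h>

/* Hypothetical illustration only -- not part of this patch. */
static int claim_pfn_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	/* Mark every pageblock in [start_pfn, end_pfn) MIGRATE_ISOLATE. */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	if (ret)
		return ret;

	/* ... migrate or reclaim pages still in use inside the range ... */

	/* Succeeds only if every page in the range is free and isolated. */
	if (test_pages_isolated(start_pfn, end_pfn))
		ret = -EBUSY;

	/* Give the pageblocks back their original migratetype. */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}

This is essentially the shape of the CMA allocator and the memory hot-remove path, which is why the Kconfig hunks below make CMA, MEMORY_HOTPLUG and MEMORY_FAILURE select the new MEMORY_ISOLATION symbol once page_isolation.o stops being built unconditionally.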
diff --git a/mm/Kconfig b/mm/Kconfig
index 82fed4eb2b6f..d5c8019c6627 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -140,9 +140,13 @@ config ARCH_DISCARD_MEMBLOCK
 config NO_BOOTMEM
 	boolean
 
+config MEMORY_ISOLATION
+	boolean
+
 # eventually, we can have this option just 'select SPARSEMEM'
 config MEMORY_HOTPLUG
 	bool "Allow for memory hot-add"
+	select MEMORY_ISOLATION
 	depends on SPARSEMEM || X86_64_ACPI_NUMA
 	depends on HOTPLUG && ARCH_ENABLE_MEMORY_HOTPLUG
 	depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)
@@ -272,6 +276,7 @@ config MEMORY_FAILURE
 	depends on MMU
 	depends on ARCH_SUPPORTS_MEMORY_FAILURE
 	bool "Enable recovery from hardware memory errors"
+	select MEMORY_ISOLATION
 	help
 	  Enables code to recover from some memory failures on systems
 	  with MCA recovery. This allows a system to continue running
diff --git a/mm/Makefile b/mm/Makefile
index 290bbfe33698..92753e2d82da 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -15,8 +15,8 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   maccess.o page_alloc.o page-writeback.o \
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
-			   page_isolation.o mm_init.o mmu_context.o percpu.o \
-			   compaction.o slab_common.o $(mmu-y)
+			   mm_init.o mmu_context.o percpu.o slab_common.o \
+			   compaction.o $(mmu-y)
 
 obj-y += init-mm.o
 
@@ -56,3 +56,4 @@ obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
 obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
 obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
 obj-$(CONFIG_CLEANCACHE) += cleancache.o
+obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 667338e80e94..228194728ccd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -51,7 +51,6 @@
 #include <linux/page_cgroup.h>
 #include <linux/debugobjects.h>
 #include <linux/kmemleak.h>
-#include <linux/memory.h>
 #include <linux/compaction.h>
 #include <trace/events/kmem.h>
 #include <linux/ftrace_event.h>
@@ -219,7 +218,7 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-static void set_pageblock_migratetype(struct page *page, int migratetype)
+void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 
 	if (unlikely(page_group_by_mobility_disabled))
@@ -954,7 +953,7 @@ static int move_freepages(struct zone *zone,
 	return pages_moved;
 }
 
-static int move_freepages_block(struct zone *zone, struct page *page,
+int move_freepages_block(struct zone *zone, struct page *page,
 				int migratetype)
 {
 	unsigned long start_pfn, end_pfn;
@@ -5463,8 +5462,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
  * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
  * expect this function should be exact.
  */
-static bool
-__has_unmovable_pages(struct zone *zone, struct page *page, int count)
+bool has_unmovable_pages(struct zone *zone, struct page *page, int count)
 {
 	unsigned long pfn, iter, found;
 	int mt;
@@ -5541,77 +5539,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 	    zone->zone_start_pfn + zone->spanned_pages <= pfn)
 		return false;
 
-	return !__has_unmovable_pages(zone, page, 0);
-}
-
-int set_migratetype_isolate(struct page *page)
-{
-	struct zone *zone;
-	unsigned long flags, pfn;
-	struct memory_isolate_notify arg;
-	int notifier_ret;
-	int ret = -EBUSY;
-
-	zone = page_zone(page);
-
-	spin_lock_irqsave(&zone->lock, flags);
-
-	pfn = page_to_pfn(page);
-	arg.start_pfn = pfn;
-	arg.nr_pages = pageblock_nr_pages;
-	arg.pages_found = 0;
-
-	/*
-	 * It may be possible to isolate a pageblock even if the
-	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
-	 * notifier chain is used by balloon drivers to return the
-	 * number of pages in a range that are held by the balloon
-	 * driver to shrink memory. If all the pages are accounted for
-	 * by balloons, are free, or on the LRU, isolation can continue.
-	 * Later, for example, when memory hotplug notifier runs, these
-	 * pages reported as "can be isolated" should be isolated(freed)
-	 * by the balloon driver through the memory notifier chain.
-	 */
-	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
-	notifier_ret = notifier_to_errno(notifier_ret);
-	if (notifier_ret)
-		goto out;
-	/*
-	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
-	 * We just check MOVABLE pages.
-	 */
-	if (!__has_unmovable_pages(zone, page, arg.pages_found))
-		ret = 0;
-	/*
-	 * Unmovable means "not-on-lru" pages. If Unmovable pages are
-	 * larger than removable-by-driver pages reported by notifier,
-	 * we'll fail.
-	 */
-
-out:
-	if (!ret) {
-		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
-		move_freepages_block(zone, page, MIGRATE_ISOLATE);
-	}
-
-	spin_unlock_irqrestore(&zone->lock, flags);
-	if (!ret)
-		drain_all_pages();
-	return ret;
-}
-
-void unset_migratetype_isolate(struct page *page, unsigned migratetype)
-{
-	struct zone *zone;
-	unsigned long flags;
-	zone = page_zone(page);
-	spin_lock_irqsave(&zone->lock, flags);
-	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
-		goto out;
-	set_pageblock_migratetype(page, migratetype);
-	move_freepages_block(zone, page, migratetype);
-out:
-	spin_unlock_irqrestore(&zone->lock, flags);
+	return !has_unmovable_pages(zone, page, 0);
 }
 
 #ifdef CONFIG_CMA
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c9f04774f2b8..fb482cf438da 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -5,8 +5,79 @@
 #include <linux/mm.h>
 #include <linux/page-isolation.h>
 #include <linux/pageblock-flags.h>
+#include <linux/memory.h>
 #include "internal.h"
 
+int set_migratetype_isolate(struct page *page)
+{
+	struct zone *zone;
+	unsigned long flags, pfn;
+	struct memory_isolate_notify arg;
+	int notifier_ret;
+	int ret = -EBUSY;
+
+	zone = page_zone(page);
+
+	spin_lock_irqsave(&zone->lock, flags);
+
+	pfn = page_to_pfn(page);
+	arg.start_pfn = pfn;
+	arg.nr_pages = pageblock_nr_pages;
+	arg.pages_found = 0;
+
+	/*
+	 * It may be possible to isolate a pageblock even if the
+	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
+	 * notifier chain is used by balloon drivers to return the
+	 * number of pages in a range that are held by the balloon
+	 * driver to shrink memory. If all the pages are accounted for
+	 * by balloons, are free, or on the LRU, isolation can continue.
+	 * Later, for example, when memory hotplug notifier runs, these
+	 * pages reported as "can be isolated" should be isolated(freed)
+	 * by the balloon driver through the memory notifier chain.
+	 */
+	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
+	notifier_ret = notifier_to_errno(notifier_ret);
+	if (notifier_ret)
+		goto out;
+	/*
+	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
+	 * We just check MOVABLE pages.
+	 */
+	if (!has_unmovable_pages(zone, page, arg.pages_found))
+		ret = 0;
+
+	/*
+	 * immobile means "not-on-lru" pages. If immobile is larger than
+	 * removable-by-driver pages reported by notifier, we'll fail.
+	 */
+
+out:
+	if (!ret) {
+		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+		move_freepages_block(zone, page, MIGRATE_ISOLATE);
+	}
+
+	spin_unlock_irqrestore(&zone->lock, flags);
+	if (!ret)
+		drain_all_pages();
+	return ret;
+}
+
+void unset_migratetype_isolate(struct page *page, unsigned migratetype)
+{
+	struct zone *zone;
+	unsigned long flags;
+	zone = page_zone(page);
+	spin_lock_irqsave(&zone->lock, flags);
+	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+		goto out;
+	set_pageblock_migratetype(page, migratetype);
+	move_freepages_block(zone, page, migratetype);
+out:
+	spin_unlock_irqrestore(&zone->lock, flags);
+}
+
 static inline struct page *
 __first_valid_page(unsigned long pfn, unsigned long nr_pages)
 {
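
The two functions moved in above are the per-pageblock primitives; the range-level helpers further down in this file (start_isolate_page_range() and undo_isolate_page_range(), whose surrounding context begins at __first_valid_page() above) walk a pfn range one pageblock at a time and call them, undoing partial work on failure. Roughly, as a simplified sketch rather than the exact code (the real loop also skips pfn holes via __first_valid_page()):

/* Simplified illustration of how the range helpers drive the primitives
 * above; the function name is hypothetical and hole handling is omitted.
 */
static int isolate_range_sketch(unsigned long start_pfn, unsigned long end_pfn,
				unsigned migratetype)
{
	unsigned long pfn, undo_pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		/* Flip one pageblock to MIGRATE_ISOLATE, or bail out. */
		if (set_migratetype_isolate(pfn_to_page(pfn)))
			goto undo;
	}
	return 0;

undo:
	/* Roll back the pageblocks that were isolated before the failure. */
	for (undo_pfn = start_pfn; undo_pfn < pfn;
	     undo_pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(undo_pfn), migratetype);
	return -EBUSY;
}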