author     Linus Torvalds <torvalds@linux-foundation.org>   2012-10-09 03:23:15 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-09 03:23:15 -0400
commit     9e2d8656f5e8aa214e66b462680cf86b210b74a8 (patch)
tree       f67d62e896cedf75599ea45f9ecf9999c6ad24cd /mm/memory_hotplug.c
parent     1ea4f4f8405cc1ceec23f2d261bc3775785e6712 (diff)
parent     9e695d2ecc8451cc2c1603d60b5c8e7f5581923a (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge patches from Andrew Morton:
"A few misc things and very nearly all of the MM tree. A tremendous
amount of stuff (again), including a significant rbtree library
rework."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (160 commits)
sparc64: Support transparent huge pages.
mm: thp: Use more portable PMD clearing sequenece in zap_huge_pmd().
mm: Add and use update_mmu_cache_pmd() in transparent huge page code.
sparc64: Document PGD and PMD layout.
sparc64: Eliminate PTE table memory wastage.
sparc64: Halve the size of PTE tables
sparc64: Only support 4MB huge pages and 8KB base pages.
memory-hotplug: suppress "Trying to free nonexistent resource <XXXXXXXXXXXXXXXX-YYYYYYYYYYYYYYYY>" warning
mm: memcg: clean up mm_match_cgroup() signature
mm: document PageHuge somewhat
mm: use %pK for /proc/vmallocinfo
mm, thp: fix mlock statistics
mm, thp: fix mapped pages avoiding unevictable list on mlock
memory-hotplug: update memory block's state and notify userspace
memory-hotplug: preparation to notify memory block's state at memory hot remove
mm: avoid section mismatch warning for memblock_type_name
make GFP_NOTRACK definition unconditional
cma: decrease cc.nr_migratepages after reclaiming pagelist
CMA: migrate mlocked pages
kpageflags: fix wrong KPF_THP on non-huge compound pages
...
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--  mm/memory_hotplug.c  77
1 file changed, 63 insertions(+), 14 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6a5b90d0cfd7..56b758ae57d2 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -106,6 +106,7 @@ static void get_page_bootmem(unsigned long info, struct page *page,
 void __ref put_page_bootmem(struct page *page)
 {
 	unsigned long type;
+	struct zone *zone;
 
 	type = (unsigned long) page->lru.next;
 	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
@@ -116,6 +117,12 @@ void __ref put_page_bootmem(struct page *page)
 		set_page_private(page, 0);
 		INIT_LIST_HEAD(&page->lru);
 		__free_pages_bootmem(page, 0);
+
+		zone = page_zone(page);
+		zone_span_writelock(zone);
+		zone->present_pages++;
+		zone_span_writeunlock(zone);
+		totalram_pages++;
 	}
 
 }
@@ -362,11 +369,11 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
 	BUG_ON(nr_pages % PAGES_PER_SECTION);
 
+	release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);
+
 	sections_to_remove = nr_pages / PAGES_PER_SECTION;
 	for (i = 0; i < sections_to_remove; i++) {
 		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
-		release_mem_region(pfn << PAGE_SHIFT,
-				   PAGES_PER_SECTION << PAGE_SHIFT);
 		ret = __remove_section(zone, __pfn_to_section(pfn));
 		if (ret)
 			break;
@@ -756,13 +763,6 @@ static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
 	return 0;
 }
 
-static struct page *
-hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
-{
-	/* This should be improooooved!! */
-	return alloc_page(GFP_HIGHUSER_MOVABLE);
-}
-
 #define NR_OFFLINE_AT_ONCE_PAGES	(256)
 static int
 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
@@ -813,8 +813,12 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			putback_lru_pages(&source);
 			goto out;
 		}
-		/* this function returns # of failed pages */
-		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
+
+		/*
+		 * alloc_migrate_target should be improooooved!!
+		 * migrate_pages returns # of failed pages.
+		 */
+		ret = migrate_pages(&source, alloc_migrate_target, 0,
 							true, MIGRATE_SYNC);
 		if (ret)
 			putback_lru_pages(&source);
@@ -870,7 +874,7 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	return offlined;
 }
 
-static int __ref offline_pages(unsigned long start_pfn,
+static int __ref __offline_pages(unsigned long start_pfn,
 		  unsigned long end_pfn, unsigned long timeout)
 {
 	unsigned long pfn, nr_pages, expire;
@@ -970,8 +974,13 @@ repeat:
 
 	init_per_zone_wmark_min();
 
-	if (!populated_zone(zone))
+	if (!populated_zone(zone)) {
 		zone_pcp_reset(zone);
+		mutex_lock(&zonelists_mutex);
+		build_all_zonelists(NULL, NULL);
+		mutex_unlock(&zonelists_mutex);
+	} else
+		zone_pcp_update(zone);
 
 	if (!node_present_pages(node)) {
 		node_clear_state(node, N_HIGH_MEMORY);
@@ -998,15 +1007,55 @@ out:
 	return ret;
 }
 
+int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+{
+	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
+}
+
 int remove_memory(u64 start, u64 size)
 {
+	struct memory_block *mem = NULL;
+	struct mem_section *section;
 	unsigned long start_pfn, end_pfn;
+	unsigned long pfn, section_nr;
+	int ret;
 
 	start_pfn = PFN_DOWN(start);
 	end_pfn = start_pfn + PFN_DOWN(size);
-	return offline_pages(start_pfn, end_pfn, 120 * HZ);
+
+	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+		section_nr = pfn_to_section_nr(pfn);
+		if (!present_section_nr(section_nr))
+			continue;
+
+		section = __nr_to_section(section_nr);
+		/* same memblock? */
+		if (mem)
+			if ((section_nr >= mem->start_section_nr) &&
+			    (section_nr <= mem->end_section_nr))
+				continue;
+
+		mem = find_memory_block_hinted(section, mem);
+		if (!mem)
+			continue;
+
+		ret = offline_memory_block(mem);
+		if (ret) {
+			kobject_put(&mem->dev.kobj);
+			return ret;
+		}
+	}
+
+	if (mem)
+		kobject_put(&mem->dev.kobj);
+
+	return 0;
 }
 #else
+int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+{
+	return -EINVAL;
+}
 int remove_memory(u64 start, u64 size)
 {
 	return -EINVAL;
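Note (not part of the patch above): after this change, remove_memory() walks the affected memory blocks and offlines them one at a time via offline_memory_block(), while the new offline_pages(start_pfn, nr_pages) wrapper is the entry point reached when userspace offlines a block through the documented /sys/devices/system/memory interface. As a rough illustration of that userspace side only, here is a minimal sketch; the block number (memory32) and error handling are illustrative assumptions, not taken from this commit.

/*
 * Illustrative sketch: ask the kernel to offline one memory block via the
 * documented sysfs ABI. The request eventually lands in offline_pages()
 * in mm/memory_hotplug.c. Block 32 is an arbitrary example; list
 * /sys/devices/system/memory/ to see the blocks on a real system.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/sys/devices/system/memory/memory32/state";
	FILE *f = fopen(path, "w");

	if (!f) {
		fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return 1;
	}
	/* Writing "offline" triggers page isolation/migration for the block. */
	if (fputs("offline", f) == EOF || fclose(f) == EOF) {
		fprintf(stderr, "offline request failed: %s\n", strerror(errno));
		return 1;
	}
	printf("memory32 offlined\n");
	return 0;
}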