author	Yasunori Goto <y-goto@jp.fujitsu.com>	2008-04-28 05:13:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 11:58:25 -0400
commit	04753278769f3b6c3b79a080edb52f21d83bf6e2 (patch)
tree	0dff4088b44016b6d04930b2fc09419412821aa2 /mm/memory_hotplug.c
parent	7f2e9525ba55b1c42ad6c4a5a59d7eb7bdd9be72 (diff)
memory hotplug: register section/node id to free
This patch set frees pages which are allocated by bootmem for memory
hot-remove. Some memory management structures, e.g. the memmap, are
allocated by bootmem. To remove memory physically, some of them must be
freed according to circumstance. This patch set lays the basis for
freeing those pages, and frees the memmaps.
My basic idea is to use the remaining members of struct page to remember
information about the user of each page allocated by bootmem (its section
number or node id). When a section is being removed, the kernel can
consult this information (see the toy model sketched below). Several
issues can be solved this way:
1) When the memmap of the section being removed was allocated on another
   section by bootmem, it should/can be freed.
2) When the memmap of the section being removed was allocated on the
   same section, it shouldn't be freed, because the section must already
   have been logically offlined and all of its pages isolated from the
   page allocator. If the memmap were freed, the page allocator might
   hand out pages that will soon be removed physically.
3) When the section being removed holds another section's memmap, the
   kernel will be able to easily show the user which section should be
   removed before it. (Not implemented yet.)
4) In case 2) above, page isolation will be able to detect and skip the
   memmap's pages during logical memory offline (offline_pages()).
   The current page isolation code fails in this case because the pages
   are merely reserved and it cannot distinguish whether they can be
   removed or not. This patch makes that check possible.
   (Not implemented yet.)
5) The node information, such as the pgdat, has similar issues, which
   can also be solved this way. (Not implemented yet, but the node id
   is remembered in the pages.)
Fortunately, the current bootmem allocator just keeps the PageReserved
flag and doesn't use any other members of struct page; the users of
bootmem don't use them either.
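To make the bookkeeping concrete, here is a minimal userspace model of
the idea. All toy_* names and magic values are made up for illustration
and are not the kernel's; only the logic mirrors the patch:

/* Toy userspace model of the bootmem-tagging idea above.
 * Names and values are illustrative; only the logic mirrors the patch.
 */
#include <assert.h>
#include <stdio.h>

#define TOY_SECTION_INFO	(-2)	/* made-up magics: any value below -1 */
#define TOY_NODE_INFO		(-3)	/* never collides with a real mapcount */

struct toy_page {
	int _mapcount;		/* unused on bootmem pages -> holds the magic */
	unsigned long private;	/* unused too -> holds section nr or node id */
	int _count;		/* reference count; 1 for a reserved page */
};

/* mirrors get_page_bootmem(): tag the page and take a reference */
static void toy_get_page_bootmem(unsigned long info, struct toy_page *page,
				 int magic)
{
	page->_mapcount = magic;
	page->private = info;
	page->_count++;
}

/* mirrors put_page_bootmem(): drop a reference; once only the original
 * "reserved" reference remains, the page could go back to the buddy
 * allocator (modeled by a printf here) */
static void toy_put_page_bootmem(struct toy_page *page)
{
	assert(page->_mapcount < -1);	/* must carry a bootmem magic */

	if (--page->_count == 1) {
		printf("freeing page of section/node %lu\n", page->private);
		page->private = 0;
		page->_mapcount = -1;	/* restore the default mapcount */
	}
}

int main(void)
{
	struct toy_page page = { ._mapcount = -1, ._count = 1 };

	/* register: say, a memmap page belonging to section 42 */
	toy_get_page_bootmem(42, &page, TOY_SECTION_INFO);

	/* hot-remove later drops the registration -> page is freed */
	toy_put_page_bootmem(&page);
	return 0;
}

The convention mirrored here is that an unused page's _mapcount is -1,
so any value below -1 is a safe type tag (hence the BUG_ON(magic >= -1)
in the patch), and the page is handed back only when its count falls to
the single reserved reference.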
This patch:
This patch registers the node or section id in the pages allocated by
bootmem, so the kernel can distinguish which node/section uses them.
This is the basis for hot-removing sections or nodes.
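For orientation, one would expect node initialization code to invoke the
new hook once per node after its bootmem allocations exist; a
hypothetical call site (the function name is made up, and this is not
part of the patch):

/*
 * Hypothetical caller, not part of this patch: once a node's pgdat,
 * memmap and usemaps have been allocated from bootmem, record which
 * node/section owns those pages so a later hot-remove can decide
 * what may be freed.
 */
static void example_register_node_bootmem_info(int nid)
{
	register_page_bootmem_info_node(NODE_DATA(nid));
}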
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--	mm/memory_hotplug.c	99
1 file changed, 98 insertions(+), 1 deletion(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c8b3ca79de2d..cba36ef0d506 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -58,8 +58,105 @@ static void release_memory_resource(struct resource *res)
 	return;
 }
 
-
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+static void get_page_bootmem(unsigned long info, struct page *page, int magic)
+{
+	atomic_set(&page->_mapcount, magic);
+	SetPagePrivate(page);
+	set_page_private(page, info);
+	atomic_inc(&page->_count);
+}
+
+void put_page_bootmem(struct page *page)
+{
+	int magic;
+
+	magic = atomic_read(&page->_mapcount);
+	BUG_ON(magic >= -1);
+
+	if (atomic_dec_return(&page->_count) == 1) {
+		ClearPagePrivate(page);
+		set_page_private(page, 0);
+		reset_page_mapcount(page);
+		__free_pages_bootmem(page, 0);
+	}
+
+}
+
+void register_page_bootmem_info_section(unsigned long start_pfn)
+{
+	unsigned long *usemap, mapsize, section_nr, i;
+	struct mem_section *ms;
+	struct page *page, *memmap;
+
+	if (!pfn_valid(start_pfn))
+		return;
+
+	section_nr = pfn_to_section_nr(start_pfn);
+	ms = __nr_to_section(section_nr);
+
+	/* Get section's memmap address */
+	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+
+	/*
+	 * Get page for the memmap's phys address
+	 * XXX: need more consideration for sparse_vmemmap...
+	 */
+	page = virt_to_page(memmap);
+	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
+	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
+
+	/* remember memmap's page */
+	for (i = 0; i < mapsize; i++, page++)
+		get_page_bootmem(section_nr, page, SECTION_INFO);
+
+	usemap = __nr_to_section(section_nr)->pageblock_flags;
+	page = virt_to_page(usemap);
+
+	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
+
+	for (i = 0; i < mapsize; i++, page++)
+		get_page_bootmem(section_nr, page, MIX_INFO);
+
+}
+
+void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+	unsigned long i, pfn, end_pfn, nr_pages;
+	int node = pgdat->node_id;
+	struct page *page;
+	struct zone *zone;
+
+	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
+	page = virt_to_page(pgdat);
+
+	for (i = 0; i < nr_pages; i++, page++)
+		get_page_bootmem(node, page, NODE_INFO);
+
+	zone = &pgdat->node_zones[0];
+	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
+		if (zone->wait_table) {
+			nr_pages = zone->wait_table_hash_nr_entries
+				* sizeof(wait_queue_head_t);
+			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
+			page = virt_to_page(zone->wait_table);
+
+			for (i = 0; i < nr_pages; i++, page++)
+				get_page_bootmem(node, page, NODE_INFO);
+		}
+	}
+
+	pfn = pgdat->node_start_pfn;
+	end_pfn = pfn + pgdat->node_spanned_pages;
+
+	/* register_section info */
+	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
+		register_page_bootmem_info_section(pfn);
+
+}
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+
 static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;