Diffstat (limited to 'mm/memory_hotplug.c')

 -rw-r--r--  mm/memory_hotplug.c | 184
 1 file changed, 183 insertions(+), 1 deletion(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0fb330271271..c4ba85c8cb00 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -58,8 +58,105 @@ static void release_memory_resource(struct resource *res)
 	return;
 }
 
-
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+static void get_page_bootmem(unsigned long info, struct page *page, int magic)
+{
+	atomic_set(&page->_mapcount, magic);
+	SetPagePrivate(page);
+	set_page_private(page, info);
+	atomic_inc(&page->_count);
+}
+
+void put_page_bootmem(struct page *page)
+{
+	int magic;
+
+	magic = atomic_read(&page->_mapcount);
+	BUG_ON(magic >= -1);
+
+	if (atomic_dec_return(&page->_count) == 1) {
+		ClearPagePrivate(page);
+		set_page_private(page, 0);
+		reset_page_mapcount(page);
+		__free_pages_bootmem(page, 0);
+	}
+
+}
+
+void register_page_bootmem_info_section(unsigned long start_pfn)
+{
+	unsigned long *usemap, mapsize, section_nr, i;
+	struct mem_section *ms;
+	struct page *page, *memmap;
+
+	if (!pfn_valid(start_pfn))
+		return;
+
+	section_nr = pfn_to_section_nr(start_pfn);
+	ms = __nr_to_section(section_nr);
+
+	/* Get section's memmap address */
+	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+
+	/*
+	 * Get page for the memmap's phys address
+	 * XXX: need more consideration for sparse_vmemmap...
+	 */
+	page = virt_to_page(memmap);
+	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
+	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
+
+	/* remember memmap's page */
+	for (i = 0; i < mapsize; i++, page++)
+		get_page_bootmem(section_nr, page, SECTION_INFO);
+
+	usemap = __nr_to_section(section_nr)->pageblock_flags;
+	page = virt_to_page(usemap);
+
+	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
+
+	for (i = 0; i < mapsize; i++, page++)
+		get_page_bootmem(section_nr, page, MIX_INFO);
+
+}
+
+void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+	unsigned long i, pfn, end_pfn, nr_pages;
+	int node = pgdat->node_id;
+	struct page *page;
+	struct zone *zone;
+
+	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
+	page = virt_to_page(pgdat);
+
+	for (i = 0; i < nr_pages; i++, page++)
+		get_page_bootmem(node, page, NODE_INFO);
+
+	zone = &pgdat->node_zones[0];
+	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
+		if (zone->wait_table) {
+			nr_pages = zone->wait_table_hash_nr_entries
+				* sizeof(wait_queue_head_t);
+			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
+			page = virt_to_page(zone->wait_table);
+
+			for (i = 0; i < nr_pages; i++, page++)
+				get_page_bootmem(node, page, NODE_INFO);
+		}
+	}
+
+	pfn = pgdat->node_start_pfn;
+	end_pfn = pfn + pgdat->node_spanned_pages;
+
+	/* register_section info */
+	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
+		register_page_bootmem_info_section(pfn);
+
+}
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+
 static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
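
The bootmem registration above only records state; nothing in this hunk calls it. As a rough usage sketch (the hook name register_bootmem_info() and its placement in arch init code are assumptions here, not part of this diff), an architecture would walk its online nodes after early memory setup and register each node's pgdat, usemap and memmap pages, so a later hot-remove can release them via put_page_bootmem():

/*
 * Hypothetical arch-side hook -- not part of this diff.  Record which
 * bootmem pages back each online node's pgdat, usemap and memmap so
 * they can be freed again when the memory is hot-removed.
 */
#include <linux/init.h>
#include <linux/memory_hotplug.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>

static void __init register_bootmem_info(void)
{
#ifndef CONFIG_SPARSEMEM_VMEMMAP
	int nid;

	for_each_online_node(nid)
		register_page_bootmem_info_node(NODE_DATA(nid));
#endif
}
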
@@ -101,6 +198,36 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 	return register_new_memory(__pfn_to_section(phys_start_pfn));
 }
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+	/*
+	 * XXX: Freeing memmap with vmemmap is not implemented yet.
+	 * This should be removed later.
+	 */
+	return -EBUSY;
+}
+#else
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+	unsigned long flags;
+	struct pglist_data *pgdat = zone->zone_pgdat;
+	int ret = -EINVAL;
+
+	if (!valid_section(ms))
+		return ret;
+
+	ret = unregister_memory_section(ms);
+	if (ret)
+		return ret;
+
+	pgdat_resize_lock(pgdat, &flags);
+	sparse_remove_one_section(zone, ms);
+	pgdat_resize_unlock(pgdat, &flags);
+	return 0;
+}
+#endif
+
 /*
  * Reasonably generic function for adding memory. It is
  * expected that archs that support memory hotplug will
@@ -134,6 +261,42 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 }
 EXPORT_SYMBOL_GPL(__add_pages);
 
+/**
+ * __remove_pages() - remove sections of pages from a zone
+ * @zone: zone from which pages need to be removed
+ * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
+ * @nr_pages: number of pages to remove (must be multiple of section size)
+ *
+ * Generic helper function to remove section mappings and sysfs entries
+ * for the section of the memory we are removing. Caller needs to make
+ * sure that pages are marked reserved and zones are adjusted properly by
+ * calling offline_pages().
+ */
+int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
+		 unsigned long nr_pages)
+{
+	unsigned long i, ret = 0;
+	int sections_to_remove;
+
+	/*
+	 * We can only remove entire sections
+	 */
+	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
+	BUG_ON(nr_pages % PAGES_PER_SECTION);
+
+	release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);
+
+	sections_to_remove = nr_pages / PAGES_PER_SECTION;
+	for (i = 0; i < sections_to_remove; i++) {
+		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+		ret = __remove_section(zone, __pfn_to_section(pfn));
+		if (ret)
+			break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__remove_pages);
+
 static void grow_zone_span(struct zone *zone,
 		unsigned long start_pfn, unsigned long end_pfn)
 {
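
A hedged usage sketch for the new removal helper (the wrapper name and the single-zone assumption below are not part of this diff): an architecture's hot-remove path offlines the range first via offline_pages(), then hands the section-aligned pfn range to __remove_pages(). Both the start address and the size must be whole sections, e.g. multiples of 128MB with the default x86_64 section size.

/*
 * Hypothetical arch-level removal path -- illustrative only.  The range
 * must already be offlined and must be section aligned, per the
 * __remove_pages() kernel-doc above.
 */
#include <linux/memory_hotplug.h>
#include <linux/mm.h>

int remove_memory_range(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	/* Assumes the whole range lives in a single zone. */
	zone = page_zone(pfn_to_page(start_pfn));

	return __remove_pages(zone, start_pfn, nr_pages);
}
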
@@ -164,6 +327,25 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
 					pgdat->node_start_pfn;
 }
 
+void online_page(struct page *page)
+{
+	totalram_pages++;
+	num_physpages++;
+
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page))
+		totalhigh_pages++;
+#endif
+
+#ifdef CONFIG_FLATMEM
+	max_mapnr = max(page_to_pfn(page), max_mapnr);
+#endif
+
+	ClearPageReserved(page);
+	init_page_count(page);
+	__free_page(page);
+}
+
 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 			void *arg)
 {
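
The diff is truncated here inside online_pages_range(), whose body is not shown. Purely as an illustration of how online_page() is meant to be driven (this sketch is not the real online_pages_range()), a pfn walker would un-reserve each hot-added page and hand it back to the page allocator:

/*
 * Illustrative pfn walker -- not taken from this diff.  Every page in
 * the hot-added range is un-reserved and released to the page
 * allocator through online_page().
 */
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

static unsigned long online_range(unsigned long start_pfn,
				  unsigned long nr_pages)
{
	unsigned long i, onlined = 0;

	for (i = 0; i < nr_pages; i++) {
		online_page(pfn_to_page(start_pfn + i));
		onlined++;
	}
	return onlined;
}
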