Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/mm/init_32.c	36
-rw-r--r--	arch/x86/mm/init_64.c	9
2 files changed, 3 insertions, 42 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4a4761892951..de236e419cb5 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -287,47 +287,17 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 	pkmap_page_table = pte;
 }
 
-static void __meminit free_new_highpage(struct page *page)
-{
-	init_page_count(page);
-	__free_page(page);
-	totalhigh_pages++;
-}
-
 void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 {
 	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
 		ClearPageReserved(page);
-		free_new_highpage(page);
+		init_page_count(page);
+		__free_page(page);
+		totalhigh_pages++;
 	} else
 		SetPageReserved(page);
 }
 
-static int __meminit
-add_one_highpage_hotplug(struct page *page, unsigned long pfn)
-{
-	free_new_highpage(page);
-	totalram_pages++;
-#ifdef CONFIG_FLATMEM
-	max_mapnr = max(pfn, max_mapnr);
-#endif
-	num_physpages++;
-
-	return 0;
-}
-
-/*
- * Not currently handling the NUMA case.
- * Assuming single node and all memory that
- * has been added dynamically that would be
- * onlined here is in HIGHMEM.
- */
-void __meminit online_page(struct page *page)
-{
-	ClearPageReserved(page);
-	add_one_highpage_hotplug(page, page_to_pfn(page));
-}
-
 #ifndef CONFIG_NUMA
 static void __init set_highmem_pages_init(int bad_ppro)
 {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5fbb8652cf59..32ba13b0f818 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -620,15 +620,6 @@ void __init paging_init(void)
 /*
  * Memory hotplug specific functions
  */
-void online_page(struct page *page)
-{
-	ClearPageReserved(page);
-	init_page_count(page);
-	__free_page(page);
-	totalram_pages++;
-	num_physpages++;
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * Memory is added always to NORMAL zone. This means you will never get