Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--  arch/x86/mm/init_32.c | 86
1 file changed, 57 insertions(+), 29 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index d71be0eb0130..65d55056b6e7 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -225,13 +225,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	update_page_count(PG_LEVEL_4K, pages_4k);
 }
 
-static inline int page_kills_ppro(unsigned long pagenr)
-{
-	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
-		return 1;
-	return 0;
-}
-
 /*
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
@@ -292,29 +285,60 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 	pkmap_page_table = pte;
 }
 
-void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
+static void __init add_one_highpage_init(struct page *page, int pfn)
 {
-	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
-		ClearPageReserved(page);
-		init_page_count(page);
-		__free_page(page);
-		totalhigh_pages++;
-	} else
-		SetPageReserved(page);
+	ClearPageReserved(page);
+	init_page_count(page);
+	__free_page(page);
+	totalhigh_pages++;
 }
 
-#ifndef CONFIG_NUMA
-static void __init set_highmem_pages_init(int bad_ppro)
+struct add_highpages_data {
+	unsigned long start_pfn;
+	unsigned long end_pfn;
+};
+
+static void __init add_highpages_work_fn(unsigned long start_pfn,
+					 unsigned long end_pfn, void *datax)
 {
-	int pfn;
+	int node_pfn;
+	struct page *page;
+	unsigned long final_start_pfn, final_end_pfn;
+	struct add_highpages_data *data;
 
-	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
-		/*
-		 * Holes under sparsemem might not have no mem_map[]:
-		 */
-		if (pfn_valid(pfn))
-			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+	data = (struct add_highpages_data *)datax;
+
+	final_start_pfn = max(start_pfn, data->start_pfn);
+	final_end_pfn = min(end_pfn, data->end_pfn);
+	if (final_start_pfn >= final_end_pfn)
+		return;
+
+	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
+	     node_pfn++) {
+		if (!pfn_valid(node_pfn))
+			continue;
+		page = pfn_to_page(node_pfn);
+		add_one_highpage_init(page, node_pfn);
 	}
+
+}
+
+void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
+					      unsigned long end_pfn)
+{
+	struct add_highpages_data data;
+
+	data.start_pfn = start_pfn;
+	data.end_pfn = end_pfn;
+
+	work_with_active_regions(nid, add_highpages_work_fn, &data);
+}
+
+#ifndef CONFIG_NUMA
+static void __init set_highmem_pages_init(void)
+{
+	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
+
 	totalram_pages += totalhigh_pages;
 }
 #endif /* !CONFIG_NUMA */
@@ -322,7 +346,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
 #else
 # define kmap_init()			do { } while (0)
 # define permanent_kmaps_init(pgd_base)	do { } while (0)
-# define set_highmem_pages_init(bad_ppro)	do { } while (0)
+# define set_highmem_pages_init()	do { } while (0)
 #endif /* CONFIG_HIGHMEM */
 
 pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
@@ -569,13 +593,11 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 void __init mem_init(void)
 {
 	int codesize, reservedpages, datasize, initsize;
-	int tmp, bad_ppro;
+	int tmp;
 
 #ifdef CONFIG_FLATMEM
 	BUG_ON(!mem_map);
 #endif
-	bad_ppro = ppro_with_ram_bug();
-
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
 
@@ -587,7 +609,7 @@ void __init mem_init(void)
 	if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
 		reservedpages++;
 
-	set_highmem_pages_init(bad_ppro);
+	set_highmem_pages_init();
 
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
 	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
@@ -776,3 +798,9 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	free_init_pages("initrd memory", start, end);
 }
 #endif
+
+int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
+				   int flags)
+{
+	return reserve_bootmem(phys, len, flags);
+}
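
For context, the non-NUMA set_highmem_pages_init() in this patch hands the whole highmem PFN range to the new add_highpages_with_active_regions() helper, which in turn walks only the registered active regions via work_with_active_regions(). A NUMA-side caller would be expected to do the same once per node. The following is only a rough sketch of that usage, not part of this diff; the per-node zone lookup (NODE_DATA()/ZONE_HIGHMEM) is an assumption about how a caller might derive each node's highmem range.

/*
 * Illustrative sketch only (not in this diff): a NUMA-aware
 * set_highmem_pages_init() could derive each node's highmem PFN range
 * and pass it to the new helper, letting the active-region walk skip
 * holes instead of open-coding a pfn loop per node.
 */
static void __init set_highmem_pages_init(void)
{
	int nid;

	for_each_online_node(nid) {
		struct zone *z = &NODE_DATA(nid)->node_zones[ZONE_HIGHMEM];
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!z->spanned_pages)	/* node has no highmem */
			continue;

		zone_start_pfn = z->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + z->spanned_pages;

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}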