path: root/arch/x86
author    Linus Torvalds <torvalds@linux-foundation.org>  2012-01-11 22:12:10 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-01-11 22:12:10 -0500
commit    d0b9706c20ebb4ba181dc26e52ac9a6861abf425 (patch)
tree      436e89246fd5ebcf737cae27e135a1995155329b /arch/x86
parent    02d929502ce7b57f4835d8bb7c828d36e6d9e8ce (diff)
parent    54eed6cb16ec315565aaaf8e34252ca253a68b7b (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/numa: Add constraints check for nid parameters
  mm, x86: Remove debug_pagealloc_enabled
  x86/mm: Initialize high mem before free_all_bootmem()
  arch/x86/kernel/e820.c: quiet sparse noise about plain integer as NULL pointer
  arch/x86/kernel/e820.c: Eliminate bubble sort from sanitize_e820_map()
  x86: Fix mmap random address range
  x86, mm: Unify zone_sizes_init()
  x86, mm: Prepare zone_sizes_init() for unification
  x86, mm: Use max_low_pfn for ZONE_NORMAL on 64-bit
  x86, mm: Wrap ZONE_DMA32 with CONFIG_ZONE_DMA32
  x86, mm: Use max_pfn instead of highend_pfn
  x86, mm: Move zone init from paging_init() on 64-bit
  x86, mm: Use MAX_DMA_PFN for ZONE_DMA on 32-bit
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/init.h  |  2
-rw-r--r--  arch/x86/kernel/e820.c       | 59
-rw-r--r--  arch/x86/mm/init.c           | 23
-rw-r--r--  arch/x86/mm/init_32.c        | 29
-rw-r--r--  arch/x86/mm/init_64.c        | 11
-rw-r--r--  arch/x86/mm/mmap.c           |  4
-rw-r--r--  arch/x86/mm/numa.c           | 10
-rw-r--r--  arch/x86/mm/pageattr.c       |  6
8 files changed, 69 insertions, 75 deletions
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 8dbe353e41e1..adcc0ae73d09 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,6 +5,8 @@
 extern void __init early_ioremap_page_table_range_init(void);
 #endif
 
+extern void __init zone_sizes_init(void);
+
 extern unsigned long __init
 kernel_physical_mapping_init(unsigned long start,
                              unsigned long end,
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 8071e2f3d6eb..174d938d576b 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -19,6 +19,7 @@
 #include <linux/acpi.h>
 #include <linux/firmware-map.h>
 #include <linux/memblock.h>
+#include <linux/sort.h>
 
 #include <asm/e820.h>
 #include <asm/proto.h>
@@ -227,22 +228,38 @@ void __init e820_print_map(char *who)
  * ____________________33__
  * ______________________4_
  */
+struct change_member {
+        struct e820entry *pbios; /* pointer to original bios entry */
+        unsigned long long addr; /* address for this change point */
+};
+
+static int __init cpcompare(const void *a, const void *b)
+{
+        struct change_member * const *app = a, * const *bpp = b;
+        const struct change_member *ap = *app, *bp = *bpp;
+
+        /*
+         * Inputs are pointers to two elements of change_point[]. If their
+         * addresses are unequal, their difference dominates. If the addresses
+         * are equal, then consider one that represents the end of its region
+         * to be greater than one that does not.
+         */
+        if (ap->addr != bp->addr)
+                return ap->addr > bp->addr ? 1 : -1;
+
+        return (ap->addr != ap->pbios->addr) - (bp->addr != bp->pbios->addr);
+}
 
 int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
                              u32 *pnr_map)
 {
-        struct change_member {
-                struct e820entry *pbios; /* pointer to original bios entry */
-                unsigned long long addr; /* address for this change point */
-        };
         static struct change_member change_point_list[2*E820_X_MAX] __initdata;
         static struct change_member *change_point[2*E820_X_MAX] __initdata;
         static struct e820entry *overlap_list[E820_X_MAX] __initdata;
         static struct e820entry new_bios[E820_X_MAX] __initdata;
-        struct change_member *change_tmp;
         unsigned long current_type, last_type;
         unsigned long long last_addr;
-        int chgidx, still_changing;
+        int chgidx;
         int overlap_entries;
         int new_bios_entry;
         int old_nr, new_nr, chg_nr;
@@ -279,35 +296,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
         chg_nr = chgidx;
 
         /* sort change-point list by memory addresses (low -> high) */
-        still_changing = 1;
-        while (still_changing) {
-                still_changing = 0;
-                for (i = 1; i < chg_nr; i++) {
-                        unsigned long long curaddr, lastaddr;
-                        unsigned long long curpbaddr, lastpbaddr;
-
-                        curaddr = change_point[i]->addr;
-                        lastaddr = change_point[i - 1]->addr;
-                        curpbaddr = change_point[i]->pbios->addr;
-                        lastpbaddr = change_point[i - 1]->pbios->addr;
-
-                        /*
-                         * swap entries, when:
-                         *
-                         * curaddr > lastaddr or
-                         * curaddr == lastaddr and curaddr == curpbaddr and
-                         * lastaddr != lastpbaddr
-                         */
-                        if (curaddr < lastaddr ||
-                            (curaddr == lastaddr && curaddr == curpbaddr &&
-                             lastaddr != lastpbaddr)) {
-                                change_tmp = change_point[i];
-                                change_point[i] = change_point[i-1];
-                                change_point[i-1] = change_tmp;
-                                still_changing = 1;
-                        }
-                }
-        }
+        sort(change_point, chg_nr, sizeof *change_point, cpcompare, NULL);
 
         /* create a new bios memory map, removing overlaps */
         overlap_entries = 0;     /* number of entries in the overlap table */
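
The cpcompare() comment above spells out the ordering rule that replaces the bubble sort: change points sort by address, and on an address tie a point marking the end of its region sorts after one marking a start. A minimal userspace sketch of the same rule follows, using the standard qsort() in place of the kernel's sort(); the simplified struct entry stand-in and the sample data are made up for illustration only.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures (illustrative only). */
struct entry { unsigned long long addr, size; };
struct change_member { struct entry *pbios; unsigned long long addr; };

/* Same rule as cpcompare(): order by address; on ties, region ends sort last. */
static int cpcompare(const void *a, const void *b)
{
        struct change_member * const *app = a, * const *bpp = b;
        const struct change_member *ap = *app, *bp = *bpp;

        if (ap->addr != bp->addr)
                return ap->addr > bp->addr ? 1 : -1;

        return (ap->addr != ap->pbios->addr) - (bp->addr != bp->pbios->addr);
}

int main(void)
{
        struct entry e[2] = { { 0x1000, 0x1000 }, { 0x2000, 0x1000 } };
        /* Two change points per entry: region start and region end. */
        struct change_member cm[4] = {
                { &e[0], 0x1000 }, { &e[0], 0x2000 },
                { &e[1], 0x2000 }, { &e[1], 0x3000 },
        };
        struct change_member *cp[4] = { &cm[1], &cm[3], &cm[0], &cm[2] };

        qsort(cp, 4, sizeof *cp, cpcompare);

        /* Prints the points in ascending order, starts before ends at 0x2000. */
        for (int i = 0; i < 4; i++)
                printf("%llx %s\n", cp[i]->addr,
                       cp[i]->addr == cp[i]->pbios->addr ? "start" : "end");
        return 0;
}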
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a298914058f9..6cabf6570d64 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -3,6 +3,7 @@
 #include <linux/ioport.h>
 #include <linux/swap.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>      /* for max_low_pfn */
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
@@ -15,6 +16,7 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 #include <asm/proto.h>
+#include <asm/dma.h>            /* for MAX_DMA_PFN */
 
 unsigned long __initdata pgt_buf_start;
 unsigned long __meminitdata pgt_buf_end;
@@ -392,3 +394,24 @@ void free_initrd_mem(unsigned long start, unsigned long end)
         free_init_pages("initrd memory", start, PAGE_ALIGN(end));
 }
 #endif
+
+void __init zone_sizes_init(void)
+{
+        unsigned long max_zone_pfns[MAX_NR_ZONES];
+
+        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
+#ifdef CONFIG_ZONE_DMA
+        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+#ifdef CONFIG_HIGHMEM
+        max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
+#endif
+
+        free_area_init_nodes(max_zone_pfns);
+}
+
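
For a sense of where the unified zone_sizes_init() places its zone boundaries, here is a small userspace sketch of the arithmetic behind the 16 MiB ISA DMA limit and the 4 GiB DMA32 limit, assuming 4 KiB pages (PAGE_SHIFT = 12) and a 64-bit host; the constants are recomputed here for illustration and are not the kernel's MAX_DMA_PFN/MAX_DMA32_PFN macros themselves.

#include <stdio.h>

#define PAGE_SHIFT 12  /* assumed: 4 KiB pages */

int main(void)
{
        /* ZONE_DMA ends at the 16 MiB ISA DMA limit, ZONE_DMA32 at 4 GiB. */
        unsigned long max_dma_pfn   = (16UL * 1024 * 1024) >> PAGE_SHIFT;
        unsigned long max_dma32_pfn = 1UL << (32 - PAGE_SHIFT);

        printf("ZONE_DMA   up to pfn %lu (%lu MiB)\n",
               max_dma_pfn, (max_dma_pfn << PAGE_SHIFT) >> 20);
        printf("ZONE_DMA32 up to pfn %lu (%lu MiB)\n",
               max_dma32_pfn, (max_dma32_pfn << PAGE_SHIFT) >> 20);
        return 0;
}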
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 0c1da394a634..8663f6c47ccb 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -668,22 +668,6 @@ void __init initmem_init(void)
 }
 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
-static void __init zone_sizes_init(void)
-{
-        unsigned long max_zone_pfns[MAX_NR_ZONES];
-        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
-        max_zone_pfns[ZONE_DMA] =
-                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-#endif
-        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-#ifdef CONFIG_HIGHMEM
-        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
-#endif
-
-        free_area_init_nodes(max_zone_pfns);
-}
-
 void __init setup_bootmem_allocator(void)
 {
         printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
@@ -754,6 +738,17 @@ void __init mem_init(void)
 #ifdef CONFIG_FLATMEM
         BUG_ON(!mem_map);
 #endif
+        /*
+         * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
+         * be done before free_all_bootmem(). Memblock use free low memory for
+         * temporary data (see find_range_array()) and for this purpose can use
+         * pages that was already passed to the buddy allocator, hence marked as
+         * not accessible in the page tables when compiled with
+         * CONFIG_DEBUG_PAGEALLOC. Otherwise order of initialization is not
+         * important here.
+         */
+        set_highmem_pages_init();
+
         /* this will put all low memory onto the freelists */
         totalram_pages += free_all_bootmem();
 
@@ -765,8 +760,6 @@ void __init mem_init(void)
                 if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                         reservedpages++;
 
-        set_highmem_pages_init();
-
         codesize =  (unsigned long) &_etext - (unsigned long) &_text;
         datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
         initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a8a56ce3a962..436a0309db33 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -614,15 +614,6 @@ void __init initmem_init(void)
 
 void __init paging_init(void)
 {
-        unsigned long max_zone_pfns[MAX_NR_ZONES];
-
-        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
-        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
-        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
-        max_zone_pfns[ZONE_NORMAL] = max_pfn;
-
         sparse_memory_present_with_active_regions(MAX_NUMNODES);
         sparse_init();
 
@@ -634,7 +625,7 @@ void __init paging_init(void)
          */
         node_clear_state(0, N_NORMAL_MEMORY);
 
-        free_area_init_nodes(max_zone_pfns);
+        zone_sizes_init();
 }
 
 /*
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 4b5ba85eb5c9..845df6835f9f 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -75,9 +75,9 @@ static unsigned long mmap_rnd(void)
          */
         if (current->flags & PF_RANDOMIZE) {
                 if (mmap_is_ia32())
-                        rnd = (long)get_random_int() % (1<<8);
+                        rnd = get_random_int() % (1<<8);
                 else
-                        rnd = (long)(get_random_int() % (1<<28));
+                        rnd = get_random_int() % (1<<28);
         }
         return rnd << PAGE_SHIFT;
 }
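
To put the two moduli in perspective, here is a small userspace sketch of the resulting mmap randomization spans once the page offset is shifted by PAGE_SHIFT, assuming 4 KiB pages and a 64-bit host; the figures are illustrative only.

#include <stdio.h>

#define PAGE_SHIFT 12  /* assumed: 4 KiB pages */

int main(void)
{
        /* ia32 tasks get 8 bits of page offset, 64-bit tasks get 28 bits. */
        unsigned long span_ia32 = (1UL << 8)  << PAGE_SHIFT;
        unsigned long span_64   = (1UL << 28) << PAGE_SHIFT;

        printf("ia32 mmap randomization span:   %lu KiB\n", span_ia32 >> 10);
        printf("64-bit mmap randomization span: %lu GiB\n", span_64 >> 30);
        return 0;
}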
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 496f494593bf..020cd2e80873 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -422,8 +422,9 @@ static int __init numa_alloc_distance(void)
  * calls are ignored until the distance table is reset with
  * numa_reset_distance().
  *
- * If @from or @to is higher than the highest known node at the time of
- * table creation or @distance doesn't make sense, the call is ignored.
+ * If @from or @to is higher than the highest known node or lower than zero
+ * at the time of table creation or @distance doesn't make sense, the call
+ * is ignored.
  * This is to allow simplification of specific NUMA config implementations.
  */
 void __init numa_set_distance(int from, int to, int distance)
@@ -431,8 +432,9 @@ void __init numa_set_distance(int from, int to, int distance)
         if (!numa_distance && numa_alloc_distance() < 0)
                 return;
 
-        if (from >= numa_distance_cnt || to >= numa_distance_cnt) {
-                printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n",
+        if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
+                        from < 0 || to < 0) {
+                pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
                         from, to, distance);
                 return;
         }
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index eda2acbb6e81..e1ebde315210 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1334,12 +1334,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
         }
 
         /*
-         * If page allocator is not up yet then do not call c_p_a():
-         */
-        if (!debug_pagealloc_enabled)
-                return;
-
-        /*
          * The return value is ignored as the calls cannot fail.
          * Large pages for identity mappings are not used at boot time
          * and hence no memory allocations during large page split.