author     Linus Torvalds <torvalds@linux-foundation.org>   2017-05-01 23:51:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-01 23:51:12 -0400
commit     16b76293c5c81e6345323d7aef41b26e8390f62d (patch)
tree       f2bfdff7e795865c3254cb0ad97ebc66a7c5d212
parent     3dee9fb2a4ced89a13a4d4b72b0b7360b701e566 (diff)
parent     da63b6b20077469bd6bd96e07991ce145fc4fbc4 (diff)
Merge branch 'x86-boot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 boot updates from Ingo Molnar:
"The biggest changes in this cycle were:
- reworking of the e820 code: separate in-kernel and boot-ABI data
structures and apply a whole range of cleanups to the kernel side.
No change in functionality.
- enable KASLR by default: it's used by all major distros and it's
out of the experimental stage as well.
- ... misc fixes and cleanups"
* 'x86-boot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
x86/KASLR: Fix kexec kernel boot crash when KASLR randomization fails
x86/reboot: Turn off KVM when halting a CPU
x86/boot: Fix BSS corruption/overwrite bug in early x86 kernel startup
x86: Enable KASLR by default
boot/param: Move next_arg() function to lib/cmdline.c for later reuse
x86/boot: Fix Sparse warning by including required header file
x86/boot/64: Rename start_cpu()
x86/xen: Update e820 table handling to the new core x86 E820 code
x86/boot: Fix pr_debug() API braindamage
xen, x86/headers: Add <linux/device.h> dependency to <asm/xen/page.h>
x86/boot/e820: Simplify e820__update_table()
x86/boot/e820: Separate the E820 ABI structures from the in-kernel structures
x86/boot/e820: Fix and clean up e820_type switch() statements
x86/boot/e820: Rename the remaining E820 APIs to the e820__*() prefix
x86/boot/e820: Remove unnecessary #include's
x86/boot/e820: Rename e820_mark_nosave_regions() to e820__register_nosave_regions()
x86/boot/e820: Rename e820_reserve_resources*() to e820__reserve_resources*()
x86/boot/e820: Use bool in query APIs
x86/boot/e820: Document e820__reserve_setup_data()
x86/boot/e820: Clean up __e820__update_table() et al
...
75 files changed, 1127 insertions, 944 deletions
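The core of this rework, visible throughout the diff below, is the split between the boot-protocol ABI entry (struct boot_e820_entry, stored in the zeropage as boot_params.e820_table[]) and the in-kernel representation (struct e820_entry inside struct e820_table), together with the e820__*() naming of the kernel-side API. As rough orientation, a minimal sketch of how the two layers relate; the helper name below is hypothetical and not part of this merge, only e820__range_add() and the structure names come from the diff:

static void __init copy_boot_e820_sketch(const struct boot_params *bp)
{
	int i;

	for (i = 0; i < bp->e820_entries; i++) {
		const struct boot_e820_entry *be = &bp->e820_table[i];

		/* Append the firmware-provided range to the in-kernel e820_table: */
		e820__range_add(be->addr, be->size, (enum e820_type)be->type);
	}
}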
diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt index b8527c6b7646..97b7adbceda4 100644 --- a/Documentation/x86/zero-page.txt +++ b/Documentation/x86/zero-page.txt | |||
@@ -27,7 +27,7 @@ Offset Proto Name Meaning | |||
27 | 1C0/020 ALL efi_info EFI 32 information (struct efi_info) | 27 | 1C0/020 ALL efi_info EFI 32 information (struct efi_info) |
28 | 1E0/004 ALL alt_mem_k Alternative mem check, in KB | 28 | 1E0/004 ALL alt_mem_k Alternative mem check, in KB |
29 | 1E4/004 ALL scratch Scratch field for the kernel setup code | 29 | 1E4/004 ALL scratch Scratch field for the kernel setup code |
30 | 1E8/001 ALL e820_entries Number of entries in e820_map (below) | 30 | 1E8/001 ALL e820_entries Number of entries in e820_table (below) |
31 | 1E9/001 ALL eddbuf_entries Number of entries in eddbuf (below) | 31 | 1E9/001 ALL eddbuf_entries Number of entries in eddbuf (below) |
32 | 1EA/001 ALL edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer | 32 | 1EA/001 ALL edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer |
33 | (below) | 33 | (below) |
@@ -35,6 +35,6 @@ Offset Proto Name Meaning | |||
35 | 1EC/001 ALL secure_boot Secure boot is enabled in the firmware | 35 | 1EC/001 ALL secure_boot Secure boot is enabled in the firmware |
36 | 1EF/001 ALL sentinel Used to detect broken bootloaders | 36 | 1EF/001 ALL sentinel Used to detect broken bootloaders |
37 | 290/040 ALL edd_mbr_sig_buffer EDD MBR signatures | 37 | 290/040 ALL edd_mbr_sig_buffer EDD MBR signatures |
38 | 2D0/A00 ALL e820_map E820 memory map table | 38 | 2D0/A00 ALL e820_table E820 memory map table |
39 | (array of struct e820entry) | 39 | (array of struct e820_entry) |
40 | D00/1EC ALL eddbuf EDD data (array of struct edd_info) | 40 | D00/1EC ALL eddbuf EDD data (array of struct edd_info) |
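The e820_table row above ("2D0/A00") is consistent with the entry layout introduced later in this diff: each boot-protocol entry packs to 20 bytes, and the zeropage holds 128 of them. A small stand-alone check, using a local mirror of the structure (the real definition lives in arch/x86/include/uapi/asm/bootparam.h further down):

#include <stdint.h>

struct boot_e820_entry {		/* local mirror, for the size check only */
	uint64_t addr;
	uint64_t size;
	uint32_t type;
} __attribute__((packed));

_Static_assert(sizeof(struct boot_e820_entry) == 20, "20-byte packed ABI entry");
_Static_assert(128 * sizeof(struct boot_e820_entry) == 0xA00, "matches the 2D0/A00 row above");
_Static_assert(0x2D0 + 0xA00 == 0xCD0, "e820_table ends where the 48-byte pad at 0xcd0 begins");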
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2a00902e657a..a05571937ad3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -1973,7 +1973,7 @@ config RELOCATABLE | |||
1973 | config RANDOMIZE_BASE | 1973 | config RANDOMIZE_BASE |
1974 | bool "Randomize the address of the kernel image (KASLR)" | 1974 | bool "Randomize the address of the kernel image (KASLR)" |
1975 | depends on RELOCATABLE | 1975 | depends on RELOCATABLE |
1976 | default n | 1976 | default y |
1977 | ---help--- | 1977 | ---help--- |
1978 | In support of Kernel Address Space Layout Randomization (KASLR), | 1978 | In support of Kernel Address Space Layout Randomization (KASLR), |
1979 | this randomizes the physical address at which the kernel image | 1979 | this randomizes the physical address at which the kernel image |
@@ -2003,7 +2003,7 @@ config RANDOMIZE_BASE | |||
2003 | theoretically possible, but the implementations are further | 2003 | theoretically possible, but the implementations are further |
2004 | limited due to memory layouts. | 2004 | limited due to memory layouts. |
2005 | 2005 | ||
2006 | If unsure, say N. | 2006 | If unsure, say Y. |
2007 | 2007 | ||
2008 | # Relocation on x86 needs some additional build support | 2008 | # Relocation on x86 needs some additional build support |
2009 | config X86_NEED_RELOCS | 2009 | config X86_NEED_RELOCS |
@@ -2052,7 +2052,7 @@ config RANDOMIZE_MEMORY | |||
2052 | configuration have in average 30,000 different possible virtual | 2052 | configuration have in average 30,000 different possible virtual |
2053 | addresses for each memory section. | 2053 | addresses for each memory section. |
2054 | 2054 | ||
2055 | If unsure, say N. | 2055 | If unsure, say Y. |
2056 | 2056 | ||
2057 | config RANDOMIZE_MEMORY_PHYSICAL_PADDING | 2057 | config RANDOMIZE_MEMORY_PHYSICAL_PADDING |
2058 | hex "Physical memory mapping padding" if EXPERT | 2058 | hex "Physical memory mapping padding" if EXPERT |
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index 9b42b6d1e902..ef5a9cc66fb8 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h | |||
@@ -16,7 +16,7 @@ | |||
16 | #ifndef BOOT_BOOT_H | 16 | #ifndef BOOT_BOOT_H |
17 | #define BOOT_BOOT_H | 17 | #define BOOT_BOOT_H |
18 | 18 | ||
19 | #define STACK_SIZE 512 /* Minimum number of bytes for stack */ | 19 | #define STACK_SIZE 1024 /* Minimum number of bytes for stack */ |
20 | 20 | ||
21 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
22 | 22 | ||
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 801c7a158e55..cbf4b87f55b9 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -9,7 +9,9 @@ | |||
9 | 9 | ||
10 | #include <linux/efi.h> | 10 | #include <linux/efi.h> |
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | |||
12 | #include <asm/efi.h> | 13 | #include <asm/efi.h> |
14 | #include <asm/e820/types.h> | ||
13 | #include <asm/setup.h> | 15 | #include <asm/setup.h> |
14 | #include <asm/desc.h> | 16 | #include <asm/desc.h> |
15 | 17 | ||
@@ -729,7 +731,7 @@ static void add_e820ext(struct boot_params *params, | |||
729 | unsigned long size; | 731 | unsigned long size; |
730 | 732 | ||
731 | e820ext->type = SETUP_E820_EXT; | 733 | e820ext->type = SETUP_E820_EXT; |
732 | e820ext->len = nr_entries * sizeof(struct e820entry); | 734 | e820ext->len = nr_entries * sizeof(struct boot_e820_entry); |
733 | e820ext->next = 0; | 735 | e820ext->next = 0; |
734 | 736 | ||
735 | data = (struct setup_data *)(unsigned long)params->hdr.setup_data; | 737 | data = (struct setup_data *)(unsigned long)params->hdr.setup_data; |
@@ -746,9 +748,9 @@ static void add_e820ext(struct boot_params *params, | |||
746 | static efi_status_t setup_e820(struct boot_params *params, | 748 | static efi_status_t setup_e820(struct boot_params *params, |
747 | struct setup_data *e820ext, u32 e820ext_size) | 749 | struct setup_data *e820ext, u32 e820ext_size) |
748 | { | 750 | { |
749 | struct e820entry *e820_map = ¶ms->e820_map[0]; | 751 | struct boot_e820_entry *entry = params->e820_table; |
750 | struct efi_info *efi = ¶ms->efi_info; | 752 | struct efi_info *efi = ¶ms->efi_info; |
751 | struct e820entry *prev = NULL; | 753 | struct boot_e820_entry *prev = NULL; |
752 | u32 nr_entries; | 754 | u32 nr_entries; |
753 | u32 nr_desc; | 755 | u32 nr_desc; |
754 | int i; | 756 | int i; |
@@ -773,15 +775,15 @@ static efi_status_t setup_e820(struct boot_params *params, | |||
773 | case EFI_MEMORY_MAPPED_IO: | 775 | case EFI_MEMORY_MAPPED_IO: |
774 | case EFI_MEMORY_MAPPED_IO_PORT_SPACE: | 776 | case EFI_MEMORY_MAPPED_IO_PORT_SPACE: |
775 | case EFI_PAL_CODE: | 777 | case EFI_PAL_CODE: |
776 | e820_type = E820_RESERVED; | 778 | e820_type = E820_TYPE_RESERVED; |
777 | break; | 779 | break; |
778 | 780 | ||
779 | case EFI_UNUSABLE_MEMORY: | 781 | case EFI_UNUSABLE_MEMORY: |
780 | e820_type = E820_UNUSABLE; | 782 | e820_type = E820_TYPE_UNUSABLE; |
781 | break; | 783 | break; |
782 | 784 | ||
783 | case EFI_ACPI_RECLAIM_MEMORY: | 785 | case EFI_ACPI_RECLAIM_MEMORY: |
784 | e820_type = E820_ACPI; | 786 | e820_type = E820_TYPE_ACPI; |
785 | break; | 787 | break; |
786 | 788 | ||
787 | case EFI_LOADER_CODE: | 789 | case EFI_LOADER_CODE: |
@@ -789,15 +791,15 @@ static efi_status_t setup_e820(struct boot_params *params, | |||
789 | case EFI_BOOT_SERVICES_CODE: | 791 | case EFI_BOOT_SERVICES_CODE: |
790 | case EFI_BOOT_SERVICES_DATA: | 792 | case EFI_BOOT_SERVICES_DATA: |
791 | case EFI_CONVENTIONAL_MEMORY: | 793 | case EFI_CONVENTIONAL_MEMORY: |
792 | e820_type = E820_RAM; | 794 | e820_type = E820_TYPE_RAM; |
793 | break; | 795 | break; |
794 | 796 | ||
795 | case EFI_ACPI_MEMORY_NVS: | 797 | case EFI_ACPI_MEMORY_NVS: |
796 | e820_type = E820_NVS; | 798 | e820_type = E820_TYPE_NVS; |
797 | break; | 799 | break; |
798 | 800 | ||
799 | case EFI_PERSISTENT_MEMORY: | 801 | case EFI_PERSISTENT_MEMORY: |
800 | e820_type = E820_PMEM; | 802 | e820_type = E820_TYPE_PMEM; |
801 | break; | 803 | break; |
802 | 804 | ||
803 | default: | 805 | default: |
@@ -811,26 +813,26 @@ static efi_status_t setup_e820(struct boot_params *params, | |||
811 | continue; | 813 | continue; |
812 | } | 814 | } |
813 | 815 | ||
814 | if (nr_entries == ARRAY_SIZE(params->e820_map)) { | 816 | if (nr_entries == ARRAY_SIZE(params->e820_table)) { |
815 | u32 need = (nr_desc - i) * sizeof(struct e820entry) + | 817 | u32 need = (nr_desc - i) * sizeof(struct e820_entry) + |
816 | sizeof(struct setup_data); | 818 | sizeof(struct setup_data); |
817 | 819 | ||
818 | if (!e820ext || e820ext_size < need) | 820 | if (!e820ext || e820ext_size < need) |
819 | return EFI_BUFFER_TOO_SMALL; | 821 | return EFI_BUFFER_TOO_SMALL; |
820 | 822 | ||
821 | /* boot_params map full, switch to e820 extended */ | 823 | /* boot_params map full, switch to e820 extended */ |
822 | e820_map = (struct e820entry *)e820ext->data; | 824 | entry = (struct boot_e820_entry *)e820ext->data; |
823 | } | 825 | } |
824 | 826 | ||
825 | e820_map->addr = d->phys_addr; | 827 | entry->addr = d->phys_addr; |
826 | e820_map->size = d->num_pages << PAGE_SHIFT; | 828 | entry->size = d->num_pages << PAGE_SHIFT; |
827 | e820_map->type = e820_type; | 829 | entry->type = e820_type; |
828 | prev = e820_map++; | 830 | prev = entry++; |
829 | nr_entries++; | 831 | nr_entries++; |
830 | } | 832 | } |
831 | 833 | ||
832 | if (nr_entries > ARRAY_SIZE(params->e820_map)) { | 834 | if (nr_entries > ARRAY_SIZE(params->e820_table)) { |
833 | u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_map); | 835 | u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_table); |
834 | 836 | ||
835 | add_e820ext(params, e820ext, nr_e820ext); | 837 | add_e820ext(params, e820ext, nr_e820ext); |
836 | nr_entries -= nr_e820ext; | 838 | nr_entries -= nr_e820ext; |
@@ -848,7 +850,7 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext, | |||
848 | unsigned long size; | 850 | unsigned long size; |
849 | 851 | ||
850 | size = sizeof(struct setup_data) + | 852 | size = sizeof(struct setup_data) + |
851 | sizeof(struct e820entry) * nr_desc; | 853 | sizeof(struct e820_entry) * nr_desc; |
852 | 854 | ||
853 | if (*e820ext) { | 855 | if (*e820ext) { |
854 | efi_call_early(free_pool, *e820ext); | 856 | efi_call_early(free_pool, *e820ext); |
@@ -884,9 +886,9 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, | |||
884 | 886 | ||
885 | if (first) { | 887 | if (first) { |
886 | nr_desc = *map->buff_size / *map->desc_size; | 888 | nr_desc = *map->buff_size / *map->desc_size; |
887 | if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) { | 889 | if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) { |
888 | u32 nr_e820ext = nr_desc - | 890 | u32 nr_e820ext = nr_desc - |
889 | ARRAY_SIZE(p->boot_params->e820_map); | 891 | ARRAY_SIZE(p->boot_params->e820_table); |
890 | 892 | ||
891 | status = alloc_e820ext(nr_e820ext, &p->e820ext, | 893 | status = alloc_e820ext(nr_e820ext, &p->e820ext, |
892 | &p->e820ext_size); | 894 | &p->e820ext_size); |
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index 8b7c9e75edcb..54c24f0a43d3 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c | |||
@@ -426,7 +426,7 @@ static unsigned long slots_fetch_random(void) | |||
426 | return 0; | 426 | return 0; |
427 | } | 427 | } |
428 | 428 | ||
429 | static void process_e820_entry(struct e820entry *entry, | 429 | static void process_e820_entry(struct boot_e820_entry *entry, |
430 | unsigned long minimum, | 430 | unsigned long minimum, |
431 | unsigned long image_size) | 431 | unsigned long image_size) |
432 | { | 432 | { |
@@ -435,7 +435,7 @@ static void process_e820_entry(struct e820entry *entry, | |||
435 | unsigned long start_orig; | 435 | unsigned long start_orig; |
436 | 436 | ||
437 | /* Skip non-RAM entries. */ | 437 | /* Skip non-RAM entries. */ |
438 | if (entry->type != E820_RAM) | 438 | if (entry->type != E820_TYPE_RAM) |
439 | return; | 439 | return; |
440 | 440 | ||
441 | /* On 32-bit, ignore entries entirely above our maximum. */ | 441 | /* On 32-bit, ignore entries entirely above our maximum. */ |
@@ -518,7 +518,7 @@ static unsigned long find_random_phys_addr(unsigned long minimum, | |||
518 | 518 | ||
519 | /* Verify potential e820 positions, appending to slots list. */ | 519 | /* Verify potential e820 positions, appending to slots list. */ |
520 | for (i = 0; i < boot_params->e820_entries; i++) { | 520 | for (i = 0; i < boot_params->e820_entries; i++) { |
521 | process_e820_entry(&boot_params->e820_map[i], minimum, | 521 | process_e820_entry(&boot_params->e820_table[i], minimum, |
522 | image_size); | 522 | image_size); |
523 | if (slot_area_index == MAX_SLOT_AREA) { | 523 | if (slot_area_index == MAX_SLOT_AREA) { |
524 | debug_putstr("Aborted e820 scan (slot_areas full)!\n"); | 524 | debug_putstr("Aborted e820 scan (slot_areas full)!\n"); |
@@ -597,10 +597,17 @@ void choose_random_location(unsigned long input, | |||
597 | add_identity_map(random_addr, output_size); | 597 | add_identity_map(random_addr, output_size); |
598 | *output = random_addr; | 598 | *output = random_addr; |
599 | } | 599 | } |
600 | |||
601 | /* | ||
602 | * This loads the identity mapping page table. | ||
603 | * This should only be done if a new physical address | ||
604 | * is found for the kernel, otherwise we should keep | ||
605 | * the old page table to make it be like the "nokaslr" | ||
606 | * case. | ||
607 | */ | ||
608 | finalize_identity_maps(); | ||
600 | } | 609 | } |
601 | 610 | ||
602 | /* This actually loads the identity pagetable on x86_64. */ | ||
603 | finalize_identity_maps(); | ||
604 | 611 | ||
605 | /* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */ | 612 | /* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */ |
606 | if (IS_ENABLED(CONFIG_X86_64)) | 613 | if (IS_ENABLED(CONFIG_X86_64)) |
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 3dd5be33aaa7..2ed8f0c25def 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <asm/segment.h> | 18 | #include <asm/segment.h> |
19 | #include <generated/utsrelease.h> | 19 | #include <generated/utsrelease.h> |
20 | #include <asm/boot.h> | 20 | #include <asm/boot.h> |
21 | #include <asm/e820.h> | ||
22 | #include <asm/page_types.h> | 21 | #include <asm/page_types.h> |
23 | #include <asm/setup.h> | 22 | #include <asm/setup.h> |
24 | #include <asm/bootparam.h> | 23 | #include <asm/bootparam.h> |
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c index db75d07c3645..d9c28c87e477 100644 --- a/arch/x86/boot/memory.c +++ b/arch/x86/boot/memory.c | |||
@@ -21,8 +21,8 @@ static int detect_memory_e820(void) | |||
21 | { | 21 | { |
22 | int count = 0; | 22 | int count = 0; |
23 | struct biosregs ireg, oreg; | 23 | struct biosregs ireg, oreg; |
24 | struct e820entry *desc = boot_params.e820_map; | 24 | struct boot_e820_entry *desc = boot_params.e820_table; |
25 | static struct e820entry buf; /* static so it is zeroed */ | 25 | static struct boot_e820_entry buf; /* static so it is zeroed */ |
26 | 26 | ||
27 | initregs(&ireg); | 27 | initregs(&ireg); |
28 | ireg.ax = 0xe820; | 28 | ireg.ax = 0xe820; |
@@ -66,7 +66,7 @@ static int detect_memory_e820(void) | |||
66 | 66 | ||
67 | *desc++ = buf; | 67 | *desc++ = buf; |
68 | count++; | 68 | count++; |
69 | } while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_map)); | 69 | } while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_table)); |
70 | 70 | ||
71 | return boot_params.e820_entries = count; | 71 | return boot_params.e820_entries = count; |
72 | } | 72 | } |
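For context, the loop shown (partially) above implements the classic INT 0x15, AX=0xE820 firmware interface. Below is a simplified sketch of that calling convention, assuming the boot code's biosregs/intcall() helpers and the boot_params global; it illustrates the register protocol rather than reproducing the file verbatim:

#define SMAP	0x534d4150			/* ASCII "SMAP" signature */

static int detect_memory_e820_sketch(void)
{
	struct biosregs ireg, oreg;
	static struct boot_e820_entry buf;	/* static so it is zeroed */
	int count = 0;

	initregs(&ireg);
	ireg.ax  = 0xe820;			/* function number */
	ireg.cx  = sizeof(buf);			/* buffer size, at least 20 bytes */
	ireg.edx = SMAP;			/* echoed back in EAX on success */
	ireg.di  = (size_t)&buf;		/* ES:DI -> one entry per call */

	do {
		intcall(0x15, &ireg, &oreg);
		ireg.ebx = oreg.ebx;		/* continuation value for the next call */

		if ((oreg.eflags & X86_EFLAGS_CF) || oreg.eax != SMAP)
			break;			/* error, or E820 not supported */

		boot_params.e820_table[count++] = buf;
	} while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_table));

	return boot_params.e820_entries = count;
}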
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 5fa6ee2c2dde..6cf79e1a6830 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig | |||
@@ -57,6 +57,8 @@ CONFIG_EFI=y | |||
57 | CONFIG_HZ_1000=y | 57 | CONFIG_HZ_1000=y |
58 | CONFIG_KEXEC=y | 58 | CONFIG_KEXEC=y |
59 | CONFIG_CRASH_DUMP=y | 59 | CONFIG_CRASH_DUMP=y |
60 | CONFIG_RANDOMIZE_BASE=y | ||
61 | CONFIG_RANDOMIZE_MEMORY=y | ||
60 | # CONFIG_COMPAT_VDSO is not set | 62 | # CONFIG_COMPAT_VDSO is not set |
61 | CONFIG_HIBERNATION=y | 63 | CONFIG_HIBERNATION=y |
62 | CONFIG_PM_DEBUG=y | 64 | CONFIG_PM_DEBUG=y |
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 6205d3b81e6d..de45f57b410d 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
@@ -55,6 +55,8 @@ CONFIG_EFI=y | |||
55 | CONFIG_HZ_1000=y | 55 | CONFIG_HZ_1000=y |
56 | CONFIG_KEXEC=y | 56 | CONFIG_KEXEC=y |
57 | CONFIG_CRASH_DUMP=y | 57 | CONFIG_CRASH_DUMP=y |
58 | CONFIG_RANDOMIZE_BASE=y | ||
59 | CONFIG_RANDOMIZE_MEMORY=y | ||
58 | # CONFIG_COMPAT_VDSO is not set | 60 | # CONFIG_COMPAT_VDSO is not set |
59 | CONFIG_HIBERNATION=y | 61 | CONFIG_HIBERNATION=y |
60 | CONFIG_PM_DEBUG=y | 62 | CONFIG_PM_DEBUG=y |
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 395b69551fce..2efc768e4362 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h | |||
@@ -52,6 +52,8 @@ extern u8 acpi_sci_flags; | |||
52 | extern int acpi_sci_override_gsi; | 52 | extern int acpi_sci_override_gsi; |
53 | void acpi_pic_sci_set_trigger(unsigned int, u16); | 53 | void acpi_pic_sci_set_trigger(unsigned int, u16); |
54 | 54 | ||
55 | struct device; | ||
56 | |||
55 | extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi, | 57 | extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi, |
56 | int trigger, int polarity); | 58 | int trigger, int polarity); |
57 | extern void (*__acpi_unregister_gsi)(u32 gsi); | 59 | extern void (*__acpi_unregister_gsi)(u32 gsi); |
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h deleted file mode 100644 index 67313f3a9874..000000000000 --- a/arch/x86/include/asm/e820.h +++ /dev/null | |||
@@ -1,73 +0,0 @@ | |||
1 | #ifndef _ASM_X86_E820_H | ||
2 | #define _ASM_X86_E820_H | ||
3 | |||
4 | /* | ||
5 | * E820_X_MAX is the maximum size of the extended E820 table. The extended | ||
6 | * table may contain up to 3 extra E820 entries per possible NUMA node, so we | ||
7 | * make room for 3 * MAX_NUMNODES possible entries, beyond the standard 128. | ||
8 | * Also note that E820_X_MAX *must* be defined before we include uapi/asm/e820.h. | ||
9 | */ | ||
10 | #include <linux/numa.h> | ||
11 | #define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES) | ||
12 | |||
13 | #include <uapi/asm/e820.h> | ||
14 | |||
15 | #ifndef __ASSEMBLY__ | ||
16 | /* see comment in arch/x86/kernel/e820.c */ | ||
17 | extern struct e820map *e820; | ||
18 | extern struct e820map *e820_saved; | ||
19 | |||
20 | extern unsigned long pci_mem_start; | ||
21 | extern int e820_any_mapped(u64 start, u64 end, unsigned type); | ||
22 | extern int e820_all_mapped(u64 start, u64 end, unsigned type); | ||
23 | extern void e820_add_region(u64 start, u64 size, int type); | ||
24 | extern void e820_print_map(char *who); | ||
25 | extern int | ||
26 | sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map); | ||
27 | extern u64 e820_update_range(u64 start, u64 size, unsigned old_type, | ||
28 | unsigned new_type); | ||
29 | extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type, | ||
30 | int checktype); | ||
31 | extern void update_e820(void); | ||
32 | extern void e820_setup_gap(void); | ||
33 | struct setup_data; | ||
34 | extern void parse_e820_ext(u64 phys_addr, u32 data_len); | ||
35 | |||
36 | #if defined(CONFIG_X86_64) || \ | ||
37 | (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION)) | ||
38 | extern void e820_mark_nosave_regions(unsigned long limit_pfn); | ||
39 | #else | ||
40 | static inline void e820_mark_nosave_regions(unsigned long limit_pfn) | ||
41 | { | ||
42 | } | ||
43 | #endif | ||
44 | |||
45 | extern unsigned long e820_end_of_ram_pfn(void); | ||
46 | extern unsigned long e820_end_of_low_ram_pfn(void); | ||
47 | extern u64 early_reserve_e820(u64 sizet, u64 align); | ||
48 | |||
49 | void memblock_x86_fill(void); | ||
50 | void memblock_find_dma_reserve(void); | ||
51 | |||
52 | extern void finish_e820_parsing(void); | ||
53 | extern void e820_reserve_resources(void); | ||
54 | extern void e820_reserve_resources_late(void); | ||
55 | extern void setup_memory_map(void); | ||
56 | extern char *default_machine_specific_memory_setup(void); | ||
57 | |||
58 | extern void e820_reallocate_tables(void); | ||
59 | |||
60 | /* | ||
61 | * Returns true iff the specified range [s,e) is completely contained inside | ||
62 | * the ISA region. | ||
63 | */ | ||
64 | static inline bool is_ISA_range(u64 s, u64 e) | ||
65 | { | ||
66 | return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS; | ||
67 | } | ||
68 | |||
69 | #endif /* __ASSEMBLY__ */ | ||
70 | #include <linux/ioport.h> | ||
71 | |||
72 | #define HIGH_MEMORY (1024*1024) | ||
73 | #endif /* _ASM_X86_E820_H */ | ||
diff --git a/arch/x86/include/asm/e820/api.h b/arch/x86/include/asm/e820/api.h new file mode 100644 index 000000000000..8e0f8b85b209 --- /dev/null +++ b/arch/x86/include/asm/e820/api.h | |||
@@ -0,0 +1,50 @@ | |||
1 | #ifndef _ASM_E820_API_H | ||
2 | #define _ASM_E820_API_H | ||
3 | |||
4 | #include <asm/e820/types.h> | ||
5 | |||
6 | extern struct e820_table *e820_table; | ||
7 | extern struct e820_table *e820_table_firmware; | ||
8 | |||
9 | extern unsigned long pci_mem_start; | ||
10 | |||
11 | extern bool e820__mapped_any(u64 start, u64 end, enum e820_type type); | ||
12 | extern bool e820__mapped_all(u64 start, u64 end, enum e820_type type); | ||
13 | |||
14 | extern void e820__range_add (u64 start, u64 size, enum e820_type type); | ||
15 | extern u64 e820__range_update(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type); | ||
16 | extern u64 e820__range_remove(u64 start, u64 size, enum e820_type old_type, bool check_type); | ||
17 | |||
18 | extern void e820__print_table(char *who); | ||
19 | extern int e820__update_table(struct e820_table *table); | ||
20 | extern void e820__update_table_print(void); | ||
21 | |||
22 | extern unsigned long e820__end_of_ram_pfn(void); | ||
23 | extern unsigned long e820__end_of_low_ram_pfn(void); | ||
24 | |||
25 | extern u64 e820__memblock_alloc_reserved(u64 size, u64 align); | ||
26 | extern void e820__memblock_setup(void); | ||
27 | |||
28 | extern void e820__reserve_setup_data(void); | ||
29 | extern void e820__finish_early_params(void); | ||
30 | extern void e820__reserve_resources(void); | ||
31 | extern void e820__reserve_resources_late(void); | ||
32 | |||
33 | extern void e820__memory_setup(void); | ||
34 | extern void e820__memory_setup_extended(u64 phys_addr, u32 data_len); | ||
35 | extern char *e820__memory_setup_default(void); | ||
36 | extern void e820__setup_pci_gap(void); | ||
37 | |||
38 | extern void e820__reallocate_tables(void); | ||
39 | extern void e820__register_nosave_regions(unsigned long limit_pfn); | ||
40 | |||
41 | /* | ||
42 | * Returns true iff the specified range [start,end) is completely contained inside | ||
43 | * the ISA region. | ||
44 | */ | ||
45 | static inline bool is_ISA_range(u64 start, u64 end) | ||
46 | { | ||
47 | return start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS; | ||
48 | } | ||
49 | |||
50 | #endif /* _ASM_E820_API_H */ | ||
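To illustrate how the renamed API above is meant to be used, here is a minimal sketch following the pattern of the call sites converted later in this diff (aperture_64.c, mtrr/cleanup.c, acpi/boot.c); the function name and the address range are made up for illustration:

static void __init reserve_quirk_region_sketch(void)
{
	u64 base = 0xfed00000, size = 0x1000;	/* hypothetical firmware quirk range */

	if (e820__mapped_any(base, base + size, E820_TYPE_RAM)) {
		/* Carve the range out of usable RAM and reprint the resulting table: */
		e820__range_update(base, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
		e820__update_table_print();
	}
}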
diff --git a/arch/x86/include/asm/e820/types.h b/arch/x86/include/asm/e820/types.h new file mode 100644 index 000000000000..4adeed03a9a1 --- /dev/null +++ b/arch/x86/include/asm/e820/types.h | |||
@@ -0,0 +1,104 @@ | |||
1 | #ifndef _ASM_E820_TYPES_H | ||
2 | #define _ASM_E820_TYPES_H | ||
3 | |||
4 | #include <uapi/asm/bootparam.h> | ||
5 | |||
6 | /* | ||
7 | * These are the E820 types known to the kernel: | ||
8 | */ | ||
9 | enum e820_type { | ||
10 | E820_TYPE_RAM = 1, | ||
11 | E820_TYPE_RESERVED = 2, | ||
12 | E820_TYPE_ACPI = 3, | ||
13 | E820_TYPE_NVS = 4, | ||
14 | E820_TYPE_UNUSABLE = 5, | ||
15 | E820_TYPE_PMEM = 7, | ||
16 | |||
17 | /* | ||
18 | * This is a non-standardized way to represent ADR or | ||
19 | * NVDIMM regions that persist over a reboot. | ||
20 | * | ||
21 | * The kernel will ignore their special capabilities | ||
22 | * unless the CONFIG_X86_PMEM_LEGACY=y option is set. | ||
23 | * | ||
24 | * ( Note that older platforms also used 6 for the same | ||
25 | * type of memory, but newer versions switched to 12 as | ||
26 | * 6 was assigned differently. Some time they will learn... ) | ||
27 | */ | ||
28 | E820_TYPE_PRAM = 12, | ||
29 | |||
30 | /* | ||
31 | * Reserved RAM used by the kernel itself if | ||
32 | * CONFIG_INTEL_TXT=y is enabled, memory of this type | ||
33 | * will be included in the S3 integrity calculation | ||
34 | * and so should not include any memory that the BIOS | ||
35 | * might alter over the S3 transition: | ||
36 | */ | ||
37 | E820_TYPE_RESERVED_KERN = 128, | ||
38 | }; | ||
39 | |||
40 | /* | ||
41 | * A single E820 map entry, describing a memory range of [addr...addr+size-1], | ||
42 | * of 'type' memory type: | ||
43 | * | ||
44 | * (We pack it because there can be thousands of them on large systems.) | ||
45 | */ | ||
46 | struct e820_entry { | ||
47 | u64 addr; | ||
48 | u64 size; | ||
49 | enum e820_type type; | ||
50 | } __attribute__((packed)); | ||
51 | |||
52 | /* | ||
53 | * The legacy E820 BIOS limits us to 128 (E820_MAX_ENTRIES_ZEROPAGE) nodes | ||
54 | * due to the constrained space in the zeropage. | ||
55 | * | ||
56 | * On large systems we can easily have thousands of nodes with RAM, | ||
57 | * which cannot be fit into so few entries - so we have a mechanism | ||
58 | * to extend the e820 table size at build-time, via the E820_MAX_ENTRIES | ||
59 | * define below. | ||
60 | * | ||
61 | * ( Those extra entries are enumerated via the EFI memory map, not | ||
62 | * via the legacy zeropage mechanism. ) | ||
63 | * | ||
64 | * Size our internal memory map tables to have room for these additional | ||
65 | * entries, based on a heuristic calculation: up to three entries per | ||
66 | * NUMA node, plus E820_MAX_ENTRIES_ZEROPAGE for some extra space. | ||
67 | * | ||
68 | * This allows for bootstrap/firmware quirks such as possible duplicate | ||
69 | * E820 entries that might need room in the same arrays, prior to the | ||
70 | * call to e820__update_table() to remove duplicates. The allowance | ||
71 | * of three memory map entries per node is "enough" entries for | ||
72 | * the initial hardware platform motivating this mechanism to make | ||
73 | * use of additional EFI map entries. Future platforms may want | ||
74 | * to allow more than three entries per node or otherwise refine | ||
75 | * this size. | ||
76 | */ | ||
77 | |||
78 | #include <linux/numa.h> | ||
79 | |||
80 | #define E820_MAX_ENTRIES (E820_MAX_ENTRIES_ZEROPAGE + 3*MAX_NUMNODES) | ||
81 | |||
82 | /* | ||
83 | * The whole array of E820 entries: | ||
84 | */ | ||
85 | struct e820_table { | ||
86 | __u32 nr_entries; | ||
87 | struct e820_entry entries[E820_MAX_ENTRIES]; | ||
88 | }; | ||
89 | |||
90 | /* | ||
91 | * Various well-known legacy memory ranges in physical memory: | ||
92 | */ | ||
93 | #define ISA_START_ADDRESS 0x000a0000 | ||
94 | #define ISA_END_ADDRESS 0x00100000 | ||
95 | |||
96 | #define BIOS_BEGIN 0x000a0000 | ||
97 | #define BIOS_END 0x00100000 | ||
98 | |||
99 | #define HIGH_MEMORY 0x00100000 | ||
100 | |||
101 | #define BIOS_ROM_BASE 0xffe00000 | ||
102 | #define BIOS_ROM_END 0xffffffff | ||
103 | |||
104 | #endif /* _ASM_E820_TYPES_H */ | ||
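Plugging in a typical configuration makes the sizing above concrete (CONFIG_NODES_SHIFT=6 is only an example): MAX_NUMNODES = 1 << 6 = 64, so E820_MAX_ENTRIES = 128 + 3 * 64 = 320 entries. With the packed struct e820_entry at 8 + 8 + 4 = 20 bytes (assuming the enum occupies 4 bytes), a full struct e820_table comes to roughly 4 + 320 * 20 = 6404 bytes.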
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h index 156cd5d18d2a..1d268098ac2e 100644 --- a/arch/x86/include/asm/gart.h +++ b/arch/x86/include/asm/gart.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef _ASM_X86_GART_H | 1 | #ifndef _ASM_X86_GART_H |
2 | #define _ASM_X86_GART_H | 2 | #define _ASM_X86_GART_H |
3 | 3 | ||
4 | #include <asm/e820.h> | 4 | #include <asm/e820/api.h> |
5 | 5 | ||
6 | extern void set_up_gart_resume(u32, u32); | 6 | extern void set_up_gart_resume(u32, u32); |
7 | 7 | ||
@@ -97,7 +97,7 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size) | |||
97 | printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n"); | 97 | printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n"); |
98 | return 0; | 98 | return 0; |
99 | } | 99 | } |
100 | if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { | 100 | if (e820__mapped_any(aper_base, aper_base + aper_size, E820_TYPE_RAM)) { |
101 | printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n"); | 101 | printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n"); |
102 | return 0; | 102 | return 0; |
103 | } | 103 | } |
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 32007041ef8c..831eb7895535 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h | |||
@@ -64,7 +64,7 @@ static inline void find_smp_config(void) | |||
64 | } | 64 | } |
65 | 65 | ||
66 | #ifdef CONFIG_X86_MPPARSE | 66 | #ifdef CONFIG_X86_MPPARSE |
67 | extern void early_reserve_e820_mpc_new(void); | 67 | extern void e820__memblock_alloc_reserved_mpc_new(void); |
68 | extern int enable_update_mptable; | 68 | extern int enable_update_mptable; |
69 | extern int default_mpc_apic_id(struct mpc_cpu *m); | 69 | extern int default_mpc_apic_id(struct mpc_cpu *m); |
70 | extern void default_smp_read_mpc_oem(struct mpc_table *mpc); | 70 | extern void default_smp_read_mpc_oem(struct mpc_table *mpc); |
@@ -76,7 +76,7 @@ extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str); | |||
76 | extern void default_find_smp_config(void); | 76 | extern void default_find_smp_config(void); |
77 | extern void default_get_smp_config(unsigned int early); | 77 | extern void default_get_smp_config(unsigned int early); |
78 | #else | 78 | #else |
79 | static inline void early_reserve_e820_mpc_new(void) { } | 79 | static inline void e820__memblock_alloc_reserved_mpc_new(void) { } |
80 | #define enable_update_mptable 0 | 80 | #define enable_update_mptable 0 |
81 | #define default_mpc_apic_id NULL | 81 | #define default_mpc_apic_id NULL |
82 | #define default_smp_read_mpc_oem NULL | 82 | #define default_smp_read_mpc_oem NULL |
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index d08eacd298c2..9f1b21f372fe 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h | |||
@@ -4,6 +4,8 @@ | |||
4 | * (c) 1999 Martin Mares <mj@ucw.cz> | 4 | * (c) 1999 Martin Mares <mj@ucw.cz> |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/ioport.h> | ||
8 | |||
7 | #undef DEBUG | 9 | #undef DEBUG |
8 | 10 | ||
9 | #ifdef DEBUG | 11 | #ifdef DEBUG |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 585ee0d42d18..2197e5322df9 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -2,8 +2,6 @@ | |||
2 | #define _ASM_X86_PGTABLE_H | 2 | #define _ASM_X86_PGTABLE_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | #include <asm/e820.h> | ||
6 | |||
7 | #include <asm/pgtable_types.h> | 5 | #include <asm/pgtable_types.h> |
8 | 6 | ||
9 | /* | 7 | /* |
@@ -845,6 +843,7 @@ static inline int pgd_none(pgd_t pgd) | |||
845 | extern int direct_gbpages; | 843 | extern int direct_gbpages; |
846 | void init_mem_mapping(void); | 844 | void init_mem_mapping(void); |
847 | void early_alloc_pgt_buf(void); | 845 | void early_alloc_pgt_buf(void); |
846 | extern void memblock_find_dma_reserve(void); | ||
848 | 847 | ||
849 | #ifdef CONFIG_X86_64 | 848 | #ifdef CONFIG_X86_64 |
850 | /* Realmode trampoline initialization. */ | 849 | /* Realmode trampoline initialization. */ |
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 33cbd3db97b9..64c5e745ebad 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/spinlock.h> | 6 | #include <linux/spinlock.h> |
7 | #include <linux/pfn.h> | 7 | #include <linux/pfn.h> |
8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
9 | #include <linux/device.h> | ||
9 | 10 | ||
10 | #include <linux/uaccess.h> | 11 | #include <linux/uaccess.h> |
11 | #include <asm/page.h> | 12 | #include <asm/page.h> |
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 07244ea16765..ddef37b16af2 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/screen_info.h> | 34 | #include <linux/screen_info.h> |
35 | #include <linux/apm_bios.h> | 35 | #include <linux/apm_bios.h> |
36 | #include <linux/edd.h> | 36 | #include <linux/edd.h> |
37 | #include <asm/e820.h> | ||
38 | #include <asm/ist.h> | 37 | #include <asm/ist.h> |
39 | #include <video/edid.h> | 38 | #include <video/edid.h> |
40 | 39 | ||
@@ -111,6 +110,21 @@ struct efi_info { | |||
111 | __u32 efi_memmap_hi; | 110 | __u32 efi_memmap_hi; |
112 | }; | 111 | }; |
113 | 112 | ||
113 | /* | ||
114 | * This is the maximum number of entries in struct boot_params::e820_table | ||
115 | * (the zeropage), which is part of the x86 boot protocol ABI: | ||
116 | */ | ||
117 | #define E820_MAX_ENTRIES_ZEROPAGE 128 | ||
118 | |||
119 | /* | ||
120 | * The E820 memory region entry of the boot protocol ABI: | ||
121 | */ | ||
122 | struct boot_e820_entry { | ||
123 | __u64 addr; | ||
124 | __u64 size; | ||
125 | __u32 type; | ||
126 | } __attribute__((packed)); | ||
127 | |||
114 | /* The so-called "zeropage" */ | 128 | /* The so-called "zeropage" */ |
115 | struct boot_params { | 129 | struct boot_params { |
116 | struct screen_info screen_info; /* 0x000 */ | 130 | struct screen_info screen_info; /* 0x000 */ |
@@ -153,7 +167,7 @@ struct boot_params { | |||
153 | struct setup_header hdr; /* setup header */ /* 0x1f1 */ | 167 | struct setup_header hdr; /* setup header */ /* 0x1f1 */ |
154 | __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)]; | 168 | __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)]; |
155 | __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */ | 169 | __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */ |
156 | struct e820entry e820_map[E820MAX]; /* 0x2d0 */ | 170 | struct boot_e820_entry e820_table[E820_MAX_ENTRIES_ZEROPAGE]; /* 0x2d0 */ |
157 | __u8 _pad8[48]; /* 0xcd0 */ | 171 | __u8 _pad8[48]; /* 0xcd0 */ |
158 | struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */ | 172 | struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */ |
159 | __u8 _pad9[276]; /* 0xeec */ | 173 | __u8 _pad9[276]; /* 0xeec */ |
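From the bootloader side, the two fields declared above are all that is needed to hand the kernel a memory map: fill boot_params.e820_table[] and set e820_entries. A minimal sketch with made-up ranges (real loaders take these from the firmware; the function name is hypothetical):

static void fill_zeropage_e820_sketch(struct boot_params *bp)
{
	struct boot_e820_entry *e = bp->e820_table;

	e[0] = (struct boot_e820_entry){ .addr = 0x00000000, .size = 0x0009fc00, .type = 1 };	/* RAM */
	e[1] = (struct boot_e820_entry){ .addr = 0x000f0000, .size = 0x00010000, .type = 2 };	/* reserved */
	e[2] = (struct boot_e820_entry){ .addr = 0x00100000, .size = 0x3ff00000, .type = 1 };	/* RAM */

	bp->e820_entries = 3;		/* must stay <= E820_MAX_ENTRIES_ZEROPAGE (128) */
}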
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 70854988a963..6bb680671088 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
38 | #include <linux/efi-bgrt.h> | 38 | #include <linux/efi-bgrt.h> |
39 | 39 | ||
40 | #include <asm/e820/api.h> | ||
40 | #include <asm/irqdomain.h> | 41 | #include <asm/irqdomain.h> |
41 | #include <asm/pci_x86.h> | 42 | #include <asm/pci_x86.h> |
42 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
@@ -1723,6 +1724,6 @@ int __acpi_release_global_lock(unsigned int *lock) | |||
1723 | 1724 | ||
1724 | void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size) | 1725 | void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size) |
1725 | { | 1726 | { |
1726 | e820_add_region(addr, size, E820_ACPI); | 1727 | e820__range_add(addr, size, E820_TYPE_ACPI); |
1727 | update_e820(); | 1728 | e820__update_table_print(); |
1728 | } | 1729 | } |
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 0a2bb1f62e72..ef2859f9fcce 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
22 | #include <linux/bitops.h> | 22 | #include <linux/bitops.h> |
23 | #include <linux/suspend.h> | 23 | #include <linux/suspend.h> |
24 | #include <asm/e820.h> | 24 | #include <asm/e820/api.h> |
25 | #include <asm/io.h> | 25 | #include <asm/io.h> |
26 | #include <asm/iommu.h> | 26 | #include <asm/iommu.h> |
27 | #include <asm/gart.h> | 27 | #include <asm/gart.h> |
@@ -306,13 +306,13 @@ void __init early_gart_iommu_check(void) | |||
306 | fix = 1; | 306 | fix = 1; |
307 | 307 | ||
308 | if (gart_fix_e820 && !fix && aper_enabled) { | 308 | if (gart_fix_e820 && !fix && aper_enabled) { |
309 | if (e820_any_mapped(aper_base, aper_base + aper_size, | 309 | if (e820__mapped_any(aper_base, aper_base + aper_size, |
310 | E820_RAM)) { | 310 | E820_TYPE_RAM)) { |
311 | /* reserve it, so we can reuse it in second kernel */ | 311 | /* reserve it, so we can reuse it in second kernel */ |
312 | pr_info("e820: reserve [mem %#010Lx-%#010Lx] for GART\n", | 312 | pr_info("e820: reserve [mem %#010Lx-%#010Lx] for GART\n", |
313 | aper_base, aper_base + aper_size - 1); | 313 | aper_base, aper_base + aper_size - 1); |
314 | e820_add_region(aper_base, aper_size, E820_RESERVED); | 314 | e820__range_add(aper_base, aper_size, E820_TYPE_RESERVED); |
315 | update_e820(); | 315 | e820__update_table_print(); |
316 | } | 316 | } |
317 | } | 317 | } |
318 | 318 | ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 875091d4609d..847650b14558 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -2631,7 +2631,7 @@ static int __init lapic_insert_resource(void) | |||
2631 | } | 2631 | } |
2632 | 2632 | ||
2633 | /* | 2633 | /* |
2634 | * need call insert after e820_reserve_resources() | 2634 | * need call insert after e820__reserve_resources() |
2635 | * that is using request_resource | 2635 | * that is using request_resource |
2636 | */ | 2636 | */ |
2637 | late_initcall(lapic_insert_resource); | 2637 | late_initcall(lapic_insert_resource); |
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index b109e4389c92..2262eb6df796 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <asm/acpi.h> | 28 | #include <asm/acpi.h> |
29 | #include <asm/e820.h> | 29 | #include <asm/e820/api.h> |
30 | 30 | ||
31 | static void noop_init_apic_ldr(void) { } | 31 | static void noop_init_apic_ldr(void) { } |
32 | static void noop_send_IPI(int cpu, int vector) { } | 32 | static void noop_send_IPI(int cpu, int vector) { } |
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index c48264e202fd..2e8f7f048f4f 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c | |||
@@ -25,7 +25,7 @@ | |||
25 | 25 | ||
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <asm/acpi.h> | 27 | #include <asm/acpi.h> |
28 | #include <asm/e820.h> | 28 | #include <asm/e820/api.h> |
29 | 29 | ||
30 | #ifdef CONFIG_HOTPLUG_CPU | 30 | #ifdef CONFIG_HOTPLUG_CPU |
31 | #define DEFAULT_SEND_IPI (1) | 31 | #define DEFAULT_SEND_IPI (1) |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 86f20cc0a65e..b487b3a01615 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <asm/uv/bios.h> | 34 | #include <asm/uv/bios.h> |
35 | #include <asm/uv/uv.h> | 35 | #include <asm/uv/uv.h> |
36 | #include <asm/apic.h> | 36 | #include <asm/apic.h> |
37 | #include <asm/e820/api.h> | ||
37 | #include <asm/ipi.h> | 38 | #include <asm/ipi.h> |
38 | #include <asm/smp.h> | 39 | #include <asm/smp.h> |
39 | #include <asm/x86_init.h> | 40 | #include <asm/x86_init.h> |
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 43955ee6715b..44207b71fee1 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
@@ -3,7 +3,7 @@ | |||
3 | #include <linux/sched/clock.h> | 3 | #include <linux/sched/clock.h> |
4 | 4 | ||
5 | #include <asm/cpufeature.h> | 5 | #include <asm/cpufeature.h> |
6 | #include <asm/e820.h> | 6 | #include <asm/e820/api.h> |
7 | #include <asm/mtrr.h> | 7 | #include <asm/mtrr.h> |
8 | #include <asm/msr.h> | 8 | #include <asm/msr.h> |
9 | 9 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 3b442b64c72d..765afd599039 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/range.h> | 27 | #include <linux/range.h> |
28 | 28 | ||
29 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
30 | #include <asm/e820.h> | 30 | #include <asm/e820/api.h> |
31 | #include <asm/mtrr.h> | 31 | #include <asm/mtrr.h> |
32 | #include <asm/msr.h> | 32 | #include <asm/msr.h> |
33 | 33 | ||
@@ -860,7 +860,7 @@ real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn) | |||
860 | trim_size <<= PAGE_SHIFT; | 860 | trim_size <<= PAGE_SHIFT; |
861 | trim_size -= trim_start; | 861 | trim_size -= trim_start; |
862 | 862 | ||
863 | return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED); | 863 | return e820__range_update(trim_start, trim_size, E820_TYPE_RAM, E820_TYPE_RESERVED); |
864 | } | 864 | } |
865 | 865 | ||
866 | /** | 866 | /** |
@@ -978,7 +978,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
978 | WARN_ON(1); | 978 | WARN_ON(1); |
979 | 979 | ||
980 | pr_info("update e820 for mtrr\n"); | 980 | pr_info("update e820 for mtrr\n"); |
981 | update_e820(); | 981 | e820__update_table_print(); |
982 | 982 | ||
983 | return 1; | 983 | return 1; |
984 | } | 984 | } |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 24e87e74990d..2bce84d91c2b 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -48,7 +48,7 @@ | |||
48 | #include <linux/syscore_ops.h> | 48 | #include <linux/syscore_ops.h> |
49 | 49 | ||
50 | #include <asm/cpufeature.h> | 50 | #include <asm/cpufeature.h> |
51 | #include <asm/e820.h> | 51 | #include <asm/e820/api.h> |
52 | #include <asm/mtrr.h> | 52 | #include <asm/mtrr.h> |
53 | #include <asm/msr.h> | 53 | #include <asm/msr.h> |
54 | #include <asm/pat.h> | 54 | #include <asm/pat.h> |
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 3741461c63a0..22217ece26c8 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/nmi.h> | 29 | #include <asm/nmi.h> |
30 | #include <asm/hw_irq.h> | 30 | #include <asm/hw_irq.h> |
31 | #include <asm/apic.h> | 31 | #include <asm/apic.h> |
32 | #include <asm/e820/types.h> | ||
32 | #include <asm/io_apic.h> | 33 | #include <asm/io_apic.h> |
33 | #include <asm/hpet.h> | 34 | #include <asm/hpet.h> |
34 | #include <linux/kdebug.h> | 35 | #include <linux/kdebug.h> |
@@ -503,16 +504,16 @@ static int prepare_elf_headers(struct kimage *image, void **addr, | |||
503 | return ret; | 504 | return ret; |
504 | } | 505 | } |
505 | 506 | ||
506 | static int add_e820_entry(struct boot_params *params, struct e820entry *entry) | 507 | static int add_e820_entry(struct boot_params *params, struct e820_entry *entry) |
507 | { | 508 | { |
508 | unsigned int nr_e820_entries; | 509 | unsigned int nr_e820_entries; |
509 | 510 | ||
510 | nr_e820_entries = params->e820_entries; | 511 | nr_e820_entries = params->e820_entries; |
511 | if (nr_e820_entries >= E820MAX) | 512 | if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE) |
512 | return 1; | 513 | return 1; |
513 | 514 | ||
514 | memcpy(¶ms->e820_map[nr_e820_entries], entry, | 515 | memcpy(¶ms->e820_table[nr_e820_entries], entry, |
515 | sizeof(struct e820entry)); | 516 | sizeof(struct e820_entry)); |
516 | params->e820_entries++; | 517 | params->e820_entries++; |
517 | return 0; | 518 | return 0; |
518 | } | 519 | } |
@@ -521,7 +522,7 @@ static int memmap_entry_callback(u64 start, u64 end, void *arg) | |||
521 | { | 522 | { |
522 | struct crash_memmap_data *cmd = arg; | 523 | struct crash_memmap_data *cmd = arg; |
523 | struct boot_params *params = cmd->params; | 524 | struct boot_params *params = cmd->params; |
524 | struct e820entry ei; | 525 | struct e820_entry ei; |
525 | 526 | ||
526 | ei.addr = start; | 527 | ei.addr = start; |
527 | ei.size = end - start + 1; | 528 | ei.size = end - start + 1; |
@@ -560,7 +561,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params) | |||
560 | { | 561 | { |
561 | int i, ret = 0; | 562 | int i, ret = 0; |
562 | unsigned long flags; | 563 | unsigned long flags; |
563 | struct e820entry ei; | 564 | struct e820_entry ei; |
564 | struct crash_memmap_data cmd; | 565 | struct crash_memmap_data cmd; |
565 | struct crash_mem *cmem; | 566 | struct crash_mem *cmem; |
566 | 567 | ||
@@ -574,17 +575,17 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params) | |||
574 | /* Add first 640K segment */ | 575 | /* Add first 640K segment */ |
575 | ei.addr = image->arch.backup_src_start; | 576 | ei.addr = image->arch.backup_src_start; |
576 | ei.size = image->arch.backup_src_sz; | 577 | ei.size = image->arch.backup_src_sz; |
577 | ei.type = E820_RAM; | 578 | ei.type = E820_TYPE_RAM; |
578 | add_e820_entry(params, &ei); | 579 | add_e820_entry(params, &ei); |
579 | 580 | ||
580 | /* Add ACPI tables */ | 581 | /* Add ACPI tables */ |
581 | cmd.type = E820_ACPI; | 582 | cmd.type = E820_TYPE_ACPI; |
582 | flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 583 | flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
583 | walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd, | 584 | walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd, |
584 | memmap_entry_callback); | 585 | memmap_entry_callback); |
585 | 586 | ||
586 | /* Add ACPI Non-volatile Storage */ | 587 | /* Add ACPI Non-volatile Storage */ |
587 | cmd.type = E820_NVS; | 588 | cmd.type = E820_TYPE_NVS; |
588 | walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd, | 589 | walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd, |
589 | memmap_entry_callback); | 590 | memmap_entry_callback); |
590 | 591 | ||
@@ -592,7 +593,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params) | |||
592 | if (crashk_low_res.end) { | 593 | if (crashk_low_res.end) { |
593 | ei.addr = crashk_low_res.start; | 594 | ei.addr = crashk_low_res.start; |
594 | ei.size = crashk_low_res.end - crashk_low_res.start + 1; | 595 | ei.size = crashk_low_res.end - crashk_low_res.start + 1; |
595 | ei.type = E820_RAM; | 596 | ei.type = E820_TYPE_RAM; |
596 | add_e820_entry(params, &ei); | 597 | add_e820_entry(params, &ei); |
597 | } | 598 | } |
598 | 599 | ||
@@ -609,7 +610,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params) | |||
609 | if (ei.size < PAGE_SIZE) | 610 | if (ei.size < PAGE_SIZE) |
610 | continue; | 611 | continue; |
611 | ei.addr = cmem->ranges[i].start; | 612 | ei.addr = cmem->ranges[i].start; |
612 | ei.type = E820_RAM; | 613 | ei.type = E820_TYPE_RAM; |
613 | add_e820_entry(params, &ei); | 614 | add_e820_entry(params, &ei); |
614 | } | 615 | } |
615 | 616 | ||
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index b2bbad6ebe4d..6e9b26fa6d05 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -1,49 +1,55 @@ | |||
1 | /* | 1 | /* |
2 | * Handle the memory map. | 2 | * Low level x86 E820 memory map handling functions. |
3 | * The functions here do the job until bootmem takes over. | ||
4 | * | 3 | * |
5 | * Getting sanitize_e820_map() in sync with i386 version by applying change: | 4 | * The firmware and bootloader passes us the "E820 table", which is the primary |
6 | * - Provisions for empty E820 memory regions (reported by certain BIOSes). | 5 | * physical memory layout description available about x86 systems. |
7 | * Alex Achenbach <xela@slit.de>, December 2002. | ||
8 | * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
9 | * | 6 | * |
7 | * The kernel takes the E820 memory layout and optionally modifies it with | ||
8 | * quirks and other tweaks, and feeds that into the generic Linux memory | ||
9 | * allocation code routines via a platform independent interface (memblock, etc.). | ||
10 | */ | 10 | */ |
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/crash_dump.h> | 11 | #include <linux/crash_dump.h> |
15 | #include <linux/export.h> | ||
16 | #include <linux/bootmem.h> | 12 | #include <linux/bootmem.h> |
17 | #include <linux/pfn.h> | ||
18 | #include <linux/suspend.h> | 13 | #include <linux/suspend.h> |
19 | #include <linux/acpi.h> | 14 | #include <linux/acpi.h> |
20 | #include <linux/firmware-map.h> | 15 | #include <linux/firmware-map.h> |
21 | #include <linux/memblock.h> | 16 | #include <linux/memblock.h> |
22 | #include <linux/sort.h> | 17 | #include <linux/sort.h> |
23 | 18 | ||
24 | #include <asm/e820.h> | 19 | #include <asm/e820/api.h> |
25 | #include <asm/proto.h> | ||
26 | #include <asm/setup.h> | 20 | #include <asm/setup.h> |
27 | #include <asm/cpufeature.h> | ||
28 | 21 | ||
29 | /* | 22 | /* |
30 | * The e820 map is the map that gets modified e.g. with command line parameters | 23 | * We organize the E820 table into two main data structures: |
31 | * and that is also registered with modifications in the kernel resource tree | ||
32 | * with the iomem_resource as parent. | ||
33 | * | 24 | * |
34 | * The e820_saved is directly saved after the BIOS-provided memory map is | 25 | * - 'e820_table_firmware': the original firmware version passed to us by the |
35 | * copied. It doesn't get modified afterwards. It's registered for the | 26 | * bootloader - not modified by the kernel. We use this to: |
36 | * /sys/firmware/memmap interface. | ||
37 | * | 27 | * |
38 | * That memory map is not modified and is used as base for kexec. The kexec'd | 28 | * - inform the user about the firmware's notion of memory layout |
39 | * kernel should get the same memory map as the firmware provides. Then the | 29 | * via /sys/firmware/memmap |
40 | * user can e.g. boot the original kernel with mem=1G while still booting the | 30 | * |
41 | * next kernel with full memory. | 31 | * - the hibernation code uses it to generate a kernel-independent MD5 |
32 | * fingerprint of the physical memory layout of a system. | ||
33 | * | ||
34 | * - kexec, which is a bootloader in disguise, uses the original E820 | ||
35 | * layout to pass to the kexec-ed kernel. This way the original kernel | ||
36 | * can have a restricted E820 map while the kexec()-ed kexec-kernel | ||
37 | * can have access to full memory - etc. | ||
38 | * | ||
39 | * - 'e820_table': this is the main E820 table that is massaged by the | ||
40 | * low level x86 platform code, or modified by boot parameters, before | ||
41 | * passed on to higher level MM layers. | ||
42 | * | ||
43 | * Once the E820 map has been converted to the standard Linux memory layout | ||
44 | * information its role stops - modifying it has no effect and does not get | ||
45 | * re-propagated. So its main role is a temporary bootstrap storage of firmware | ||
46 | * specific memory layout data during early bootup. | ||
42 | */ | 47 | */ |
43 | static struct e820map initial_e820 __initdata; | 48 | static struct e820_table e820_table_init __initdata; |
44 | static struct e820map initial_e820_saved __initdata; | 49 | static struct e820_table e820_table_firmware_init __initdata; |
45 | struct e820map *e820 __refdata = &initial_e820; | 50 | |
46 | struct e820map *e820_saved __refdata = &initial_e820_saved; | 51 | struct e820_table *e820_table __refdata = &e820_table_init; |
52 | struct e820_table *e820_table_firmware __refdata = &e820_table_firmware_init; | ||
47 | 53 | ||
48 | /* For PCI or other memory-mapped resources */ | 54 | /* For PCI or other memory-mapped resources */ |
49 | unsigned long pci_mem_start = 0xaeedbabe; | 55 | unsigned long pci_mem_start = 0xaeedbabe; |
@@ -55,51 +61,53 @@ EXPORT_SYMBOL(pci_mem_start); | |||
55 | * This function checks if any part of the range <start,end> is mapped | 61 | * This function checks if any part of the range <start,end> is mapped |
56 | * with type. | 62 | * with type. |
57 | */ | 63 | */ |
58 | int | 64 | bool e820__mapped_any(u64 start, u64 end, enum e820_type type) |
59 | e820_any_mapped(u64 start, u64 end, unsigned type) | ||
60 | { | 65 | { |
61 | int i; | 66 | int i; |
62 | 67 | ||
63 | for (i = 0; i < e820->nr_map; i++) { | 68 | for (i = 0; i < e820_table->nr_entries; i++) { |
64 | struct e820entry *ei = &e820->map[i]; | 69 | struct e820_entry *entry = &e820_table->entries[i]; |
65 | 70 | ||
66 | if (type && ei->type != type) | 71 | if (type && entry->type != type) |
67 | continue; | 72 | continue; |
68 | if (ei->addr >= end || ei->addr + ei->size <= start) | 73 | if (entry->addr >= end || entry->addr + entry->size <= start) |
69 | continue; | 74 | continue; |
70 | return 1; | 75 | return 1; |
71 | } | 76 | } |
72 | return 0; | 77 | return 0; |
73 | } | 78 | } |
74 | EXPORT_SYMBOL_GPL(e820_any_mapped); | 79 | EXPORT_SYMBOL_GPL(e820__mapped_any); |
75 | 80 | ||
76 | /* | 81 | /* |
77 | * This function checks if the entire range <start,end> is mapped with type. | 82 | * This function checks if the entire <start,end> range is mapped with 'type'. |
78 | * | 83 | * |
79 | * Note: this function only works correct if the e820 table is sorted and | 84 | * Note: this function only works correctly once the E820 table is sorted and |
80 | * not-overlapping, which is the case | 85 | * not-overlapping (at least for the range specified), which is the case normally. |
81 | */ | 86 | */ |
82 | int __init e820_all_mapped(u64 start, u64 end, unsigned type) | 87 | bool __init e820__mapped_all(u64 start, u64 end, enum e820_type type) |
83 | { | 88 | { |
84 | int i; | 89 | int i; |
85 | 90 | ||
86 | for (i = 0; i < e820->nr_map; i++) { | 91 | for (i = 0; i < e820_table->nr_entries; i++) { |
87 | struct e820entry *ei = &e820->map[i]; | 92 | struct e820_entry *entry = &e820_table->entries[i]; |
88 | 93 | ||
89 | if (type && ei->type != type) | 94 | if (type && entry->type != type) |
90 | continue; | 95 | continue; |
91 | /* is the region (part) in overlap with the current region ?*/ | 96 | |
92 | if (ei->addr >= end || ei->addr + ei->size <= start) | 97 | /* Is the region (part) in overlap with the current region? */ |
98 | if (entry->addr >= end || entry->addr + entry->size <= start) | ||
93 | continue; | 99 | continue; |
94 | 100 | ||
95 | /* if the region is at the beginning of <start,end> we move | 101 | /* |
96 | * start to the end of the region since it's ok until there | 102 | * If the region is at the beginning of <start,end> we move |
103 | * 'start' to the end of the region since it's ok until there | ||
97 | */ | 104 | */ |
98 | if (ei->addr <= start) | 105 | if (entry->addr <= start) |
99 | start = ei->addr + ei->size; | 106 | start = entry->addr + entry->size; |
107 | |||
100 | /* | 108 | /* |
101 | * if start is now at or beyond end, we're done, full | 109 | * If 'start' is now at or beyond 'end', we're done, full |
102 | * coverage | 110 | * coverage of the desired range exists: |
103 | */ | 111 | */ |
104 | if (start >= end) | 112 | if (start >= end) |
105 | return 1; | 113 | return 1; |
@@ -108,94 +116,77 @@ int __init e820_all_mapped(u64 start, u64 end, unsigned type) | |||
108 | } | 116 | } |
109 | 117 | ||
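e820__mapped_all() instead requires full coverage: it repeatedly advances 'start' past each matching entry and only succeeds once 'start' reaches 'end', which is why it needs a sorted, non-overlapping table. A hedged standalone sketch of that walk, using the same invented types as the previous sketch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t addr, size; int type; };

    /* True only if [start, end) is fully covered by entries of 'type'.
     * Assumes the table is sorted by address and non-overlapping. */
    static bool range_mapped_all(const struct range *tab, int n,
                                 uint64_t start, uint64_t end, int type)
    {
        for (int i = 0; i < n; i++) {
            if (type && tab[i].type != type)
                continue;
            if (tab[i].addr >= end || tab[i].addr + tab[i].size <= start)
                continue;
            /* Entry covers the current head of the range: move 'start' forward. */
            if (tab[i].addr <= start)
                start = tab[i].addr + tab[i].size;
            /* Fully covered once 'start' has been pushed past 'end'. */
            if (start >= end)
                return true;
        }
        return false;  /* A hole remained somewhere in [start, end). */
    }

    int main(void)
    {
        struct range tab[] = { { 0x0, 0x1000, 1 }, { 0x1000, 0x1000, 1 } };

        printf("%d\n", range_mapped_all(tab, 2, 0x800, 0x1800, 1)); /* 1: two entries chain up */
        printf("%d\n", range_mapped_all(tab, 2, 0x800, 0x3000, 1)); /* 0: runs past the table  */
        return 0;
    }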
110 | /* | 118 | /* |
111 | * Add a memory region to the kernel e820 map. | 119 | * Add a memory region to the kernel E820 map. |
112 | */ | 120 | */ |
113 | static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size, | 121 | static void __init __e820__range_add(struct e820_table *table, u64 start, u64 size, enum e820_type type) |
114 | int type) | ||
115 | { | 122 | { |
116 | int x = e820x->nr_map; | 123 | int x = table->nr_entries; |
117 | 124 | ||
118 | if (x >= ARRAY_SIZE(e820x->map)) { | 125 | if (x >= ARRAY_SIZE(table->entries)) { |
119 | printk(KERN_ERR "e820: too many entries; ignoring [mem %#010llx-%#010llx]\n", | 126 | pr_err("e820: too many entries; ignoring [mem %#010llx-%#010llx]\n", start, start + size - 1); |
120 | (unsigned long long) start, | ||
121 | (unsigned long long) (start + size - 1)); | ||
122 | return; | 127 | return; |
123 | } | 128 | } |
124 | 129 | ||
125 | e820x->map[x].addr = start; | 130 | table->entries[x].addr = start; |
126 | e820x->map[x].size = size; | 131 | table->entries[x].size = size; |
127 | e820x->map[x].type = type; | 132 | table->entries[x].type = type; |
128 | e820x->nr_map++; | 133 | table->nr_entries++; |
129 | } | 134 | } |
130 | 135 | ||
131 | void __init e820_add_region(u64 start, u64 size, int type) | 136 | void __init e820__range_add(u64 start, u64 size, enum e820_type type) |
132 | { | 137 | { |
133 | __e820_add_region(e820, start, size, type); | 138 | __e820__range_add(e820_table, start, size, type); |
134 | } | 139 | } |
135 | 140 | ||
136 | static void __init e820_print_type(u32 type) | 141 | static void __init e820_print_type(enum e820_type type) |
137 | { | 142 | { |
138 | switch (type) { | 143 | switch (type) { |
139 | case E820_RAM: | 144 | case E820_TYPE_RAM: /* Fall through: */ |
140 | case E820_RESERVED_KERN: | 145 | case E820_TYPE_RESERVED_KERN: pr_cont("usable"); break; |
141 | printk(KERN_CONT "usable"); | 146 | case E820_TYPE_RESERVED: pr_cont("reserved"); break; |
142 | break; | 147 | case E820_TYPE_ACPI: pr_cont("ACPI data"); break; |
143 | case E820_RESERVED: | 148 | case E820_TYPE_NVS: pr_cont("ACPI NVS"); break; |
144 | printk(KERN_CONT "reserved"); | 149 | case E820_TYPE_UNUSABLE: pr_cont("unusable"); break; |
145 | break; | 150 | case E820_TYPE_PMEM: /* Fall through: */ |
146 | case E820_ACPI: | 151 | case E820_TYPE_PRAM: pr_cont("persistent (type %u)", type); break; |
147 | printk(KERN_CONT "ACPI data"); | 152 | default: pr_cont("type %u", type); break; |
148 | break; | ||
149 | case E820_NVS: | ||
150 | printk(KERN_CONT "ACPI NVS"); | ||
151 | break; | ||
152 | case E820_UNUSABLE: | ||
153 | printk(KERN_CONT "unusable"); | ||
154 | break; | ||
155 | case E820_PMEM: | ||
156 | case E820_PRAM: | ||
157 | printk(KERN_CONT "persistent (type %u)", type); | ||
158 | break; | ||
159 | default: | ||
160 | printk(KERN_CONT "type %u", type); | ||
161 | break; | ||
162 | } | 153 | } |
163 | } | 154 | } |
164 | 155 | ||
165 | void __init e820_print_map(char *who) | 156 | void __init e820__print_table(char *who) |
166 | { | 157 | { |
167 | int i; | 158 | int i; |
168 | 159 | ||
169 | for (i = 0; i < e820->nr_map; i++) { | 160 | for (i = 0; i < e820_table->nr_entries; i++) { |
170 | printk(KERN_INFO "%s: [mem %#018Lx-%#018Lx] ", who, | 161 | pr_info("%s: [mem %#018Lx-%#018Lx] ", who, |
171 | (unsigned long long) e820->map[i].addr, | 162 | e820_table->entries[i].addr, |
172 | (unsigned long long) | 163 | e820_table->entries[i].addr + e820_table->entries[i].size - 1); |
173 | (e820->map[i].addr + e820->map[i].size - 1)); | 164 | |
174 | e820_print_type(e820->map[i].type); | 165 | e820_print_type(e820_table->entries[i].type); |
175 | printk(KERN_CONT "\n"); | 166 | pr_cont("\n"); |
176 | } | 167 | } |
177 | } | 168 | } |
178 | 169 | ||
179 | /* | 170 | /* |
180 | * Sanitize the BIOS e820 map. | 171 | * Sanitize an E820 map. |
181 | * | 172 | * |
182 | * Some e820 responses include overlapping entries. The following | 173 | * Some E820 layouts include overlapping entries. The following |
183 | * replaces the original e820 map with a new one, removing overlaps, | 174 | * replaces the original E820 map with a new one, removing overlaps, |
184 | * and resolving conflicting memory types in favor of highest | 175 | * and resolving conflicting memory types in favor of highest |
185 | * numbered type. | 176 | * numbered type. |
186 | * | 177 | * |
187 | * The input parameter biosmap points to an array of 'struct | 178 | * The input parameter 'entries' points to an array of 'struct |
188 | * e820entry' which on entry has elements in the range [0, *pnr_map) | 179 | * e820_entry' which on entry has elements in the range [0, *nr_entries) |
189 | * valid, and which has space for up to max_nr_map entries. | 180 | * valid, and which has space for up to max_nr_entries entries. |
190 | * On return, the resulting sanitized e820 map entries will be in | 181 | * On return, the resulting sanitized E820 map entries will be |
191 | * overwritten in the same location, starting at biosmap. | 182 | * overwritten in the same location, starting at 'entries'. |
192 | * | 183 | * |
193 | * The integer pointed to by pnr_map must be valid on entry (the | 184 | * The integer pointed to by nr_entries must be valid on entry (the |
194 | * current number of valid entries located at biosmap). If the | 185 | * current number of valid entries located at 'entries'). If the |
195 | * sanitizing succeeds the *pnr_map will be updated with the new | 186 | * sanitizing succeeds the *nr_entries will be updated with the new |
196 | * number of valid entries (something no more than max_nr_map). | 187 | * number of valid entries (something no more than max_nr_entries). |
197 | * | 188 | * |
198 | * The return value from sanitize_e820_map() is zero if it | 189 | * The return value from e820__update_table() is zero if it |
199 | * successfully 'sanitized' the map entries passed in, and is -1 | 190 | * successfully 'sanitized' the map entries passed in, and is -1 |
200 | * if it did nothing, which can happen if either of (1) it was | 191 | * if it did nothing, which can happen if either of (1) it was |
201 | * only passed one map entry, or (2) any of the input map entries | 192 | * only passed one map entry, or (2) any of the input map entries |
@@ -238,10 +229,17 @@ void __init e820_print_map(char *who) | |||
238 | * ______________________4_ | 229 | * ______________________4_ |
239 | */ | 230 | */ |
240 | struct change_member { | 231 | struct change_member { |
241 | struct e820entry *pbios; /* pointer to original bios entry */ | 232 | /* Pointer to the original entry: */ |
242 | unsigned long long addr; /* address for this change point */ | 233 | struct e820_entry *entry; |
234 | /* Address for this change point: */ | ||
235 | unsigned long long addr; | ||
243 | }; | 236 | }; |
244 | 237 | ||
238 | static struct change_member change_point_list[2*E820_MAX_ENTRIES] __initdata; | ||
239 | static struct change_member *change_point[2*E820_MAX_ENTRIES] __initdata; | ||
240 | static struct e820_entry *overlap_list[E820_MAX_ENTRIES] __initdata; | ||
241 | static struct e820_entry new_entries[E820_MAX_ENTRIES] __initdata; | ||
242 | |||
245 | static int __init cpcompare(const void *a, const void *b) | 243 | static int __init cpcompare(const void *a, const void *b) |
246 | { | 244 | { |
247 | struct change_member * const *app = a, * const *bpp = b; | 245 | struct change_member * const *app = a, * const *bpp = b; |
@@ -249,164 +247,141 @@ static int __init cpcompare(const void *a, const void *b) | |||
249 | 247 | ||
250 | /* | 248 | /* |
251 | * Inputs are pointers to two elements of change_point[]. If their | 249 | * Inputs are pointers to two elements of change_point[]. If their |
252 | * addresses are unequal, their difference dominates. If the addresses | 250 | * addresses are not equal, their difference dominates. If the addresses |
253 | * are equal, then consider one that represents the end of its region | 251 | * are equal, then consider one that represents the end of its region |
254 | * to be greater than one that does not. | 252 | * to be greater than one that does not. |
255 | */ | 253 | */ |
256 | if (ap->addr != bp->addr) | 254 | if (ap->addr != bp->addr) |
257 | return ap->addr > bp->addr ? 1 : -1; | 255 | return ap->addr > bp->addr ? 1 : -1; |
258 | 256 | ||
259 | return (ap->addr != ap->pbios->addr) - (bp->addr != bp->pbios->addr); | 257 | return (ap->addr != ap->entry->addr) - (bp->addr != bp->entry->addr); |
260 | } | 258 | } |
261 | 259 | ||
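The comparator's tie-breaking rule is easy to miss: ap->addr != ap->entry->addr is 1 exactly when the change point is the end of its entry, so at equal addresses end points sort after start points. A small qsort() sketch of the same ordering; struct range and struct cp are invented stand-ins for the kernel's e820_entry/change_member, and the comparator sorts the structs directly instead of pointers to them:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct range { uint64_t addr, size; };
    struct cp { const struct range *r; uint64_t addr; };   /* One change point (start or end) */

    /* Sort by address; at equal addresses an end point (addr != r->addr) sorts last. */
    static int cp_cmp(const void *a, const void *b)
    {
        const struct cp *ap = a, *bp = b;

        if (ap->addr != bp->addr)
            return ap->addr > bp->addr ? 1 : -1;

        return (ap->addr != ap->r->addr) - (bp->addr != bp->r->addr);
    }

    int main(void)
    {
        struct range r[] = { { 0x1000, 0x1000 }, { 0x2000, 0x1000 } };
        struct cp cps[] = {
            { &r[0], 0x1000 }, { &r[0], 0x2000 },   /* start/end of r[0] */
            { &r[1], 0x2000 }, { &r[1], 0x3000 },   /* start/end of r[1] */
        };

        qsort(cps, 4, sizeof(cps[0]), cp_cmp);

        /* At 0x2000 the start of r[1] now precedes the end of r[0]: */
        for (int i = 0; i < 4; i++)
            printf("%#llx %s\n", (unsigned long long)cps[i].addr,
                   cps[i].addr == cps[i].r->addr ? "start" : "end");
        return 0;
    }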
262 | int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, | 260 | int __init e820__update_table(struct e820_table *table) |
263 | u32 *pnr_map) | ||
264 | { | 261 | { |
265 | static struct change_member change_point_list[2*E820_X_MAX] __initdata; | 262 | struct e820_entry *entries = table->entries; |
266 | static struct change_member *change_point[2*E820_X_MAX] __initdata; | 263 | u32 max_nr_entries = ARRAY_SIZE(table->entries); |
267 | static struct e820entry *overlap_list[E820_X_MAX] __initdata; | 264 | enum e820_type current_type, last_type; |
268 | static struct e820entry new_bios[E820_X_MAX] __initdata; | ||
269 | unsigned long current_type, last_type; | ||
270 | unsigned long long last_addr; | 265 | unsigned long long last_addr; |
271 | int chgidx; | 266 | u32 new_nr_entries, overlap_entries; |
272 | int overlap_entries; | 267 | u32 i, chg_idx, chg_nr; |
273 | int new_bios_entry; | ||
274 | int old_nr, new_nr, chg_nr; | ||
275 | int i; | ||
276 | 268 | ||
277 | /* if there's only one memory region, don't bother */ | 269 | /* If there's only one memory region, don't bother: */ |
278 | if (*pnr_map < 2) | 270 | if (table->nr_entries < 2) |
279 | return -1; | 271 | return -1; |
280 | 272 | ||
281 | old_nr = *pnr_map; | 273 | table->nr_entries = table->nr_entries; |
282 | BUG_ON(old_nr > max_nr_map); | 274 | BUG_ON(table->nr_entries > max_nr_entries); |
283 | 275 | ||
284 | /* bail out if we find any unreasonable addresses in bios map */ | 276 | /* Bail out if we find any unreasonable addresses in the map: */ |
285 | for (i = 0; i < old_nr; i++) | 277 | for (i = 0; i < table->nr_entries; i++) { |
286 | if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) | 278 | if (entries[i].addr + entries[i].size < entries[i].addr) |
287 | return -1; | 279 | return -1; |
280 | } | ||
288 | 281 | ||
289 | /* create pointers for initial change-point information (for sorting) */ | 282 | /* Create pointers for initial change-point information (for sorting): */ |
290 | for (i = 0; i < 2 * old_nr; i++) | 283 | for (i = 0; i < 2 * table->nr_entries; i++) |
291 | change_point[i] = &change_point_list[i]; | 284 | change_point[i] = &change_point_list[i]; |
292 | 285 | ||
293 | /* record all known change-points (starting and ending addresses), | 286 | /* |
294 | omitting those that are for empty memory regions */ | 287 | * Record all known change-points (starting and ending addresses), |
295 | chgidx = 0; | 288 | * omitting empty memory regions: |
296 | for (i = 0; i < old_nr; i++) { | 289 | */ |
297 | if (biosmap[i].size != 0) { | 290 | chg_idx = 0; |
298 | change_point[chgidx]->addr = biosmap[i].addr; | 291 | for (i = 0; i < table->nr_entries; i++) { |
299 | change_point[chgidx++]->pbios = &biosmap[i]; | 292 | if (entries[i].size != 0) { |
300 | change_point[chgidx]->addr = biosmap[i].addr + | 293 | change_point[chg_idx]->addr = entries[i].addr; |
301 | biosmap[i].size; | 294 | change_point[chg_idx++]->entry = &entries[i]; |
302 | change_point[chgidx++]->pbios = &biosmap[i]; | 295 | change_point[chg_idx]->addr = entries[i].addr + entries[i].size; |
296 | change_point[chg_idx++]->entry = &entries[i]; | ||
303 | } | 297 | } |
304 | } | 298 | } |
305 | chg_nr = chgidx; | 299 | chg_nr = chg_idx; |
306 | 300 | ||
307 | /* sort change-point list by memory addresses (low -> high) */ | 301 | /* Sort change-point list by memory addresses (low -> high): */ |
308 | sort(change_point, chg_nr, sizeof *change_point, cpcompare, NULL); | 302 | sort(change_point, chg_nr, sizeof(*change_point), cpcompare, NULL); |
309 | 303 | ||
310 | /* create a new bios memory map, removing overlaps */ | 304 | /* Create a new memory map, removing overlaps: */ |
311 | overlap_entries = 0; /* number of entries in the overlap table */ | 305 | overlap_entries = 0; /* Number of entries in the overlap table */ |
312 | new_bios_entry = 0; /* index for creating new bios map entries */ | 306 | new_nr_entries = 0; /* Index for creating new map entries */ |
313 | last_type = 0; /* start with undefined memory type */ | 307 | last_type = 0; /* Start with undefined memory type */ |
314 | last_addr = 0; /* start with 0 as last starting address */ | 308 | last_addr = 0; /* Start with 0 as last starting address */ |
315 | 309 | ||
316 | /* loop through change-points, determining affect on the new bios map */ | 310 | /* Loop through change-points, determining effect on the new map: */ |
317 | for (chgidx = 0; chgidx < chg_nr; chgidx++) { | 311 | for (chg_idx = 0; chg_idx < chg_nr; chg_idx++) { |
318 | /* keep track of all overlapping bios entries */ | 312 | /* Keep track of all overlapping entries */ |
319 | if (change_point[chgidx]->addr == | 313 | if (change_point[chg_idx]->addr == change_point[chg_idx]->entry->addr) { |
320 | change_point[chgidx]->pbios->addr) { | 314 | /* Add map entry to overlap list (> 1 entry implies an overlap) */ |
321 | /* | 315 | overlap_list[overlap_entries++] = change_point[chg_idx]->entry; |
322 | * add map entry to overlap list (> 1 entry | ||
323 | * implies an overlap) | ||
324 | */ | ||
325 | overlap_list[overlap_entries++] = | ||
326 | change_point[chgidx]->pbios; | ||
327 | } else { | 316 | } else { |
328 | /* | 317 | /* Remove entry from list (order independent, so swap with last): */ |
329 | * remove entry from list (order independent, | ||
330 | * so swap with last) | ||
331 | */ | ||
332 | for (i = 0; i < overlap_entries; i++) { | 318 | for (i = 0; i < overlap_entries; i++) { |
333 | if (overlap_list[i] == | 319 | if (overlap_list[i] == change_point[chg_idx]->entry) |
334 | change_point[chgidx]->pbios) | 320 | overlap_list[i] = overlap_list[overlap_entries-1]; |
335 | overlap_list[i] = | ||
336 | overlap_list[overlap_entries-1]; | ||
337 | } | 321 | } |
338 | overlap_entries--; | 322 | overlap_entries--; |
339 | } | 323 | } |
340 | /* | 324 | /* |
341 | * if there are overlapping entries, decide which | 325 | * If there are overlapping entries, decide which |
342 | * "type" to use (larger value takes precedence -- | 326 | * "type" to use (larger value takes precedence -- |
343 | * 1=usable, 2,3,4,4+=unusable) | 327 | * 1=usable, 2,3,4,4+=unusable) |
344 | */ | 328 | */ |
345 | current_type = 0; | 329 | current_type = 0; |
346 | for (i = 0; i < overlap_entries; i++) | 330 | for (i = 0; i < overlap_entries; i++) { |
347 | if (overlap_list[i]->type > current_type) | 331 | if (overlap_list[i]->type > current_type) |
348 | current_type = overlap_list[i]->type; | 332 | current_type = overlap_list[i]->type; |
349 | /* | 333 | } |
350 | * continue building up new bios map based on this | 334 | |
351 | * information | 335 | /* Continue building up new map based on this information: */ |
352 | */ | 336 | if (current_type != last_type || current_type == E820_TYPE_PRAM) { |
353 | if (current_type != last_type || current_type == E820_PRAM) { | ||
354 | if (last_type != 0) { | 337 | if (last_type != 0) { |
355 | new_bios[new_bios_entry].size = | 338 | new_entries[new_nr_entries].size = change_point[chg_idx]->addr - last_addr; |
356 | change_point[chgidx]->addr - last_addr; | 339 | /* Move forward only if the new size was non-zero: */ |
357 | /* | 340 | if (new_entries[new_nr_entries].size != 0) |
358 | * move forward only if the new size | 341 | /* No more space left for new entries? */ |
359 | * was non-zero | 342 | if (++new_nr_entries >= max_nr_entries) |
360 | */ | ||
361 | if (new_bios[new_bios_entry].size != 0) | ||
362 | /* | ||
363 | * no more space left for new | ||
364 | * bios entries ? | ||
365 | */ | ||
366 | if (++new_bios_entry >= max_nr_map) | ||
367 | break; | 343 | break; |
368 | } | 344 | } |
369 | if (current_type != 0) { | 345 | if (current_type != 0) { |
370 | new_bios[new_bios_entry].addr = | 346 | new_entries[new_nr_entries].addr = change_point[chg_idx]->addr; |
371 | change_point[chgidx]->addr; | 347 | new_entries[new_nr_entries].type = current_type; |
372 | new_bios[new_bios_entry].type = current_type; | 348 | last_addr = change_point[chg_idx]->addr; |
373 | last_addr = change_point[chgidx]->addr; | ||
374 | } | 349 | } |
375 | last_type = current_type; | 350 | last_type = current_type; |
376 | } | 351 | } |
377 | } | 352 | } |
378 | /* retain count for new bios entries */ | ||
379 | new_nr = new_bios_entry; | ||
380 | 353 | ||
381 | /* copy new bios mapping into original location */ | 354 | /* Copy the new entries into the original location: */ |
382 | memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry)); | 355 | memcpy(entries, new_entries, new_nr_entries*sizeof(*entries)); |
383 | *pnr_map = new_nr; | 356 | table->nr_entries = new_nr_entries; |
384 | 357 | ||
385 | return 0; | 358 | return 0; |
386 | } | 359 | } |
387 | 360 | ||
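To make the change-point algorithm above easier to follow, here is a compact userspace sketch of the whole pass: generate start/end change points, sort them, keep a list of currently "open" entries, and emit a new output entry whenever the highest open type changes. It deliberately omits the E820_TYPE_PRAM special case and uses invented names; it illustrates the idea and is not the kernel function:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_E 16

    struct range { uint64_t addr, size; int type; };
    struct cp { struct range *r; uint64_t addr; };

    static int cp_cmp(const void *a, const void *b)
    {
        const struct cp *x = a, *y = b;

        if (x->addr != y->addr)
            return x->addr > y->addr ? 1 : -1;
        return (x->addr != x->r->addr) - (y->addr != y->r->addr);
    }

    /* Rewrite tab[] in place as a sorted, non-overlapping map; higher type wins. */
    static int sanitize(struct range *tab, int *nr)
    {
        struct cp cps[2 * MAX_E];
        struct range *live[MAX_E], out[MAX_E];
        int ncp = 0, nlive = 0, nout = 0, last_type = 0;
        uint64_t last_addr = 0;

        if (*nr < 2)
            return -1;

        for (int i = 0; i < *nr; i++) {
            if (!tab[i].size)
                continue;
            cps[ncp++] = (struct cp){ &tab[i], tab[i].addr };
            cps[ncp++] = (struct cp){ &tab[i], tab[i].addr + tab[i].size };
        }
        qsort(cps, ncp, sizeof(cps[0]), cp_cmp);

        for (int c = 0; c < ncp; c++) {
            int cur = 0;

            if (cps[c].addr == cps[c].r->addr) {        /* Start point: entry becomes "open" */
                live[nlive++] = cps[c].r;
            } else {                                    /* End point: close the entry */
                for (int i = 0; i < nlive; i++)
                    if (live[i] == cps[c].r)
                        live[i] = live[nlive - 1];
                nlive--;
            }

            for (int i = 0; i < nlive; i++)             /* Highest open type wins */
                if (live[i]->type > cur)
                    cur = live[i]->type;

            if (cur != last_type) {
                if (last_type != 0 && cps[c].addr != last_addr) {
                    if (nout >= MAX_E)                  /* Out of room: mirror the kernel's cap */
                        break;
                    out[nout++] = (struct range){ last_addr, cps[c].addr - last_addr, last_type };
                }
                if (cur != 0)
                    last_addr = cps[c].addr;
                last_type = cur;
            }
        }

        for (int i = 0; i < nout; i++)
            tab[i] = out[i];
        *nr = nout;
        return 0;
    }

    int main(void)
    {
        struct range tab[MAX_E] = {
            { 0x0,    0x3000, 1 },          /* RAM */
            { 0x2000, 0x2000, 2 },          /* reserved, overlaps the RAM entry */
        };
        int nr = 2;

        sanitize(tab, &nr);
        for (int i = 0; i < nr; i++)
            printf("[%#llx-%#llx] type %d\n",
                   (unsigned long long)tab[i].addr,
                   (unsigned long long)(tab[i].addr + tab[i].size - 1), tab[i].type);
        return 0;
    }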
388 | static int __init __append_e820_map(struct e820entry *biosmap, int nr_map) | 361 | static int __init __append_e820_table(struct boot_e820_entry *entries, u32 nr_entries) |
389 | { | 362 | { |
390 | while (nr_map) { | 363 | struct boot_e820_entry *entry = entries; |
391 | u64 start = biosmap->addr; | 364 | |
392 | u64 size = biosmap->size; | 365 | while (nr_entries) { |
366 | u64 start = entry->addr; | ||
367 | u64 size = entry->size; | ||
393 | u64 end = start + size - 1; | 368 | u64 end = start + size - 1; |
394 | u32 type = biosmap->type; | 369 | u32 type = entry->type; |
395 | 370 | ||
396 | /* Overflow in 64 bits? Ignore the memory map. */ | 371 | /* Ignore the whole map on 64-bit overflow: */ |
397 | if (start > end && likely(size)) | 372 | if (start > end && likely(size)) |
398 | return -1; | 373 | return -1; |
399 | 374 | ||
400 | e820_add_region(start, size, type); | 375 | e820__range_add(start, size, type); |
401 | 376 | ||
402 | biosmap++; | 377 | entry++; |
403 | nr_map--; | 378 | nr_entries--; |
404 | } | 379 | } |
405 | return 0; | 380 | return 0; |
406 | } | 381 | } |
407 | 382 | ||
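The wrap-around test is the only validation performed here: end is computed as start + size - 1, so an entry whose end wraps past 2^64 makes start greater than end, and in that case the -1 return propagates and the whole map is discarded in favor of a fallback. A tiny sketch of the check, with an invented helper name:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* An entry whose end wraps past 2^64 is bogus; the 'size != 0' part avoids
     * flagging a degenerate empty entry, where start + 0 - 1 underflows. */
    static bool entry_wraps(uint64_t start, uint64_t size)
    {
        uint64_t end = start + size - 1;

        return start > end && size != 0;
    }

    int main(void)
    {
        printf("%d\n", entry_wraps(0xfffffffffffff000ULL, 0x2000)); /* 1: wraps */
        printf("%d\n", entry_wraps(0x100000, 0x1000));              /* 0: fine  */
        return 0;
    }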
408 | /* | 383 | /* |
409 | * Copy the BIOS e820 map into a safe place. | 384 | * Copy the BIOS E820 map into a safe place. |
410 | * | 385 | * |
411 | * Sanity-check it while we're at it.. | 386 | * Sanity-check it while we're at it.. |
412 | * | 387 | * |
@@ -414,18 +389,17 @@ static int __init __append_e820_map(struct e820entry *biosmap, int nr_map) | |||
414 | * will have given us a memory map that we can use to properly | 389 | * will have given us a memory map that we can use to properly |
415 | * set up memory. If we aren't, we'll fake a memory map. | 390 | * set up memory. If we aren't, we'll fake a memory map. |
416 | */ | 391 | */ |
417 | static int __init append_e820_map(struct e820entry *biosmap, int nr_map) | 392 | static int __init append_e820_table(struct boot_e820_entry *entries, u32 nr_entries) |
418 | { | 393 | { |
419 | /* Only one memory region (or negative)? Ignore it */ | 394 | /* Only one memory region (or negative)? Ignore it */ |
420 | if (nr_map < 2) | 395 | if (nr_entries < 2) |
421 | return -1; | 396 | return -1; |
422 | 397 | ||
423 | return __append_e820_map(biosmap, nr_map); | 398 | return __append_e820_table(entries, nr_entries); |
424 | } | 399 | } |
425 | 400 | ||
426 | static u64 __init __e820_update_range(struct e820map *e820x, u64 start, | 401 | static u64 __init |
427 | u64 size, unsigned old_type, | 402 | __e820__range_update(struct e820_table *table, u64 start, u64 size, enum e820_type old_type, enum e820_type new_type) |
428 | unsigned new_type) | ||
429 | { | 403 | { |
430 | u64 end; | 404 | u64 end; |
431 | unsigned int i; | 405 | unsigned int i; |
@@ -437,77 +411,73 @@ static u64 __init __e820_update_range(struct e820map *e820x, u64 start, | |||
437 | size = ULLONG_MAX - start; | 411 | size = ULLONG_MAX - start; |
438 | 412 | ||
439 | end = start + size; | 413 | end = start + size; |
440 | printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ", | 414 | printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ", start, end - 1); |
441 | (unsigned long long) start, (unsigned long long) (end - 1)); | ||
442 | e820_print_type(old_type); | 415 | e820_print_type(old_type); |
443 | printk(KERN_CONT " ==> "); | 416 | pr_cont(" ==> "); |
444 | e820_print_type(new_type); | 417 | e820_print_type(new_type); |
445 | printk(KERN_CONT "\n"); | 418 | pr_cont("\n"); |
446 | 419 | ||
447 | for (i = 0; i < e820x->nr_map; i++) { | 420 | for (i = 0; i < table->nr_entries; i++) { |
448 | struct e820entry *ei = &e820x->map[i]; | 421 | struct e820_entry *entry = &table->entries[i]; |
449 | u64 final_start, final_end; | 422 | u64 final_start, final_end; |
450 | u64 ei_end; | 423 | u64 entry_end; |
451 | 424 | ||
452 | if (ei->type != old_type) | 425 | if (entry->type != old_type) |
453 | continue; | 426 | continue; |
454 | 427 | ||
455 | ei_end = ei->addr + ei->size; | 428 | entry_end = entry->addr + entry->size; |
456 | /* totally covered by new range? */ | 429 | |
457 | if (ei->addr >= start && ei_end <= end) { | 430 | /* Completely covered by new range? */ |
458 | ei->type = new_type; | 431 | if (entry->addr >= start && entry_end <= end) { |
459 | real_updated_size += ei->size; | 432 | entry->type = new_type; |
433 | real_updated_size += entry->size; | ||
460 | continue; | 434 | continue; |
461 | } | 435 | } |
462 | 436 | ||
463 | /* new range is totally covered? */ | 437 | /* New range is completely covered? */ |
464 | if (ei->addr < start && ei_end > end) { | 438 | if (entry->addr < start && entry_end > end) { |
465 | __e820_add_region(e820x, start, size, new_type); | 439 | __e820__range_add(table, start, size, new_type); |
466 | __e820_add_region(e820x, end, ei_end - end, ei->type); | 440 | __e820__range_add(table, end, entry_end - end, entry->type); |
467 | ei->size = start - ei->addr; | 441 | entry->size = start - entry->addr; |
468 | real_updated_size += size; | 442 | real_updated_size += size; |
469 | continue; | 443 | continue; |
470 | } | 444 | } |
471 | 445 | ||
472 | /* partially covered */ | 446 | /* Partially covered: */ |
473 | final_start = max(start, ei->addr); | 447 | final_start = max(start, entry->addr); |
474 | final_end = min(end, ei_end); | 448 | final_end = min(end, entry_end); |
475 | if (final_start >= final_end) | 449 | if (final_start >= final_end) |
476 | continue; | 450 | continue; |
477 | 451 | ||
478 | __e820_add_region(e820x, final_start, final_end - final_start, | 452 | __e820__range_add(table, final_start, final_end - final_start, new_type); |
479 | new_type); | ||
480 | 453 | ||
481 | real_updated_size += final_end - final_start; | 454 | real_updated_size += final_end - final_start; |
482 | 455 | ||
483 | /* | 456 | /* |
484 | * left range could be head or tail, so need to update | 457 | * Left range could be head or tail, so need to update |
485 | * size at first. | 458 | * its size first: |
486 | */ | 459 | */ |
487 | ei->size -= final_end - final_start; | 460 | entry->size -= final_end - final_start; |
488 | if (ei->addr < final_start) | 461 | if (entry->addr < final_start) |
489 | continue; | 462 | continue; |
490 | ei->addr = final_end; | 463 | |
464 | entry->addr = final_end; | ||
491 | } | 465 | } |
492 | return real_updated_size; | 466 | return real_updated_size; |
493 | } | 467 | } |
494 | 468 | ||
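__e820__range_update() has to handle three overlap shapes: an entry fully inside the updated range (just retype it), an entry straddling both ends (carve out the middle, keep head and tail with the old type), and a one-sided partial overlap (add the overlapping piece with the new type, shrink the original, and slide its start if the overlap was at its head). A self-contained sketch of those three cases with invented names; unlike the kernel, it snapshots the entry count so newly appended pieces are never revisited:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_E 16

    struct range { uint64_t addr, size; int type; };

    static void add(struct range *tab, int *nr, uint64_t addr, uint64_t size, int type)
    {
        if (*nr < MAX_E)        /* Drop silently if full; the kernel logs an error */
            tab[(*nr)++] = (struct range){ addr, size, type };
    }

    /* Retype the part of [start, start+size) currently marked 'old' to 'new';
     * returns how many bytes actually changed type. */
    static uint64_t retype(struct range *tab, int *nr,
                           uint64_t start, uint64_t size, int old, int new)
    {
        uint64_t end = start + size, done = 0;
        int n = *nr;            /* Entries appended below must not be revisited */

        for (int i = 0; i < n; i++) {
            struct range *e = &tab[i];
            uint64_t e_end = e->addr + e->size;

            if (e->type != old || e->addr >= end || e_end <= start)
                continue;

            if (e->addr >= start && e_end <= end) {         /* Fully inside: just retype */
                e->type = new;
                done += e->size;
            } else if (e->addr < start && e_end > end) {    /* Straddles both ends */
                add(tab, nr, start, size, new);             /* middle piece, new type  */
                add(tab, nr, end, e_end - end, old);        /* tail keeps the old type */
                e->size = start - e->addr;                  /* head stays in the entry */
                done += size;
            } else {                                        /* Partial overlap, one side */
                uint64_t lo = e->addr > start ? e->addr : start;
                uint64_t hi = e_end < end ? e_end : end;

                add(tab, nr, lo, hi - lo, new);
                e->size -= hi - lo;
                if (e->addr >= lo)      /* Overlap was the head: slide the entry up */
                    e->addr = hi;
                done += hi - lo;
            }
        }
        return done;
    }

    int main(void)
    {
        struct range tab[MAX_E] = { { 0x0, 0x10000, 1 } };
        int nr = 1;

        /* Carve a reserved window out of the middle of the RAM entry: */
        retype(tab, &nr, 0x4000, 0x2000, 1, 2);
        for (int i = 0; i < nr; i++)
            printf("[%#llx-%#llx] type %d\n", (unsigned long long)tab[i].addr,
                   (unsigned long long)(tab[i].addr + tab[i].size - 1), tab[i].type);
        return 0;
    }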
495 | u64 __init e820_update_range(u64 start, u64 size, unsigned old_type, | 469 | u64 __init e820__range_update(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type) |
496 | unsigned new_type) | ||
497 | { | 470 | { |
498 | return __e820_update_range(e820, start, size, old_type, new_type); | 471 | return __e820__range_update(e820_table, start, size, old_type, new_type); |
499 | } | 472 | } |
500 | 473 | ||
501 | static u64 __init e820_update_range_saved(u64 start, u64 size, | 474 | static u64 __init e820__range_update_firmware(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type) |
502 | unsigned old_type, unsigned new_type) | ||
503 | { | 475 | { |
504 | return __e820_update_range(e820_saved, start, size, old_type, | 476 | return __e820__range_update(e820_table_firmware, start, size, old_type, new_type); |
505 | new_type); | ||
506 | } | 477 | } |
507 | 478 | ||
508 | /* make e820 not cover the range */ | 479 | /* Remove a range of memory from the E820 table: */ |
509 | u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type, | 480 | u64 __init e820__range_remove(u64 start, u64 size, enum e820_type old_type, bool check_type) |
510 | int checktype) | ||
511 | { | 481 | { |
512 | int i; | 482 | int i; |
513 | u64 end; | 483 | u64 end; |
@@ -517,85 +487,89 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type, | |||
517 | size = ULLONG_MAX - start; | 487 | size = ULLONG_MAX - start; |
518 | 488 | ||
519 | end = start + size; | 489 | end = start + size; |
520 | printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ", | 490 | printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ", start, end - 1); |
521 | (unsigned long long) start, (unsigned long long) (end - 1)); | 491 | if (check_type) |
522 | if (checktype) | ||
523 | e820_print_type(old_type); | 492 | e820_print_type(old_type); |
524 | printk(KERN_CONT "\n"); | 493 | pr_cont("\n"); |
525 | 494 | ||
526 | for (i = 0; i < e820->nr_map; i++) { | 495 | for (i = 0; i < e820_table->nr_entries; i++) { |
527 | struct e820entry *ei = &e820->map[i]; | 496 | struct e820_entry *entry = &e820_table->entries[i]; |
528 | u64 final_start, final_end; | 497 | u64 final_start, final_end; |
529 | u64 ei_end; | 498 | u64 entry_end; |
530 | 499 | ||
531 | if (checktype && ei->type != old_type) | 500 | if (check_type && entry->type != old_type) |
532 | continue; | 501 | continue; |
533 | 502 | ||
534 | ei_end = ei->addr + ei->size; | 503 | entry_end = entry->addr + entry->size; |
535 | /* totally covered? */ | 504 | |
536 | if (ei->addr >= start && ei_end <= end) { | 505 | /* Completely covered? */ |
537 | real_removed_size += ei->size; | 506 | if (entry->addr >= start && entry_end <= end) { |
538 | memset(ei, 0, sizeof(struct e820entry)); | 507 | real_removed_size += entry->size; |
508 | memset(entry, 0, sizeof(*entry)); | ||
539 | continue; | 509 | continue; |
540 | } | 510 | } |
541 | 511 | ||
542 | /* new range is totally covered? */ | 512 | /* Is the new range completely covered? */ |
543 | if (ei->addr < start && ei_end > end) { | 513 | if (entry->addr < start && entry_end > end) { |
544 | e820_add_region(end, ei_end - end, ei->type); | 514 | e820__range_add(end, entry_end - end, entry->type); |
545 | ei->size = start - ei->addr; | 515 | entry->size = start - entry->addr; |
546 | real_removed_size += size; | 516 | real_removed_size += size; |
547 | continue; | 517 | continue; |
548 | } | 518 | } |
549 | 519 | ||
550 | /* partially covered */ | 520 | /* Partially covered: */ |
551 | final_start = max(start, ei->addr); | 521 | final_start = max(start, entry->addr); |
552 | final_end = min(end, ei_end); | 522 | final_end = min(end, entry_end); |
553 | if (final_start >= final_end) | 523 | if (final_start >= final_end) |
554 | continue; | 524 | continue; |
525 | |||
555 | real_removed_size += final_end - final_start; | 526 | real_removed_size += final_end - final_start; |
556 | 527 | ||
557 | /* | 528 | /* |
558 | * left range could be head or tail, so need to update | 529 | * Left range could be head or tail, so need to update |
559 | * size at first. | 530 | * the size first: |
560 | */ | 531 | */ |
561 | ei->size -= final_end - final_start; | 532 | entry->size -= final_end - final_start; |
562 | if (ei->addr < final_start) | 533 | if (entry->addr < final_start) |
563 | continue; | 534 | continue; |
564 | ei->addr = final_end; | 535 | |
536 | entry->addr = final_end; | ||
565 | } | 537 | } |
566 | return real_removed_size; | 538 | return real_removed_size; |
567 | } | 539 | } |
568 | 540 | ||
569 | void __init update_e820(void) | 541 | void __init e820__update_table_print(void) |
570 | { | 542 | { |
571 | if (sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map)) | 543 | if (e820__update_table(e820_table)) |
572 | return; | 544 | return; |
573 | printk(KERN_INFO "e820: modified physical RAM map:\n"); | 545 | |
574 | e820_print_map("modified"); | 546 | pr_info("e820: modified physical RAM map:\n"); |
547 | e820__print_table("modified"); | ||
575 | } | 548 | } |
576 | static void __init update_e820_saved(void) | 549 | |
550 | static void __init e820__update_table_firmware(void) | ||
577 | { | 551 | { |
578 | sanitize_e820_map(e820_saved->map, ARRAY_SIZE(e820_saved->map), | 552 | e820__update_table(e820_table_firmware); |
579 | &e820_saved->nr_map); | ||
580 | } | 553 | } |
554 | |||
581 | #define MAX_GAP_END 0x100000000ull | 555 | #define MAX_GAP_END 0x100000000ull |
556 | |||
582 | /* | 557 | /* |
583 | * Search for a gap in the e820 memory space from 0 to MAX_GAP_END. | 558 | * Search for a gap in the E820 memory space from 0 to MAX_GAP_END (4GB). |
584 | */ | 559 | */ |
585 | static int __init e820_search_gap(unsigned long *gapstart, | 560 | static int __init e820_search_gap(unsigned long *gapstart, unsigned long *gapsize) |
586 | unsigned long *gapsize) | ||
587 | { | 561 | { |
588 | unsigned long long last = MAX_GAP_END; | 562 | unsigned long long last = MAX_GAP_END; |
589 | int i = e820->nr_map; | 563 | int i = e820_table->nr_entries; |
590 | int found = 0; | 564 | int found = 0; |
591 | 565 | ||
592 | while (--i >= 0) { | 566 | while (--i >= 0) { |
593 | unsigned long long start = e820->map[i].addr; | 567 | unsigned long long start = e820_table->entries[i].addr; |
594 | unsigned long long end = start + e820->map[i].size; | 568 | unsigned long long end = start + e820_table->entries[i].size; |
595 | 569 | ||
596 | /* | 570 | /* |
597 | * Since "last" is at most 4GB, we know we'll | 571 | * Since "last" is at most 4GB, we know we'll |
598 | * fit in 32 bits if this condition is true | 572 | * fit in 32 bits if this condition is true: |
599 | */ | 573 | */ |
600 | if (last > end) { | 574 | if (last > end) { |
601 | unsigned long gap = last - end; | 575 | unsigned long gap = last - end; |
@@ -613,12 +587,14 @@ static int __init e820_search_gap(unsigned long *gapstart, | |||
613 | } | 587 | } |
614 | 588 | ||
615 | /* | 589 | /* |
616 | * Search for the biggest gap in the low 32 bits of the e820 | 590 | * Search for the biggest gap in the low 32 bits of the E820 |
617 | * memory space. We pass this space to PCI to assign MMIO resources | 591 | * memory space. We pass this space to the PCI subsystem, so |
618 | * for hotplug or unconfigured devices in. | 592 | * that it can assign MMIO resources for hotplug or |
593 | * unconfigured devices in. | ||
594 | * | ||
619 | * Hopefully the BIOS let enough space left. | 595 | * Hopefully the BIOS let enough space left. |
620 | */ | 596 | */ |
621 | __init void e820_setup_gap(void) | 597 | __init void e820__setup_pci_gap(void) |
622 | { | 598 | { |
623 | unsigned long gapstart, gapsize; | 599 | unsigned long gapstart, gapsize; |
624 | int found; | 600 | int found; |
@@ -629,138 +605,143 @@ __init void e820_setup_gap(void) | |||
629 | if (!found) { | 605 | if (!found) { |
630 | #ifdef CONFIG_X86_64 | 606 | #ifdef CONFIG_X86_64 |
631 | gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024; | 607 | gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024; |
632 | printk(KERN_ERR | 608 | pr_err( |
633 | "e820: cannot find a gap in the 32bit address range\n" | 609 | "e820: Cannot find an available gap in the 32-bit address range\n" |
634 | "e820: PCI devices with unassigned 32bit BARs may break!\n"); | 610 | "e820: PCI devices with unassigned 32-bit BARs may not work!\n"); |
635 | #else | 611 | #else |
636 | gapstart = 0x10000000; | 612 | gapstart = 0x10000000; |
637 | #endif | 613 | #endif |
638 | } | 614 | } |
639 | 615 | ||
640 | /* | 616 | /* |
641 | * e820_reserve_resources_late protect stolen RAM already | 617 | * e820__reserve_resources_late() protects stolen RAM already: |
642 | */ | 618 | */ |
643 | pci_mem_start = gapstart; | 619 | pci_mem_start = gapstart; |
644 | 620 | ||
645 | printk(KERN_INFO | 621 | pr_info("e820: [mem %#010lx-%#010lx] available for PCI devices\n", gapstart, gapstart + gapsize - 1); |
646 | "e820: [mem %#010lx-%#010lx] available for PCI devices\n", | ||
647 | gapstart, gapstart + gapsize - 1); | ||
648 | } | 622 | } |
649 | 623 | ||
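The gap search walks the (sorted) table from the highest entry downwards, carrying 'last' (initially 4GB) and measuring the hole between each entry's end and 'last'; the caller seeds the gap size with the minimum it will accept (4MB in the kernel). A hedged userspace sketch of that scan with invented names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_GAP_END 0x100000000ULL      /* Only look below 4GB */

    struct range { uint64_t addr, size; };

    /* Find the biggest hole below 4GB between the given (sorted) ranges. */
    static bool find_gap(const struct range *tab, int n,
                         uint64_t *gap_start, uint64_t *gap_size)
    {
        uint64_t last = MAX_GAP_END;
        bool found = false;

        for (int i = n - 1; i >= 0; i--) {  /* Walk from the top of memory down */
            uint64_t start = tab[i].addr;
            uint64_t end = start + tab[i].size;

            if (last > end && last - end >= *gap_size) {
                *gap_start = end;
                *gap_size = last - end;
                found = true;
            }
            if (start < last)
                last = start;
        }
        return found;
    }

    int main(void)
    {
        struct range tab[] = { { 0x0, 0x9fc00 }, { 0x100000, 0xbff00000 } };
        uint64_t start = 0, size = 0x400000;    /* Require at least 4MB, like the kernel */

        if (find_gap(tab, 2, &start, &size))
            printf("gap: [%#llx-%#llx]\n", (unsigned long long)start,
                   (unsigned long long)(start + size - 1));
        return 0;
    }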
650 | /* | 624 | /* |
651 | * Called late during init, in free_initmem(). | 625 | * Called late during init, in free_initmem(). |
652 | * | 626 | * |
653 | * Initial e820 and e820_saved are largish __initdata arrays. | 627 | * Initial e820_table and e820_table_firmware are largish __initdata arrays. |
654 | * Copy them to (usually much smaller) dynamically allocated area. | 628 | * |
655 | * This is done after all tweaks we ever do to them: | 629 | * Copy them to a (usually much smaller) dynamically allocated area that is |
656 | * all functions which modify them are __init functions, | 630 | * sized precisely after the number of e820 entries. |
657 | * they won't exist after this point. | 631 | * |
632 | * This is done after we've performed all the fixes and tweaks to the tables. | ||
633 | * All functions which modify them are __init functions, which won't exist | ||
634 | * after free_initmem(). | ||
658 | */ | 635 | */ |
659 | __init void e820_reallocate_tables(void) | 636 | __init void e820__reallocate_tables(void) |
660 | { | 637 | { |
661 | struct e820map *n; | 638 | struct e820_table *n; |
662 | int size; | 639 | int size; |
663 | 640 | ||
664 | size = offsetof(struct e820map, map) + sizeof(struct e820entry) * e820->nr_map; | 641 | size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table->nr_entries; |
665 | n = kmalloc(size, GFP_KERNEL); | 642 | n = kmalloc(size, GFP_KERNEL); |
666 | BUG_ON(!n); | 643 | BUG_ON(!n); |
667 | memcpy(n, e820, size); | 644 | memcpy(n, e820_table, size); |
668 | e820 = n; | 645 | e820_table = n; |
669 | 646 | ||
670 | size = offsetof(struct e820map, map) + sizeof(struct e820entry) * e820_saved->nr_map; | 647 | size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries; |
671 | n = kmalloc(size, GFP_KERNEL); | 648 | n = kmalloc(size, GFP_KERNEL); |
672 | BUG_ON(!n); | 649 | BUG_ON(!n); |
673 | memcpy(n, e820_saved, size); | 650 | memcpy(n, e820_table_firmware, size); |
674 | e820_saved = n; | 651 | e820_table_firmware = n; |
675 | } | 652 | } |
676 | 653 | ||
677 | /** | 654 | /* |
678 | * Because of the size limitation of struct boot_params, only first | 655 | * Because of the small fixed size of struct boot_params, only the first |
679 | * 128 E820 memory entries are passed to kernel via | 656 | * 128 E820 memory entries are passed to the kernel via boot_params.e820_table, |
680 | * boot_params.e820_map, others are passed via SETUP_E820_EXT node of | 657 | * the remaining (if any) entries are passed via the SETUP_E820_EXT node of |
681 | * linked list of struct setup_data, which is parsed here. | 658 | * struct setup_data, which is parsed here. |
682 | */ | 659 | */ |
683 | void __init parse_e820_ext(u64 phys_addr, u32 data_len) | 660 | void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len) |
684 | { | 661 | { |
685 | int entries; | 662 | int entries; |
686 | struct e820entry *extmap; | 663 | struct boot_e820_entry *extmap; |
687 | struct setup_data *sdata; | 664 | struct setup_data *sdata; |
688 | 665 | ||
689 | sdata = early_memremap(phys_addr, data_len); | 666 | sdata = early_memremap(phys_addr, data_len); |
690 | entries = sdata->len / sizeof(struct e820entry); | 667 | entries = sdata->len / sizeof(*extmap); |
691 | extmap = (struct e820entry *)(sdata->data); | 668 | extmap = (struct boot_e820_entry *)(sdata->data); |
692 | __append_e820_map(extmap, entries); | 669 | |
693 | sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map); | 670 | __append_e820_table(extmap, entries); |
671 | e820__update_table(e820_table); | ||
672 | |||
694 | early_memunmap(sdata, data_len); | 673 | early_memunmap(sdata, data_len); |
695 | printk(KERN_INFO "e820: extended physical RAM map:\n"); | 674 | pr_info("e820: extended physical RAM map:\n"); |
696 | e820_print_map("extended"); | 675 | e820__print_table("extended"); |
697 | } | 676 | } |
698 | 677 | ||
699 | #if defined(CONFIG_X86_64) || \ | 678 | /* |
700 | (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION)) | ||
701 | /** | ||
702 | * Find the ranges of physical addresses that do not correspond to | 679 | * Find the ranges of physical addresses that do not correspond to |
703 | * e820 RAM areas and mark the corresponding pages as nosave for | 680 | * E820 RAM areas and register the corresponding pages as 'nosave' for |
704 | * hibernation (32 bit) or software suspend and suspend to RAM (64 bit). | 681 | * hibernation (32-bit) or software suspend and suspend to RAM (64-bit). |
705 | * | 682 | * |
706 | * This function requires the e820 map to be sorted and without any | 683 | * This function requires the E820 map to be sorted and without any |
707 | * overlapping entries. | 684 | * overlapping entries. |
708 | */ | 685 | */ |
709 | void __init e820_mark_nosave_regions(unsigned long limit_pfn) | 686 | void __init e820__register_nosave_regions(unsigned long limit_pfn) |
710 | { | 687 | { |
711 | int i; | 688 | int i; |
712 | unsigned long pfn = 0; | 689 | unsigned long pfn = 0; |
713 | 690 | ||
714 | for (i = 0; i < e820->nr_map; i++) { | 691 | for (i = 0; i < e820_table->nr_entries; i++) { |
715 | struct e820entry *ei = &e820->map[i]; | 692 | struct e820_entry *entry = &e820_table->entries[i]; |
716 | 693 | ||
717 | if (pfn < PFN_UP(ei->addr)) | 694 | if (pfn < PFN_UP(entry->addr)) |
718 | register_nosave_region(pfn, PFN_UP(ei->addr)); | 695 | register_nosave_region(pfn, PFN_UP(entry->addr)); |
719 | 696 | ||
720 | pfn = PFN_DOWN(ei->addr + ei->size); | 697 | pfn = PFN_DOWN(entry->addr + entry->size); |
721 | 698 | ||
722 | if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN) | 699 | if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) |
723 | register_nosave_region(PFN_UP(ei->addr), pfn); | 700 | register_nosave_region(PFN_UP(entry->addr), pfn); |
724 | 701 | ||
725 | if (pfn >= limit_pfn) | 702 | if (pfn >= limit_pfn) |
726 | break; | 703 | break; |
727 | } | 704 | } |
728 | } | 705 | } |
729 | #endif | ||
730 | 706 | ||
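The nosave pass rounds conservatively: holes and non-RAM entries are registered from PFN_UP(start) to PFN_DOWN(end), so partially covered pages are never treated as saveable by mistake. A small sketch of the same loop; the types are invented, the limit_pfn cut-off is omitted, and register_nosave_region() is replaced by a printout:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PFN_UP(x)   (((x) + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    struct range { uint64_t addr, size; int type; };    /* type 1 == RAM here */

    static void mark_nosave(uint64_t start_pfn, uint64_t end_pfn)
    {
        if (start_pfn < end_pfn)
            printf("nosave: pfn %#llx-%#llx\n", (unsigned long long)start_pfn,
                   (unsigned long long)(end_pfn - 1));
    }

    int main(void)
    {
        /* Sorted, non-overlapping map: two RAM entries and one non-RAM (type 3) entry */
        struct range tab[] = { { 0x0, 0x9f000, 1 }, { 0x100000, 0x100000, 1 },
                               { 0x200000, 0x10000, 3 } };
        uint64_t pfn = 0;

        for (int i = 0; i < 3; i++) {
            if (pfn < PFN_UP(tab[i].addr))          /* Hole before this entry */
                mark_nosave(pfn, PFN_UP(tab[i].addr));

            pfn = PFN_DOWN(tab[i].addr + tab[i].size);

            if (tab[i].type != 1)                   /* Entry itself is not RAM */
                mark_nosave(PFN_UP(tab[i].addr), pfn);
        }
        return 0;
    }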
731 | #ifdef CONFIG_ACPI | 707 | #ifdef CONFIG_ACPI |
732 | /** | 708 | /* |
733 | * Mark ACPI NVS memory region, so that we can save/restore it during | 709 | * Register ACPI NVS memory regions, so that we can save/restore them during |
734 | * hibernation and the subsequent resume. | 710 | * hibernation and the subsequent resume: |
735 | */ | 711 | */ |
736 | static int __init e820_mark_nvs_memory(void) | 712 | static int __init e820__register_nvs_regions(void) |
737 | { | 713 | { |
738 | int i; | 714 | int i; |
739 | 715 | ||
740 | for (i = 0; i < e820->nr_map; i++) { | 716 | for (i = 0; i < e820_table->nr_entries; i++) { |
741 | struct e820entry *ei = &e820->map[i]; | 717 | struct e820_entry *entry = &e820_table->entries[i]; |
742 | 718 | ||
743 | if (ei->type == E820_NVS) | 719 | if (entry->type == E820_TYPE_NVS) |
744 | acpi_nvs_register(ei->addr, ei->size); | 720 | acpi_nvs_register(entry->addr, entry->size); |
745 | } | 721 | } |
746 | 722 | ||
747 | return 0; | 723 | return 0; |
748 | } | 724 | } |
749 | core_initcall(e820_mark_nvs_memory); | 725 | core_initcall(e820__register_nvs_regions); |
750 | #endif | 726 | #endif |
751 | 727 | ||
752 | /* | 728 | /* |
753 | * pre allocated 4k and reserved it in memblock and e820_saved | 729 | * Allocate the requested number of bytes with the requested alignment |
730 | * and return (the physical address) to the caller. Also register this | ||
731 | * range in the 'firmware' E820 table as a reserved range. | ||
732 | * | ||
733 | * This allows kexec to fake a new mptable, as if it came from the real | ||
734 | * system. | ||
754 | */ | 735 | */ |
755 | u64 __init early_reserve_e820(u64 size, u64 align) | 736 | u64 __init e820__memblock_alloc_reserved(u64 size, u64 align) |
756 | { | 737 | { |
757 | u64 addr; | 738 | u64 addr; |
758 | 739 | ||
759 | addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); | 740 | addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); |
760 | if (addr) { | 741 | if (addr) { |
761 | e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED); | 742 | e820__range_update_firmware(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED); |
762 | printk(KERN_INFO "e820: update e820_saved for early_reserve_e820\n"); | 743 | pr_info("e820: update e820_table_firmware for e820__memblock_alloc_reserved()\n"); |
763 | update_e820_saved(); | 744 | e820__update_table_firmware(); |
764 | } | 745 | } |
765 | 746 | ||
766 | return addr; | 747 | return addr; |
@@ -779,22 +760,22 @@ u64 __init early_reserve_e820(u64 size, u64 align) | |||
779 | /* | 760 | /* |
780 | * Find the highest page frame number we have available | 761 | * Find the highest page frame number we have available |
781 | */ | 762 | */ |
782 | static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type) | 763 | static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type type) |
783 | { | 764 | { |
784 | int i; | 765 | int i; |
785 | unsigned long last_pfn = 0; | 766 | unsigned long last_pfn = 0; |
786 | unsigned long max_arch_pfn = MAX_ARCH_PFN; | 767 | unsigned long max_arch_pfn = MAX_ARCH_PFN; |
787 | 768 | ||
788 | for (i = 0; i < e820->nr_map; i++) { | 769 | for (i = 0; i < e820_table->nr_entries; i++) { |
789 | struct e820entry *ei = &e820->map[i]; | 770 | struct e820_entry *entry = &e820_table->entries[i]; |
790 | unsigned long start_pfn; | 771 | unsigned long start_pfn; |
791 | unsigned long end_pfn; | 772 | unsigned long end_pfn; |
792 | 773 | ||
793 | if (ei->type != type) | 774 | if (entry->type != type) |
794 | continue; | 775 | continue; |
795 | 776 | ||
796 | start_pfn = ei->addr >> PAGE_SHIFT; | 777 | start_pfn = entry->addr >> PAGE_SHIFT; |
797 | end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT; | 778 | end_pfn = (entry->addr + entry->size) >> PAGE_SHIFT; |
798 | 779 | ||
799 | if (start_pfn >= limit_pfn) | 780 | if (start_pfn >= limit_pfn) |
800 | continue; | 781 | continue; |
@@ -809,18 +790,19 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type) | |||
809 | if (last_pfn > max_arch_pfn) | 790 | if (last_pfn > max_arch_pfn) |
810 | last_pfn = max_arch_pfn; | 791 | last_pfn = max_arch_pfn; |
811 | 792 | ||
812 | printk(KERN_INFO "e820: last_pfn = %#lx max_arch_pfn = %#lx\n", | 793 | pr_info("e820: last_pfn = %#lx max_arch_pfn = %#lx\n", |
813 | last_pfn, max_arch_pfn); | 794 | last_pfn, max_arch_pfn); |
814 | return last_pfn; | 795 | return last_pfn; |
815 | } | 796 | } |
816 | unsigned long __init e820_end_of_ram_pfn(void) | 797 | |
798 | unsigned long __init e820__end_of_ram_pfn(void) | ||
817 | { | 799 | { |
818 | return e820_end_pfn(MAX_ARCH_PFN, E820_RAM); | 800 | return e820_end_pfn(MAX_ARCH_PFN, E820_TYPE_RAM); |
819 | } | 801 | } |
820 | 802 | ||
821 | unsigned long __init e820_end_of_low_ram_pfn(void) | 803 | unsigned long __init e820__end_of_low_ram_pfn(void) |
822 | { | 804 | { |
823 | return e820_end_pfn(1UL << (32 - PAGE_SHIFT), E820_RAM); | 805 | return e820_end_pfn(1UL << (32 - PAGE_SHIFT), E820_TYPE_RAM); |
824 | } | 806 | } |
825 | 807 | ||
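e820_end_pfn() simply takes the maximum end PFN over all entries of the requested type, skipping entries that start at or above limit_pfn and clamping ones that cross it; the two wrappers differ only in the limit (MAX_ARCH_PFN vs. the 4GB boundary). A sketch of that computation with invented names:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct range { uint64_t addr, size; int type; };

    /* Highest page frame number covered by entries of 'type', capped at limit_pfn. */
    static uint64_t end_pfn(const struct range *tab, int n, uint64_t limit_pfn, int type)
    {
        uint64_t last_pfn = 0;

        for (int i = 0; i < n; i++) {
            uint64_t start_pfn = tab[i].addr >> PAGE_SHIFT;
            uint64_t stop_pfn = (tab[i].addr + tab[i].size) >> PAGE_SHIFT;

            if (tab[i].type != type || start_pfn >= limit_pfn)
                continue;
            if (stop_pfn > limit_pfn)       /* Clamp entries that cross the limit */
                stop_pfn = limit_pfn;
            if (stop_pfn > last_pfn)
                last_pfn = stop_pfn;
        }
        return last_pfn;
    }

    int main(void)
    {
        struct range tab[] = { { 0x0, 0x9f000, 1 }, { 0x100000, 0xbff00000, 1 },
                               { 0x100000000ULL, 0x100000000ULL, 1 } };

        /* All of RAM vs. only the low 4GB (limit of 1 << (32 - PAGE_SHIFT) PFNs): */
        printf("%#llx\n", (unsigned long long)end_pfn(tab, 3, UINT64_MAX, 1));
        printf("%#llx\n", (unsigned long long)end_pfn(tab, 3, 1ULL << (32 - PAGE_SHIFT), 1));
        return 0;
    }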
826 | static void __init early_panic(char *msg) | 808 | static void __init early_panic(char *msg) |
@@ -831,7 +813,7 @@ static void __init early_panic(char *msg) | |||
831 | 813 | ||
832 | static int userdef __initdata; | 814 | static int userdef __initdata; |
833 | 815 | ||
834 | /* "mem=nopentium" disables the 4MB page tables. */ | 816 | /* The "mem=nopentium" boot option disables 4MB page tables on 32-bit kernels: */ |
835 | static int __init parse_memopt(char *p) | 817 | static int __init parse_memopt(char *p) |
836 | { | 818 | { |
837 | u64 mem_size; | 819 | u64 mem_size; |
@@ -844,17 +826,19 @@ static int __init parse_memopt(char *p) | |||
844 | setup_clear_cpu_cap(X86_FEATURE_PSE); | 826 | setup_clear_cpu_cap(X86_FEATURE_PSE); |
845 | return 0; | 827 | return 0; |
846 | #else | 828 | #else |
847 | printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); | 829 | pr_warn("mem=nopentium ignored! (only supported on x86_32)\n"); |
848 | return -EINVAL; | 830 | return -EINVAL; |
849 | #endif | 831 | #endif |
850 | } | 832 | } |
851 | 833 | ||
852 | userdef = 1; | 834 | userdef = 1; |
853 | mem_size = memparse(p, &p); | 835 | mem_size = memparse(p, &p); |
854 | /* don't remove all of memory when handling "mem={invalid}" param */ | 836 | |
837 | /* Don't remove all memory when getting "mem={invalid}" parameter: */ | ||
855 | if (mem_size == 0) | 838 | if (mem_size == 0) |
856 | return -EINVAL; | 839 | return -EINVAL; |
857 | e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); | 840 | |
841 | e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1); | ||
858 | 842 | ||
859 | return 0; | 843 | return 0; |
860 | } | 844 | } |
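memparse() accepts a plain number with an optional size suffix, and the resulting mem= value is then used to trim away all RAM above it. A simplified userspace stand-in for that parsing is sketched below (only K/M/G are handled, unlike the real helper), with the corresponding e820 call shown as a comment:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for the kernel's memparse(): a number with an
     * optional K/M/G suffix, e.g. "512M" -> 0x20000000. */
    static uint64_t parse_size(const char *s, char **retp)
    {
        uint64_t v = strtoull(s, retp, 0);

        switch (**retp) {
        case 'G': case 'g': v <<= 10; /* fall through */
        case 'M': case 'm': v <<= 10; /* fall through */
        case 'K': case 'k': v <<= 10; (*retp)++; break;
        }
        return v;
    }

    int main(void)
    {
        char *rest;
        uint64_t mem = parse_size("512M", &rest);

        /* A "mem=512M" boot option would translate into trimming RAM above it:
         * e820__range_remove(mem, ULLONG_MAX - mem, E820_TYPE_RAM, 1); */
        printf("%#llx (rest: \"%s\")\n", (unsigned long long)mem, rest);
        return 0;
    }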
@@ -872,12 +856,12 @@ static int __init parse_memmap_one(char *p) | |||
872 | #ifdef CONFIG_CRASH_DUMP | 856 | #ifdef CONFIG_CRASH_DUMP |
873 | /* | 857 | /* |
874 | * If we are doing a crash dump, we still need to know | 858 | * If we are doing a crash dump, we still need to know |
875 | * the real mem size before original memory map is | 859 | * the real memory size before the original memory map is |
876 | * reset. | 860 | * reset. |
877 | */ | 861 | */ |
878 | saved_max_pfn = e820_end_of_ram_pfn(); | 862 | saved_max_pfn = e820__end_of_ram_pfn(); |
879 | #endif | 863 | #endif |
880 | e820->nr_map = 0; | 864 | e820_table->nr_entries = 0; |
881 | userdef = 1; | 865 | userdef = 1; |
882 | return 0; | 866 | return 0; |
883 | } | 867 | } |
@@ -890,21 +874,23 @@ static int __init parse_memmap_one(char *p) | |||
890 | userdef = 1; | 874 | userdef = 1; |
891 | if (*p == '@') { | 875 | if (*p == '@') { |
892 | start_at = memparse(p+1, &p); | 876 | start_at = memparse(p+1, &p); |
893 | e820_add_region(start_at, mem_size, E820_RAM); | 877 | e820__range_add(start_at, mem_size, E820_TYPE_RAM); |
894 | } else if (*p == '#') { | 878 | } else if (*p == '#') { |
895 | start_at = memparse(p+1, &p); | 879 | start_at = memparse(p+1, &p); |
896 | e820_add_region(start_at, mem_size, E820_ACPI); | 880 | e820__range_add(start_at, mem_size, E820_TYPE_ACPI); |
897 | } else if (*p == '$') { | 881 | } else if (*p == '$') { |
898 | start_at = memparse(p+1, &p); | 882 | start_at = memparse(p+1, &p); |
899 | e820_add_region(start_at, mem_size, E820_RESERVED); | 883 | e820__range_add(start_at, mem_size, E820_TYPE_RESERVED); |
900 | } else if (*p == '!') { | 884 | } else if (*p == '!') { |
901 | start_at = memparse(p+1, &p); | 885 | start_at = memparse(p+1, &p); |
902 | e820_add_region(start_at, mem_size, E820_PRAM); | 886 | e820__range_add(start_at, mem_size, E820_TYPE_PRAM); |
903 | } else | 887 | } else { |
904 | e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); | 888 | e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1); |
889 | } | ||
905 | 890 | ||
906 | return *p == '\0' ? 0 : -EINVAL; | 891 | return *p == '\0' ? 0 : -EINVAL; |
907 | } | 892 | } |
893 | |||
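The character after the size selects the region type: '@' adds usable RAM, '#' ACPI data, '$' reserved, '!' legacy persistent memory, and a bare size falls back to mem=-style trimming. A hedged sketch of that dispatch with an invented helper; the numeric types follow the E820 convention (1 RAM, 2 reserved, 3 ACPI, 12 legacy PRAM), but everything else, including the lack of size suffixes, is a simplification:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Decode one "memmap=<size>[@#$!]<addr>" item into (addr, size, type). */
    static int parse_memmap_item(const char *p, uint64_t *addr, uint64_t *size, int *type)
    {
        char *q;

        *size = strtoull(p, &q, 0);
        switch (*q) {
        case '@': *type = 1;  break;
        case '#': *type = 3;  break;
        case '$': *type = 2;  break;
        case '!': *type = 12; break;
        default:  return -1;    /* Bare size: treat like "mem=" and trim RAM instead */
        }
        *addr = strtoull(q + 1, &q, 0);
        return *q == '\0' ? 0 : -1;
    }

    int main(void)
    {
        uint64_t addr, size;
        int type;

        if (!parse_memmap_item("0x10000$0x80000000", &addr, &size, &type))
            printf("[%#llx-%#llx] type %d\n", (unsigned long long)addr,
                   (unsigned long long)(addr + size - 1), type);
        return 0;
    }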
908 | static int __init parse_memmap_opt(char *str) | 894 | static int __init parse_memmap_opt(char *str) |
909 | { | 895 | { |
910 | while (str) { | 896 | while (str) { |
@@ -921,68 +907,97 @@ static int __init parse_memmap_opt(char *str) | |||
921 | } | 907 | } |
922 | early_param("memmap", parse_memmap_opt); | 908 | early_param("memmap", parse_memmap_opt); |
923 | 909 | ||
924 | void __init finish_e820_parsing(void) | 910 | /* |
911 | * Reserve all entries from the bootloader's extensible data nodes list, | ||
912 | * because if present we are going to use it later on to fetch e820 | ||
913 | * entries from it: | ||
914 | */ | ||
915 | void __init e820__reserve_setup_data(void) | ||
916 | { | ||
917 | struct setup_data *data; | ||
918 | u64 pa_data; | ||
919 | |||
920 | pa_data = boot_params.hdr.setup_data; | ||
921 | if (!pa_data) | ||
922 | return; | ||
923 | |||
924 | while (pa_data) { | ||
925 | data = early_memremap(pa_data, sizeof(*data)); | ||
926 | e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); | ||
927 | pa_data = data->next; | ||
928 | early_memunmap(data, sizeof(*data)); | ||
929 | } | ||
930 | |||
931 | e820__update_table(e820_table); | ||
932 | |||
933 | memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware)); | ||
934 | |||
935 | pr_info("extended physical RAM map:\n"); | ||
936 | e820__print_table("reserve setup_data"); | ||
937 | } | ||
938 | |||
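The setup_data nodes form a singly linked list chained by physical address, so each header has to be temporarily mapped before its length and next pointer can be read, and its whole footprint (header plus payload) is what gets marked reserved. A userspace sketch of the walk, with ordinary pointers standing in for early_memremap() and a printout standing in for the range update:

    #include <stdint.h>
    #include <stdio.h>

    /* Same layout idea as the boot protocol's struct setup_data: a header
     * followed by 'len' bytes of payload, chained via 'next'. In the kernel
     * 'next' is a physical address that must be mapped first; here it is an
     * ordinary pointer to keep the sketch self-contained. */
    struct sdata {
        struct sdata *next;
        uint32_t type;
        uint32_t len;
        uint8_t data[];
    };

    int main(void)
    {
        static struct sdata b = { NULL, 2, 16 };
        static struct sdata a = { &b, 1, 32 };

        for (struct sdata *p = &a; p; p = p->next) {
            /* The kernel reserves sizeof(*p) + p->len bytes here, via
             * e820__range_update(pa, ..., E820_TYPE_RAM, E820_TYPE_RESERVED_KERN). */
            printf("node type %u, footprint %zu bytes\n",
                   p->type, sizeof(*p) + p->len);
        }
        return 0;
    }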
939 | /* | ||
940 | * Called after parse_early_param(), after early parameters (such as mem=) | ||
941 | * have been processed, in which case we already have an E820 table filled in | ||
942 | * via the parameter callback function(s), but it's not sorted and printed yet: | ||
943 | */ | ||
944 | void __init e820__finish_early_params(void) | ||
925 | { | 945 | { |
926 | if (userdef) { | 946 | if (userdef) { |
927 | if (sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), | 947 | if (e820__update_table(e820_table) < 0) |
928 | &e820->nr_map) < 0) | ||
929 | early_panic("Invalid user supplied memory map"); | 948 | early_panic("Invalid user supplied memory map"); |
930 | 949 | ||
931 | printk(KERN_INFO "e820: user-defined physical RAM map:\n"); | 950 | pr_info("e820: user-defined physical RAM map:\n"); |
932 | e820_print_map("user"); | 951 | e820__print_table("user"); |
933 | } | 952 | } |
934 | } | 953 | } |
935 | 954 | ||
936 | static const char *__init e820_type_to_string(int e820_type) | 955 | static const char *__init e820_type_to_string(struct e820_entry *entry) |
937 | { | 956 | { |
938 | switch (e820_type) { | 957 | switch (entry->type) { |
939 | case E820_RESERVED_KERN: | 958 | case E820_TYPE_RESERVED_KERN: /* Fall-through: */ |
940 | case E820_RAM: return "System RAM"; | 959 | case E820_TYPE_RAM: return "System RAM"; |
941 | case E820_ACPI: return "ACPI Tables"; | 960 | case E820_TYPE_ACPI: return "ACPI Tables"; |
942 | case E820_NVS: return "ACPI Non-volatile Storage"; | 961 | case E820_TYPE_NVS: return "ACPI Non-volatile Storage"; |
943 | case E820_UNUSABLE: return "Unusable memory"; | 962 | case E820_TYPE_UNUSABLE: return "Unusable memory"; |
944 | case E820_PRAM: return "Persistent Memory (legacy)"; | 963 | case E820_TYPE_PRAM: return "Persistent Memory (legacy)"; |
945 | case E820_PMEM: return "Persistent Memory"; | 964 | case E820_TYPE_PMEM: return "Persistent Memory"; |
946 | default: return "reserved"; | 965 | case E820_TYPE_RESERVED: return "Reserved"; |
966 | default: return "Unknown E820 type"; | ||
947 | } | 967 | } |
948 | } | 968 | } |
949 | 969 | ||
950 | static unsigned long __init e820_type_to_iomem_type(int e820_type) | 970 | static unsigned long __init e820_type_to_iomem_type(struct e820_entry *entry) |
951 | { | 971 | { |
952 | switch (e820_type) { | 972 | switch (entry->type) { |
953 | case E820_RESERVED_KERN: | 973 | case E820_TYPE_RESERVED_KERN: /* Fall-through: */ |
954 | case E820_RAM: | 974 | case E820_TYPE_RAM: return IORESOURCE_SYSTEM_RAM; |
955 | return IORESOURCE_SYSTEM_RAM; | 975 | case E820_TYPE_ACPI: /* Fall-through: */ |
956 | case E820_ACPI: | 976 | case E820_TYPE_NVS: /* Fall-through: */ |
957 | case E820_NVS: | 977 | case E820_TYPE_UNUSABLE: /* Fall-through: */ |
958 | case E820_UNUSABLE: | 978 | case E820_TYPE_PRAM: /* Fall-through: */ |
959 | case E820_PRAM: | 979 | case E820_TYPE_PMEM: /* Fall-through: */ |
960 | case E820_PMEM: | 980 | case E820_TYPE_RESERVED: /* Fall-through: */ |
961 | default: | 981 | default: return IORESOURCE_MEM; |
962 | return IORESOURCE_MEM; | ||
963 | } | 982 | } |
964 | } | 983 | } |
965 | 984 | ||
966 | static unsigned long __init e820_type_to_iores_desc(int e820_type) | 985 | static unsigned long __init e820_type_to_iores_desc(struct e820_entry *entry) |
967 | { | 986 | { |
968 | switch (e820_type) { | 987 | switch (entry->type) { |
969 | case E820_ACPI: | 988 | case E820_TYPE_ACPI: return IORES_DESC_ACPI_TABLES; |
970 | return IORES_DESC_ACPI_TABLES; | 989 | case E820_TYPE_NVS: return IORES_DESC_ACPI_NV_STORAGE; |
971 | case E820_NVS: | 990 | case E820_TYPE_PMEM: return IORES_DESC_PERSISTENT_MEMORY; |
972 | return IORES_DESC_ACPI_NV_STORAGE; | 991 | case E820_TYPE_PRAM: return IORES_DESC_PERSISTENT_MEMORY_LEGACY; |
973 | case E820_PMEM: | 992 | case E820_TYPE_RESERVED_KERN: /* Fall-through: */ |
974 | return IORES_DESC_PERSISTENT_MEMORY; | 993 | case E820_TYPE_RAM: /* Fall-through: */ |
975 | case E820_PRAM: | 994 | case E820_TYPE_UNUSABLE: /* Fall-through: */ |
976 | return IORES_DESC_PERSISTENT_MEMORY_LEGACY; | 995 | case E820_TYPE_RESERVED: /* Fall-through: */ |
977 | case E820_RESERVED_KERN: | 996 | default: return IORES_DESC_NONE; |
978 | case E820_RAM: | ||
979 | case E820_UNUSABLE: | ||
980 | default: | ||
981 | return IORES_DESC_NONE; | ||
982 | } | 997 | } |
983 | } | 998 | } |
984 | 999 | ||
985 | static bool __init do_mark_busy(u32 type, struct resource *res) | 1000 | static bool __init do_mark_busy(enum e820_type type, struct resource *res) |
986 | { | 1001 | { |
987 | /* this is the legacy bios/dos rom-shadow + mmio region */ | 1002 | /* this is the legacy bios/dos rom-shadow + mmio region */ |
988 | if (res->start < (1ULL<<20)) | 1003 | if (res->start < (1ULL<<20)) |
@@ -993,61 +1008,71 @@ static bool __init do_mark_busy(u32 type, struct resource *res) | |||
993 | * for exclusive use of a driver | 1008 | * for exclusive use of a driver |
994 | */ | 1009 | */ |
995 | switch (type) { | 1010 | switch (type) { |
996 | case E820_RESERVED: | 1011 | case E820_TYPE_RESERVED: |
997 | case E820_PRAM: | 1012 | case E820_TYPE_PRAM: |
998 | case E820_PMEM: | 1013 | case E820_TYPE_PMEM: |
999 | return false; | 1014 | return false; |
1015 | case E820_TYPE_RESERVED_KERN: | ||
1016 | case E820_TYPE_RAM: | ||
1017 | case E820_TYPE_ACPI: | ||
1018 | case E820_TYPE_NVS: | ||
1019 | case E820_TYPE_UNUSABLE: | ||
1000 | default: | 1020 | default: |
1001 | return true; | 1021 | return true; |
1002 | } | 1022 | } |
1003 | } | 1023 | } |
1004 | 1024 | ||
1005 | /* | 1025 | /* |
1006 | * Mark e820 reserved areas as busy for the resource manager. | 1026 | * Mark E820 reserved areas as busy for the resource manager: |
1007 | */ | 1027 | */ |
1028 | |||
1008 | static struct resource __initdata *e820_res; | 1029 | static struct resource __initdata *e820_res; |
1009 | void __init e820_reserve_resources(void) | 1030 | |
1031 | void __init e820__reserve_resources(void) | ||
1010 | { | 1032 | { |
1011 | int i; | 1033 | int i; |
1012 | struct resource *res; | 1034 | struct resource *res; |
1013 | u64 end; | 1035 | u64 end; |
1014 | 1036 | ||
1015 | res = alloc_bootmem(sizeof(struct resource) * e820->nr_map); | 1037 | res = alloc_bootmem(sizeof(*res) * e820_table->nr_entries); |
1016 | e820_res = res; | 1038 | e820_res = res; |
1017 | for (i = 0; i < e820->nr_map; i++) { | 1039 | |
1018 | end = e820->map[i].addr + e820->map[i].size - 1; | 1040 | for (i = 0; i < e820_table->nr_entries; i++) { |
1041 | struct e820_entry *entry = e820_table->entries + i; | ||
1042 | |||
1043 | end = entry->addr + entry->size - 1; | ||
1019 | if (end != (resource_size_t)end) { | 1044 | if (end != (resource_size_t)end) { |
1020 | res++; | 1045 | res++; |
1021 | continue; | 1046 | continue; |
1022 | } | 1047 | } |
1023 | res->name = e820_type_to_string(e820->map[i].type); | 1048 | res->start = entry->addr; |
1024 | res->start = e820->map[i].addr; | 1049 | res->end = end; |
1025 | res->end = end; | 1050 | res->name = e820_type_to_string(entry); |
1026 | 1051 | res->flags = e820_type_to_iomem_type(entry); | |
1027 | res->flags = e820_type_to_iomem_type(e820->map[i].type); | 1052 | res->desc = e820_type_to_iores_desc(entry); |
1028 | res->desc = e820_type_to_iores_desc(e820->map[i].type); | ||
1029 | 1053 | ||
1030 | /* | 1054 | /* |
1031 | * don't register the region that could be conflicted with | 1055 | * Don't register the region that could be conflicted with |
1032 | * pci device BAR resource and insert them later in | 1056 | * PCI device BAR resources and insert them later in |
1033 | * pcibios_resource_survey() | 1057 | * pcibios_resource_survey(): |
1034 | */ | 1058 | */ |
1035 | if (do_mark_busy(e820->map[i].type, res)) { | 1059 | if (do_mark_busy(entry->type, res)) { |
1036 | res->flags |= IORESOURCE_BUSY; | 1060 | res->flags |= IORESOURCE_BUSY; |
1037 | insert_resource(&iomem_resource, res); | 1061 | insert_resource(&iomem_resource, res); |
1038 | } | 1062 | } |
1039 | res++; | 1063 | res++; |
1040 | } | 1064 | } |
1041 | 1065 | ||
1042 | for (i = 0; i < e820_saved->nr_map; i++) { | 1066 | for (i = 0; i < e820_table_firmware->nr_entries; i++) { |
1043 | struct e820entry *entry = &e820_saved->map[i]; | 1067 | struct e820_entry *entry = e820_table_firmware->entries + i; |
1044 | firmware_map_add_early(entry->addr, | 1068 | |
1045 | entry->addr + entry->size, | 1069 | firmware_map_add_early(entry->addr, entry->addr + entry->size, e820_type_to_string(entry)); |
1046 | e820_type_to_string(entry->type)); | ||
1047 | } | 1070 | } |
1048 | } | 1071 | } |
1049 | 1072 | ||
1050 | /* How much should we pad RAM ending depending on where it is? */ | 1073 | /* |
1074 | * How much should we pad the end of RAM, depending on where it is? | ||
1075 | */ | ||
1051 | static unsigned long __init ram_alignment(resource_size_t pos) | 1076 | static unsigned long __init ram_alignment(resource_size_t pos) |
1052 | { | 1077 | { |
1053 | unsigned long mb = pos >> 20; | 1078 | unsigned long mb = pos >> 20; |
@@ -1066,64 +1091,59 @@ static unsigned long __init ram_alignment(resource_size_t pos) | |||
1066 | 1091 | ||
1067 | #define MAX_RESOURCE_SIZE ((resource_size_t)-1) | 1092 | #define MAX_RESOURCE_SIZE ((resource_size_t)-1) |
1068 | 1093 | ||
1069 | void __init e820_reserve_resources_late(void) | 1094 | void __init e820__reserve_resources_late(void) |
1070 | { | 1095 | { |
1071 | int i; | 1096 | int i; |
1072 | struct resource *res; | 1097 | struct resource *res; |
1073 | 1098 | ||
1074 | res = e820_res; | 1099 | res = e820_res; |
1075 | for (i = 0; i < e820->nr_map; i++) { | 1100 | for (i = 0; i < e820_table->nr_entries; i++) { |
1076 | if (!res->parent && res->end) | 1101 | if (!res->parent && res->end) |
1077 | insert_resource_expand_to_fit(&iomem_resource, res); | 1102 | insert_resource_expand_to_fit(&iomem_resource, res); |
1078 | res++; | 1103 | res++; |
1079 | } | 1104 | } |
1080 | 1105 | ||
1081 | /* | 1106 | /* |
1082 | * Try to bump up RAM regions to reasonable boundaries to | 1107 | * Try to bump up RAM regions to reasonable boundaries, to |
1083 | * avoid stolen RAM: | 1108 | * avoid stolen RAM: |
1084 | */ | 1109 | */ |
1085 | for (i = 0; i < e820->nr_map; i++) { | 1110 | for (i = 0; i < e820_table->nr_entries; i++) { |
1086 | struct e820entry *entry = &e820->map[i]; | 1111 | struct e820_entry *entry = &e820_table->entries[i]; |
1087 | u64 start, end; | 1112 | u64 start, end; |
1088 | 1113 | ||
1089 | if (entry->type != E820_RAM) | 1114 | if (entry->type != E820_TYPE_RAM) |
1090 | continue; | 1115 | continue; |
1116 | |||
1091 | start = entry->addr + entry->size; | 1117 | start = entry->addr + entry->size; |
1092 | end = round_up(start, ram_alignment(start)) - 1; | 1118 | end = round_up(start, ram_alignment(start)) - 1; |
1093 | if (end > MAX_RESOURCE_SIZE) | 1119 | if (end > MAX_RESOURCE_SIZE) |
1094 | end = MAX_RESOURCE_SIZE; | 1120 | end = MAX_RESOURCE_SIZE; |
1095 | if (start >= end) | 1121 | if (start >= end) |
1096 | continue; | 1122 | continue; |
1097 | printk(KERN_DEBUG | 1123 | |
1098 | "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n", | 1124 | printk(KERN_DEBUG "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n", start, end); |
1099 | start, end); | 1125 | reserve_region_with_split(&iomem_resource, start, end, "RAM buffer"); |
1100 | reserve_region_with_split(&iomem_resource, start, end, | ||
1101 | "RAM buffer"); | ||
1102 | } | 1126 | } |
1103 | } | 1127 | } |
1104 | 1128 | ||
1105 | char *__init default_machine_specific_memory_setup(void) | 1129 | /* |
1130 | * Pass the firmware (bootloader) E820 map to the kernel and process it: | ||
1131 | */ | ||
1132 | char *__init e820__memory_setup_default(void) | ||
1106 | { | 1133 | { |
1107 | char *who = "BIOS-e820"; | 1134 | char *who = "BIOS-e820"; |
1108 | u32 new_nr; | 1135 | |
1109 | /* | 1136 | /* |
1110 | * Try to copy the BIOS-supplied E820-map. | 1137 | * Try to copy the BIOS-supplied E820-map. |
1111 | * | 1138 | * |
1112 | * Otherwise fake a memory map; one section from 0k->640k, | 1139 | * Otherwise fake a memory map; one section from 0k->640k, |
1113 | * the next section from 1mb->appropriate_mem_k | 1140 | * the next section from 1mb->appropriate_mem_k |
1114 | */ | 1141 | */ |
1115 | new_nr = boot_params.e820_entries; | 1142 | if (append_e820_table(boot_params.e820_table, boot_params.e820_entries) < 0) { |
1116 | sanitize_e820_map(boot_params.e820_map, | ||
1117 | ARRAY_SIZE(boot_params.e820_map), | ||
1118 | &new_nr); | ||
1119 | boot_params.e820_entries = new_nr; | ||
1120 | if (append_e820_map(boot_params.e820_map, boot_params.e820_entries) | ||
1121 | < 0) { | ||
1122 | u64 mem_size; | 1143 | u64 mem_size; |
1123 | 1144 | ||
1124 | /* compare results from other methods and take the greater */ | 1145 | /* Compare results from other methods and take the one that gives more RAM: */ |
1125 | if (boot_params.alt_mem_k | 1146 | if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) { |
1126 | < boot_params.screen_info.ext_mem_k) { | ||
1127 | mem_size = boot_params.screen_info.ext_mem_k; | 1147 | mem_size = boot_params.screen_info.ext_mem_k; |
1128 | who = "BIOS-88"; | 1148 | who = "BIOS-88"; |
1129 | } else { | 1149 | } else { |
@@ -1131,84 +1151,68 @@ char *__init default_machine_specific_memory_setup(void) | |||
1131 | who = "BIOS-e801"; | 1151 | who = "BIOS-e801"; |
1132 | } | 1152 | } |
1133 | 1153 | ||
1134 | e820->nr_map = 0; | 1154 | e820_table->nr_entries = 0; |
1135 | e820_add_region(0, LOWMEMSIZE(), E820_RAM); | 1155 | e820__range_add(0, LOWMEMSIZE(), E820_TYPE_RAM); |
1136 | e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM); | 1156 | e820__range_add(HIGH_MEMORY, mem_size << 10, E820_TYPE_RAM); |
1137 | } | 1157 | } |
1138 | 1158 | ||
1139 | /* In case someone cares... */ | 1159 | /* We just appended a lot of ranges, sanitize the table: */ |
1160 | e820__update_table(e820_table); | ||
1161 | |||
1140 | return who; | 1162 | return who; |
1141 | } | 1163 | } |
1142 | 1164 | ||
1143 | void __init setup_memory_map(void) | 1165 | /* |
1166 | * Calls e820__memory_setup_default() in essence to pick up the firmware/bootloader | ||
1167 | * E820 map - with an optional platform quirk available for virtual platforms | ||
1168 | * to override this method of boot environment processing: | ||
1169 | */ | ||
1170 | void __init e820__memory_setup(void) | ||
1144 | { | 1171 | { |
1145 | char *who; | 1172 | char *who; |
1146 | 1173 | ||
1174 | /* This is a firmware interface ABI - make sure we don't break it: */ | ||
1175 | BUILD_BUG_ON(sizeof(struct boot_e820_entry) != 20); | ||
1176 | |||
1147 | who = x86_init.resources.memory_setup(); | 1177 | who = x86_init.resources.memory_setup(); |
1148 | memcpy(e820_saved, e820, sizeof(struct e820map)); | 1178 | |
1149 | printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n"); | 1179 | memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware)); |
1150 | e820_print_map(who); | 1180 | |
1181 | pr_info("e820: BIOS-provided physical RAM map:\n"); | ||
1182 | e820__print_table(who); | ||
1151 | } | 1183 | } |
1152 | 1184 | ||
1153 | void __init memblock_x86_fill(void) | 1185 | void __init e820__memblock_setup(void) |
1154 | { | 1186 | { |
1155 | int i; | 1187 | int i; |
1156 | u64 end; | 1188 | u64 end; |
1157 | 1189 | ||
1158 | /* | 1190 | /* |
1159 | * EFI may have more than 128 entries | 1191 | * The bootstrap memblock region count maximum is 128 entries |
1160 | * We are safe to enable resizing, beause memblock_x86_fill() | 1192 | * (INIT_MEMBLOCK_REGIONS), but EFI might pass us more E820 entries |
1161 | * is rather later for x86 | 1193 | * than that - so allow memblock resizing. |
1194 | * | ||
1195 | * This is safe, because this call happens pretty late during x86 setup, | ||
1196 | * so we know about reserved memory regions already. (This is important | ||
1197 | * so that memblock resizing does not stomp over reserved areas.) | ||
1162 | */ | 1198 | */ |
1163 | memblock_allow_resize(); | 1199 | memblock_allow_resize(); |
1164 | 1200 | ||
1165 | for (i = 0; i < e820->nr_map; i++) { | 1201 | for (i = 0; i < e820_table->nr_entries; i++) { |
1166 | struct e820entry *ei = &e820->map[i]; | 1202 | struct e820_entry *entry = &e820_table->entries[i]; |
1167 | 1203 | ||
1168 | end = ei->addr + ei->size; | 1204 | end = entry->addr + entry->size; |
1169 | if (end != (resource_size_t)end) | 1205 | if (end != (resource_size_t)end) |
1170 | continue; | 1206 | continue; |
1171 | 1207 | ||
1172 | if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN) | 1208 | if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) |
1173 | continue; | 1209 | continue; |
1174 | 1210 | ||
1175 | memblock_add(ei->addr, ei->size); | 1211 | memblock_add(entry->addr, entry->size); |
1176 | } | 1212 | } |
1177 | 1213 | ||
1178 | /* throw away partial pages */ | 1214 | /* Throw away partial pages: */ |
1179 | memblock_trim_memory(PAGE_SIZE); | 1215 | memblock_trim_memory(PAGE_SIZE); |
1180 | 1216 | ||
1181 | memblock_dump_all(); | 1217 | memblock_dump_all(); |
1182 | } | 1218 | } |
1183 | |||
1184 | void __init memblock_find_dma_reserve(void) | ||
1185 | { | ||
1186 | #ifdef CONFIG_X86_64 | ||
1187 | u64 nr_pages = 0, nr_free_pages = 0; | ||
1188 | unsigned long start_pfn, end_pfn; | ||
1189 | phys_addr_t start, end; | ||
1190 | int i; | ||
1191 | u64 u; | ||
1192 | |||
1193 | /* | ||
1194 | * need to find out used area below MAX_DMA_PFN | ||
1195 | * need to use memblock to get free size in [0, MAX_DMA_PFN] | ||
1196 | * at first, and assume boot_mem will not take below MAX_DMA_PFN | ||
1197 | */ | ||
1198 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { | ||
1199 | start_pfn = min(start_pfn, MAX_DMA_PFN); | ||
1200 | end_pfn = min(end_pfn, MAX_DMA_PFN); | ||
1201 | nr_pages += end_pfn - start_pfn; | ||
1202 | } | ||
1203 | |||
1204 | for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, | ||
1205 | NULL) { | ||
1206 | start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN); | ||
1207 | end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN); | ||
1208 | if (start_pfn < end_pfn) | ||
1209 | nr_free_pages += end_pfn - start_pfn; | ||
1210 | } | ||
1211 | |||
1212 | set_dma_reserve(nr_pages - nr_free_pages); | ||
1213 | #endif | ||
1214 | } | ||
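The fallback path in e820__memory_setup_default() above, taken when append_e820_table() rejects the bootloader-supplied table, is easier to follow in isolation. The sketch below is a minimal userspace model of it, not kernel code: fake_entry, FAKE_LOWMEM and FAKE_HIGHBASE are illustrative stand-ins for struct boot_e820_entry, LOWMEMSIZE() and HIGH_MEMORY, and the two arguments model boot_params.alt_mem_k and boot_params.screen_info.ext_mem_k.

    /* Illustrative model only; constants and types are simplified assumptions. */
    #include <stdio.h>
    #include <stdint.h>

    #define FAKE_LOWMEM   (640 * 1024ULL)    /* one section from 0 to 640 KB      */
    #define FAKE_HIGHBASE (1024 * 1024ULL)   /* the next section from 1 MB upward */

    struct fake_entry { uint64_t addr, size; };

    static int build_fallback_map(struct fake_entry *map,
                                  uint64_t alt_mem_k, uint64_t ext_mem_k)
    {
        /* Compare the two legacy probes and take the one reporting more RAM. */
        uint64_t mem_k = alt_mem_k > ext_mem_k ? alt_mem_k : ext_mem_k;

        map[0] = (struct fake_entry){ 0,             FAKE_LOWMEM };
        map[1] = (struct fake_entry){ FAKE_HIGHBASE, mem_k << 10 };
        return 2;
    }

    int main(void)
    {
        struct fake_entry map[2];
        int n = build_fallback_map(map, 65536, 61440);   /* 64 MB vs 60 MB */

        for (int i = 0; i < n; i++)
            printf("RAM  [mem %#010llx-%#010llx]\n",
                   (unsigned long long)map[i].addr,
                   (unsigned long long)(map[i].addr + map[i].size - 1));
        return 0;
    }

As in the hunk above, the synthesized entries are still run through e820__update_table() afterwards, so the fake map gets the same sanitizing pass as a real firmware table.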
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 6a08e25a48d8..ff7e4b3988ed 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -546,8 +546,8 @@ intel_graphics_stolen(int num, int slot, int func, | |||
546 | &base, &end); | 546 | &base, &end); |
547 | 547 | ||
548 | /* Mark this space as reserved */ | 548 | /* Mark this space as reserved */ |
549 | e820_add_region(base, size, E820_RESERVED); | 549 | e820__range_add(base, size, E820_TYPE_RESERVED); |
550 | sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map); | 550 | e820__update_table(e820_table); |
551 | } | 551 | } |
552 | 552 | ||
553 | static void __init intel_graphics_quirks(int num, int slot, int func) | 553 | static void __init intel_graphics_quirks(int num, int slot, int func) |
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index e5fb436a6548..538ec012b371 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #include <asm/setup.h> | 13 | #include <asm/setup.h> |
14 | #include <asm/sections.h> | 14 | #include <asm/sections.h> |
15 | #include <asm/e820.h> | 15 | #include <asm/e820/api.h> |
16 | #include <asm/page.h> | 16 | #include <asm/page.h> |
17 | #include <asm/apic.h> | 17 | #include <asm/apic.h> |
18 | #include <asm/io_apic.h> | 18 | #include <asm/io_apic.h> |
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index b5785c197e53..43b7002f44fb 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <asm/tlbflush.h> | 24 | #include <asm/tlbflush.h> |
25 | #include <asm/sections.h> | 25 | #include <asm/sections.h> |
26 | #include <asm/kdebug.h> | 26 | #include <asm/kdebug.h> |
27 | #include <asm/e820.h> | 27 | #include <asm/e820/api.h> |
28 | #include <asm/bios_ebda.h> | 28 | #include <asm/bios_ebda.h> |
29 | #include <asm/bootparam_utils.h> | 29 | #include <asm/bootparam_utils.h> |
30 | #include <asm/microcode.h> | 30 | #include <asm/microcode.h> |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index b467b14b03eb..ac9d327d2e42 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -269,10 +269,8 @@ ENTRY(secondary_startup_64) | |||
269 | /* rsi is pointer to real mode structure with interesting info. | 269 | /* rsi is pointer to real mode structure with interesting info. |
270 | pass it to C */ | 270 | pass it to C */ |
271 | movq %rsi, %rdi | 271 | movq %rsi, %rdi |
272 | jmp start_cpu | ||
273 | ENDPROC(secondary_startup_64) | ||
274 | 272 | ||
275 | ENTRY(start_cpu) | 273 | .Ljump_to_C_code: |
276 | /* | 274 | /* |
277 | * Jump to run C code and to be on a real kernel address. | 275 | * Jump to run C code and to be on a real kernel address. |
278 | * Since we are running on identity-mapped space we have to jump | 276 | * Since we are running on identity-mapped space we have to jump |
@@ -305,7 +303,7 @@ ENTRY(start_cpu) | |||
305 | pushq %rax # target address in negative space | 303 | pushq %rax # target address in negative space |
306 | lretq | 304 | lretq |
307 | .Lafter_lret: | 305 | .Lafter_lret: |
308 | ENDPROC(start_cpu) | 306 | ENDPROC(secondary_startup_64) |
309 | 307 | ||
310 | #include "verify_cpu.S" | 308 | #include "verify_cpu.S" |
311 | 309 | ||
@@ -313,11 +311,11 @@ ENDPROC(start_cpu) | |||
313 | /* | 311 | /* |
314 | * Boot CPU0 entry point. It's called from play_dead(). Everything has been set | 312 | * Boot CPU0 entry point. It's called from play_dead(). Everything has been set |
315 | * up already except stack. We just set up stack here. Then call | 313 | * up already except stack. We just set up stack here. Then call |
316 | * start_secondary() via start_cpu(). | 314 | * start_secondary() via .Ljump_to_C_code. |
317 | */ | 315 | */ |
318 | ENTRY(start_cpu0) | 316 | ENTRY(start_cpu0) |
319 | movq initial_stack(%rip), %rsp | 317 | movq initial_stack(%rip), %rsp |
320 | jmp start_cpu | 318 | jmp .Ljump_to_C_code |
321 | ENDPROC(start_cpu0) | 319 | ENDPROC(start_cpu0) |
322 | #endif | 320 | #endif |
323 | 321 | ||
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index d0a814a9d96a..9d7fd5e6689a 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/setup.h> | 25 | #include <asm/setup.h> |
26 | #include <asm/crash.h> | 26 | #include <asm/crash.h> |
27 | #include <asm/efi.h> | 27 | #include <asm/efi.h> |
28 | #include <asm/e820/api.h> | ||
28 | #include <asm/kexec-bzimage64.h> | 29 | #include <asm/kexec-bzimage64.h> |
29 | 30 | ||
30 | #define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */ | 31 | #define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */ |
@@ -99,15 +100,14 @@ static int setup_e820_entries(struct boot_params *params) | |||
99 | { | 100 | { |
100 | unsigned int nr_e820_entries; | 101 | unsigned int nr_e820_entries; |
101 | 102 | ||
102 | nr_e820_entries = e820_saved->nr_map; | 103 | nr_e820_entries = e820_table_firmware->nr_entries; |
103 | 104 | ||
104 | /* TODO: Pass entries more than E820MAX in bootparams setup data */ | 105 | /* TODO: Pass entries more than E820_MAX_ENTRIES_ZEROPAGE in bootparams setup data */ |
105 | if (nr_e820_entries > E820MAX) | 106 | if (nr_e820_entries > E820_MAX_ENTRIES_ZEROPAGE) |
106 | nr_e820_entries = E820MAX; | 107 | nr_e820_entries = E820_MAX_ENTRIES_ZEROPAGE; |
107 | 108 | ||
108 | params->e820_entries = nr_e820_entries; | 109 | params->e820_entries = nr_e820_entries; |
109 | memcpy(¶ms->e820_map, &e820_saved->map, | 110 | memcpy(¶ms->e820_table, &e820_table_firmware->entries, nr_e820_entries*sizeof(struct e820_entry)); |
110 | nr_e820_entries * sizeof(struct e820entry)); | ||
111 | 111 | ||
112 | return 0; | 112 | return 0; |
113 | } | 113 | } |
@@ -232,10 +232,10 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params, | |||
232 | nr_e820_entries = params->e820_entries; | 232 | nr_e820_entries = params->e820_entries; |
233 | 233 | ||
234 | for (i = 0; i < nr_e820_entries; i++) { | 234 | for (i = 0; i < nr_e820_entries; i++) { |
235 | if (params->e820_map[i].type != E820_RAM) | 235 | if (params->e820_table[i].type != E820_TYPE_RAM) |
236 | continue; | 236 | continue; |
237 | start = params->e820_map[i].addr; | 237 | start = params->e820_table[i].addr; |
238 | end = params->e820_map[i].addr + params->e820_map[i].size - 1; | 238 | end = params->e820_table[i].addr + params->e820_table[i].size - 1; |
239 | 239 | ||
240 | if ((start <= 0x100000) && end > 0x100000) { | 240 | if ((start <= 0x100000) && end > 0x100000) { |
241 | mem_k = (end >> 10) - (0x100000 >> 10); | 241 | mem_k = (end >> 10) - (0x100000 >> 10); |
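setup_e820_entries() in the kexec-bzimage64.c hunk above copies the saved firmware table into the new kernel's boot_params and clamps it to the zeropage capacity. A hedged, userspace-only sketch of that clamping follows; ZEROPAGE_E820_MAX and struct demo_entry are made-up stand-ins for E820_MAX_ENTRIES_ZEROPAGE and struct boot_e820_entry.

    #include <stdio.h>
    #include <string.h>

    #define ZEROPAGE_E820_MAX 128   /* assumed limit, for illustration only */

    struct demo_entry { unsigned long long addr, size; unsigned int type; };

    /* Copy at most ZEROPAGE_E820_MAX entries; anything beyond would need setup_data. */
    static unsigned int copy_clamped(struct demo_entry *dst,
                                     const struct demo_entry *src,
                                     unsigned int nr)
    {
        if (nr > ZEROPAGE_E820_MAX)
            nr = ZEROPAGE_E820_MAX;

        memcpy(dst, src, nr * sizeof(*dst));
        return nr;
    }

    int main(void)
    {
        static struct demo_entry firmware[200], zeropage[ZEROPAGE_E820_MAX];
        unsigned int copied = copy_clamped(zeropage, firmware, 200);

        printf("copied %u of 200 firmware entries\n", copied);
        return 0;
    }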
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 0f8d20497383..0d904d759ff1 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #include <asm/io_apic.h> | 26 | #include <asm/io_apic.h> |
27 | #include <asm/proto.h> | 27 | #include <asm/proto.h> |
28 | #include <asm/bios_ebda.h> | 28 | #include <asm/bios_ebda.h> |
29 | #include <asm/e820.h> | 29 | #include <asm/e820/api.h> |
30 | #include <asm/setup.h> | 30 | #include <asm/setup.h> |
31 | #include <asm/smp.h> | 31 | #include <asm/smp.h> |
32 | 32 | ||
@@ -826,10 +826,10 @@ static int __init parse_alloc_mptable_opt(char *p) | |||
826 | } | 826 | } |
827 | early_param("alloc_mptable", parse_alloc_mptable_opt); | 827 | early_param("alloc_mptable", parse_alloc_mptable_opt); |
828 | 828 | ||
829 | void __init early_reserve_e820_mpc_new(void) | 829 | void __init e820__memblock_alloc_reserved_mpc_new(void) |
830 | { | 830 | { |
831 | if (enable_update_mptable && alloc_mptable) | 831 | if (enable_update_mptable && alloc_mptable) |
832 | mpc_new_phys = early_reserve_e820(mpc_new_length, 4); | 832 | mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4); |
833 | } | 833 | } |
834 | 834 | ||
835 | static int __init update_mp_table(void) | 835 | static int __init update_mp_table(void) |
diff --git a/arch/x86/kernel/probe_roms.c b/arch/x86/kernel/probe_roms.c index d5f15c3f7b25..963e3fb56437 100644 --- a/arch/x86/kernel/probe_roms.c +++ b/arch/x86/kernel/probe_roms.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include <asm/probe_roms.h> | 15 | #include <asm/probe_roms.h> |
16 | #include <asm/pci-direct.h> | 16 | #include <asm/pci-direct.h> |
17 | #include <asm/e820.h> | 17 | #include <asm/e820/api.h> |
18 | #include <asm/mmzone.h> | 18 | #include <asm/mmzone.h> |
19 | #include <asm/setup.h> | 19 | #include <asm/setup.h> |
20 | #include <asm/sections.h> | 20 | #include <asm/sections.h> |
diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c index 2408c1603438..5ab3895516ac 100644 --- a/arch/x86/kernel/resource.c +++ b/arch/x86/kernel/resource.c | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/ioport.h> | 1 | #include <linux/ioport.h> |
2 | #include <asm/e820.h> | 2 | #include <asm/e820/api.h> |
3 | 3 | ||
4 | static void resource_clip(struct resource *res, resource_size_t start, | 4 | static void resource_clip(struct resource *res, resource_size_t start, |
5 | resource_size_t end) | 5 | resource_size_t end) |
@@ -25,10 +25,10 @@ static void resource_clip(struct resource *res, resource_size_t start, | |||
25 | static void remove_e820_regions(struct resource *avail) | 25 | static void remove_e820_regions(struct resource *avail) |
26 | { | 26 | { |
27 | int i; | 27 | int i; |
28 | struct e820entry *entry; | 28 | struct e820_entry *entry; |
29 | 29 | ||
30 | for (i = 0; i < e820->nr_map; i++) { | 30 | for (i = 0; i < e820_table->nr_entries; i++) { |
31 | entry = &e820->map[i]; | 31 | entry = &e820_table->entries[i]; |
32 | 32 | ||
33 | resource_clip(avail, entry->addr, | 33 | resource_clip(avail, entry->addr, |
34 | entry->addr + entry->size - 1); | 34 | entry->addr + entry->size - 1); |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 4bf0c8926a1c..cab13f75908b 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -75,7 +75,7 @@ | |||
75 | #include <asm/mtrr.h> | 75 | #include <asm/mtrr.h> |
76 | #include <asm/apic.h> | 76 | #include <asm/apic.h> |
77 | #include <asm/realmode.h> | 77 | #include <asm/realmode.h> |
78 | #include <asm/e820.h> | 78 | #include <asm/e820/api.h> |
79 | #include <asm/mpspec.h> | 79 | #include <asm/mpspec.h> |
80 | #include <asm/setup.h> | 80 | #include <asm/setup.h> |
81 | #include <asm/efi.h> | 81 | #include <asm/efi.h> |
@@ -119,7 +119,7 @@ | |||
119 | * max_low_pfn_mapped: highest direct mapped pfn under 4GB | 119 | * max_low_pfn_mapped: highest direct mapped pfn under 4GB |
120 | * max_pfn_mapped: highest direct mapped pfn over 4GB | 120 | * max_pfn_mapped: highest direct mapped pfn over 4GB |
121 | * | 121 | * |
122 | * The direct mapping only covers E820_RAM regions, so the ranges and gaps are | 122 | * The direct mapping only covers E820_TYPE_RAM regions, so the ranges and gaps are |
123 | * represented by pfn_mapped | 123 | * represented by pfn_mapped |
124 | */ | 124 | */ |
125 | unsigned long max_low_pfn_mapped; | 125 | unsigned long max_low_pfn_mapped; |
@@ -426,7 +426,7 @@ static void __init parse_setup_data(void) | |||
426 | 426 | ||
427 | switch (data_type) { | 427 | switch (data_type) { |
428 | case SETUP_E820_EXT: | 428 | case SETUP_E820_EXT: |
429 | parse_e820_ext(pa_data, data_len); | 429 | e820__memory_setup_extended(pa_data, data_len); |
430 | break; | 430 | break; |
431 | case SETUP_DTB: | 431 | case SETUP_DTB: |
432 | add_dtb(pa_data); | 432 | add_dtb(pa_data); |
@@ -441,29 +441,6 @@ static void __init parse_setup_data(void) | |||
441 | } | 441 | } |
442 | } | 442 | } |
443 | 443 | ||
444 | static void __init e820_reserve_setup_data(void) | ||
445 | { | ||
446 | struct setup_data *data; | ||
447 | u64 pa_data; | ||
448 | |||
449 | pa_data = boot_params.hdr.setup_data; | ||
450 | if (!pa_data) | ||
451 | return; | ||
452 | |||
453 | while (pa_data) { | ||
454 | data = early_memremap(pa_data, sizeof(*data)); | ||
455 | e820_update_range(pa_data, sizeof(*data)+data->len, | ||
456 | E820_RAM, E820_RESERVED_KERN); | ||
457 | pa_data = data->next; | ||
458 | early_memunmap(data, sizeof(*data)); | ||
459 | } | ||
460 | |||
461 | sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map); | ||
462 | memcpy(e820_saved, e820, sizeof(struct e820map)); | ||
463 | printk(KERN_INFO "extended physical RAM map:\n"); | ||
464 | e820_print_map("reserve setup_data"); | ||
465 | } | ||
466 | |||
467 | static void __init memblock_x86_reserve_range_setup_data(void) | 444 | static void __init memblock_x86_reserve_range_setup_data(void) |
468 | { | 445 | { |
469 | struct setup_data *data; | 446 | struct setup_data *data; |
@@ -756,16 +733,16 @@ static void __init trim_bios_range(void) | |||
756 | * since some BIOSes are known to corrupt low memory. See the | 733 | * since some BIOSes are known to corrupt low memory. See the |
757 | * Kconfig help text for X86_RESERVE_LOW. | 734 | * Kconfig help text for X86_RESERVE_LOW. |
758 | */ | 735 | */ |
759 | e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED); | 736 | e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED); |
760 | 737 | ||
761 | /* | 738 | /* |
762 | * special case: Some BIOSen report the PC BIOS | 739 | * special case: Some BIOSen report the PC BIOS |
763 | * area (640->1Mb) as ram even though it is not. | 740 | * area (640->1Mb) as ram even though it is not. |
764 | * take them out. | 741 | * take them out. |
765 | */ | 742 | */ |
766 | e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); | 743 | e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1); |
767 | 744 | ||
768 | sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map); | 745 | e820__update_table(e820_table); |
769 | } | 746 | } |
770 | 747 | ||
771 | /* called before trim_bios_range() to spare extra sanitize */ | 748 | /* called before trim_bios_range() to spare extra sanitize */ |
@@ -775,18 +752,18 @@ static void __init e820_add_kernel_range(void) | |||
775 | u64 size = __pa_symbol(_end) - start; | 752 | u64 size = __pa_symbol(_end) - start; |
776 | 753 | ||
777 | /* | 754 | /* |
778 | * Complain if .text .data and .bss are not marked as E820_RAM and | 755 | * Complain if .text .data and .bss are not marked as E820_TYPE_RAM and |
779 | * attempt to fix it by adding the range. We may have a confused BIOS, | 756 | * attempt to fix it by adding the range. We may have a confused BIOS, |
780 | * or the user may have used memmap=exactmap or memmap=xxM$yyM to | 757 | * or the user may have used memmap=exactmap or memmap=xxM$yyM to |
781 | * exclude kernel range. If we really are running on top non-RAM, | 758 | * exclude kernel range. If we really are running on top non-RAM, |
782 | * we will crash later anyways. | 759 | * we will crash later anyways. |
783 | */ | 760 | */ |
784 | if (e820_all_mapped(start, start + size, E820_RAM)) | 761 | if (e820__mapped_all(start, start + size, E820_TYPE_RAM)) |
785 | return; | 762 | return; |
786 | 763 | ||
787 | pr_warn(".text .data .bss are not marked as E820_RAM!\n"); | 764 | pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n"); |
788 | e820_remove_range(start, size, E820_RAM, 0); | 765 | e820__range_remove(start, size, E820_TYPE_RAM, 0); |
789 | e820_add_region(start, size, E820_RAM); | 766 | e820__range_add(start, size, E820_TYPE_RAM); |
790 | } | 767 | } |
791 | 768 | ||
792 | static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; | 769 | static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; |
@@ -939,7 +916,7 @@ void __init setup_arch(char **cmdline_p) | |||
939 | x86_init.oem.arch_setup(); | 916 | x86_init.oem.arch_setup(); |
940 | 917 | ||
941 | iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; | 918 | iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; |
942 | setup_memory_map(); | 919 | e820__memory_setup(); |
943 | parse_setup_data(); | 920 | parse_setup_data(); |
944 | 921 | ||
945 | copy_edd(); | 922 | copy_edd(); |
@@ -1028,9 +1005,8 @@ void __init setup_arch(char **cmdline_p) | |||
1028 | early_dump_pci_devices(); | 1005 | early_dump_pci_devices(); |
1029 | #endif | 1006 | #endif |
1030 | 1007 | ||
1031 | /* update the e820_saved too */ | 1008 | e820__reserve_setup_data(); |
1032 | e820_reserve_setup_data(); | 1009 | e820__finish_early_params(); |
1033 | finish_e820_parsing(); | ||
1034 | 1010 | ||
1035 | if (efi_enabled(EFI_BOOT)) | 1011 | if (efi_enabled(EFI_BOOT)) |
1036 | efi_init(); | 1012 | efi_init(); |
@@ -1056,11 +1032,11 @@ void __init setup_arch(char **cmdline_p) | |||
1056 | trim_bios_range(); | 1032 | trim_bios_range(); |
1057 | #ifdef CONFIG_X86_32 | 1033 | #ifdef CONFIG_X86_32 |
1058 | if (ppro_with_ram_bug()) { | 1034 | if (ppro_with_ram_bug()) { |
1059 | e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM, | 1035 | e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM, |
1060 | E820_RESERVED); | 1036 | E820_TYPE_RESERVED); |
1061 | sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map); | 1037 | e820__update_table(e820_table); |
1062 | printk(KERN_INFO "fixed physical RAM map:\n"); | 1038 | printk(KERN_INFO "fixed physical RAM map:\n"); |
1063 | e820_print_map("bad_ppro"); | 1039 | e820__print_table("bad_ppro"); |
1064 | } | 1040 | } |
1065 | #else | 1041 | #else |
1066 | early_gart_iommu_check(); | 1042 | early_gart_iommu_check(); |
@@ -1070,12 +1046,12 @@ void __init setup_arch(char **cmdline_p) | |||
1070 | * partially used pages are not usable - thus | 1046 | * partially used pages are not usable - thus |
1071 | * we are rounding upwards: | 1047 | * we are rounding upwards: |
1072 | */ | 1048 | */ |
1073 | max_pfn = e820_end_of_ram_pfn(); | 1049 | max_pfn = e820__end_of_ram_pfn(); |
1074 | 1050 | ||
1075 | /* update e820 for memory not covered by WB MTRRs */ | 1051 | /* update e820 for memory not covered by WB MTRRs */ |
1076 | mtrr_bp_init(); | 1052 | mtrr_bp_init(); |
1077 | if (mtrr_trim_uncached_memory(max_pfn)) | 1053 | if (mtrr_trim_uncached_memory(max_pfn)) |
1078 | max_pfn = e820_end_of_ram_pfn(); | 1054 | max_pfn = e820__end_of_ram_pfn(); |
1079 | 1055 | ||
1080 | max_possible_pfn = max_pfn; | 1056 | max_possible_pfn = max_pfn; |
1081 | 1057 | ||
@@ -1094,7 +1070,7 @@ void __init setup_arch(char **cmdline_p) | |||
1094 | /* How many end-of-memory variables you have, grandma! */ | 1070 | /* How many end-of-memory variables you have, grandma! */ |
1095 | /* need this before calling reserve_initrd */ | 1071 | /* need this before calling reserve_initrd */ |
1096 | if (max_pfn > (1UL<<(32 - PAGE_SHIFT))) | 1072 | if (max_pfn > (1UL<<(32 - PAGE_SHIFT))) |
1097 | max_low_pfn = e820_end_of_low_ram_pfn(); | 1073 | max_low_pfn = e820__end_of_low_ram_pfn(); |
1098 | else | 1074 | else |
1099 | max_low_pfn = max_pfn; | 1075 | max_low_pfn = max_pfn; |
1100 | 1076 | ||
@@ -1111,7 +1087,7 @@ void __init setup_arch(char **cmdline_p) | |||
1111 | early_alloc_pgt_buf(); | 1087 | early_alloc_pgt_buf(); |
1112 | 1088 | ||
1113 | /* | 1089 | /* |
1114 | * Need to conclude brk, before memblock_x86_fill() | 1090 | * Need to conclude brk, before e820__memblock_setup() |
1115 | * it could use memblock_find_in_range, could overlap with | 1091 | * it could use memblock_find_in_range, could overlap with |
1116 | * brk area. | 1092 | * brk area. |
1117 | */ | 1093 | */ |
@@ -1120,7 +1096,7 @@ void __init setup_arch(char **cmdline_p) | |||
1120 | cleanup_highmap(); | 1096 | cleanup_highmap(); |
1121 | 1097 | ||
1122 | memblock_set_current_limit(ISA_END_ADDRESS); | 1098 | memblock_set_current_limit(ISA_END_ADDRESS); |
1123 | memblock_x86_fill(); | 1099 | e820__memblock_setup(); |
1124 | 1100 | ||
1125 | reserve_bios_regions(); | 1101 | reserve_bios_regions(); |
1126 | 1102 | ||
@@ -1137,7 +1113,7 @@ void __init setup_arch(char **cmdline_p) | |||
1137 | } | 1113 | } |
1138 | 1114 | ||
1139 | /* preallocate 4k for mptable mpc */ | 1115 | /* preallocate 4k for mptable mpc */ |
1140 | early_reserve_e820_mpc_new(); | 1116 | e820__memblock_alloc_reserved_mpc_new(); |
1141 | 1117 | ||
1142 | #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION | 1118 | #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION |
1143 | setup_bios_corruption_check(); | 1119 | setup_bios_corruption_check(); |
@@ -1275,12 +1251,12 @@ void __init setup_arch(char **cmdline_p) | |||
1275 | 1251 | ||
1276 | kvm_guest_init(); | 1252 | kvm_guest_init(); |
1277 | 1253 | ||
1278 | e820_reserve_resources(); | 1254 | e820__reserve_resources(); |
1279 | e820_mark_nosave_regions(max_low_pfn); | 1255 | e820__register_nosave_regions(max_low_pfn); |
1280 | 1256 | ||
1281 | x86_init.resources.reserve_resources(); | 1257 | x86_init.resources.reserve_resources(); |
1282 | 1258 | ||
1283 | e820_setup_gap(); | 1259 | e820__setup_pci_gap(); |
1284 | 1260 | ||
1285 | #ifdef CONFIG_VT | 1261 | #ifdef CONFIG_VT |
1286 | #if defined(CONFIG_VGA_CONSOLE) | 1262 | #if defined(CONFIG_VGA_CONSOLE) |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 3cab8415389a..d798c0da451c 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/mce.h> | 33 | #include <asm/mce.h> |
34 | #include <asm/trace/irq_vectors.h> | 34 | #include <asm/trace/irq_vectors.h> |
35 | #include <asm/kexec.h> | 35 | #include <asm/kexec.h> |
36 | #include <asm/virtext.h> | ||
36 | 37 | ||
37 | /* | 38 | /* |
38 | * Some notes on x86 processor bugs affecting SMP operation: | 39 | * Some notes on x86 processor bugs affecting SMP operation: |
@@ -162,6 +163,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) | |||
162 | if (raw_smp_processor_id() == atomic_read(&stopping_cpu)) | 163 | if (raw_smp_processor_id() == atomic_read(&stopping_cpu)) |
163 | return NMI_HANDLED; | 164 | return NMI_HANDLED; |
164 | 165 | ||
166 | cpu_emergency_vmxoff(); | ||
165 | stop_this_cpu(NULL); | 167 | stop_this_cpu(NULL); |
166 | 168 | ||
167 | return NMI_HANDLED; | 169 | return NMI_HANDLED; |
@@ -174,6 +176,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) | |||
174 | asmlinkage __visible void smp_reboot_interrupt(void) | 176 | asmlinkage __visible void smp_reboot_interrupt(void) |
175 | { | 177 | { |
176 | ipi_entering_ack_irq(); | 178 | ipi_entering_ack_irq(); |
179 | cpu_emergency_vmxoff(); | ||
177 | stop_this_cpu(NULL); | 180 | stop_this_cpu(NULL); |
178 | irq_exit(); | 181 | irq_exit(); |
179 | } | 182 | } |
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index b868fa1b812b..ccccd335ae01 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c | |||
@@ -42,7 +42,7 @@ | |||
42 | #include <asm/fixmap.h> | 42 | #include <asm/fixmap.h> |
43 | #include <asm/proto.h> | 43 | #include <asm/proto.h> |
44 | #include <asm/setup.h> | 44 | #include <asm/setup.h> |
45 | #include <asm/e820.h> | 45 | #include <asm/e820/api.h> |
46 | #include <asm/io.h> | 46 | #include <asm/io.h> |
47 | 47 | ||
48 | #include "../realmode/rm/wakeup.h" | 48 | #include "../realmode/rm/wakeup.h" |
@@ -68,9 +68,9 @@ void __init tboot_probe(void) | |||
68 | * also verify that it is mapped as we expect it before calling | 68 | * also verify that it is mapped as we expect it before calling |
69 | * set_fixmap(), to reduce chance of garbage value causing crash | 69 | * set_fixmap(), to reduce chance of garbage value causing crash |
70 | */ | 70 | */ |
71 | if (!e820_any_mapped(boot_params.tboot_addr, | 71 | if (!e820__mapped_any(boot_params.tboot_addr, |
72 | boot_params.tboot_addr, E820_RESERVED)) { | 72 | boot_params.tboot_addr, E820_TYPE_RESERVED)) { |
73 | pr_warning("non-0 tboot_addr but it is not of type E820_RESERVED\n"); | 73 | pr_warning("non-0 tboot_addr but it is not of type E820_TYPE_RESERVED\n"); |
74 | return; | 74 | return; |
75 | } | 75 | } |
76 | 76 | ||
@@ -188,12 +188,12 @@ static int tboot_setup_sleep(void) | |||
188 | 188 | ||
189 | tboot->num_mac_regions = 0; | 189 | tboot->num_mac_regions = 0; |
190 | 190 | ||
191 | for (i = 0; i < e820->nr_map; i++) { | 191 | for (i = 0; i < e820_table->nr_entries; i++) { |
192 | if ((e820->map[i].type != E820_RAM) | 192 | if ((e820_table->entries[i].type != E820_TYPE_RAM) |
193 | && (e820->map[i].type != E820_RESERVED_KERN)) | 193 | && (e820_table->entries[i].type != E820_TYPE_RESERVED_KERN)) |
194 | continue; | 194 | continue; |
195 | 195 | ||
196 | add_mac_region(e820->map[i].addr, e820->map[i].size); | 196 | add_mac_region(e820_table->entries[i].addr, e820_table->entries[i].size); |
197 | } | 197 | } |
198 | 198 | ||
199 | tboot->acpi_sinfo.kernel_s3_resume_vector = | 199 | tboot->acpi_sinfo.kernel_s3_resume_vector = |
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 11a93f005268..a088b2c47f73 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <asm/mpspec.h> | 14 | #include <asm/mpspec.h> |
15 | #include <asm/setup.h> | 15 | #include <asm/setup.h> |
16 | #include <asm/apic.h> | 16 | #include <asm/apic.h> |
17 | #include <asm/e820.h> | 17 | #include <asm/e820/api.h> |
18 | #include <asm/time.h> | 18 | #include <asm/time.h> |
19 | #include <asm/irq.h> | 19 | #include <asm/irq.h> |
20 | #include <asm/io_apic.h> | 20 | #include <asm/io_apic.h> |
@@ -38,7 +38,7 @@ struct x86_init_ops x86_init __initdata = { | |||
38 | .resources = { | 38 | .resources = { |
39 | .probe_roms = probe_roms, | 39 | .probe_roms = probe_roms, |
40 | .reserve_resources = reserve_standard_io_resources, | 40 | .reserve_resources = reserve_standard_io_resources, |
41 | .memory_setup = default_machine_specific_memory_setup, | 41 | .memory_setup = e820__memory_setup_default, |
42 | }, | 42 | }, |
43 | 43 | ||
44 | .mpparse = { | 44 | .mpparse = { |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 3e4bf887a246..99472698c931 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -67,7 +67,7 @@ | |||
67 | #include <asm/pgtable.h> | 67 | #include <asm/pgtable.h> |
68 | #include <asm/desc.h> | 68 | #include <asm/desc.h> |
69 | #include <asm/setup.h> | 69 | #include <asm/setup.h> |
70 | #include <asm/e820.h> | 70 | #include <asm/e820/api.h> |
71 | #include <asm/mce.h> | 71 | #include <asm/mce.h> |
72 | #include <asm/io.h> | 72 | #include <asm/io.h> |
73 | #include <asm/fpu/api.h> | 73 | #include <asm/fpu/api.h> |
@@ -1180,9 +1180,9 @@ static __init char *lguest_memory_setup(void) | |||
1180 | * The Linux bootloader header contains an "e820" memory map: the | 1180 | * The Linux bootloader header contains an "e820" memory map: the |
1181 | * Launcher populated the first entry with our memory limit. | 1181 | * Launcher populated the first entry with our memory limit. |
1182 | */ | 1182 | */ |
1183 | e820_add_region(boot_params.e820_map[0].addr, | 1183 | e820__range_add(boot_params.e820_table[0].addr, |
1184 | boot_params.e820_map[0].size, | 1184 | boot_params.e820_table[0].size, |
1185 | boot_params.e820_map[0].type); | 1185 | boot_params.e820_table[0].type); |
1186 | 1186 | ||
1187 | /* This string is for the boot messages. */ | 1187 | /* This string is for the boot messages. */ |
1188 | return "LGUEST"; | 1188 | return "LGUEST"; |
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c index 121f59c6ee54..5761a4f19455 100644 --- a/arch/x86/lib/kaslr.c +++ b/arch/x86/lib/kaslr.c | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <asm/kaslr.h> | 8 | #include <asm/kaslr.h> |
9 | #include <asm/msr.h> | 9 | #include <asm/msr.h> |
10 | #include <asm/archrandom.h> | 10 | #include <asm/archrandom.h> |
11 | #include <asm/e820.h> | 11 | #include <asm/e820/api.h> |
12 | #include <asm/io.h> | 12 | #include <asm/io.h> |
13 | 13 | ||
14 | /* | 14 | /* |
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c index d1c7de095808..91f501b2da3b 100644 --- a/arch/x86/mm/amdtopology.c +++ b/arch/x86/mm/amdtopology.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <asm/types.h> | 19 | #include <asm/types.h> |
20 | #include <asm/mmzone.h> | 20 | #include <asm/mmzone.h> |
21 | #include <asm/proto.h> | 21 | #include <asm/proto.h> |
22 | #include <asm/e820.h> | 22 | #include <asm/e820/api.h> |
23 | #include <asm/pci-direct.h> | 23 | #include <asm/pci-direct.h> |
24 | #include <asm/numa.h> | 24 | #include <asm/numa.h> |
25 | #include <asm/mpspec.h> | 25 | #include <asm/mpspec.h> |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 889e7619a091..138bad2fb6bc 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/bootmem.h> /* for max_low_pfn */ | 6 | #include <linux/bootmem.h> /* for max_low_pfn */ |
7 | 7 | ||
8 | #include <asm/cacheflush.h> | 8 | #include <asm/cacheflush.h> |
9 | #include <asm/e820.h> | 9 | #include <asm/e820/api.h> |
10 | #include <asm/init.h> | 10 | #include <asm/init.h> |
11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
12 | #include <asm/page_types.h> | 12 | #include <asm/page_types.h> |
@@ -373,14 +373,14 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, | |||
373 | return nr_range; | 373 | return nr_range; |
374 | } | 374 | } |
375 | 375 | ||
376 | struct range pfn_mapped[E820_X_MAX]; | 376 | struct range pfn_mapped[E820_MAX_ENTRIES]; |
377 | int nr_pfn_mapped; | 377 | int nr_pfn_mapped; |
378 | 378 | ||
379 | static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn) | 379 | static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn) |
380 | { | 380 | { |
381 | nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX, | 381 | nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES, |
382 | nr_pfn_mapped, start_pfn, end_pfn); | 382 | nr_pfn_mapped, start_pfn, end_pfn); |
383 | nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX); | 383 | nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES); |
384 | 384 | ||
385 | max_pfn_mapped = max(max_pfn_mapped, end_pfn); | 385 | max_pfn_mapped = max(max_pfn_mapped, end_pfn); |
386 | 386 | ||
@@ -430,7 +430,7 @@ unsigned long __ref init_memory_mapping(unsigned long start, | |||
430 | 430 | ||
431 | /* | 431 | /* |
432 | * We need to iterate through the E820 memory map and create direct mappings | 432 | * We need to iterate through the E820 memory map and create direct mappings |
433 | * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply | 433 | * for only E820_TYPE_RAM and E820_KERN_RESERVED regions. We cannot simply |
434 | * create direct mappings for all pfns from [0 to max_low_pfn) and | 434 | * create direct mappings for all pfns from [0 to max_low_pfn) and |
435 | * [4GB to max_pfn) because of possible memory holes in high addresses | 435 | * [4GB to max_pfn) because of possible memory holes in high addresses |
436 | * that cannot be marked as UC by fixed/variable range MTRRs. | 436 | * that cannot be marked as UC by fixed/variable range MTRRs. |
@@ -720,7 +720,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
720 | 720 | ||
721 | void __ref free_initmem(void) | 721 | void __ref free_initmem(void) |
722 | { | 722 | { |
723 | e820_reallocate_tables(); | 723 | e820__reallocate_tables(); |
724 | 724 | ||
725 | free_init_pages("unused kernel", | 725 | free_init_pages("unused kernel", |
726 | (unsigned long)(&__init_begin), | 726 | (unsigned long)(&__init_begin), |
@@ -743,6 +743,53 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) | |||
743 | } | 743 | } |
744 | #endif | 744 | #endif |
745 | 745 | ||
746 | /* | ||
747 | * Calculate the precise size of the DMA zone (first 16 MB of RAM), | ||
748 | * and pass it to the MM layer - to help it set zone watermarks more | ||
749 | * accurately. | ||
750 | * | ||
751 | * Done on 64-bit systems only for the time being, although 32-bit systems | ||
752 | * might benefit from this as well. | ||
753 | */ | ||
754 | void __init memblock_find_dma_reserve(void) | ||
755 | { | ||
756 | #ifdef CONFIG_X86_64 | ||
757 | u64 nr_pages = 0, nr_free_pages = 0; | ||
758 | unsigned long start_pfn, end_pfn; | ||
759 | phys_addr_t start_addr, end_addr; | ||
760 | int i; | ||
761 | u64 u; | ||
762 | |||
763 | /* | ||
764 | * Iterate over all memory ranges (free and reserved ones alike), | ||
765 | * to calculate the total number of pages in the first 16 MB of RAM: | ||
766 | */ | ||
767 | nr_pages = 0; | ||
768 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { | ||
769 | start_pfn = min(start_pfn, MAX_DMA_PFN); | ||
770 | end_pfn = min(end_pfn, MAX_DMA_PFN); | ||
771 | |||
772 | nr_pages += end_pfn - start_pfn; | ||
773 | } | ||
774 | |||
775 | /* | ||
776 | * Iterate over free memory ranges to calculate the number of free | ||
777 | * pages in the DMA zone, while not counting potential partial | ||
778 | * pages at the beginning or the end of the range: | ||
779 | */ | ||
780 | nr_free_pages = 0; | ||
781 | for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) { | ||
782 | start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN); | ||
783 | end_pfn = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN); | ||
784 | |||
785 | if (start_pfn < end_pfn) | ||
786 | nr_free_pages += end_pfn - start_pfn; | ||
787 | } | ||
788 | |||
789 | set_dma_reserve(nr_pages - nr_free_pages); | ||
790 | #endif | ||
791 | } | ||
792 | |||
746 | void __init zone_sizes_init(void) | 793 | void __init zone_sizes_init(void) |
747 | { | 794 | { |
748 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 795 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
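The comment block added above memblock_find_dma_reserve() describes the accounting: clip every present memory range at the 16 MB DMA boundary and sum the pages, do the same over the free ranges, then hand the difference to set_dma_reserve(). The standalone sketch below reproduces only that arithmetic, assuming 4 KB pages and made-up pfn ranges; DEMO_MAX_DMA_PFN is a stand-in for MAX_DMA_PFN.

    #include <stdio.h>

    #define DEMO_MAX_DMA_PFN 0x1000UL   /* 16 MB with 4 KB pages */

    struct pfn_range { unsigned long start, end; };

    /* Sum the pages of each range after clipping it at the DMA boundary. */
    static unsigned long pages_below_dma(const struct pfn_range *r, int n)
    {
        unsigned long pages = 0;

        for (int i = 0; i < n; i++) {
            unsigned long s = r[i].start < DEMO_MAX_DMA_PFN ? r[i].start : DEMO_MAX_DMA_PFN;
            unsigned long e = r[i].end   < DEMO_MAX_DMA_PFN ? r[i].end   : DEMO_MAX_DMA_PFN;

            if (s < e)
                pages += e - s;
        }
        return pages;
    }

    int main(void)
    {
        struct pfn_range present[]  = { { 0x001, 0x09f }, { 0x100, 0x8000 } };
        struct pfn_range free_mem[] = { { 0x010, 0x09f }, { 0x200, 0x8000 } };

        unsigned long nr_pages      = pages_below_dma(present, 2);
        unsigned long nr_free_pages = pages_below_dma(free_mem, 2);

        printf("dma_reserve = %lu pages\n", nr_pages - nr_free_pages);
        return 0;
    }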
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 2b4b53e6793f..1fa97c941abe 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #include <asm/pgtable.h> | 38 | #include <asm/pgtable.h> |
39 | #include <asm/dma.h> | 39 | #include <asm/dma.h> |
40 | #include <asm/fixmap.h> | 40 | #include <asm/fixmap.h> |
41 | #include <asm/e820.h> | 41 | #include <asm/e820/api.h> |
42 | #include <asm/apic.h> | 42 | #include <asm/apic.h> |
43 | #include <asm/bugs.h> | 43 | #include <asm/bugs.h> |
44 | #include <asm/tlb.h> | 44 | #include <asm/tlb.h> |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 15173d37f399..f6da869810a8 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -41,7 +41,7 @@ | |||
41 | #include <asm/pgalloc.h> | 41 | #include <asm/pgalloc.h> |
42 | #include <asm/dma.h> | 42 | #include <asm/dma.h> |
43 | #include <asm/fixmap.h> | 43 | #include <asm/fixmap.h> |
44 | #include <asm/e820.h> | 44 | #include <asm/e820/api.h> |
45 | #include <asm/apic.h> | 45 | #include <asm/apic.h> |
46 | #include <asm/tlb.h> | 46 | #include <asm/tlb.h> |
47 | #include <asm/mmu_context.h> | 47 | #include <asm/mmu_context.h> |
@@ -337,10 +337,10 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end, | |||
337 | paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE; | 337 | paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE; |
338 | if (paddr >= paddr_end) { | 338 | if (paddr >= paddr_end) { |
339 | if (!after_bootmem && | 339 | if (!after_bootmem && |
340 | !e820_any_mapped(paddr & PAGE_MASK, paddr_next, | 340 | !e820__mapped_any(paddr & PAGE_MASK, paddr_next, |
341 | E820_RAM) && | 341 | E820_TYPE_RAM) && |
342 | !e820_any_mapped(paddr & PAGE_MASK, paddr_next, | 342 | !e820__mapped_any(paddr & PAGE_MASK, paddr_next, |
343 | E820_RESERVED_KERN)) | 343 | E820_TYPE_RESERVED_KERN)) |
344 | set_pte(pte, __pte(0)); | 344 | set_pte(pte, __pte(0)); |
345 | continue; | 345 | continue; |
346 | } | 346 | } |
@@ -392,10 +392,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end, | |||
392 | paddr_next = (paddr & PMD_MASK) + PMD_SIZE; | 392 | paddr_next = (paddr & PMD_MASK) + PMD_SIZE; |
393 | if (paddr >= paddr_end) { | 393 | if (paddr >= paddr_end) { |
394 | if (!after_bootmem && | 394 | if (!after_bootmem && |
395 | !e820_any_mapped(paddr & PMD_MASK, paddr_next, | 395 | !e820__mapped_any(paddr & PMD_MASK, paddr_next, |
396 | E820_RAM) && | 396 | E820_TYPE_RAM) && |
397 | !e820_any_mapped(paddr & PMD_MASK, paddr_next, | 397 | !e820__mapped_any(paddr & PMD_MASK, paddr_next, |
398 | E820_RESERVED_KERN)) | 398 | E820_TYPE_RESERVED_KERN)) |
399 | set_pmd(pmd, __pmd(0)); | 399 | set_pmd(pmd, __pmd(0)); |
400 | continue; | 400 | continue; |
401 | } | 401 | } |
@@ -478,10 +478,10 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, | |||
478 | 478 | ||
479 | if (paddr >= paddr_end) { | 479 | if (paddr >= paddr_end) { |
480 | if (!after_bootmem && | 480 | if (!after_bootmem && |
481 | !e820_any_mapped(paddr & PUD_MASK, paddr_next, | 481 | !e820__mapped_any(paddr & PUD_MASK, paddr_next, |
482 | E820_RAM) && | 482 | E820_TYPE_RAM) && |
483 | !e820_any_mapped(paddr & PUD_MASK, paddr_next, | 483 | !e820__mapped_any(paddr & PUD_MASK, paddr_next, |
484 | E820_RESERVED_KERN)) | 484 | E820_TYPE_RESERVED_KERN)) |
485 | set_pud(pud, __pud(0)); | 485 | set_pud(pud, __pud(0)); |
486 | continue; | 486 | continue; |
487 | } | 487 | } |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 7aaa2635862d..c43b6b33463a 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -9,12 +9,13 @@ | |||
9 | #include <linux/bootmem.h> | 9 | #include <linux/bootmem.h> |
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/io.h> | 11 | #include <linux/io.h> |
12 | #include <linux/ioport.h> | ||
12 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
13 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
14 | #include <linux/mmiotrace.h> | 15 | #include <linux/mmiotrace.h> |
15 | 16 | ||
16 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
17 | #include <asm/e820.h> | 18 | #include <asm/e820/api.h> |
18 | #include <asm/fixmap.h> | 19 | #include <asm/fixmap.h> |
19 | #include <asm/pgtable.h> | 20 | #include <asm/pgtable.h> |
20 | #include <asm/tlbflush.h> | 21 | #include <asm/tlbflush.h> |
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 4c90cfdc128b..da92df32d0f1 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c | |||
@@ -8,11 +8,12 @@ | |||
8 | #include <linux/sched/task.h> | 8 | #include <linux/sched/task.h> |
9 | #include <linux/vmalloc.h> | 9 | #include <linux/vmalloc.h> |
10 | 10 | ||
11 | #include <asm/e820/types.h> | ||
11 | #include <asm/tlbflush.h> | 12 | #include <asm/tlbflush.h> |
12 | #include <asm/sections.h> | 13 | #include <asm/sections.h> |
13 | 14 | ||
14 | extern pgd_t early_level4_pgt[PTRS_PER_PGD]; | 15 | extern pgd_t early_level4_pgt[PTRS_PER_PGD]; |
15 | extern struct range pfn_mapped[E820_X_MAX]; | 16 | extern struct range pfn_mapped[E820_MAX_ENTRIES]; |
16 | 17 | ||
17 | static int __init map_range(struct range *range) | 18 | static int __init map_range(struct range *range) |
18 | { | 19 | { |
@@ -104,7 +105,7 @@ void __init kasan_init(void) | |||
104 | kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, | 105 | kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, |
105 | kasan_mem_to_shadow((void *)PAGE_OFFSET)); | 106 | kasan_mem_to_shadow((void *)PAGE_OFFSET)); |
106 | 107 | ||
107 | for (i = 0; i < E820_X_MAX; i++) { | 108 | for (i = 0; i < E820_MAX_ENTRIES; i++) { |
108 | if (pfn_mapped[i].end == 0) | 109 | if (pfn_mapped[i].end == 0) |
109 | break; | 110 | break; |
110 | 111 | ||
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index bef36622e408..4d434ddb75db 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <linux/kallsyms.h> | 32 | #include <linux/kallsyms.h> |
33 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
34 | #include <linux/mmiotrace.h> | 34 | #include <linux/mmiotrace.h> |
35 | #include <asm/e820.h> /* for ISA_START_ADDRESS */ | 35 | #include <asm/e820/api.h> /* for ISA_START_ADDRESS */ |
36 | #include <linux/atomic.h> | 36 | #include <linux/atomic.h> |
37 | #include <linux/percpu.h> | 37 | #include <linux/percpu.h> |
38 | #include <linux/cpu.h> | 38 | #include <linux/cpu.h> |
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 12dcad7297a5..f9d99535f233 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/topology.h> | 13 | #include <linux/topology.h> |
14 | 14 | ||
15 | #include <asm/e820.h> | 15 | #include <asm/e820/api.h> |
16 | #include <asm/proto.h> | 16 | #include <asm/proto.h> |
17 | #include <asm/dma.h> | 17 | #include <asm/dma.h> |
18 | #include <asm/amd_nb.h> | 18 | #include <asm/amd_nb.h> |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 28d42130243c..a57e8e02f457 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
16 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
17 | 17 | ||
18 | #include <asm/e820.h> | 18 | #include <asm/e820/api.h> |
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | #include <asm/tlbflush.h> | 20 | #include <asm/tlbflush.h> |
21 | #include <asm/sections.h> | 21 | #include <asm/sections.h> |
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index efc32bc6862b..9b78685b66e6 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/seq_file.h> | 10 | #include <linux/seq_file.h> |
11 | #include <linux/bootmem.h> | 11 | #include <linux/bootmem.h> |
12 | #include <linux/debugfs.h> | 12 | #include <linux/debugfs.h> |
13 | #include <linux/ioport.h> | ||
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/pfn_t.h> | 15 | #include <linux/pfn_t.h> |
15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
@@ -23,7 +24,7 @@ | |||
23 | #include <asm/x86_init.h> | 24 | #include <asm/x86_init.h> |
24 | #include <asm/pgtable.h> | 25 | #include <asm/pgtable.h> |
25 | #include <asm/fcntl.h> | 26 | #include <asm/fcntl.h> |
26 | #include <asm/e820.h> | 27 | #include <asm/e820/api.h> |
27 | #include <asm/mtrr.h> | 28 | #include <asm/mtrr.h> |
28 | #include <asm/page.h> | 29 | #include <asm/page.h> |
29 | #include <asm/msr.h> | 30 | #include <asm/msr.h> |
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 9adce776852b..de53c52551a5 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <asm/pgtable.h> | 12 | #include <asm/pgtable.h> |
13 | #include <asm/pgalloc.h> | 13 | #include <asm/pgalloc.h> |
14 | #include <asm/fixmap.h> | 14 | #include <asm/fixmap.h> |
15 | #include <asm/e820.h> | 15 | #include <asm/e820/api.h> |
16 | #include <asm/tlb.h> | 16 | #include <asm/tlb.h> |
17 | #include <asm/tlbflush.h> | 17 | #include <asm/tlbflush.h> |
18 | #include <asm/io.h> | 18 | #include <asm/io.h> |
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 35fe69529bc1..3ea20d61b523 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <asm/proto.h> | 19 | #include <asm/proto.h> |
20 | #include <asm/numa.h> | 20 | #include <asm/numa.h> |
21 | #include <asm/e820.h> | 21 | #include <asm/e820/api.h> |
22 | #include <asm/apic.h> | 22 | #include <asm/apic.h> |
23 | #include <asm/uv/uv.h> | 23 | #include <asm/uv/uv.h> |
24 | 24 | ||
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 0a9f2caf358f..6fa84d531f4f 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -34,7 +34,7 @@ | |||
34 | #include <linux/bootmem.h> | 34 | #include <linux/bootmem.h> |
35 | 35 | ||
36 | #include <asm/pat.h> | 36 | #include <asm/pat.h> |
37 | #include <asm/e820.h> | 37 | #include <asm/e820/api.h> |
38 | #include <asm/pci_x86.h> | 38 | #include <asm/pci_x86.h> |
39 | #include <asm/io_apic.h> | 39 | #include <asm/io_apic.h> |
40 | 40 | ||
@@ -398,7 +398,7 @@ void __init pcibios_resource_survey(void) | |||
398 | list_for_each_entry(bus, &pci_root_buses, node) | 398 | list_for_each_entry(bus, &pci_root_buses, node) |
399 | pcibios_allocate_resources(bus, 1); | 399 | pcibios_allocate_resources(bus, 1); |
400 | 400 | ||
401 | e820_reserve_resources_late(); | 401 | e820__reserve_resources_late(); |
402 | /* | 402 | /* |
403 | * Insert the IO APIC resources after PCI initialization has | 403 | * Insert the IO APIC resources after PCI initialization has |
404 | * occurred to handle IO APICS that are mapped in on a BAR in | 404 | * occurred to handle IO APICS that are mapped in on a BAR in |
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index dd30b7e08bc2..d1b47d5bc9c3 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | #include <linux/rculist.h> | 20 | #include <linux/rculist.h> |
21 | #include <asm/e820.h> | 21 | #include <asm/e820/api.h> |
22 | #include <asm/pci_x86.h> | 22 | #include <asm/pci_x86.h> |
23 | #include <asm/acpi.h> | 23 | #include <asm/acpi.h> |
24 | 24 | ||
@@ -423,7 +423,7 @@ static acpi_status find_mboard_resource(acpi_handle handle, u32 lvl, | |||
423 | return AE_OK; | 423 | return AE_OK; |
424 | } | 424 | } |
425 | 425 | ||
426 | static int is_acpi_reserved(u64 start, u64 end, unsigned not_used) | 426 | static bool is_acpi_reserved(u64 start, u64 end, unsigned not_used) |
427 | { | 427 | { |
428 | struct resource mcfg_res; | 428 | struct resource mcfg_res; |
429 | 429 | ||
@@ -440,11 +440,11 @@ static int is_acpi_reserved(u64 start, u64 end, unsigned not_used) | |||
440 | return mcfg_res.flags; | 440 | return mcfg_res.flags; |
441 | } | 441 | } |
442 | 442 | ||
443 | typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type); | 443 | typedef bool (*check_reserved_t)(u64 start, u64 end, unsigned type); |
444 | 444 | ||
445 | static int __ref is_mmconf_reserved(check_reserved_t is_reserved, | 445 | static bool __ref is_mmconf_reserved(check_reserved_t is_reserved, |
446 | struct pci_mmcfg_region *cfg, | 446 | struct pci_mmcfg_region *cfg, |
447 | struct device *dev, int with_e820) | 447 | struct device *dev, int with_e820) |
448 | { | 448 | { |
449 | u64 addr = cfg->res.start; | 449 | u64 addr = cfg->res.start; |
450 | u64 size = resource_size(&cfg->res); | 450 | u64 size = resource_size(&cfg->res); |
@@ -452,7 +452,7 @@ static int __ref is_mmconf_reserved(check_reserved_t is_reserved, | |||
452 | int num_buses; | 452 | int num_buses; |
453 | char *method = with_e820 ? "E820" : "ACPI motherboard resources"; | 453 | char *method = with_e820 ? "E820" : "ACPI motherboard resources"; |
454 | 454 | ||
455 | while (!is_reserved(addr, addr + size, E820_RESERVED)) { | 455 | while (!is_reserved(addr, addr + size, E820_TYPE_RESERVED)) { |
456 | size >>= 1; | 456 | size >>= 1; |
457 | if (size < (16UL<<20)) | 457 | if (size < (16UL<<20)) |
458 | break; | 458 | break; |
@@ -494,8 +494,8 @@ static int __ref is_mmconf_reserved(check_reserved_t is_reserved, | |||
494 | return 1; | 494 | return 1; |
495 | } | 495 | } |
496 | 496 | ||
497 | static int __ref pci_mmcfg_check_reserved(struct device *dev, | 497 | static bool __ref |
498 | struct pci_mmcfg_region *cfg, int early) | 498 | pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early) |
499 | { | 499 | { |
500 | if (!early && !acpi_disabled) { | 500 | if (!early && !acpi_disabled) { |
501 | if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0)) | 501 | if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0)) |
@@ -514,7 +514,7 @@ static int __ref pci_mmcfg_check_reserved(struct device *dev, | |||
514 | } | 514 | } |
515 | 515 | ||
516 | /* | 516 | /* |
517 | * e820_all_mapped() is marked as __init. | 517 | * e820__mapped_all() is marked as __init. |
518 | * All entries from ACPI MCFG table have been checked at boot time. | 518 | * All entries from ACPI MCFG table have been checked at boot time. |
519 | * For MCFG information constructed from hotpluggable host bridge's | 519 | * For MCFG information constructed from hotpluggable host bridge's |
520 | * _CBA method, just assume it's reserved. | 520 | * _CBA method, just assume it's reserved. |
@@ -525,7 +525,7 @@ static int __ref pci_mmcfg_check_reserved(struct device *dev, | |||
525 | /* Don't try to do this check unless configuration | 525 | /* Don't try to do this check unless configuration |
526 | type 1 is available. how about type 2 ?*/ | 526 | type 1 is available. how about type 2 ?*/ |
527 | if (raw_pci_ops) | 527 | if (raw_pci_ops) |
528 | return is_mmconf_reserved(e820_all_mapped, cfg, dev, 1); | 528 | return is_mmconf_reserved(e820__mapped_all, cfg, dev, 1); |
529 | 529 | ||
530 | return 0; | 530 | return 0; |
531 | } | 531 | } |
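
The mmconfig-shared.c hunks above only switch the reservation helpers to bool and to the E820_TYPE_RESERVED name; the shrink-until-covered loop in is_mmconf_reserved() is unchanged: the MMCONFIG window is halved until the remaining prefix is reported as reserved, and anything below 16 MiB is rejected. A minimal userspace sketch of that loop follows; is_reserved() is a stub standing in for e820__mapped_all()/is_acpi_reserved(), and the window address and size are made up for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_reserved(uint64_t start, uint64_t end)
{
    /* Stub: pretend the firmware reserves only the first 32 MiB of the window. */
    return end - start <= (32ULL << 20);
}

static uint64_t usable_mmconf_size(uint64_t addr, uint64_t size)
{
    /* Halve the window until the remaining prefix is marked reserved. */
    while (!is_reserved(addr, addr + size)) {
        size >>= 1;
        if (size < (16ULL << 20))
            return 0;        /* less than 16 MiB left: reject the region */
    }
    return size;
}

int main(void)
{
    printf("usable: %llu MiB\n",
           (unsigned long long)(usable_mmconf_size(0xe0000000ULL, 256ULL << 20) >> 20));
    return 0;
}
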
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c index 43984bc1665a..3e9e166f6408 100644 --- a/arch/x86/pci/mmconfig_32.c +++ b/arch/x86/pci/mmconfig_32.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/rcupdate.h> | 14 | #include <linux/rcupdate.h> |
15 | #include <asm/e820.h> | 15 | #include <asm/e820/api.h> |
16 | #include <asm/pci_x86.h> | 16 | #include <asm/pci_x86.h> |
17 | 17 | ||
18 | /* Assume systems with more busses have correct MCFG */ | 18 | /* Assume systems with more busses have correct MCFG */ |
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c index bea52496aea6..f1c1aa0430ae 100644 --- a/arch/x86/pci/mmconfig_64.c +++ b/arch/x86/pci/mmconfig_64.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/acpi.h> | 10 | #include <linux/acpi.h> |
11 | #include <linux/bitmap.h> | 11 | #include <linux/bitmap.h> |
12 | #include <linux/rcupdate.h> | 12 | #include <linux/rcupdate.h> |
13 | #include <asm/e820.h> | 13 | #include <asm/e820/api.h> |
14 | #include <asm/pci_x86.h> | 14 | #include <asm/pci_x86.h> |
15 | 15 | ||
16 | #define PREFIX "PCI: " | 16 | #define PREFIX "PCI: " |
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index 1d97cea3b3a4..29e9ba6ace9d 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c | |||
@@ -7,7 +7,9 @@ | |||
7 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
10 | |||
10 | #include <asm/pci_x86.h> | 11 | #include <asm/pci_x86.h> |
12 | #include <asm/e820/types.h> | ||
11 | #include <asm/pci-functions.h> | 13 | #include <asm/pci-functions.h> |
12 | #include <asm/cacheflush.h> | 14 | #include <asm/cacheflush.h> |
13 | 15 | ||
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 565dff3c9a12..a15cf815ac4e 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -47,6 +47,7 @@ | |||
47 | 47 | ||
48 | #include <asm/setup.h> | 48 | #include <asm/setup.h> |
49 | #include <asm/efi.h> | 49 | #include <asm/efi.h> |
50 | #include <asm/e820/api.h> | ||
50 | #include <asm/time.h> | 51 | #include <asm/time.h> |
51 | #include <asm/cacheflush.h> | 52 | #include <asm/cacheflush.h> |
52 | #include <asm/tlbflush.h> | 53 | #include <asm/tlbflush.h> |
@@ -139,21 +140,21 @@ static void __init do_add_efi_memmap(void) | |||
139 | case EFI_BOOT_SERVICES_DATA: | 140 | case EFI_BOOT_SERVICES_DATA: |
140 | case EFI_CONVENTIONAL_MEMORY: | 141 | case EFI_CONVENTIONAL_MEMORY: |
141 | if (md->attribute & EFI_MEMORY_WB) | 142 | if (md->attribute & EFI_MEMORY_WB) |
142 | e820_type = E820_RAM; | 143 | e820_type = E820_TYPE_RAM; |
143 | else | 144 | else |
144 | e820_type = E820_RESERVED; | 145 | e820_type = E820_TYPE_RESERVED; |
145 | break; | 146 | break; |
146 | case EFI_ACPI_RECLAIM_MEMORY: | 147 | case EFI_ACPI_RECLAIM_MEMORY: |
147 | e820_type = E820_ACPI; | 148 | e820_type = E820_TYPE_ACPI; |
148 | break; | 149 | break; |
149 | case EFI_ACPI_MEMORY_NVS: | 150 | case EFI_ACPI_MEMORY_NVS: |
150 | e820_type = E820_NVS; | 151 | e820_type = E820_TYPE_NVS; |
151 | break; | 152 | break; |
152 | case EFI_UNUSABLE_MEMORY: | 153 | case EFI_UNUSABLE_MEMORY: |
153 | e820_type = E820_UNUSABLE; | 154 | e820_type = E820_TYPE_UNUSABLE; |
154 | break; | 155 | break; |
155 | case EFI_PERSISTENT_MEMORY: | 156 | case EFI_PERSISTENT_MEMORY: |
156 | e820_type = E820_PMEM; | 157 | e820_type = E820_TYPE_PMEM; |
157 | break; | 158 | break; |
158 | default: | 159 | default: |
159 | /* | 160 | /* |
@@ -161,12 +162,12 @@ static void __init do_add_efi_memmap(void) | |||
161 | * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO | 162 | * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO |
162 | * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE | 163 | * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE |
163 | */ | 164 | */ |
164 | e820_type = E820_RESERVED; | 165 | e820_type = E820_TYPE_RESERVED; |
165 | break; | 166 | break; |
166 | } | 167 | } |
167 | e820_add_region(start, size, e820_type); | 168 | e820__range_add(start, size, e820_type); |
168 | } | 169 | } |
169 | sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map); | 170 | e820__update_table(e820_table); |
170 | } | 171 | } |
171 | 172 | ||
172 | int __init efi_memblock_x86_reserve_range(void) | 173 | int __init efi_memblock_x86_reserve_range(void) |
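
The do_add_efi_memmap() hunk above only renames constants and helpers; the logic is still: translate each EFI descriptor type to an E820 type, register the range with e820__range_add(), then sort and merge the table once with e820__update_table(). A condensed stand-alone sketch of that type mapping, using locally defined stand-in enums rather than the kernel headers (the numeric values follow the usual e820 convention):

#include <stdio.h>

enum efi_mem   { EFI_CONVENTIONAL_MEMORY, EFI_ACPI_RECLAIM_MEMORY,
                 EFI_ACPI_MEMORY_NVS, EFI_UNUSABLE_MEMORY,
                 EFI_PERSISTENT_MEMORY, EFI_OTHER };
enum e820_type { E820_TYPE_RAM = 1, E820_TYPE_RESERVED = 2, E820_TYPE_ACPI = 3,
                 E820_TYPE_NVS = 4, E820_TYPE_UNUSABLE = 5, E820_TYPE_PMEM = 7 };

static enum e820_type efi_to_e820(enum efi_mem md, int write_back)
{
    switch (md) {
    case EFI_CONVENTIONAL_MEMORY:
        /* Write-back capable memory becomes RAM, anything else is reserved. */
        return write_back ? E820_TYPE_RAM : E820_TYPE_RESERVED;
    case EFI_ACPI_RECLAIM_MEMORY:   return E820_TYPE_ACPI;
    case EFI_ACPI_MEMORY_NVS:       return E820_TYPE_NVS;
    case EFI_UNUSABLE_MEMORY:       return E820_TYPE_UNUSABLE;
    case EFI_PERSISTENT_MEMORY:     return E820_TYPE_PMEM;
    default:                        return E820_TYPE_RESERVED;  /* MMIO, runtime code/data, ... */
    }
}

int main(void)
{
    printf("%d\n", efi_to_e820(EFI_CONVENTIONAL_MEMORY, 1));    /* prints 1: E820_TYPE_RAM */
    return 0;
}
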
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 6cbf9e036aa8..642a8698ad61 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -35,7 +35,7 @@ | |||
35 | 35 | ||
36 | #include <asm/setup.h> | 36 | #include <asm/setup.h> |
37 | #include <asm/page.h> | 37 | #include <asm/page.h> |
38 | #include <asm/e820.h> | 38 | #include <asm/e820/api.h> |
39 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
40 | #include <asm/tlbflush.h> | 40 | #include <asm/tlbflush.h> |
41 | #include <asm/proto.h> | 41 | #include <asm/proto.h> |
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index cdfe8c628959..26615991d69c 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/bootmem.h> | 11 | #include <linux/bootmem.h> |
12 | #include <linux/acpi.h> | 12 | #include <linux/acpi.h> |
13 | #include <linux/dmi.h> | 13 | #include <linux/dmi.h> |
14 | |||
15 | #include <asm/e820/api.h> | ||
14 | #include <asm/efi.h> | 16 | #include <asm/efi.h> |
15 | #include <asm/uv/uv.h> | 17 | #include <asm/uv/uv.h> |
16 | 18 | ||
@@ -244,14 +246,14 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) | |||
244 | * else. We must only reserve (and then free) regions: | 246 | * else. We must only reserve (and then free) regions: |
245 | * | 247 | * |
246 | * - Not within any part of the kernel | 248 | * - Not within any part of the kernel |
247 | * - Not the BIOS reserved area (E820_RESERVED, E820_NVS, etc) | 249 | * - Not the BIOS reserved area (E820_TYPE_RESERVED, E820_TYPE_NVS, etc) |
248 | */ | 250 | */ |
249 | static bool can_free_region(u64 start, u64 size) | 251 | static bool can_free_region(u64 start, u64 size) |
250 | { | 252 | { |
251 | if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end)) | 253 | if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end)) |
252 | return false; | 254 | return false; |
253 | 255 | ||
254 | if (!e820_all_mapped(start, start+size, E820_RAM)) | 256 | if (!e820__mapped_all(start, start+size, E820_TYPE_RAM)) |
255 | return false; | 257 | return false; |
256 | 258 | ||
257 | return true; | 259 | return true; |
@@ -284,7 +286,7 @@ void __init efi_reserve_boot_services(void) | |||
284 | * A good example of a critical region that must not be | 286 | * A good example of a critical region that must not be |
285 | * freed is page zero (first 4Kb of memory), which may | 287 | * freed is page zero (first 4Kb of memory), which may |
286 | * contain boot services code/data but is marked | 288 | * contain boot services code/data but is marked |
287 | * E820_RESERVED by trim_bios_range(). | 289 | * E820_TYPE_RESERVED by trim_bios_range(). |
288 | */ | 290 | */ |
289 | if (!already_reserved) { | 291 | if (!already_reserved) { |
290 | memblock_reserve(start, size); | 292 | memblock_reserve(start, size); |
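
can_free_region() above keeps its policy under the new names: a boot-services range may be released only if it does not overlap the kernel image and e820__mapped_all() reports the whole range as E820_TYPE_RAM. A small sketch of that policy with made-up addresses; ram_covers() is a stub for the e820 query and the kernel bounds are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const uint64_t kernel_start = 0x01000000, kernel_end = 0x02000000;   /* made-up layout */

static bool ram_covers(uint64_t start, uint64_t end)
{
    /* Stub: pretend the first 2 GiB is all RAM in the firmware map. */
    return start < end && end <= (2ULL << 30);
}

static bool can_free_region(uint64_t start, uint64_t size)
{
    /* Never free anything overlapping the kernel image... */
    if (start + size > kernel_start && start <= kernel_end)
        return false;
    /* ...and only free ranges the firmware map says are entirely RAM. */
    return ram_covers(start, start + size);
}

int main(void)
{
    printf("%d %d\n",
           can_free_region(0x03000000, 0x100000),   /* past the kernel image: 1 */
           can_free_region(0x01800000, 0x1000));    /* inside the kernel image: 0 */
    return 0;
}
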
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index ded2e8272382..053801b022dd 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <crypto/hash.h> | 17 | #include <crypto/hash.h> |
18 | 18 | ||
19 | #include <asm/e820/api.h> | ||
19 | #include <asm/init.h> | 20 | #include <asm/init.h> |
20 | #include <asm/proto.h> | 21 | #include <asm/proto.h> |
21 | #include <asm/page.h> | 22 | #include <asm/page.h> |
@@ -195,12 +196,12 @@ struct restore_data_record { | |||
195 | 196 | ||
196 | #if IS_BUILTIN(CONFIG_CRYPTO_MD5) | 197 | #if IS_BUILTIN(CONFIG_CRYPTO_MD5) |
197 | /** | 198 | /** |
198 | * get_e820_md5 - calculate md5 according to given e820 map | 199 | * get_e820_md5 - calculate md5 according to given e820 table |
199 | * | 200 | * |
200 | * @map: the e820 map to be calculated | 201 | * @table: the e820 table to be calculated |
201 | * @buf: the md5 result to be stored to | 202 | * @buf: the md5 result to be stored to |
202 | */ | 203 | */ |
203 | static int get_e820_md5(struct e820map *map, void *buf) | 204 | static int get_e820_md5(struct e820_table *table, void *buf) |
204 | { | 205 | { |
205 | struct scatterlist sg; | 206 | struct scatterlist sg; |
206 | struct crypto_ahash *tfm; | 207 | struct crypto_ahash *tfm; |
@@ -213,10 +214,9 @@ static int get_e820_md5(struct e820map *map, void *buf) | |||
213 | 214 | ||
214 | { | 215 | { |
215 | AHASH_REQUEST_ON_STACK(req, tfm); | 216 | AHASH_REQUEST_ON_STACK(req, tfm); |
216 | size = offsetof(struct e820map, map) | 217 | size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry) * table->nr_entries; |
217 | + sizeof(struct e820entry) * map->nr_map; | ||
218 | ahash_request_set_tfm(req, tfm); | 218 | ahash_request_set_tfm(req, tfm); |
219 | sg_init_one(&sg, (u8 *)map, size); | 219 | sg_init_one(&sg, (u8 *)table, size); |
220 | ahash_request_set_callback(req, 0, NULL, NULL); | 220 | ahash_request_set_callback(req, 0, NULL, NULL); |
221 | ahash_request_set_crypt(req, &sg, buf, size); | 221 | ahash_request_set_crypt(req, &sg, buf, size); |
222 | 222 | ||
@@ -231,7 +231,7 @@ static int get_e820_md5(struct e820map *map, void *buf) | |||
231 | 231 | ||
232 | static void hibernation_e820_save(void *buf) | 232 | static void hibernation_e820_save(void *buf) |
233 | { | 233 | { |
234 | get_e820_md5(e820_saved, buf); | 234 | get_e820_md5(e820_table_firmware, buf); |
235 | } | 235 | } |
236 | 236 | ||
237 | static bool hibernation_e820_mismatch(void *buf) | 237 | static bool hibernation_e820_mismatch(void *buf) |
@@ -244,7 +244,7 @@ static bool hibernation_e820_mismatch(void *buf) | |||
244 | if (!memcmp(result, buf, MD5_DIGEST_SIZE)) | 244 | if (!memcmp(result, buf, MD5_DIGEST_SIZE)) |
245 | return false; | 245 | return false; |
246 | 246 | ||
247 | ret = get_e820_md5(e820_saved, result); | 247 | ret = get_e820_md5(e820_table_firmware, result); |
248 | if (ret) | 248 | if (ret) |
249 | return true; | 249 | return true; |
250 | 250 | ||
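
The hibernation hunk above digests only the populated part of the firmware-provided table: the header up to the entries[] array plus nr_entries complete entries, rather than the whole fixed-size array. A userspace sketch of that length computation with simplified stand-in structures (not the kernel's exact layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's e820_entry/e820_table. */
struct e820_entry { uint64_t addr, size; uint32_t type; };
struct e820_table { uint32_t nr_entries; struct e820_entry entries[128]; };

int main(void)
{
    struct e820_table t = { .nr_entries = 3 };

    /* Header up to entries[], plus only the populated entries. */
    size_t len = offsetof(struct e820_table, entries) +
                 sizeof(struct e820_entry) * t.nr_entries;

    printf("hash %zu of %zu bytes\n", len, sizeof(t));
    return 0;
}
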
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index ec1d5c46e58f..78243454f5e6 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -76,6 +76,7 @@ | |||
76 | #include <asm/mwait.h> | 76 | #include <asm/mwait.h> |
77 | #include <asm/pci_x86.h> | 77 | #include <asm/pci_x86.h> |
78 | #include <asm/cpu.h> | 78 | #include <asm/cpu.h> |
79 | #include <asm/e820/api.h> | ||
79 | 80 | ||
80 | #ifdef CONFIG_ACPI | 81 | #ifdef CONFIG_ACPI |
81 | #include <linux/acpi.h> | 82 | #include <linux/acpi.h> |
@@ -1690,34 +1691,32 @@ static void __init init_pvh_bootparams(void) | |||
1690 | 1691 | ||
1691 | memset(&pvh_bootparams, 0, sizeof(pvh_bootparams)); | 1692 | memset(&pvh_bootparams, 0, sizeof(pvh_bootparams)); |
1692 | 1693 | ||
1693 | memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_map); | 1694 | memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table); |
1694 | set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_map); | 1695 | set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table); |
1695 | rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap); | 1696 | rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap); |
1696 | if (rc) { | 1697 | if (rc) { |
1697 | xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc); | 1698 | xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc); |
1698 | BUG(); | 1699 | BUG(); |
1699 | } | 1700 | } |
1700 | 1701 | ||
1701 | if (memmap.nr_entries < E820MAX - 1) { | 1702 | if (memmap.nr_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) { |
1702 | pvh_bootparams.e820_map[memmap.nr_entries].addr = | 1703 | pvh_bootparams.e820_table[memmap.nr_entries].addr = |
1703 | ISA_START_ADDRESS; | 1704 | ISA_START_ADDRESS; |
1704 | pvh_bootparams.e820_map[memmap.nr_entries].size = | 1705 | pvh_bootparams.e820_table[memmap.nr_entries].size = |
1705 | ISA_END_ADDRESS - ISA_START_ADDRESS; | 1706 | ISA_END_ADDRESS - ISA_START_ADDRESS; |
1706 | pvh_bootparams.e820_map[memmap.nr_entries].type = | 1707 | pvh_bootparams.e820_table[memmap.nr_entries].type = |
1707 | E820_RESERVED; | 1708 | E820_TYPE_RESERVED; |
1708 | memmap.nr_entries++; | 1709 | memmap.nr_entries++; |
1709 | } else | 1710 | } else |
1710 | xen_raw_printk("Warning: Can fit ISA range into e820\n"); | 1711 | xen_raw_printk("Warning: Can fit ISA range into e820\n"); |
1711 | 1712 | ||
1712 | sanitize_e820_map(pvh_bootparams.e820_map, | ||
1713 | ARRAY_SIZE(pvh_bootparams.e820_map), | ||
1714 | &memmap.nr_entries); | ||
1715 | |||
1716 | pvh_bootparams.e820_entries = memmap.nr_entries; | 1713 | pvh_bootparams.e820_entries = memmap.nr_entries; |
1717 | for (i = 0; i < pvh_bootparams.e820_entries; i++) | 1714 | for (i = 0; i < pvh_bootparams.e820_entries; i++) |
1718 | e820_add_region(pvh_bootparams.e820_map[i].addr, | 1715 | e820__range_add(pvh_bootparams.e820_table[i].addr, |
1719 | pvh_bootparams.e820_map[i].size, | 1716 | pvh_bootparams.e820_table[i].size, |
1720 | pvh_bootparams.e820_map[i].type); | 1717 | pvh_bootparams.e820_table[i].type); |
1718 | |||
1719 | e820__update_table(e820_table); | ||
1721 | 1720 | ||
1722 | pvh_bootparams.hdr.cmd_line_ptr = | 1721 | pvh_bootparams.hdr.cmd_line_ptr = |
1723 | pvh_start_info.cmdline_paddr; | 1722 | pvh_start_info.cmdline_paddr; |
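
init_pvh_bootparams() now fills the renamed boot_params.e820_table[] from the hypervisor's map, appends a reserved entry for the legacy ISA hole only while the fixed-size array still has room, replays the entries through e820__range_add() and finally calls e820__update_table(). A sketch of the bounds-checked append with simplified stand-in types; the 0xa0000-0x100000 ISA window and the 128-entry limit are assumptions made for the example.

#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES  128U          /* stands in for E820_MAX_ENTRIES_ZEROPAGE */
#define ISA_START    0x0a0000ULL
#define ISA_END      0x100000ULL

struct entry { uint64_t addr, size; uint32_t type; };

static unsigned int add_isa_hole(struct entry *tbl, unsigned int nr)
{
    if (nr >= MAX_ENTRIES - 1)
        return nr;                 /* no room left: leave the table alone */

    tbl[nr].addr = ISA_START;
    tbl[nr].size = ISA_END - ISA_START;
    tbl[nr].type = 2;              /* E820_TYPE_RESERVED */
    return nr + 1;
}

int main(void)
{
    struct entry tbl[MAX_ENTRIES] = { { 0, ISA_START, 1 } };   /* one low-RAM entry */
    unsigned int nr = add_isa_hole(tbl, 1);

    printf("%u entries, last type %u\n", nr, tbl[nr - 1].type);
    return 0;
}
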
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 37cb5aad71de..1d68be6e3ff1 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -58,7 +58,7 @@ | |||
58 | #include <asm/mmu_context.h> | 58 | #include <asm/mmu_context.h> |
59 | #include <asm/setup.h> | 59 | #include <asm/setup.h> |
60 | #include <asm/paravirt.h> | 60 | #include <asm/paravirt.h> |
61 | #include <asm/e820.h> | 61 | #include <asm/e820/api.h> |
62 | #include <asm/linkage.h> | 62 | #include <asm/linkage.h> |
63 | #include <asm/page.h> | 63 | #include <asm/page.h> |
64 | #include <asm/init.h> | 64 | #include <asm/init.h> |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index a8c306cf8868..a5bf7c451435 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include <asm/elf.h> | 15 | #include <asm/elf.h> |
16 | #include <asm/vdso.h> | 16 | #include <asm/vdso.h> |
17 | #include <asm/e820.h> | 17 | #include <asm/e820/api.h> |
18 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
19 | #include <asm/acpi.h> | 19 | #include <asm/acpi.h> |
20 | #include <asm/numa.h> | 20 | #include <asm/numa.h> |
@@ -41,8 +41,7 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; | |||
41 | unsigned long xen_released_pages; | 41 | unsigned long xen_released_pages; |
42 | 42 | ||
43 | /* E820 map used during setting up memory. */ | 43 | /* E820 map used during setting up memory. */ |
44 | static struct e820entry xen_e820_map[E820_X_MAX] __initdata; | 44 | static struct e820_table xen_e820_table __initdata; |
45 | static u32 xen_e820_map_entries __initdata; | ||
46 | 45 | ||
47 | /* | 46 | /* |
48 | * Buffer used to remap identity mapped pages. We only need the virtual space. | 47 | * Buffer used to remap identity mapped pages. We only need the virtual space. |
@@ -198,15 +197,15 @@ void __init xen_inv_extra_mem(void) | |||
198 | */ | 197 | */ |
199 | static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn) | 198 | static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn) |
200 | { | 199 | { |
201 | const struct e820entry *entry = xen_e820_map; | 200 | const struct e820_entry *entry = xen_e820_table.entries; |
202 | unsigned int i; | 201 | unsigned int i; |
203 | unsigned long done = 0; | 202 | unsigned long done = 0; |
204 | 203 | ||
205 | for (i = 0; i < xen_e820_map_entries; i++, entry++) { | 204 | for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) { |
206 | unsigned long s_pfn; | 205 | unsigned long s_pfn; |
207 | unsigned long e_pfn; | 206 | unsigned long e_pfn; |
208 | 207 | ||
209 | if (entry->type != E820_RAM) | 208 | if (entry->type != E820_TYPE_RAM) |
210 | continue; | 209 | continue; |
211 | 210 | ||
212 | e_pfn = PFN_DOWN(entry->addr + entry->size); | 211 | e_pfn = PFN_DOWN(entry->addr + entry->size); |
@@ -457,7 +456,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, | |||
457 | { | 456 | { |
458 | phys_addr_t start = 0; | 457 | phys_addr_t start = 0; |
459 | unsigned long ret_val = 0; | 458 | unsigned long ret_val = 0; |
460 | const struct e820entry *entry = xen_e820_map; | 459 | const struct e820_entry *entry = xen_e820_table.entries; |
461 | int i; | 460 | int i; |
462 | 461 | ||
463 | /* | 462 | /* |
@@ -471,13 +470,13 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, | |||
471 | * example) the DMI tables in a reserved region that begins on | 470 | * example) the DMI tables in a reserved region that begins on |
472 | * a non-page boundary. | 471 | * a non-page boundary. |
473 | */ | 472 | */ |
474 | for (i = 0; i < xen_e820_map_entries; i++, entry++) { | 473 | for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) { |
475 | phys_addr_t end = entry->addr + entry->size; | 474 | phys_addr_t end = entry->addr + entry->size; |
476 | if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) { | 475 | if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) { |
477 | unsigned long start_pfn = PFN_DOWN(start); | 476 | unsigned long start_pfn = PFN_DOWN(start); |
478 | unsigned long end_pfn = PFN_UP(end); | 477 | unsigned long end_pfn = PFN_UP(end); |
479 | 478 | ||
480 | if (entry->type == E820_RAM) | 479 | if (entry->type == E820_TYPE_RAM) |
481 | end_pfn = PFN_UP(entry->addr); | 480 | end_pfn = PFN_UP(entry->addr); |
482 | 481 | ||
483 | if (start_pfn < end_pfn) | 482 | if (start_pfn < end_pfn) |
@@ -591,28 +590,28 @@ static void __init xen_align_and_add_e820_region(phys_addr_t start, | |||
591 | phys_addr_t end = start + size; | 590 | phys_addr_t end = start + size; |
592 | 591 | ||
593 | /* Align RAM regions to page boundaries. */ | 592 | /* Align RAM regions to page boundaries. */ |
594 | if (type == E820_RAM) { | 593 | if (type == E820_TYPE_RAM) { |
595 | start = PAGE_ALIGN(start); | 594 | start = PAGE_ALIGN(start); |
596 | end &= ~((phys_addr_t)PAGE_SIZE - 1); | 595 | end &= ~((phys_addr_t)PAGE_SIZE - 1); |
597 | } | 596 | } |
598 | 597 | ||
599 | e820_add_region(start, end - start, type); | 598 | e820__range_add(start, end - start, type); |
600 | } | 599 | } |
601 | 600 | ||
602 | static void __init xen_ignore_unusable(void) | 601 | static void __init xen_ignore_unusable(void) |
603 | { | 602 | { |
604 | struct e820entry *entry = xen_e820_map; | 603 | struct e820_entry *entry = xen_e820_table.entries; |
605 | unsigned int i; | 604 | unsigned int i; |
606 | 605 | ||
607 | for (i = 0; i < xen_e820_map_entries; i++, entry++) { | 606 | for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) { |
608 | if (entry->type == E820_UNUSABLE) | 607 | if (entry->type == E820_TYPE_UNUSABLE) |
609 | entry->type = E820_RAM; | 608 | entry->type = E820_TYPE_RAM; |
610 | } | 609 | } |
611 | } | 610 | } |
612 | 611 | ||
613 | bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) | 612 | bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) |
614 | { | 613 | { |
615 | struct e820entry *entry; | 614 | struct e820_entry *entry; |
616 | unsigned mapcnt; | 615 | unsigned mapcnt; |
617 | phys_addr_t end; | 616 | phys_addr_t end; |
618 | 617 | ||
@@ -620,10 +619,10 @@ bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) | |||
620 | return false; | 619 | return false; |
621 | 620 | ||
622 | end = start + size; | 621 | end = start + size; |
623 | entry = xen_e820_map; | 622 | entry = xen_e820_table.entries; |
624 | 623 | ||
625 | for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) { | 624 | for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) { |
626 | if (entry->type == E820_RAM && entry->addr <= start && | 625 | if (entry->type == E820_TYPE_RAM && entry->addr <= start && |
627 | (entry->addr + entry->size) >= end) | 626 | (entry->addr + entry->size) >= end) |
628 | return false; | 627 | return false; |
629 | 628 | ||
@@ -645,10 +644,10 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size) | |||
645 | { | 644 | { |
646 | unsigned mapcnt; | 645 | unsigned mapcnt; |
647 | phys_addr_t addr, start; | 646 | phys_addr_t addr, start; |
648 | struct e820entry *entry = xen_e820_map; | 647 | struct e820_entry *entry = xen_e820_table.entries; |
649 | 648 | ||
650 | for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) { | 649 | for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) { |
651 | if (entry->type != E820_RAM || entry->size < size) | 650 | if (entry->type != E820_TYPE_RAM || entry->size < size) |
652 | continue; | 651 | continue; |
653 | start = entry->addr; | 652 | start = entry->addr; |
654 | for (addr = start; addr < start + size; addr += PAGE_SIZE) { | 653 | for (addr = start; addr < start + size; addr += PAGE_SIZE) { |
@@ -750,8 +749,8 @@ char * __init xen_memory_setup(void) | |||
750 | max_pfn = min(max_pfn, xen_start_info->nr_pages); | 749 | max_pfn = min(max_pfn, xen_start_info->nr_pages); |
751 | mem_end = PFN_PHYS(max_pfn); | 750 | mem_end = PFN_PHYS(max_pfn); |
752 | 751 | ||
753 | memmap.nr_entries = ARRAY_SIZE(xen_e820_map); | 752 | memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries); |
754 | set_xen_guest_handle(memmap.buffer, xen_e820_map); | 753 | set_xen_guest_handle(memmap.buffer, xen_e820_table.entries); |
755 | 754 | ||
756 | op = xen_initial_domain() ? | 755 | op = xen_initial_domain() ? |
757 | XENMEM_machine_memory_map : | 756 | XENMEM_machine_memory_map : |
@@ -760,16 +759,16 @@ char * __init xen_memory_setup(void) | |||
760 | if (rc == -ENOSYS) { | 759 | if (rc == -ENOSYS) { |
761 | BUG_ON(xen_initial_domain()); | 760 | BUG_ON(xen_initial_domain()); |
762 | memmap.nr_entries = 1; | 761 | memmap.nr_entries = 1; |
763 | xen_e820_map[0].addr = 0ULL; | 762 | xen_e820_table.entries[0].addr = 0ULL; |
764 | xen_e820_map[0].size = mem_end; | 763 | xen_e820_table.entries[0].size = mem_end; |
765 | /* 8MB slack (to balance backend allocations). */ | 764 | /* 8MB slack (to balance backend allocations). */ |
766 | xen_e820_map[0].size += 8ULL << 20; | 765 | xen_e820_table.entries[0].size += 8ULL << 20; |
767 | xen_e820_map[0].type = E820_RAM; | 766 | xen_e820_table.entries[0].type = E820_TYPE_RAM; |
768 | rc = 0; | 767 | rc = 0; |
769 | } | 768 | } |
770 | BUG_ON(rc); | 769 | BUG_ON(rc); |
771 | BUG_ON(memmap.nr_entries == 0); | 770 | BUG_ON(memmap.nr_entries == 0); |
772 | xen_e820_map_entries = memmap.nr_entries; | 771 | xen_e820_table.nr_entries = memmap.nr_entries; |
773 | 772 | ||
774 | /* | 773 | /* |
775 | * Xen won't allow a 1:1 mapping to be created to UNUSABLE | 774 | * Xen won't allow a 1:1 mapping to be created to UNUSABLE |
@@ -783,8 +782,7 @@ char * __init xen_memory_setup(void) | |||
783 | xen_ignore_unusable(); | 782 | xen_ignore_unusable(); |
784 | 783 | ||
785 | /* Make sure the Xen-supplied memory map is well-ordered. */ | 784 | /* Make sure the Xen-supplied memory map is well-ordered. */ |
786 | sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map), | 785 | e820__update_table(&xen_e820_table); |
787 | &xen_e820_map_entries); | ||
788 | 786 | ||
789 | max_pages = xen_get_max_pages(); | 787 | max_pages = xen_get_max_pages(); |
790 | 788 | ||
@@ -811,15 +809,15 @@ char * __init xen_memory_setup(void) | |||
811 | extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), | 809 | extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), |
812 | extra_pages, max_pages - max_pfn); | 810 | extra_pages, max_pages - max_pfn); |
813 | i = 0; | 811 | i = 0; |
814 | addr = xen_e820_map[0].addr; | 812 | addr = xen_e820_table.entries[0].addr; |
815 | size = xen_e820_map[0].size; | 813 | size = xen_e820_table.entries[0].size; |
816 | while (i < xen_e820_map_entries) { | 814 | while (i < xen_e820_table.nr_entries) { |
817 | bool discard = false; | 815 | bool discard = false; |
818 | 816 | ||
819 | chunk_size = size; | 817 | chunk_size = size; |
820 | type = xen_e820_map[i].type; | 818 | type = xen_e820_table.entries[i].type; |
821 | 819 | ||
822 | if (type == E820_RAM) { | 820 | if (type == E820_TYPE_RAM) { |
823 | if (addr < mem_end) { | 821 | if (addr < mem_end) { |
824 | chunk_size = min(size, mem_end - addr); | 822 | chunk_size = min(size, mem_end - addr); |
825 | } else if (extra_pages) { | 823 | } else if (extra_pages) { |
@@ -840,9 +838,9 @@ char * __init xen_memory_setup(void) | |||
840 | size -= chunk_size; | 838 | size -= chunk_size; |
841 | if (size == 0) { | 839 | if (size == 0) { |
842 | i++; | 840 | i++; |
843 | if (i < xen_e820_map_entries) { | 841 | if (i < xen_e820_table.nr_entries) { |
844 | addr = xen_e820_map[i].addr; | 842 | addr = xen_e820_table.entries[i].addr; |
845 | size = xen_e820_map[i].size; | 843 | size = xen_e820_table.entries[i].size; |
846 | } | 844 | } |
847 | } | 845 | } |
848 | } | 846 | } |
@@ -858,10 +856,9 @@ char * __init xen_memory_setup(void) | |||
858 | * reserve ISA memory anyway because too many things poke | 856 | * reserve ISA memory anyway because too many things poke |
859 | * about in there. | 857 | * about in there. |
860 | */ | 858 | */ |
861 | e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, | 859 | e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED); |
862 | E820_RESERVED); | ||
863 | 860 | ||
864 | sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map); | 861 | e820__update_table(e820_table); |
865 | 862 | ||
866 | /* | 863 | /* |
867 | * Check whether the kernel itself conflicts with the target E820 map. | 864 | * Check whether the kernel itself conflicts with the target E820 map. |
@@ -915,6 +912,37 @@ char * __init xen_memory_setup(void) | |||
915 | } | 912 | } |
916 | 913 | ||
917 | /* | 914 | /* |
915 | * Machine specific memory setup for auto-translated guests. | ||
916 | */ | ||
917 | char * __init xen_auto_xlated_memory_setup(void) | ||
918 | { | ||
919 | struct xen_memory_map memmap; | ||
920 | int i; | ||
921 | int rc; | ||
922 | |||
923 | memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries); | ||
924 | set_xen_guest_handle(memmap.buffer, xen_e820_table.entries); | ||
925 | |||
926 | rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap); | ||
927 | if (rc < 0) | ||
928 | panic("No memory map (%d)\n", rc); | ||
929 | |||
930 | xen_e820_table.nr_entries = memmap.nr_entries; | ||
931 | |||
932 | e820__update_table(&xen_e820_table); | ||
933 | |||
934 | for (i = 0; i < xen_e820_table.nr_entries; i++) | ||
935 | e820__range_add(xen_e820_table.entries[i].addr, xen_e820_table.entries[i].size, xen_e820_table.entries[i].type); | ||
936 | |||
937 | /* Remove p2m info, it is not needed. */ | ||
938 | xen_start_info->mfn_list = 0; | ||
939 | xen_start_info->first_p2m_pfn = 0; | ||
940 | xen_start_info->nr_p2m_frames = 0; | ||
941 | |||
942 | return "Xen"; | ||
943 | } | ||
944 | |||
945 | /* | ||
918 | * Set the bit indicating "nosegneg" library variants should be used. | 946 | * Set the bit indicating "nosegneg" library variants should be used. |
919 | * We only need to bother in pure 32-bit mode; compat 32-bit processes | 947 | * We only need to bother in pure 32-bit mode; compat 32-bit processes |
920 | * can have un-truncated segments, so wrapping around is allowed. | 948 | * can have un-truncated segments, so wrapping around is allowed. |
@@ -999,8 +1027,8 @@ void __init xen_pvmmu_arch_setup(void) | |||
999 | void __init xen_arch_setup(void) | 1027 | void __init xen_arch_setup(void) |
1000 | { | 1028 | { |
1001 | xen_panic_handler_init(); | 1029 | xen_panic_handler_init(); |
1002 | 1030 | if (!xen_feature(XENFEAT_auto_translated_physmap)) | |
1003 | xen_pvmmu_arch_setup(); | 1031 | xen_pvmmu_arch_setup(); |
1004 | 1032 | ||
1005 | #ifdef CONFIG_ACPI | 1033 | #ifdef CONFIG_ACPI |
1006 | if (!(xen_start_info->flags & SIF_INITDOMAIN)) { | 1034 | if (!(xen_start_info->flags & SIF_INITDOMAIN)) { |
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 0dae722ab2ec..ff425390bfa8 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c | |||
@@ -540,7 +540,7 @@ void __init acpi_table_upgrade(void) | |||
540 | * But it's not enough on X86 because ioremap will | 540 | * But it's not enough on X86 because ioremap will |
541 | * complain later (used by acpi_os_map_memory) that the pages | 541 | * complain later (used by acpi_os_map_memory) that the pages |
542 | * that should get mapped are not marked "reserved". | 542 | * that should get mapped are not marked "reserved". |
543 | * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area) | 543 | * Both memblock_reserve and e820__range_add (via arch_reserve_mem_area) |
544 | * works fine. | 544 | * works fine. |
545 | */ | 545 | */ |
546 | memblock_reserve(acpi_tables_addr, all_tables_size); | 546 | memblock_reserve(acpi_tables_addr, all_tables_size); |
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 0ef350010766..c99cd19d9147 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/agp_backend.h> | 14 | #include <linux/agp_backend.h> |
15 | #include <linux/mmzone.h> | 15 | #include <linux/mmzone.h> |
16 | #include <asm/page.h> /* PAGE_SIZE */ | 16 | #include <asm/page.h> /* PAGE_SIZE */ |
17 | #include <asm/e820.h> | 17 | #include <asm/e820/api.h> |
18 | #include <asm/amd_nb.h> | 18 | #include <asm/amd_nb.h> |
19 | #include <asm/gart.h> | 19 | #include <asm/gart.h> |
20 | #include "agp.h" | 20 | #include "agp.h" |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 4c26dc3a8295..7ae256717a32 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -438,6 +438,7 @@ extern int get_option(char **str, int *pint); | |||
438 | extern char *get_options(const char *str, int nints, int *ints); | 438 | extern char *get_options(const char *str, int nints, int *ints); |
439 | extern unsigned long long memparse(const char *ptr, char **retptr); | 439 | extern unsigned long long memparse(const char *ptr, char **retptr); |
440 | extern bool parse_option_str(const char *str, const char *option); | 440 | extern bool parse_option_str(const char *str, const char *option); |
441 | extern char *next_arg(char *args, char **param, char **val); | ||
441 | 442 | ||
442 | extern int core_kernel_text(unsigned long addr); | 443 | extern int core_kernel_text(unsigned long addr); |
443 | extern int core_kernel_data(unsigned long addr); | 444 | extern int core_kernel_data(unsigned long addr); |
diff --git a/include/xen/page.h b/include/xen/page.h index 9dc46cb8a0fd..064194f6453e 100644 --- a/include/xen/page.h +++ b/include/xen/page.h | |||
@@ -38,7 +38,7 @@ struct xen_memory_region { | |||
38 | unsigned long n_pfns; | 38 | unsigned long n_pfns; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | #define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */ | 41 | #define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820_MAX_ENTRIES_ZEROPAGE */ |
42 | 42 | ||
43 | extern __initdata | 43 | extern __initdata |
44 | struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS]; | 44 | struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS]; |
diff --git a/kernel/params.c b/kernel/params.c index a6d6149c0fe6..60b2d8101355 100644 --- a/kernel/params.c +++ b/kernel/params.c | |||
@@ -160,58 +160,6 @@ static int parse_one(char *param, | |||
160 | return -ENOENT; | 160 | return -ENOENT; |
161 | } | 161 | } |
162 | 162 | ||
163 | /* You can use " around spaces, but can't escape ". */ | ||
164 | /* Hyphens and underscores equivalent in parameter names. */ | ||
165 | static char *next_arg(char *args, char **param, char **val) | ||
166 | { | ||
167 | unsigned int i, equals = 0; | ||
168 | int in_quote = 0, quoted = 0; | ||
169 | char *next; | ||
170 | |||
171 | if (*args == '"') { | ||
172 | args++; | ||
173 | in_quote = 1; | ||
174 | quoted = 1; | ||
175 | } | ||
176 | |||
177 | for (i = 0; args[i]; i++) { | ||
178 | if (isspace(args[i]) && !in_quote) | ||
179 | break; | ||
180 | if (equals == 0) { | ||
181 | if (args[i] == '=') | ||
182 | equals = i; | ||
183 | } | ||
184 | if (args[i] == '"') | ||
185 | in_quote = !in_quote; | ||
186 | } | ||
187 | |||
188 | *param = args; | ||
189 | if (!equals) | ||
190 | *val = NULL; | ||
191 | else { | ||
192 | args[equals] = '\0'; | ||
193 | *val = args + equals + 1; | ||
194 | |||
195 | /* Don't include quotes in value. */ | ||
196 | if (**val == '"') { | ||
197 | (*val)++; | ||
198 | if (args[i-1] == '"') | ||
199 | args[i-1] = '\0'; | ||
200 | } | ||
201 | } | ||
202 | if (quoted && args[i-1] == '"') | ||
203 | args[i-1] = '\0'; | ||
204 | |||
205 | if (args[i]) { | ||
206 | args[i] = '\0'; | ||
207 | next = args + i + 1; | ||
208 | } else | ||
209 | next = args + i; | ||
210 | |||
211 | /* Chew up trailing spaces. */ | ||
212 | return skip_spaces(next); | ||
213 | } | ||
214 | |||
215 | /* Args looks like "foo=bar,bar2 baz=fuz wiz". */ | 163 | /* Args looks like "foo=bar,bar2 baz=fuz wiz". */ |
216 | char *parse_args(const char *doing, | 164 | char *parse_args(const char *doing, |
217 | char *args, | 165 | char *args, |
diff --git a/lib/cmdline.c b/lib/cmdline.c index 8f13cf73c2ec..3c6432df7e63 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/export.h> | 15 | #include <linux/export.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/ctype.h> | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * If a hyphen was found in get_option, this will handle the | 21 | * If a hyphen was found in get_option, this will handle the |
@@ -189,3 +190,58 @@ bool parse_option_str(const char *str, const char *option) | |||
189 | 190 | ||
190 | return false; | 191 | return false; |
191 | } | 192 | } |
193 | |||
194 | /* | ||
195 | * Parse a string to get a param value pair. | ||
196 | * You can use " around spaces, but can't escape ". | ||
197 | * Hyphens and underscores equivalent in parameter names. | ||
198 | */ | ||
199 | char *next_arg(char *args, char **param, char **val) | ||
200 | { | ||
201 | unsigned int i, equals = 0; | ||
202 | int in_quote = 0, quoted = 0; | ||
203 | char *next; | ||
204 | |||
205 | if (*args == '"') { | ||
206 | args++; | ||
207 | in_quote = 1; | ||
208 | quoted = 1; | ||
209 | } | ||
210 | |||
211 | for (i = 0; args[i]; i++) { | ||
212 | if (isspace(args[i]) && !in_quote) | ||
213 | break; | ||
214 | if (equals == 0) { | ||
215 | if (args[i] == '=') | ||
216 | equals = i; | ||
217 | } | ||
218 | if (args[i] == '"') | ||
219 | in_quote = !in_quote; | ||
220 | } | ||
221 | |||
222 | *param = args; | ||
223 | if (!equals) | ||
224 | *val = NULL; | ||
225 | else { | ||
226 | args[equals] = '\0'; | ||
227 | *val = args + equals + 1; | ||
228 | |||
229 | /* Don't include quotes in value. */ | ||
230 | if (**val == '"') { | ||
231 | (*val)++; | ||
232 | if (args[i-1] == '"') | ||
233 | args[i-1] = '\0'; | ||
234 | } | ||
235 | } | ||
236 | if (quoted && args[i-1] == '"') | ||
237 | args[i-1] = '\0'; | ||
238 | |||
239 | if (args[i]) { | ||
240 | args[i] = '\0'; | ||
241 | next = args + i + 1; | ||
242 | } else | ||
243 | next = args + i; | ||
244 | |||
245 | /* Chew up trailing spaces. */ | ||
246 | return skip_spaces(next); | ||
247 | } | ||
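
With next_arg() exported from lib/cmdline.c, any code holding a writable command-line buffer can walk it: each call NUL-terminates one param[=val] token in place, strips quotes around the value and returns a pointer past the trailing whitespace. A caller-side sketch follows; tiny_next_arg() is a cut-down userspace stand-in (no quote handling) so the example compiles on its own, whereas kernel code would call the real next_arg().

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static char *tiny_next_arg(char *args, char **param, char **val)
{
    char *eq, *end;

    *param = args;
    end = args + strcspn(args, " ");            /* token ends at the next space */
    if (*end)
        *end++ = '\0';

    eq = strchr(args, '=');
    if (eq) {
        *eq = '\0';
        *val = eq + 1;
    } else {
        *val = NULL;
    }

    while (isspace((unsigned char)*end))        /* chew up trailing spaces */
        end++;
    return end;
}

int main(void)
{
    char cmdline[] = "ro root=/dev/sda1 console=ttyS0,115200";
    char *next = cmdline, *param, *val;

    while (*next) {
        next = tiny_next_arg(next, &param, &val);
        printf("param=%s val=%s\n", param, val ? val : "(none)");
    }
    return 0;
}
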
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 5d19fdf80292..897cd6f3f687 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c | |||
@@ -3339,7 +3339,7 @@ int main(int argc, char *argv[]) | |||
3339 | * simple, single region. | 3339 | * simple, single region. |
3340 | */ | 3340 | */ |
3341 | boot->e820_entries = 1; | 3341 | boot->e820_entries = 1; |
3342 | boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM }); | 3342 | boot->e820_table[0] = ((struct e820_entry) { 0, mem, E820_TYPE_RAM }); |
3343 | /* | 3343 | /* |
3344 | * The boot header contains a command line pointer: we put the command | 3344 | * The boot header contains a command line pointer: we put the command |
3345 | * line after the boot header. | 3345 | * line after the boot header. |