Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/efi.h      |  6
-rw-r--r--  arch/x86/kernel/apic/io_apic.c  |  3
-rw-r--r--  arch/x86/kernel/e820.c          |  3
-rw-r--r--  arch/x86/kernel/setup.c         | 27
-rw-r--r--  arch/x86/mm/init.c              | 58
-rw-r--r--  arch/x86/mm/init_64.c           |  7
-rw-r--r--  arch/x86/platform/efi/efi.c     | 47
-rw-r--r--  arch/x86/platform/efi/efi_64.c  |  7
8 files changed, 100 insertions, 58 deletions
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index c9dcc181d4d1..6e8fdf5ad113 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -35,7 +35,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
 	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
-#define efi_ioremap(addr, size, type)		ioremap_cache(addr, size)
+#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)
 
 #else /* !CONFIG_X86_32 */
 
@@ -89,7 +89,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
 extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
-				 u32 type);
+				 u32 type, u64 attribute);
 
 #endif /* CONFIG_X86_32 */
 
@@ -98,6 +98,8 @@ extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
+extern void efi_unmap_memmap(void);
+extern void efi_memory_uc(u64 addr, unsigned long size);
 
 #ifndef CONFIG_EFI
 /*
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c265593ec2cd..1817fa911024 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2257,6 +2257,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 			continue;
 
 		cfg = irq_cfg(irq);
+		if (!cfg)
+			continue;
+
 		raw_spin_lock(&desc->lock);
 
 		/*
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ed858e9e9a74..df06ade26bef 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1077,6 +1077,9 @@ void __init memblock_x86_fill(void)
 		memblock_add(ei->addr, ei->size);
 	}
 
+	/* throw away partial pages */
+	memblock_trim_memory(PAGE_SIZE);
+
 	memblock_dump_all();
 }
 
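[Review note] memblock_trim_memory(PAGE_SIZE) drops the unaligned head and tail of each memblock region, so the direct mapping set up later never covers a partial page. A worked example with hypothetical addresses:

	/* an e820 RAM range [0x1000800, 0x3fff800) has unaligned ends; */
	start = round_up(0x1000800UL, PAGE_SIZE);	/* -> 0x1001000 */
	end   = round_down(0x3fff800UL, PAGE_SIZE);	/* -> 0x3fff000 */
	/* the region is trimmed to [0x1001000, 0x3fff000) before use */
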
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 468e98dfd44e..ca45696f30fb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -921,18 +921,19 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_X86_64
 	if (max_pfn > max_low_pfn) {
 		int i;
-		for (i = 0; i < e820.nr_map; i++) {
-			struct e820entry *ei = &e820.map[i];
+		unsigned long start, end;
+		unsigned long start_pfn, end_pfn;
 
-			if (ei->addr + ei->size <= 1UL << 32)
-				continue;
+		for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn,
+				       NULL) {
 
-			if (ei->type == E820_RESERVED)
+			end = PFN_PHYS(end_pfn);
+			if (end <= (1UL<<32))
 				continue;
 
+			start = PFN_PHYS(start_pfn);
 			max_pfn_mapped = init_memory_mapping(
-				ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
-				ei->addr + ei->size);
+						max((1UL<<32), start), end);
 		}
 
 		/* can we preseve max_low_pfn ?*/
@@ -1048,6 +1049,18 @@ void __init setup_arch(char **cmdline_p)
 	arch_init_ideal_nops();
 
 	register_refined_jiffies(CLOCK_TICK_RATE);
+
+#ifdef CONFIG_EFI
+	/* Once setup is done above, disable efi_enabled on mismatched
+	 * firmware/kernel architectures since there is no support for
+	 * runtime services.
+	 */
+	if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
+		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+		efi_unmap_memmap();
+		efi_enabled = 0;
+	}
+#endif
 }
 
 #ifdef CONFIG_X86_32
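[Review note] IS_ENABLED(CONFIG_X86_64) evaluates to 1 on a 64-bit kernel and 0 otherwise, while efi_64bit records the firmware's word size from the boot parameters, so the condition fires exactly when kernel and firmware bitness disagree. Boot-time EFI data is still usable during setup; only runtime services are not, which is why the flag is cleared at the end of setup_arch() rather than in efi_init(). A condensed sketch of the test:

	/* native is true only when kernel and firmware bitness match */
	bool native = IS_ENABLED(CONFIG_X86_64) == efi_64bit;
	if (efi_enabled && !native) {
		efi_unmap_memmap();	/* drop the early memmap mapping */
		efi_enabled = 0;	/* no runtime services possible */
	}
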
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ab1f6a93b527..d7aea41563b3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -35,40 +35,44 @@ struct map_range {
 	unsigned page_size_mask;
 };
 
-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
-					  int use_pse, int use_gbpages)
+/*
+ * First calculate space needed for kernel direct mapping page tables to cover
+ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
+ * pages. Then find enough contiguous space for those page tables.
+ */
+static void __init find_early_table_space(struct map_range *mr, int nr_range)
 {
-	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+	int i;
+	unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+	unsigned long start = 0, good_end;
 	phys_addr_t base;
 
-	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-
-	if (use_gbpages) {
-		unsigned long extra;
-
-		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-	} else
-		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+	for (i = 0; i < nr_range; i++) {
+		unsigned long range, extra;
 
-	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+		range = mr[i].end - mr[i].start;
+		puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
 
-	if (use_pse) {
-		unsigned long extra;
+		if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
+			extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
+			pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+		} else {
+			pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
+		}
 
-		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+		if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
+			extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
 #ifdef CONFIG_X86_32
 			extra += PMD_SIZE;
 #endif
-		/* The first 2/4M doesn't use large pages. */
-		if (mr->start < PMD_SIZE)
-			extra += mr->end - mr->start;
-
-		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	} else
-		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+			ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		} else {
+			ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		}
+	}
 
+	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
 #ifdef CONFIG_X86_32
@@ -86,7 +90,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
 	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
 	printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
-		end - 1, pgt_buf_start << PAGE_SHIFT,
+		mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
 		(pgt_buf_top << PAGE_SHIFT) - 1);
 }
 
@@ -267,7 +271,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
-		find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+		find_early_table_space(mr, nr_range);
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
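[Review note] Sizing the tables per map_range instead of for everything below `end` matters on machines with large holes above 4 GiB. A worked example for one hypothetical 1 GiB-aligned range with 2M pages allowed and 1G pages not (PUD_SHIFT = 30, PMD_SHIFT = 21 on x86-64):

	range = 1UL << 30;				/* 1 GiB      */
	puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;	/* puds += 1  */
	pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;	/* pmds += 512 */
	/* PG_LEVEL_2M set, so extra = range -
	 * ((range >> PMD_SHIFT) << PMD_SHIFT) = 0 and ptes += 0 */

i.e. one page of PUD entries plus one page of PMD entries after roundup(), regardless of how far above 4 GiB the range sits.
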
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2b6b4a3c8beb..3baff255adac 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -386,7 +386,8 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 	 * these mappings are more intelligent.
 	 */
 	if (pte_val(*pte)) {
-		pages++;
+		if (!after_bootmem)
+			pages++;
 		continue;
 	}
 
@@ -451,6 +452,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		 * attributes.
 		 */
 		if (page_size_mask & (1 << PG_LEVEL_2M)) {
+			if (!after_bootmem)
+				pages++;
 			last_map_addr = next;
 			continue;
 		}
@@ -526,6 +529,8 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		 * attributes.
 		 */
 		if (page_size_mask & (1 << PG_LEVEL_1G)) {
+			if (!after_bootmem)
+				pages++;
 			last_map_addr = next;
 			continue;
 		}
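[Review note] The pages counters feed the DirectMap statistics at the end of each phys_*_init() pass. Counting an already-present entry only when !after_bootmem means boot-time passes that keep an existing large page still account for it, while post-boot callers such as the reworked efi_ioremap() path, which revisit already-mapped ranges, no longer inflate the counts. For reference, the flush at the end of phys_pmd_init():

	update_page_count(PG_LEVEL_2M, pages);
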
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index aded2a91162a..ad4439145f85 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -70,11 +70,15 @@ EXPORT_SYMBOL(efi);
 struct efi_memory_map memmap;
 
 bool efi_64bit;
-static bool efi_native;
 
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
+static inline bool efi_is_native(void)
+{
+	return IS_ENABLED(CONFIG_X86_64) == efi_64bit;
+}
+
 static int __init setup_noefi(char *arg)
 {
 	efi_enabled = 0;
@@ -420,7 +424,7 @@ void __init efi_reserve_boot_services(void)
 	}
 }
 
-static void __init efi_unmap_memmap(void)
+void __init efi_unmap_memmap(void)
 {
 	if (memmap.map) {
 		early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
@@ -432,7 +436,7 @@ void __init efi_free_boot_services(void)
 {
 	void *p;
 
-	if (!efi_native)
+	if (!efi_is_native())
 		return;
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -684,12 +688,10 @@ void __init efi_init(void)
 		return;
 	}
 	efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
-	efi_native = !efi_64bit;
 #else
 	efi_phys.systab = (efi_system_table_t *)
 			  (boot_params.efi_info.efi_systab |
 			  ((__u64)boot_params.efi_info.efi_systab_hi<<32));
-	efi_native = efi_64bit;
 #endif
 
 	if (efi_systab_init(efi_phys.systab)) {
@@ -723,7 +725,7 @@ void __init efi_init(void)
 	 * that doesn't match the kernel 32/64-bit mode.
 	 */
 
-	if (!efi_native)
+	if (!efi_is_native())
 		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
 	else if (efi_runtime_init()) {
 		efi_enabled = 0;
@@ -735,7 +737,7 @@ void __init efi_init(void)
 		return;
 	}
 #ifdef CONFIG_X86_32
-	if (efi_native) {
+	if (efi_is_native()) {
 		x86_platform.get_wallclock = efi_get_time;
 		x86_platform.set_wallclock = efi_set_rtc_mmss;
 	}
@@ -810,6 +812,16 @@ void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
 	return NULL;
 }
 
+void efi_memory_uc(u64 addr, unsigned long size)
+{
+	unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
+	u64 npages;
+
+	npages = round_up(size, page_shift) / page_shift;
+	memrange_efi_to_native(&addr, &npages);
+	set_memory_uc(addr, npages);
+}
+
 /*
  * This function will switch the EFI runtime services to virtual mode.
  * Essentially, look through the EFI memmap and map every region that
@@ -823,7 +835,7 @@ void __init efi_enter_virtual_mode(void)
 	efi_memory_desc_t *md, *prev_md = NULL;
 	efi_status_t status;
 	unsigned long size;
-	u64 end, systab, addr, npages, end_pfn;
+	u64 end, systab, end_pfn;
 	void *p, *va, *new_memmap = NULL;
 	int count = 0;
 
@@ -834,7 +846,7 @@ void __init efi_enter_virtual_mode(void)
 	 * non-native EFI
 	 */
 
-	if (!efi_native) {
+	if (!efi_is_native()) {
 		efi_unmap_memmap();
 		return;
 	}
@@ -879,10 +891,14 @@ void __init efi_enter_virtual_mode(void)
 		end_pfn = PFN_UP(end);
 		if (end_pfn <= max_low_pfn_mapped
 		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
-			&& end_pfn <= max_pfn_mapped))
+			&& end_pfn <= max_pfn_mapped)) {
 			va = __va(md->phys_addr);
-		else
-			va = efi_ioremap(md->phys_addr, size, md->type);
+
+			if (!(md->attribute & EFI_MEMORY_WB))
+				efi_memory_uc((u64)(unsigned long)va, size);
+		} else
+			va = efi_ioremap(md->phys_addr, size,
+					 md->type, md->attribute);
 
 		md->virt_addr = (u64) (unsigned long) va;
 
@@ -892,13 +908,6 @@ void __init efi_enter_virtual_mode(void)
 			continue;
 		}
 
-		if (!(md->attribute & EFI_MEMORY_WB)) {
-			addr = md->virt_addr;
-			npages = md->num_pages;
-			memrange_efi_to_native(&addr, &npages);
-			set_memory_uc(addr, npages);
-		}
-
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
 			systab += md->virt_addr - md->phys_addr;
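[Review note] efi_memory_uc() centralizes the uncached-mapping fixup that used to sit inline here. The rounding works in EFI pages, which are always 4 KiB (EFI_PAGE_SHIFT = 12) regardless of the native page size. A worked example for a hypothetical 6000-byte region:

	page_shift = 1UL << EFI_PAGE_SHIFT;		  /* 4096 */
	npages = round_up(6000, page_shift) / page_shift; /* 8192 / 4096 = 2 */
	/* memrange_efi_to_native() aligns addr down to a native page
	 * boundary and rescales npages before set_memory_uc() marks
	 * the mapping uncached */
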
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac3aa54e2654..95fd505dfeb6 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -82,7 +82,7 @@ void __init efi_call_phys_epilog(void)
 }
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
-				 u32 type)
+				 u32 type, u64 attribute)
 {
 	unsigned long last_map_pfn;
 
@@ -92,8 +92,11 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
 	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
 	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
 		unsigned long top = last_map_pfn << PAGE_SHIFT;
-		efi_ioremap(top, size - (top - phys_addr), type);
+		efi_ioremap(top, size - (top - phys_addr), type, attribute);
 	}
 
+	if (!(attribute & EFI_MEMORY_WB))
+		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
+
 	return (void __iomem *)__va(phys_addr);
 }
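[Review note] Putting the EFI_MEMORY_WB check inside efi_ioremap() keeps the 64-bit path self-contained: the region is first added to the kernel direct mapping, then demoted to uncached if the firmware says write-back is not allowed. Call flow for a non-WB region, per the code above:

	/* efi_ioremap(phys, size, type, attr)
	 *   -> init_memory_mapping(phys, phys + size)  cached direct map
	 *   -> efi_memory_uc(__va(phys), size)          if !(attr & EFI_MEMORY_WB)
	 *        -> memrange_efi_to_native(&addr, &npages)
	 *        -> set_memory_uc(addr, npages)          mark uncached
	 *   -> return __va(phys)
	 */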