diff options
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/include/asm/efi.h | 1 | ||||
-rw-r--r-- | arch/x86/include/asm/mce.h | 2 | ||||
-rw-r--r-- | arch/x86/kernel/aperture_64.c | 34 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce.c | 25 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/therm_throt.c | 3 | ||||
-rw-r--r-- | arch/x86/kernel/irq.c | 5 | ||||
-rw-r--r-- | arch/x86/platform/efi/efi.c | 78 | ||||
-rw-r--r-- | arch/x86/platform/efi/efi_64.c | 34 |
8 files changed, 96 insertions, 86 deletions
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 8e4a16508d4e..7093e4a6a0bc 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -90,6 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, | |||
90 | #endif /* CONFIG_X86_32 */ | 90 | #endif /* CONFIG_X86_32 */ |
91 | 91 | ||
92 | extern int add_efi_memmap; | 92 | extern int add_efi_memmap; |
93 | extern void efi_set_executable(efi_memory_desc_t *md, bool executable); | ||
93 | extern void efi_memblock_x86_reserve_range(void); | 94 | extern void efi_memblock_x86_reserve_range(void); |
94 | extern void efi_call_phys_prelog(void); | 95 | extern void efi_call_phys_prelog(void); |
95 | extern void efi_call_phys_epilog(void); | 96 | extern void efi_call_phys_epilog(void); |
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index eb16e94ae04f..021979a6e23f 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -142,8 +142,6 @@ static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {} | |||
142 | static inline void enable_p5_mce(void) {} | 142 | static inline void enable_p5_mce(void) {} |
143 | #endif | 143 | #endif |
144 | 144 | ||
145 | extern void (*x86_mce_decode_callback)(struct mce *m); | ||
146 | |||
147 | void mce_setup(struct mce *m); | 145 | void mce_setup(struct mce *m); |
148 | void mce_log(struct mce *m); | 146 | void mce_log(struct mce *m); |
149 | DECLARE_PER_CPU(struct sys_device, mce_dev); | 147 | DECLARE_PER_CPU(struct sys_device, mce_dev); |
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 73fb469908c6..3d2661ca6542 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -30,6 +30,22 @@ | |||
30 | #include <asm/amd_nb.h> | 30 | #include <asm/amd_nb.h> |
31 | #include <asm/x86_init.h> | 31 | #include <asm/x86_init.h> |
32 | 32 | ||
33 | /* | ||
34 | * Using 512M as goal, in case kexec will load kernel_big | ||
35 | * that will do the on-position decompress, and could overlap | ||
36 | * with the gart aperture that is used. | ||
37 | * Sequence: | ||
38 | * kernel_small | ||
39 | * ==> kexec (with kdump trigger path or gart still enabled) | ||
40 | * ==> kernel_small (gart area become e820_reserved) | ||
41 | * ==> kexec (with kdump trigger path or gart still enabled) | ||
42 | * ==> kernel_big (uncompressed size will be bigger than 64M or 128M) | ||
43 | * So don't use 512M below as gart iommu, leave the space for kernel | ||
44 | * code for safe. | ||
45 | */ | ||
46 | #define GART_MIN_ADDR (512ULL << 20) | ||
47 | #define GART_MAX_ADDR (1ULL << 32) | ||
48 | |||
33 | int gart_iommu_aperture; | 49 | int gart_iommu_aperture; |
34 | int gart_iommu_aperture_disabled __initdata; | 50 | int gart_iommu_aperture_disabled __initdata; |
35 | int gart_iommu_aperture_allowed __initdata; | 51 | int gart_iommu_aperture_allowed __initdata; |
@@ -70,21 +86,9 @@ static u32 __init allocate_aperture(void) | |||
70 | * memory. Unfortunately we cannot move it up because that would | 86 | * memory. Unfortunately we cannot move it up because that would |
71 | * make the IOMMU useless. | 87 | * make the IOMMU useless. |
72 | */ | 88 | */ |
73 | /* | 89 | addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, |
74 | * using 512M as goal, in case kexec will load kernel_big | 90 | aper_size, aper_size); |
75 | * that will do the on position decompress, and could overlap with | 91 | if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) { |
76 | * that position with gart that is used. | ||
77 | * sequende: | ||
78 | * kernel_small | ||
79 | * ==> kexec (with kdump trigger path or previous doesn't shutdown gart) | ||
80 | * ==> kernel_small(gart area become e820_reserved) | ||
81 | * ==> kexec (with kdump trigger path or previous doesn't shutdown gart) | ||
82 | * ==> kerne_big (uncompressed size will be big than 64M or 128M) | ||
83 | * so don't use 512M below as gart iommu, leave the space for kernel | ||
84 | * code for safe | ||
85 | */ | ||
86 | addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20); | ||
87 | if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) { | ||
88 | printk(KERN_ERR | 92 | printk(KERN_ERR |
89 | "Cannot allocate aperture memory hole (%lx,%uK)\n", | 93 | "Cannot allocate aperture memory hole (%lx,%uK)\n", |
90 | addr, aper_size>>10); | 94 | addr, aper_size>>10); |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 3385ea26f684..ff1ae9b6464d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -105,20 +105,6 @@ static int cpu_missing; | |||
105 | ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); | 105 | ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); |
106 | EXPORT_SYMBOL_GPL(x86_mce_decoder_chain); | 106 | EXPORT_SYMBOL_GPL(x86_mce_decoder_chain); |
107 | 107 | ||
108 | static int default_decode_mce(struct notifier_block *nb, unsigned long val, | ||
109 | void *data) | ||
110 | { | ||
111 | pr_emerg(HW_ERR "No human readable MCE decoding support on this CPU type.\n"); | ||
112 | pr_emerg(HW_ERR "Run the message through 'mcelog --ascii' to decode.\n"); | ||
113 | |||
114 | return NOTIFY_STOP; | ||
115 | } | ||
116 | |||
117 | static struct notifier_block mce_dec_nb = { | ||
118 | .notifier_call = default_decode_mce, | ||
119 | .priority = -1, | ||
120 | }; | ||
121 | |||
122 | /* MCA banks polled by the period polling timer for corrected events */ | 108 | /* MCA banks polled by the period polling timer for corrected events */ |
123 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { | 109 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { |
124 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL | 110 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL |
@@ -212,6 +198,8 @@ void mce_log(struct mce *mce) | |||
212 | 198 | ||
213 | static void print_mce(struct mce *m) | 199 | static void print_mce(struct mce *m) |
214 | { | 200 | { |
201 | int ret = 0; | ||
202 | |||
215 | pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n", | 203 | pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n", |
216 | m->extcpu, m->mcgstatus, m->bank, m->status); | 204 | m->extcpu, m->mcgstatus, m->bank, m->status); |
217 | 205 | ||
@@ -239,7 +227,11 @@ static void print_mce(struct mce *m) | |||
239 | * Print out human-readable details about the MCE error, | 227 | * Print out human-readable details about the MCE error, |
240 | * (if the CPU has an implementation for that) | 228 | * (if the CPU has an implementation for that) |
241 | */ | 229 | */ |
242 | atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); | 230 | ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); |
231 | if (ret == NOTIFY_STOP) | ||
232 | return; | ||
233 | |||
234 | pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); | ||
243 | } | 235 | } |
244 | 236 | ||
245 | #define PANIC_TIMEOUT 5 /* 5 seconds */ | 237 | #define PANIC_TIMEOUT 5 /* 5 seconds */ |
@@ -590,7 +582,6 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | |||
590 | if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) { | 582 | if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) { |
591 | mce_log(&m); | 583 | mce_log(&m); |
592 | atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m); | 584 | atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m); |
593 | add_taint(TAINT_MACHINE_CHECK); | ||
594 | } | 585 | } |
595 | 586 | ||
596 | /* | 587 | /* |
@@ -1722,8 +1713,6 @@ __setup("mce", mcheck_enable); | |||
1722 | 1713 | ||
1723 | int __init mcheck_init(void) | 1714 | int __init mcheck_init(void) |
1724 | { | 1715 | { |
1725 | atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb); | ||
1726 | |||
1727 | mcheck_intel_therm_init(); | 1716 | mcheck_intel_therm_init(); |
1728 | 1717 | ||
1729 | return 0; | 1718 | return 0; |
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 0f034460260d..f5208ff28b5c 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -187,8 +187,6 @@ static int therm_throt_process(bool new_event, int event, int level) | |||
187 | this_cpu, | 187 | this_cpu, |
188 | level == CORE_LEVEL ? "Core" : "Package", | 188 | level == CORE_LEVEL ? "Core" : "Package", |
189 | state->count); | 189 | state->count); |
190 | |||
191 | add_taint(TAINT_MACHINE_CHECK); | ||
192 | return 1; | 190 | return 1; |
193 | } | 191 | } |
194 | if (old_event) { | 192 | if (old_event) { |
@@ -393,7 +391,6 @@ static void unexpected_thermal_interrupt(void) | |||
393 | { | 391 | { |
394 | printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n", | 392 | printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n", |
395 | smp_processor_id()); | 393 | smp_processor_id()); |
396 | add_taint(TAINT_MACHINE_CHECK); | ||
397 | } | 394 | } |
398 | 395 | ||
399 | static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; | 396 | static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 1cb0b9fc78dc..6c0802eb2f7f 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -249,7 +249,7 @@ void fixup_irqs(void) | |||
249 | 249 | ||
250 | data = irq_desc_get_irq_data(desc); | 250 | data = irq_desc_get_irq_data(desc); |
251 | affinity = data->affinity; | 251 | affinity = data->affinity; |
252 | if (!irq_has_action(irq) || | 252 | if (!irq_has_action(irq) || irqd_is_per_cpu(data) || |
253 | cpumask_subset(affinity, cpu_online_mask)) { | 253 | cpumask_subset(affinity, cpu_online_mask)) { |
254 | raw_spin_unlock(&desc->lock); | 254 | raw_spin_unlock(&desc->lock); |
255 | continue; | 255 | continue; |
@@ -276,7 +276,8 @@ void fixup_irqs(void) | |||
276 | else if (!(warned++)) | 276 | else if (!(warned++)) |
277 | set_affinity = 0; | 277 | set_affinity = 0; |
278 | 278 | ||
279 | if (!irqd_can_move_in_process_context(data) && chip->irq_unmask) | 279 | if (!irqd_can_move_in_process_context(data) && |
280 | !irqd_irq_disabled(data) && chip->irq_unmask) | ||
280 | chip->irq_unmask(data); | 281 | chip->irq_unmask(data); |
281 | 282 | ||
282 | raw_spin_unlock(&desc->lock); | 283 | raw_spin_unlock(&desc->lock); |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 0fe27d7c6258..b30aa26a8df2 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -145,17 +145,6 @@ static void virt_efi_reset_system(int reset_type, | |||
145 | data_size, data); | 145 | data_size, data); |
146 | } | 146 | } |
147 | 147 | ||
148 | static efi_status_t virt_efi_set_virtual_address_map( | ||
149 | unsigned long memory_map_size, | ||
150 | unsigned long descriptor_size, | ||
151 | u32 descriptor_version, | ||
152 | efi_memory_desc_t *virtual_map) | ||
153 | { | ||
154 | return efi_call_virt4(set_virtual_address_map, | ||
155 | memory_map_size, descriptor_size, | ||
156 | descriptor_version, virtual_map); | ||
157 | } | ||
158 | |||
159 | static efi_status_t __init phys_efi_set_virtual_address_map( | 148 | static efi_status_t __init phys_efi_set_virtual_address_map( |
160 | unsigned long memory_map_size, | 149 | unsigned long memory_map_size, |
161 | unsigned long descriptor_size, | 150 | unsigned long descriptor_size, |
@@ -468,11 +457,25 @@ void __init efi_init(void) | |||
468 | #endif | 457 | #endif |
469 | } | 458 | } |
470 | 459 | ||
460 | void __init efi_set_executable(efi_memory_desc_t *md, bool executable) | ||
461 | { | ||
462 | u64 addr, npages; | ||
463 | |||
464 | addr = md->virt_addr; | ||
465 | npages = md->num_pages; | ||
466 | |||
467 | memrange_efi_to_native(&addr, &npages); | ||
468 | |||
469 | if (executable) | ||
470 | set_memory_x(addr, npages); | ||
471 | else | ||
472 | set_memory_nx(addr, npages); | ||
473 | } | ||
474 | |||
471 | static void __init runtime_code_page_mkexec(void) | 475 | static void __init runtime_code_page_mkexec(void) |
472 | { | 476 | { |
473 | efi_memory_desc_t *md; | 477 | efi_memory_desc_t *md; |
474 | void *p; | 478 | void *p; |
475 | u64 addr, npages; | ||
476 | 479 | ||
477 | /* Make EFI runtime service code area executable */ | 480 | /* Make EFI runtime service code area executable */ |
478 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 481 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
@@ -481,10 +484,7 @@ static void __init runtime_code_page_mkexec(void) | |||
481 | if (md->type != EFI_RUNTIME_SERVICES_CODE) | 484 | if (md->type != EFI_RUNTIME_SERVICES_CODE) |
482 | continue; | 485 | continue; |
483 | 486 | ||
484 | addr = md->virt_addr; | 487 | efi_set_executable(md, true); |
485 | npages = md->num_pages; | ||
486 | memrange_efi_to_native(&addr, &npages); | ||
487 | set_memory_x(addr, npages); | ||
488 | } | 488 | } |
489 | } | 489 | } |
490 | 490 | ||
@@ -498,13 +498,42 @@ static void __init runtime_code_page_mkexec(void) | |||
498 | */ | 498 | */ |
499 | void __init efi_enter_virtual_mode(void) | 499 | void __init efi_enter_virtual_mode(void) |
500 | { | 500 | { |
501 | efi_memory_desc_t *md; | 501 | efi_memory_desc_t *md, *prev_md = NULL; |
502 | efi_status_t status; | 502 | efi_status_t status; |
503 | unsigned long size; | 503 | unsigned long size; |
504 | u64 end, systab, addr, npages, end_pfn; | 504 | u64 end, systab, addr, npages, end_pfn; |
505 | void *p, *va; | 505 | void *p, *va, *new_memmap = NULL; |
506 | int count = 0; | ||
506 | 507 | ||
507 | efi.systab = NULL; | 508 | efi.systab = NULL; |
509 | |||
510 | /* Merge contiguous regions of the same type and attribute */ | ||
511 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
512 | u64 prev_size; | ||
513 | md = p; | ||
514 | |||
515 | if (!prev_md) { | ||
516 | prev_md = md; | ||
517 | continue; | ||
518 | } | ||
519 | |||
520 | if (prev_md->type != md->type || | ||
521 | prev_md->attribute != md->attribute) { | ||
522 | prev_md = md; | ||
523 | continue; | ||
524 | } | ||
525 | |||
526 | prev_size = prev_md->num_pages << EFI_PAGE_SHIFT; | ||
527 | |||
528 | if (md->phys_addr == (prev_md->phys_addr + prev_size)) { | ||
529 | prev_md->num_pages += md->num_pages; | ||
530 | md->type = EFI_RESERVED_TYPE; | ||
531 | md->attribute = 0; | ||
532 | continue; | ||
533 | } | ||
534 | prev_md = md; | ||
535 | } | ||
536 | |||
508 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 537 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
509 | md = p; | 538 | md = p; |
510 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) | 539 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) |
@@ -541,15 +570,21 @@ void __init efi_enter_virtual_mode(void) | |||
541 | systab += md->virt_addr - md->phys_addr; | 570 | systab += md->virt_addr - md->phys_addr; |
542 | efi.systab = (efi_system_table_t *) (unsigned long) systab; | 571 | efi.systab = (efi_system_table_t *) (unsigned long) systab; |
543 | } | 572 | } |
573 | new_memmap = krealloc(new_memmap, | ||
574 | (count + 1) * memmap.desc_size, | ||
575 | GFP_KERNEL); | ||
576 | memcpy(new_memmap + (count * memmap.desc_size), md, | ||
577 | memmap.desc_size); | ||
578 | count++; | ||
544 | } | 579 | } |
545 | 580 | ||
546 | BUG_ON(!efi.systab); | 581 | BUG_ON(!efi.systab); |
547 | 582 | ||
548 | status = phys_efi_set_virtual_address_map( | 583 | status = phys_efi_set_virtual_address_map( |
549 | memmap.desc_size * memmap.nr_map, | 584 | memmap.desc_size * count, |
550 | memmap.desc_size, | 585 | memmap.desc_size, |
551 | memmap.desc_version, | 586 | memmap.desc_version, |
552 | memmap.phys_map); | 587 | (efi_memory_desc_t *)__pa(new_memmap)); |
553 | 588 | ||
554 | if (status != EFI_SUCCESS) { | 589 | if (status != EFI_SUCCESS) { |
555 | printk(KERN_ALERT "Unable to switch EFI into virtual mode " | 590 | printk(KERN_ALERT "Unable to switch EFI into virtual mode " |
@@ -572,11 +607,12 @@ void __init efi_enter_virtual_mode(void) | |||
572 | efi.set_variable = virt_efi_set_variable; | 607 | efi.set_variable = virt_efi_set_variable; |
573 | efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; | 608 | efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; |
574 | efi.reset_system = virt_efi_reset_system; | 609 | efi.reset_system = virt_efi_reset_system; |
575 | efi.set_virtual_address_map = virt_efi_set_virtual_address_map; | 610 | efi.set_virtual_address_map = NULL; |
576 | if (__supported_pte_mask & _PAGE_NX) | 611 | if (__supported_pte_mask & _PAGE_NX) |
577 | runtime_code_page_mkexec(); | 612 | runtime_code_page_mkexec(); |
578 | early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); | 613 | early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); |
579 | memmap.map = NULL; | 614 | memmap.map = NULL; |
615 | kfree(new_memmap); | ||
580 | } | 616 | } |
581 | 617 | ||
582 | /* | 618 | /* |
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index ac0621a7ac3d..2649426a7905 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -41,22 +41,7 @@ | |||
41 | static pgd_t save_pgd __initdata; | 41 | static pgd_t save_pgd __initdata; |
42 | static unsigned long efi_flags __initdata; | 42 | static unsigned long efi_flags __initdata; |
43 | 43 | ||
44 | static void __init early_mapping_set_exec(unsigned long start, | 44 | static void __init early_code_mapping_set_exec(int executable) |
45 | unsigned long end, | ||
46 | int executable) | ||
47 | { | ||
48 | unsigned long num_pages; | ||
49 | |||
50 | start &= PMD_MASK; | ||
51 | end = (end + PMD_SIZE - 1) & PMD_MASK; | ||
52 | num_pages = (end - start) >> PAGE_SHIFT; | ||
53 | if (executable) | ||
54 | set_memory_x((unsigned long)__va(start), num_pages); | ||
55 | else | ||
56 | set_memory_nx((unsigned long)__va(start), num_pages); | ||
57 | } | ||
58 | |||
59 | static void __init early_runtime_code_mapping_set_exec(int executable) | ||
60 | { | 45 | { |
61 | efi_memory_desc_t *md; | 46 | efi_memory_desc_t *md; |
62 | void *p; | 47 | void *p; |
@@ -67,11 +52,8 @@ static void __init early_runtime_code_mapping_set_exec(int executable) | |||
67 | /* Make EFI runtime service code area executable */ | 52 | /* Make EFI runtime service code area executable */ |
68 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 53 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
69 | md = p; | 54 | md = p; |
70 | if (md->type == EFI_RUNTIME_SERVICES_CODE) { | 55 | if (md->type == EFI_RUNTIME_SERVICES_CODE) |
71 | unsigned long end; | 56 | efi_set_executable(md, executable); |
72 | end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); | ||
73 | early_mapping_set_exec(md->phys_addr, end, executable); | ||
74 | } | ||
75 | } | 57 | } |
76 | } | 58 | } |
77 | 59 | ||
@@ -79,7 +61,7 @@ void __init efi_call_phys_prelog(void) | |||
79 | { | 61 | { |
80 | unsigned long vaddress; | 62 | unsigned long vaddress; |
81 | 63 | ||
82 | early_runtime_code_mapping_set_exec(1); | 64 | early_code_mapping_set_exec(1); |
83 | local_irq_save(efi_flags); | 65 | local_irq_save(efi_flags); |
84 | vaddress = (unsigned long)__va(0x0UL); | 66 | vaddress = (unsigned long)__va(0x0UL); |
85 | save_pgd = *pgd_offset_k(0x0UL); | 67 | save_pgd = *pgd_offset_k(0x0UL); |
@@ -95,7 +77,7 @@ void __init efi_call_phys_epilog(void) | |||
95 | set_pgd(pgd_offset_k(0x0UL), save_pgd); | 77 | set_pgd(pgd_offset_k(0x0UL), save_pgd); |
96 | __flush_tlb_all(); | 78 | __flush_tlb_all(); |
97 | local_irq_restore(efi_flags); | 79 | local_irq_restore(efi_flags); |
98 | early_runtime_code_mapping_set_exec(0); | 80 | early_code_mapping_set_exec(0); |
99 | } | 81 | } |
100 | 82 | ||
101 | void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, | 83 | void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, |
@@ -107,8 +89,10 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, | |||
107 | return ioremap(phys_addr, size); | 89 | return ioremap(phys_addr, size); |
108 | 90 | ||
109 | last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); | 91 | last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); |
110 | if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) | 92 | if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) { |
111 | return NULL; | 93 | unsigned long top = last_map_pfn << PAGE_SHIFT; |
94 | efi_ioremap(top, size - (top - phys_addr), type); | ||
95 | } | ||
112 | 96 | ||
113 | return (void __iomem *)__va(phys_addr); | 97 | return (void __iomem *)__va(phys_addr); |
114 | } | 98 | } |