diff options
author | James Morse <james.morse@arm.com> | 2017-11-06 13:44:24 -0500 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2017-11-07 06:12:44 -0500 |
commit | 4f89fa286f6729312e227e7c2d764e8e7b9d340e (patch) | |
tree | 1e7dd47667d1c871dc741ccbfa90b6016f7e6164 | |
parent | c49870e89f4d2c21c76ebe90568246bb0f3572b7 (diff) |
ACPI / APEI: Replace ioremap_page_range() with fixmap
Replace ghes_io{re,un}map_pfn_{nmi,irq}()'s use of ioremap_page_range()
with __set_fixmap() as ioremap_page_range() may sleep to allocate a new
level of page-table, even if it's passed an existing final-address to
use in the mapping.
The GHES driver can only be enabled for architectures that select
HAVE_ACPI_APEI: Add fixmap entries to both x86 and arm64.
clear_fixmap() does the TLB invalidation in __set_fixmap() for arm64
and __set_pte_vaddr() for x86. In each case it's the same as the
respective arch_apei_flush_tlb_one().
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Tyler Baicar <tbaicar@codeaurora.org>
Tested-by: Toshi Kani <toshi.kani@hpe.com>
[ For the arm64 bits: ]
Acked-by: Will Deacon <will.deacon@arm.com>
[ For the x86 bits: ]
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: All applicable <stable@vger.kernel.org>
-rw-r--r-- | arch/arm64/include/asm/fixmap.h | 7 | ||||
-rw-r--r-- | arch/x86/include/asm/fixmap.h | 6 | ||||
-rw-r--r-- | drivers/acpi/apei/ghes.c | 44 |
3 files changed, 27 insertions, 30 deletions
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index caf86be815ba..4052ec39e8db 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h | |||
@@ -51,6 +51,13 @@ enum fixed_addresses { | |||
51 | 51 | ||
52 | FIX_EARLYCON_MEM_BASE, | 52 | FIX_EARLYCON_MEM_BASE, |
53 | FIX_TEXT_POKE0, | 53 | FIX_TEXT_POKE0, |
54 | |||
55 | #ifdef CONFIG_ACPI_APEI_GHES | ||
56 | /* Used for GHES mapping from assorted contexts */ | ||
57 | FIX_APEI_GHES_IRQ, | ||
58 | FIX_APEI_GHES_NMI, | ||
59 | #endif /* CONFIG_ACPI_APEI_GHES */ | ||
60 | |||
54 | __end_of_permanent_fixed_addresses, | 61 | __end_of_permanent_fixed_addresses, |
55 | 62 | ||
56 | /* | 63 | /* |
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index dcd9fb55e679..b0c505fe9a95 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
@@ -104,6 +104,12 @@ enum fixed_addresses { | |||
104 | FIX_GDT_REMAP_BEGIN, | 104 | FIX_GDT_REMAP_BEGIN, |
105 | FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1, | 105 | FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1, |
106 | 106 | ||
107 | #ifdef CONFIG_ACPI_APEI_GHES | ||
108 | /* Used for GHES mapping from assorted contexts */ | ||
109 | FIX_APEI_GHES_IRQ, | ||
110 | FIX_APEI_GHES_NMI, | ||
111 | #endif | ||
112 | |||
107 | __end_of_permanent_fixed_addresses, | 113 | __end_of_permanent_fixed_addresses, |
108 | 114 | ||
109 | /* | 115 | /* |
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index cb7aceae3553..572b6c7303ed 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <acpi/actbl1.h> | 51 | #include <acpi/actbl1.h> |
52 | #include <acpi/ghes.h> | 52 | #include <acpi/ghes.h> |
53 | #include <acpi/apei.h> | 53 | #include <acpi/apei.h> |
54 | #include <asm/fixmap.h> | ||
54 | #include <asm/tlbflush.h> | 55 | #include <asm/tlbflush.h> |
55 | #include <ras/ras_event.h> | 56 | #include <ras/ras_event.h> |
56 | 57 | ||
@@ -112,7 +113,7 @@ static DEFINE_MUTEX(ghes_list_mutex); | |||
112 | * Because the memory area used to transfer hardware error information | 113 | * Because the memory area used to transfer hardware error information |
113 | * from BIOS to Linux can be determined only in NMI, IRQ or timer | 114 | * from BIOS to Linux can be determined only in NMI, IRQ or timer |
114 | * handler, but general ioremap can not be used in atomic context, so | 115 | * handler, but general ioremap can not be used in atomic context, so |
115 | * a special version of atomic ioremap is implemented for that. | 116 | * the fixmap is used instead. |
116 | */ | 117 | */ |
117 | 118 | ||
118 | /* | 119 | /* |
@@ -126,8 +127,8 @@ static DEFINE_MUTEX(ghes_list_mutex); | |||
126 | /* virtual memory area for atomic ioremap */ | 127 | /* virtual memory area for atomic ioremap */ |
127 | static struct vm_struct *ghes_ioremap_area; | 128 | static struct vm_struct *ghes_ioremap_area; |
128 | /* | 129 | /* |
129 | * These 2 spinlock is used to prevent atomic ioremap virtual memory | 130 | * These 2 spinlocks are used to prevent the fixmap entries from being used |
130 | * area from being mapped simultaneously. | 131 | * simultaneously. |
131 | */ | 132 | */ |
132 | static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); | 133 | static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); |
133 | static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); | 134 | static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); |
@@ -159,53 +160,36 @@ static void ghes_ioremap_exit(void) | |||
159 | 160 | ||
160 | static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) | 161 | static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) |
161 | { | 162 | { |
162 | unsigned long vaddr; | ||
163 | phys_addr_t paddr; | 163 | phys_addr_t paddr; |
164 | pgprot_t prot; | 164 | pgprot_t prot; |
165 | 165 | ||
166 | vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr); | ||
167 | |||
168 | paddr = pfn << PAGE_SHIFT; | 166 | paddr = pfn << PAGE_SHIFT; |
169 | prot = arch_apei_get_mem_attribute(paddr); | 167 | prot = arch_apei_get_mem_attribute(paddr); |
170 | ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); | 168 | __set_fixmap(FIX_APEI_GHES_NMI, paddr, prot); |
171 | 169 | ||
172 | return (void __iomem *)vaddr; | 170 | return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI); |
173 | } | 171 | } |
174 | 172 | ||
175 | static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) | 173 | static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) |
176 | { | 174 | { |
177 | unsigned long vaddr; | ||
178 | phys_addr_t paddr; | 175 | phys_addr_t paddr; |
179 | pgprot_t prot; | 176 | pgprot_t prot; |
180 | 177 | ||
181 | vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); | ||
182 | |||
183 | paddr = pfn << PAGE_SHIFT; | 178 | paddr = pfn << PAGE_SHIFT; |
184 | prot = arch_apei_get_mem_attribute(paddr); | 179 | prot = arch_apei_get_mem_attribute(paddr); |
180 | __set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot); | ||
185 | 181 | ||
186 | ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); | 182 | return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ); |
187 | |||
188 | return (void __iomem *)vaddr; | ||
189 | } | 183 | } |
190 | 184 | ||
191 | static void ghes_iounmap_nmi(void __iomem *vaddr_ptr) | 185 | static void ghes_iounmap_nmi(void) |
192 | { | 186 | { |
193 | unsigned long vaddr = (unsigned long __force)vaddr_ptr; | 187 | clear_fixmap(FIX_APEI_GHES_NMI); |
194 | void *base = ghes_ioremap_area->addr; | ||
195 | |||
196 | BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base)); | ||
197 | unmap_kernel_range_noflush(vaddr, PAGE_SIZE); | ||
198 | arch_apei_flush_tlb_one(vaddr); | ||
199 | } | 188 | } |
200 | 189 | ||
201 | static void ghes_iounmap_irq(void __iomem *vaddr_ptr) | 190 | static void ghes_iounmap_irq(void) |
202 | { | 191 | { |
203 | unsigned long vaddr = (unsigned long __force)vaddr_ptr; | 192 | clear_fixmap(FIX_APEI_GHES_IRQ); |
204 | void *base = ghes_ioremap_area->addr; | ||
205 | |||
206 | BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base)); | ||
207 | unmap_kernel_range_noflush(vaddr, PAGE_SIZE); | ||
208 | arch_apei_flush_tlb_one(vaddr); | ||
209 | } | 193 | } |
210 | 194 | ||
211 | static int ghes_estatus_pool_init(void) | 195 | static int ghes_estatus_pool_init(void) |
@@ -361,10 +345,10 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, | |||
361 | paddr += trunk; | 345 | paddr += trunk; |
362 | buffer += trunk; | 346 | buffer += trunk; |
363 | if (in_nmi) { | 347 | if (in_nmi) { |
364 | ghes_iounmap_nmi(vaddr); | 348 | ghes_iounmap_nmi(); |
365 | raw_spin_unlock(&ghes_ioremap_lock_nmi); | 349 | raw_spin_unlock(&ghes_ioremap_lock_nmi); |
366 | } else { | 350 | } else { |
367 | ghes_iounmap_irq(vaddr); | 351 | ghes_iounmap_irq(); |
368 | spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); | 352 | spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); |
369 | } | 353 | } |
370 | } | 354 | } |