diff options
 arch/x86/include/asm/cpu_entry_area.h | 4 ++++
 arch/x86/mm/cpu_entry_area.c          | 4 ++++
 2 files changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index af8c312673de..9b406f067ecf 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -99,6 +99,7 @@ struct cpu_entry_area {
 #define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
 
 extern void setup_cpu_entry_areas(void);
 extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
@@ -118,4 +119,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu)
 	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
 }
 
+#define __this_cpu_ist_top_va(name)				\
+	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
+
 #endif
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 2b1407662a6d..a00d0d059c8a 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -14,6 +14,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
+DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
 #endif
 
 struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -92,6 +93,9 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
 	unsigned int npages;
 
 	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+
+	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
+
 	/*
 	 * The exceptions stack mappings in the per cpu area are protected
 	 * by guard pages so each stack must be mapped separately.
