Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic/io_apic.c |  2 +-
-rw-r--r--  arch/x86/kernel/cpu/amd.c      |  4 ++--
-rw-r--r--  arch/x86/kernel/traps.c        | 20 ++++++++++----------
3 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 84e33ff5a6d5..446702ed99dc 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
 		res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
 		mem += IOAPIC_RESOURCE_NAME_SIZE;
+		ioapics[i].iomem_res = &res[num];
 		num++;
-		ioapics[i].iomem_res = res;
 	}
 
 	ioapic_resources = res;
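The io_apic.c hunk fixes a pointer bug: the old code stored the base of the
resource array in every IOAPIC, so each entry's iomem_res aliased res[0]
instead of its own slot; the fix records &res[num] before num is advanced.
Below is a minimal stand-alone sketch of the same aliasing pattern; the
struct shapes and values are invented for illustration, not the kernel's.

/* Stand-alone sketch of the aliasing bug fixed above (hypothetical types). */
#include <stdio.h>

struct resource { unsigned long start; };
struct ioapic   { struct resource *iomem_res; };

int main(void)
{
	struct resource res[3] = { { 0x100 }, { 0x200 }, { 0x300 } };
	struct ioapic ioapics[3];
	int num = 0;

	for (int i = 0; i < 3; i++) {
		/* buggy form: ioapics[i].iomem_res = res;  -- always &res[0] */
		ioapics[i].iomem_res = &res[num];	/* fixed: this IOAPIC's own slot */
		num++;
	}

	for (int i = 0; i < 3; i++)
		printf("ioapic %d -> %#lx\n", i, ioapics[i].iomem_res->start);
	return 0;
}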
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c343a54bed39..f5c69d8974e1 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 	u64 value;
 
 	/* re-enable TopologyExtensions if switched off by BIOS */
-	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
 		if (msr_set_bit(0xc0011005, 54) > 0) {
 			rdmsrl(0xc0011005, value);
 			if (value & BIT_64(54)) {
 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
 			}
 		}
 	}
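The amd.c hunk widens the model window for the TopologyExtensions re-enable
workaround from 0x10..0x1f to 0x10..0x6f, and switches to pr_info_once() so
the message is not repeated for every CPU. The existing logic stays the same:
set bit 54 of MSR 0xc0011005, read it back, and only advertise TOPOEXT if the
bit actually stuck. A hedged sketch of the gating check is below; the
family-0x15 assumption comes from init_amd_bd() being the Bulldozer-family
init path and is not part of this hunk.

/* Sketch only: family value assumed from context, model window from the hunk. */
static bool wants_topoext_reenable(unsigned int family, unsigned int model,
				   bool has_topoext)
{
	return family == 0x15 && model >= 0x10 && model <= 0x6f && !has_topoext;
}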
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d1590486204a..00f03d82e69a 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
 		local_irq_disable();
 }
 
+/*
+ * In IST context, we explicitly disable preemption. This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
 void ist_enter(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
@@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
 		rcu_nmi_enter();
 	}
 
-	/*
-	 * We are atomic because we're on the IST stack; or we're on
-	 * x86_32, in which case we still shouldn't schedule; or we're
-	 * on x86_64 and entered from user mode, in which case we're
-	 * still atomic unless ist_begin_non_atomic is called.
-	 */
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 
 	/* This code is a bit fragile. Test it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
 
 void ist_exit(struct pt_regs *regs)
 {
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 
 	if (!user_mode(regs))
 		rcu_nmi_exit();
@@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
 	BUG_ON((unsigned long)(current_top_of_stack() -
 			       current_stack_pointer()) >= THREAD_SIZE);
 
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 }
 
 /**
@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
  */
 void ist_end_non_atomic(void)
 {
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 }
 
 static nokprobe_inline int
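The traps.c change replaces raw HARDIRQ_OFFSET preempt-count bookkeeping with
plain preemption control, as documented by the new comment above ist_enter().
The sketch below shows the intended pairing of the four helpers after this
change in a hypothetical IST handler; the handler name and body are invented
for illustration and are not part of the patch.

/* Hypothetical IST handler, for illustration only. */
dotraplinkage void do_example_trap(struct pt_regs *regs, long error_code)
{
	ist_enter(regs);			/* now preempt_disable() */

	if (user_mode(regs)) {
		/* Open a window where scheduling is legal again. */
		ist_begin_non_atomic(regs);	/* now preempt_enable_no_resched() */
		/* ... work that may sleep ... */
		ist_end_non_atomic();		/* now preempt_disable() */
	}

	ist_exit(regs);				/* now preempt_enable_no_resched() */
}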