Diffstat (limited to 'arch/x86/kernel/vsmp_64.c')
-rw-r--r--  arch/x86/kernel/vsmp_64.c  44
1 file changed, 40 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 8eeb55a551b4..992f890283e9 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -16,6 +16,7 @@
 #include <linux/pci_ids.h>
 #include <linux/pci_regs.h>
 #include <linux/smp.h>
+#include <linux/irq.h>
 
 #include <asm/apic.h>
 #include <asm/pci-direct.h>
@@ -95,6 +96,18 @@ static void __init set_vsmp_pv_ops(void)
 	ctl = readl(address + 4);
 	printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
 	       cap, ctl);
+
+	/* If possible, let the vSMP foundation route the interrupt optimally */
+#ifdef CONFIG_SMP
+	if (cap & ctl & BIT(8)) {
+		ctl &= ~BIT(8);
+#ifdef CONFIG_PROC_FS
+		/* Don't let users change irq affinity via procfs */
+		no_irq_affinity = 1;
+#endif
+	}
+#endif
+
 	if (cap & ctl & (1 << 4)) {
 		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
 		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
@@ -102,12 +115,11 @@ static void __init set_vsmp_pv_ops(void)
 		pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
 		pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
 		pv_init_ops.patch = vsmp_patch;
-
 		ctl &= ~(1 << 4);
-		writel(ctl, address + 4);
-		ctl = readl(address + 4);
-		printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl);
 	}
+	writel(ctl, address + 4);
+	ctl = readl(address + 4);
+	pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
 
 	early_iounmap(address, 8);
 }
@@ -187,12 +199,36 @@ static void __init vsmp_cap_cpus(void)
 #endif
 }
 
+static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
+{
+	return hard_smp_processor_id() >> index_msb;
+}
+
+/*
+ * In vSMP, all cpus should be capable of handling interrupts, regardless of
+ * the APIC used.
+ */
+static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
+					  const struct cpumask *mask)
+{
+	cpumask_setall(retmask);
+}
+
+static void vsmp_apic_post_init(void)
+{
+	/* need to update phys_pkg_id */
+	apic->phys_pkg_id = apicid_phys_pkg_id;
+	apic->vector_allocation_domain = fill_vector_allocation_domain;
+}
+
 void __init vsmp_init(void)
 {
 	detect_vsmp_box();
 	if (!is_vsmp_box())
 		return;
 
+	x86_platform.apic_post_init = vsmp_apic_post_init;
+
 	vsmp_cap_cpus();
 
 	set_vsmp_pv_ops();
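
As a reading aid for the control-register handling in set_vsmp_pv_ops() above, here is a minimal standalone sketch (plain userspace C, not kernel code) of the cap/ctl pattern the patch relies on: a feature is usable only when its bit is set in both the capability word and the control word, enabling it means clearing the control bit, and the control word is now written back once after all features have been considered rather than only inside the fastpath branch. The macro names and sample values below are invented for illustration; only the bit positions (8 and 4) come from the patch.

/*
 * Minimal sketch of the cap/ctl handshake; the vSMP register is modelled
 * with ordinary variables and all names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_BIT_IRQ_ROUTING	(1u << 8)	/* foundation routes interrupts */
#define SKETCH_BIT_IRQ_FASTPATH	(1u << 4)	/* PV irq fastpath */

int main(void)
{
	/* pretend both features are advertised and still disabled in ctl */
	uint32_t cap = SKETCH_BIT_IRQ_ROUTING | SKETCH_BIT_IRQ_FASTPATH;
	uint32_t ctl = SKETCH_BIT_IRQ_ROUTING | SKETCH_BIT_IRQ_FASTPATH;

	/* a feature is acted on only if set in both words; enabling clears ctl */
	if (cap & ctl & SKETCH_BIT_IRQ_ROUTING)
		ctl &= ~SKETCH_BIT_IRQ_ROUTING;

	if (cap & ctl & SKETCH_BIT_IRQ_FASTPATH)
		ctl &= ~SKETCH_BIT_IRQ_FASTPATH;

	/* single write-back, mirroring the writel() placed after the branch */
	printf("control set to:0x%08x\n", ctl);
	return 0;
}

The single write-back also shows why the patch moves writel() below the closing brace of the fastpath branch: without that move, clearing BIT(8) alone would never reach the hardware.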