Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                                   1
-rw-r--r--  arch/x86/include/asm/cpufeature.h                  1
-rw-r--r--  arch/x86/include/asm/fixmap.h                      4
-rw-r--r--  arch/x86/include/asm/io.h                          6
-rw-r--r--  arch/x86/include/asm/lguest_hcall.h                2
-rw-r--r--  arch/x86/include/asm/paravirt.h                    4
-rw-r--r--  arch/x86/include/asm/processor.h                   6
-rw-r--r--  arch/x86/include/asm/required-features.h           2
-rw-r--r--  arch/x86/include/asm/sigcontext.h                  6
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h                  5
-rw-r--r--  arch/x86/include/asm/xen/page.h                    3
-rw-r--r--  arch/x86/include/asm/xsave.h                       3
-rw-r--r--  arch/x86/kernel/apic/apic.c                        6
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c                16
-rw-r--r--  arch/x86/kernel/bios_uv.c                          3
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c         1
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c        81
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c             1
-rw-r--r--  arch/x86/kernel/ftrace.c                           2
-rw-r--r--  arch/x86/kernel/microcode_core.c                  33
-rw-r--r--  arch/x86/kernel/mpparse.c                          7
-rw-r--r--  arch/x86/kernel/ptrace.c                           3
-rw-r--r--  arch/x86/kernel/tlb_uv.c                         189
-rw-r--r--  arch/x86/kernel/uv_sysfs.c                         4
-rw-r--r--  arch/x86/kernel/xsave.c                            2
-rw-r--r--  arch/x86/lguest/boot.c                            16
-rw-r--r--  arch/x86/mm/ioremap.c                             23
-rw-r--r--  arch/x86/mm/pat.c                                  2
-rw-r--r--  arch/x86/mm/pgtable.c                              3
-rw-r--r--  arch/x86/xen/enlighten.c                          89
-rw-r--r--  arch/x86/xen/mmu.c                               118
-rw-r--r--  arch/x86/xen/mmu.h                                 3
-rw-r--r--  arch/x86/xen/smp.c                                 4
-rw-r--r--  arch/x86/xen/xen-ops.h                             2
34 files changed, 433 insertions, 218 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bc25b9f5e4cd..c9086e6307a5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -353,6 +353,7 @@ config X86_UV
 	bool "SGI Ultraviolet"
 	depends on X86_64
 	depends on X86_EXTENDED_PLATFORM
+	depends on NUMA
 	select X86_X2APIC
 	---help---
 	  This option is needed in order to support SGI Ultraviolet systems.
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0beba0d1468d..bb83b1c397aa 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -154,6 +154,7 @@
  * CPUID levels like 0x6, 0xA etc
  */
 #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */
 
 /* Virtualization flags: Linux defined */
 #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 81937a5dc77c..2d81af3974a0 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -151,11 +151,11 @@ extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
-		       unsigned long phys, pgprot_t flags);
+		       phys_addr_t phys, pgprot_t flags);
 
 #ifndef CONFIG_PARAVIRT
 static inline void __set_fixmap(enum fixed_addresses idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	native_set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e5383e3d2f8c..73739322b6d0 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -193,8 +193,10 @@ extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  */
 extern void early_ioremap_init(void);
 extern void early_ioremap_reset(void);
-extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
-extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+				   unsigned long size);
+extern void __iomem *early_memremap(resource_size_t phys_addr,
+				    unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 
 #define IO_SPACE_LIMIT 0xffff
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index 0f4ee7148afe..faae1996487b 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -5,7 +5,6 @@
 #define LHCALL_FLUSH_ASYNC 0
 #define LHCALL_LGUEST_INIT 1
 #define LHCALL_SHUTDOWN 2
-#define LHCALL_LOAD_GDT 3
 #define LHCALL_NEW_PGTABLE 4
 #define LHCALL_FLUSH_TLB 5
 #define LHCALL_LOAD_IDT_ENTRY 6
@@ -17,6 +16,7 @@
 #define LHCALL_SET_PMD 15
 #define LHCALL_LOAD_TLS 16
 #define LHCALL_NOTIFY 17
+#define LHCALL_LOAD_GDT_ENTRY 18
 
 #define LGUEST_TRAP_ENTRY 0x1F
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7727aa8b7dda..378e3691c08c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -347,7 +347,7 @@ struct pv_mmu_ops {
 	/* Sometimes the physical address is a pfn, and sometimes its
 	   an mfn. We can tell which is which from the index. */
 	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
-			   unsigned long phys, pgprot_t flags);
+			   phys_addr_t phys, pgprot_t flags);
 };
 
 struct raw_spinlock;
@@ -1432,7 +1432,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 void arch_flush_lazy_mmu_mode(void);
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	pv_mmu_ops.set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 34c52370f2fe..fcf4d92e7e04 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -352,6 +352,11 @@ struct i387_soft_struct {
 	u32 entry_eip;
 };
 
+struct ymmh_struct {
+	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
+	u32 ymmh_space[64];
+};
+
 struct xsave_hdr_struct {
 	u64 xstate_bv;
 	u64 reserved1[2];
@@ -361,6 +366,7 @@ struct xsave_hdr_struct {
 struct xsave_struct {
 	struct i387_fxsave_struct i387;
 	struct xsave_hdr_struct xsave_hdr;
+	struct ymmh_struct ymmh;
 	/* new processor state extensions will go here */
 } __attribute__ ((packed, aligned (64)));
 
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index d5cd6c586881..a4737dddfd58 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -50,7 +50,7 @@
 #ifdef CONFIG_X86_64
 #define NEED_PSE 0
 #define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
-#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
+#define NEED_PGE 0
 #define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
 #define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
 #define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index ec666491aaa4..72e5a4491661 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -269,6 +269,11 @@ struct _xsave_hdr {
 	__u64 reserved2[5];
 };
 
+struct _ymmh_state {
+	/* 16 * 16 bytes for each YMMH-reg */
+	__u32 ymmh_space[64];
+};
+
 /*
  * Extended state pointed by the fpstate pointer in the sigcontext.
  * In addition to the fpstate, information encoded in the xstate_hdr
@@ -278,6 +283,7 @@ struct _xsave_hdr {
 struct _xstate {
 	struct _fpstate fpstate;
 	struct _xsave_hdr xstate_hdr;
+	struct _ymmh_state ymmh;
 	/* new processor state extensions go here */
 };
 
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index db68ac8a5ac2..2cae46c7c8a2 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -17,6 +17,11 @@
 /* ========================================================================= */
 /* UVH_BAU_DATA_CONFIG */
 /* ========================================================================= */
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
+/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
 #define UVH_BAU_DATA_CONFIG 0x61680UL
 #define UVH_BAU_DATA_CONFIG_32 0x0438
 
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 1a918dde46b5..018a0a400799 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -124,7 +124,8 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
+#define virt_to_pfn(v) (PFN_DOWN(__pa(v)))
+#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 static inline unsigned long pte_mfn(pte_t pte)
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 08e9a1ac07a9..727acc152344 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -7,6 +7,7 @@
 
 #define XSTATE_FP 0x1
 #define XSTATE_SSE 0x2
+#define XSTATE_YMM 0x4
 
 #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
 
@@ -15,7 +16,7 @@
 /*
  * These are the features that the OS can handle currently.
  */
-#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE)
+#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX "0x48, "
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 098ec84b8c00..f2870920f246 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -431,6 +431,12 @@ static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
+	if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
+		/* Make LAPIC timer preferrable over percpu HPET */
+		lapic_clockevent.rating = 150;
+	}
+
 	memcpy(levt, &lapic_clockevent, sizeof(*levt));
 	levt->cpumask = cpumask_of(smp_processor_id());
 
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 49d2f5c739b7..d6712334ce4b 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -563,7 +563,8 @@ void __init uv_system_init(void)
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
 	int max_pnode = 0;
-	unsigned long mmr_base, present;
+	unsigned long mmr_base, present, paddr;
+	unsigned short pnode_mask;
 
 	map_low_mmrs();
 
@@ -606,6 +607,7 @@ void __init uv_system_init(void)
 		}
 	}
 
+	pnode_mask = (1 << n_val) - 1;
 	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 	gnode_upper = (((unsigned long)node_id.s.node_id) &
 		       ~((1 << n_val) - 1)) << m_val;
@@ -629,7 +631,7 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
 		uv_cpu_hub_info(cpu)->pnode = pnode;
-		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
@@ -645,6 +647,16 @@ void __init uv_system_init(void)
 			lcpu, blade);
 	}
 
+	/* Add blade/pnode info for nodes without cpus */
+	for_each_online_node(nid) {
+		if (uv_node_to_blade[nid] >= 0)
+			continue;
+		paddr = node_start_pfn(nid) << PAGE_SHIFT;
+		pnode = (paddr >> m_val) & pnode_mask;
+		blade = boot_pnode_to_blade(pnode);
+		uv_node_to_blade[nid] = blade;
+	}
+
 	map_gru_high(max_pnode);
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f63882728d91..63a88e1f987d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -182,7 +182,8 @@ void uv_bios_init(void)
 	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
 	iounmap(tab);
 
-	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
+	printk(KERN_INFO "EFI UV System Table Revision %d\n",
+		uv_systab.revision);
 }
 #else /* !CONFIG_EFI */
 
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 8220ae69849d..c965e5212714 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 
 	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+		{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
 
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 19f6b9d27e83..ecdb682ab516 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,6 +68,7 @@ struct acpi_cpufreq_data {
 	unsigned int max_freq;
 	unsigned int resume;
 	unsigned int cpu_feature;
+	u64 saved_aperf, saved_mperf;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
@@ -152,7 +153,8 @@ struct drv_cmd {
 	u32 val;
 };
 
-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 h;
@@ -169,10 +171,10 @@ static long do_drv_read(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
@@ -191,23 +193,24 @@ static long do_drv_write(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
 	cmd->val = 0;
 
-	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	unsigned int i;
+	int this_cpu;
 
-	for_each_cpu(i, cmd->mask) {
-		work_on_cpu(i, do_drv_write, cmd);
-	}
+	this_cpu = get_cpu();
+	if (cpumask_test_cpu(this_cpu, cmd->mask))
+		do_drv_write(cmd);
+	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+	put_cpu();
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -241,28 +244,23 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_cur {
+struct perf_pair {
 	union {
 		struct {
 			u32 lo;
 			u32 hi;
 		} split;
 		u64 whole;
-	} aperf_cur, mperf_cur;
+	} aperf, mperf;
 };
 
-
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_cur *cur = _cur;
+	struct perf_pair *cur = _cur;
 
-	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
-
-	wrmsr(MSR_IA32_APERF, 0, 0);
-	wrmsr(MSR_IA32_MPERF, 0, 0);
-
-	return 0;
+	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
 }
 
 /*
@@ -281,52 +279,57 @@ static long read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_cur cur;
+	struct perf_pair readin, cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
 		return 0;
 
+	cur.aperf.whole = readin.aperf.whole -
+				per_cpu(drv_data, cpu)->saved_aperf;
+	cur.mperf.whole = readin.mperf.whole -
+				per_cpu(drv_data, cpu)->saved_mperf;
+	per_cpu(drv_data, cpu)->saved_aperf = readin.aperf.whole;
+	per_cpu(drv_data, cpu)->saved_mperf = readin.mperf.whole;
+
 #ifdef __i386__
 	/*
 	 * We dont want to do 64 bit divide with 32 bit kernel
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
 		shift_count = fls(h);
 
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
 		int shift_count = 7;
-		cur.aperf_cur.split.lo >>= shift_count;
-		cur.mperf_cur.split.lo >>= shift_count;
+		cur.aperf.split.lo >>= shift_count;
+		cur.mperf.split.lo >>= shift_count;
 	}
 
-	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
-		perf_percent = (cur.aperf_cur.split.lo * 100) /
-				cur.mperf_cur.split.lo;
+	if (cur.aperf.split.lo && cur.mperf.split.lo)
+		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
 		int shift_count = 7;
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
-		perf_percent = (cur.aperf_cur.whole * 100) /
-				cur.mperf_cur.whole;
+	if (cur.aperf.whole && cur.mperf.whole)
+		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
 	else
 		perf_percent = 0;
 
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 0bd48e65a0ca..ce2ed3e4aad9 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -33,7 +33,6 @@
 #include <linux/timex.h>
 #include <linux/io.h>
 #include <linux/acpi.h>
-#include <linux/kernel.h>
 
 #include <asm/msr.h>
 #include <acpi/processor.h>
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 70a10ca100f6..18dfa30795c9 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,6 +18,8 @@
 #include <linux/init.h>
 #include <linux/list.h>
 
+#include <trace/syscall.h>
+
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 #include <asm/nops.h>
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 4d420de9ac61..98c470c069d1 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
 EXPORT_SYMBOL_GPL(ucode_cpu_info);
 
 #ifdef CONFIG_MICROCODE_OLD_INTERFACE
-struct update_for_cpu {
-	const void __user *buf;
-	size_t size;
-};
-
-static long update_for_cpu(void *_ufc)
-{
-	struct update_for_cpu *ufc = _ufc;
-	int error;
-
-	error = microcode_ops->request_microcode_user(smp_processor_id(),
-						      ufc->buf, ufc->size);
-	if (error < 0)
-		return error;
-	if (!error)
-		microcode_ops->apply_microcode(smp_processor_id());
-	return error;
-}
-
 static int do_microcode_update(const void __user *buf, size_t size)
 {
+	cpumask_t old;
 	int error = 0;
 	int cpu;
-	struct update_for_cpu ufc = { .buf = buf, .size = size };
+
+	old = current->cpus_allowed;
 
 	for_each_online_cpu(cpu) {
 		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 		if (!uci->valid)
 			continue;
-		error = work_on_cpu(cpu, update_for_cpu, &ufc);
+
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		error = microcode_ops->request_microcode_user(cpu, buf, size);
 		if (error < 0)
-			break;
+			goto out;
+		if (!error)
+			microcode_ops->apply_microcode(cpu);
 	}
+out:
+	set_cpus_allowed_ptr(current, &old);
 	return error;
 }
 
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index dce99dca6cf8..70fd7e414c15 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -679,7 +679,7 @@ void __init get_smp_config(void)
 	__get_smp_config(0);
 }
 
-static void smp_reserve_bootmem(struct mpf_intel *mpf)
+static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
 {
 	unsigned long size = get_mpc_size(mpf->physptr);
 #ifdef CONFIG_X86_32
@@ -838,7 +838,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
 
 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
 
-static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
+static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 {
 	int i;
 
@@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 	}
 }
 #else /* CONFIG_X86_IO_APIC */
-static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
+static
+inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
 #endif /* CONFIG_X86_IO_APIC */
 
 static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index fe9345c967de..23b7c8f017e2 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -21,7 +21,6 @@
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/signal.h>
-#include <linux/ftrace.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -35,6 +34,8 @@
 #include <asm/proto.h>
 #include <asm/ds.h>
 
+#include <trace/syscall.h>
+
 #include "tls.h"
 
 enum x86_regset {
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index deb5ebb32c3b..ed0c33761e6d 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,6 +25,8 @@ static int uv_bau_retry_limit __read_mostly;
 
 /* position of pnode (which is nasid>>1): */
 static int uv_nshift __read_mostly;
+/* base pnode in this partition */
+static int uv_partition_base_pnode __read_mostly;
 
 static unsigned long uv_mmask __read_mostly;
 
@@ -32,6 +34,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
 
 /*
+ * Determine the first node on a blade.
+ */
+static int __init blade_to_first_node(int blade)
+{
+	int node, b;
+
+	for_each_online_node(node) {
+		b = uv_node_to_blade_id(node);
+		if (blade == b)
+			return node;
+	}
+	return -1; /* shouldn't happen */
+}
+
+/*
+ * Determine the apicid of the first cpu on a blade.
+ */
+static int __init blade_to_first_apicid(int blade)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu)
+		if (blade == uv_cpu_to_blade_id(cpu))
+			return per_cpu(x86_cpu_to_apicid, cpu);
+	return -1;
+}
+
+/*
  * Free a software acknowledge hardware resource by clearing its Pending
  * bit. This will return a reply to the sender.
  * If the message has timed out, a reply has already been sent by the
@@ -67,7 +97,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
 	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
 	cpu = uv_blade_processor_id();
 	msg->number_of_cpus =
-	    uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
+		uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
 	this_cpu_mask = 1UL << cpu;
 	if (msp->seen_by.bits & this_cpu_mask)
 		return;
@@ -215,14 +245,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  * Returns @flush_mask if some remote flushing remains to be done. The
  * mask will have some bits still set.
  */
-const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
+const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
 					     struct bau_desc *bau_desc,
 					     struct cpumask *flush_mask)
 {
 	int completion_status = 0;
 	int right_shift;
 	int tries = 0;
-	int blade;
+	int pnode;
 	int bit;
 	unsigned long mmr_offset;
 	unsigned long index;
@@ -265,8 +295,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
 	 * use the IPI method of shootdown on them.
 	 */
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		if (blade == this_blade)
+		pnode = uv_cpu_to_pnode(bit);
+		if (pnode == this_pnode)
 			continue;
 		cpumask_clear_cpu(bit, flush_mask);
 	}
@@ -309,16 +339,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
 	int i;
 	int bit;
-	int blade;
+	int pnode;
 	int uv_cpu;
-	int this_blade;
+	int this_pnode;
 	int locals = 0;
 	struct bau_desc *bau_desc;
 
 	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
 	uv_cpu = uv_blade_processor_id();
-	this_blade = uv_numa_blade_id();
+	this_pnode = uv_hub_info->pnode;
 	bau_desc = __get_cpu_var(bau_control).descriptor_base;
 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
 
@@ -326,13 +356,14 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	i = 0;
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
-		if (blade == this_blade) {
+		pnode = uv_cpu_to_pnode(bit);
+		BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
+		if (pnode == this_pnode) {
 			locals++;
 			continue;
 		}
-		bau_node_set(blade, &bau_desc->distribution);
+		bau_node_set(pnode - uv_partition_base_pnode,
+				&bau_desc->distribution);
 		i++;
 	}
 	if (i == 0) {
@@ -350,7 +381,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	bau_desc->payload.address = va;
 	bau_desc->payload.sending_cpu = cpu;
 
-	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
+	return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
 }
 
 /*
@@ -418,24 +449,58 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+/*
+ * uv_enable_timeouts
+ *
+ * Each target blade (i.e. blades that have cpu's) needs to have
+ * shootdown message timeouts enabled. The timeout does not cause
+ * an interrupt, but causes an error message to be returned to
+ * the sender.
+ */
 static void uv_enable_timeouts(void)
 {
-	int i;
 	int blade;
-	int last_blade;
+	int nblades;
 	int pnode;
-	int cur_cpu = 0;
-	unsigned long apicid;
+	unsigned long mmr_image;
 
-	last_blade = -1;
-	for_each_online_node(i) {
-		blade = uv_node_to_blade_id(i);
-		if (blade == last_blade)
+	nblades = uv_num_possible_blades();
+
+	for (blade = 0; blade < nblades; blade++) {
+		if (!uv_blade_nr_possible_cpus(blade))
 			continue;
-		last_blade = blade;
-		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+
 		pnode = uv_blade_to_pnode(blade);
-		cur_cpu += uv_blade_nr_possible_cpus(i);
+		mmr_image =
+			uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
+		/*
+		 * Set the timeout period and then lock it in, in three
+		 * steps; captures and locks in the period.
+		 *
+		 * To program the period, the SOFT_ACK_MODE must be off.
+		 */
+		mmr_image &= ~((unsigned long)1 <<
+			UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+		uv_write_global_mmr64
+			(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+		/*
+		 * Set the 4-bit period.
+		 */
+		mmr_image &= ~((unsigned long)0xf <<
+			UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
+			UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+		uv_write_global_mmr64
+			(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+		/*
+		 * Subsequent reversals of the timebase bit (3) cause an
+		 * immediate timeout of one or all INTD resources as
+		 * indicated in bits 2:0 (7 causes all of them to timeout).
+		 */
+		mmr_image |= ((unsigned long)1 <<
+			UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+		uv_write_global_mmr64
+			(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
 	}
 }
 
@@ -482,8 +547,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 		   stat->requestee, stat->onetlb, stat->alltlb,
 		   stat->s_retry, stat->d_retry, stat->ptc_i);
 		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
-			   uv_read_global_mmr64(uv_blade_to_pnode
-					(uv_cpu_to_blade_id(cpu)),
+			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
 			   UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
 			   stat->sflush, stat->dflush,
 			   stat->retriesok, stat->nomsg,
@@ -617,16 +681,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
  * finish the initialization of the per-blade control structures
  */
 static void __init
-uv_table_bases_finish(int blade, int node, int cur_cpu,
+uv_table_bases_finish(int blade,
 		      struct bau_control *bau_tablesp,
 		      struct bau_desc *adp)
 {
 	struct bau_control *bcp;
-	int i;
+	int cpu;
 
-	for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
-		bcp = (struct bau_control *)&per_cpu(bau_control, i);
+	for_each_present_cpu(cpu) {
+		if (blade != uv_cpu_to_blade_id(cpu))
+			continue;
 
+		bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
 		bcp->bau_msg_head = bau_tablesp->va_queue_first;
 		bcp->va_queue_first = bau_tablesp->va_queue_first;
 		bcp->va_queue_last = bau_tablesp->va_queue_last;
@@ -649,11 +715,10 @@ uv_activation_descriptor_init(int node, int pnode)
 	struct bau_desc *adp;
 	struct bau_desc *ad2;
 
-	adp = (struct bau_desc *)
-		kmalloc_node(16384, GFP_KERNEL, node);
+	adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
 	BUG_ON(!adp);
 
-	pa = __pa((unsigned long)adp);
+	pa = uv_gpa(adp); /* need the real nasid*/
 	n = pa >> uv_nshift;
 	m = pa & uv_mmask;
 
@@ -667,8 +732,12 @@ uv_activation_descriptor_init(int node, int pnode)
 	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
 		memset(ad2, 0, sizeof(struct bau_desc));
 		ad2->header.sw_ack_flag = 1;
-		ad2->header.base_dest_nodeid =
-			uv_blade_to_pnode(uv_cpu_to_blade_id(0));
+		/*
+		 * base_dest_nodeid is the first node in the partition, so
+		 * the bit map will indicate partition-relative node numbers.
+		 * note that base_dest_nodeid is actually a nasid.
+		 */
+		ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
 		ad2->header.command = UV_NET_ENDPOINT_INTD;
 		ad2->header.int_both = 1;
 		/*
@@ -686,6 +755,8 @@ static struct bau_payload_queue_entry * __init
 uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 {
 	struct bau_payload_queue_entry *pqp;
+	unsigned long pa;
+	int pn;
 	char *cp;
 
 	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
@@ -696,10 +767,14 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 	cp = (char *)pqp + 31;
 	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
 	bau_tablesp->va_queue_first = pqp;
+	/*
+	 * need the pnode of where the memory was really allocated
+	 */
+	pa = uv_gpa(pqp);
+	pn = pa >> uv_nshift;
 	uv_write_global_mmr64(pnode,
 			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
-			      ((unsigned long)pnode <<
-			       UV_PAYLOADQ_PNODE_SHIFT) |
+			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
 			      uv_physnodeaddr(pqp));
 	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
 			      uv_physnodeaddr(pqp));
@@ -715,8 +790,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 /*
  * Initialization of each UV blade's structures
  */
-static int __init uv_init_blade(int blade, int node, int cur_cpu)
+static int __init uv_init_blade(int blade)
 {
+	int node;
 	int pnode;
 	unsigned long pa;
 	unsigned long apicid;
@@ -724,16 +800,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
 	struct bau_payload_queue_entry *pqp;
 	struct bau_control *bau_tablesp;
 
+	node = blade_to_first_node(blade);
 	bau_tablesp = uv_table_bases_init(blade, node);
 	pnode = uv_blade_to_pnode(blade);
 	adp = uv_activation_descriptor_init(node, pnode);
 	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
-	uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
+	uv_table_bases_finish(blade, bau_tablesp, adp);
 	/*
 	 * the below initialization can't be in firmware because the
 	 * messaging IRQ will be determined by the OS
 	 */
-	apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+	apicid = blade_to_first_apicid(blade);
 	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
 	if ((pa & 0xff) != UV_BAU_MESSAGE) {
 		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -748,9 +825,7 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
 static int __init uv_bau_init(void)
 {
 	int blade;
-	int node;
 	int nblades;
-	int last_blade;
 	int cur_cpu;
 
 	if (!is_uv_system())
@@ -763,29 +838,21 @@ static int __init uv_bau_init(void)
 	uv_bau_retry_limit = 1;
 	uv_nshift = uv_hub_info->n_val;
 	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
-	nblades = 0;
-	last_blade = -1;
-	cur_cpu = 0;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		nblades++;
-	}
+	nblades = uv_num_possible_blades();
+
 	uv_bau_table_bases = (struct bau_control **)
 		kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
 	BUG_ON(!uv_bau_table_bases);
 
-	last_blade = -1;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		uv_init_blade(blade, node, cur_cpu);
-		cur_cpu += uv_blade_nr_possible_cpus(blade);
-	}
+	uv_partition_base_pnode = 0x7fffffff;
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade) &&
+		    (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
+			uv_partition_base_pnode = uv_blade_to_pnode(blade);
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade))
+			uv_init_blade(blade);
+
 	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
 	uv_enable_timeouts();
 
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
index 67f9b9dbf800..36afb98675a4 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -21,6 +21,7 @@
 
 #include <linux/sysdev.h>
 #include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
 
 struct kobject *sgi_uv_kobj;
 
@@ -47,6 +48,9 @@ static int __init sgi_uv_sysfs_init(void)
 {
 	unsigned long ret;
 
+	if (!is_uv_system())
+		return -ENODEV;
+
 	if (!sgi_uv_kobj)
 		sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
 	if (!sgi_uv_kobj) {
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 2b54fe002e94..0a5b04aa98f1 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -324,7 +324,7 @@ void __ref xsave_cntxt_init(void)
 	}
 
 	/*
-	 * for now OS knows only about FP/SSE
+	 * Support only the state known to OS.
 	 */
 	pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
 	xsave_init();
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e94a11e42f98..a2085368a3dc 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -273,15 +273,15 @@ static void lguest_load_idt(const struct desc_ptr *desc)
  * controls the entire thing and the Guest asks it to make changes using the
  * LOAD_GDT hypercall.
  *
- * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY
- * hypercall and use that repeatedly to load a new IDT. I don't think it
- * really matters, but wouldn't it be nice if they were the same? Wouldn't
- * it be even better if you were the one to send the patch to fix it?
+ * This is the exactly like the IDT code.
  */
 static void lguest_load_gdt(const struct desc_ptr *desc)
 {
-	BUG_ON((desc->size + 1) / 8 != GDT_ENTRIES);
-	kvm_hypercall2(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES);
+	unsigned int i;
+	struct desc_struct *gdt = (void *)desc->address;
+
+	for (i = 0; i < (desc->size+1)/8; i++)
+		kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
 }
 
 /* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
@@ -291,7 +291,9 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
 			    const void *desc, int type)
 {
 	native_write_gdt_entry(dt, entrynum, desc, type);
-	kvm_hypercall2(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES);
+	/* Tell Host about this new entry. */
+	kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
+		       dt[entrynum].a, dt[entrynum].b);
 }
 
 /* OK, I lied. There are three "thread local storage" GDT entries which change
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index d4c4b2c4dbbe..8a450930834f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -549,7 +549,7 @@ void __init early_ioremap_reset(void)
 }
 
 static void __init __early_set_fixmap(enum fixed_addresses idx,
-				      unsigned long phys, pgprot_t flags)
+				      phys_addr_t phys, pgprot_t flags)
 {
 	unsigned long addr = __fix_to_virt(idx);
 	pte_t *pte;
@@ -568,7 +568,7 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
 }
 
 static inline void __init early_set_fixmap(enum fixed_addresses idx,
-					   unsigned long phys, pgprot_t prot)
+					   phys_addr_t phys, pgprot_t prot)
 {
 	if (after_paging_init)
 		__set_fixmap(idx, phys, prot);
@@ -609,9 +609,10 @@ static int __init check_early_ioremap_leak(void)
 late_initcall(check_early_ioremap_leak);
 
 static void __init __iomem *
-__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
 {
-	unsigned long offset, last_addr;
+	unsigned long offset;
+	resource_size_t last_addr;
 	unsigned int nrpages;
 	enum fixed_addresses idx0, idx;
 	int i, slot;
@@ -627,15 +628,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 	}
 
 	if (slot < 0) {
-		printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n",
-			 phys_addr, size);
+		printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
+			 (u64)phys_addr, size);
 		WARN_ON(1);
 		return NULL;
 	}
 
 	if (early_ioremap_debug) {
-		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
-			phys_addr, size, slot);
+		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
+			(u64)phys_addr, size, slot);
 		dump_stack();
 	}
 
@@ -682,13 +683,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 }
 
 /* Remap an IO device */
-void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *
+early_ioremap(resource_size_t phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
 }
 
 /* Remap memory */
-void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *
+early_memremap(resource_size_t phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 41c805718158..e6718bb28065 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -31,7 +31,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
 
-void __cpuinit pat_disable(const char *reason)
+static inline void pat_disable(const char *reason)
 {
 	pat_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 5b7c7c8464fe..7aa03a5389f5 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -345,7 +345,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
 	fixmaps_set++;
 }
 
-void native_set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
+		       pgprot_t flags)
 {
 	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
 }
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 82cd39a6cbd3..f09e8c36ee80 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -42,6 +42,7 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/fixmap.h>
 #include <asm/processor.h>
+#include <asm/proto.h>
 #include <asm/msr-index.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
@@ -168,21 +169,23 @@ static void __init xen_banner(void)
 	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
 }
 
+static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
+static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
+
 static void xen_cpuid(unsigned int *ax, unsigned int *bx,
 		      unsigned int *cx, unsigned int *dx)
 {
+	unsigned maskecx = ~0;
 	unsigned maskedx = ~0;
 
 	/*
 	 * Mask out inconvenient features, to try and disable as many
 	 * unsupported kernel subsystems as possible.
 	 */
-	if (*ax == 1)
-		maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */
-			    (1 << X86_FEATURE_ACPI) | /* disable ACPI */
-			    (1 << X86_FEATURE_MCE) | /* disable MCE */
-			    (1 << X86_FEATURE_MCA) | /* disable MCA */
-			    (1 << X86_FEATURE_ACC)); /* thermal monitoring */
+	if (*ax == 1) {
+		maskecx = cpuid_leaf1_ecx_mask;
+		maskedx = cpuid_leaf1_edx_mask;
+	}
 
 	asm(XEN_EMULATE_PREFIX "cpuid"
 		: "=a" (*ax),
@@ -190,9 +193,43 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
190 "=c" (*cx), 193 "=c" (*cx),
191 "=d" (*dx) 194 "=d" (*dx)
192 : "0" (*ax), "2" (*cx)); 195 : "0" (*ax), "2" (*cx));
196
197 *cx &= maskecx;
193 *dx &= maskedx; 198 *dx &= maskedx;
194} 199}
195 200
201static __init void xen_init_cpuid_mask(void)
202{
203 unsigned int ax, bx, cx, dx;
204
205 cpuid_leaf1_edx_mask =
206 ~((1 << X86_FEATURE_MCE) | /* disable MCE */
207 (1 << X86_FEATURE_MCA) | /* disable MCA */
208 (1 << X86_FEATURE_ACC)); /* thermal monitoring */
209
210 if (!xen_initial_domain())
211 cpuid_leaf1_edx_mask &=
212 ~((1 << X86_FEATURE_APIC) | /* disable local APIC */
213 (1 << X86_FEATURE_ACPI)); /* disable ACPI */
214
215 ax = 1;
216 xen_cpuid(&ax, &bx, &cx, &dx);
217
218 /* cpuid claims we support xsave; try enabling it to see what happens */
219 if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
220 unsigned long cr4;
221
222 set_in_cr4(X86_CR4_OSXSAVE);
223
224 cr4 = read_cr4();
225
226 if ((cr4 & X86_CR4_OSXSAVE) == 0)
227 cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
228
229 clear_in_cr4(X86_CR4_OSXSAVE);
230 }
231}
232
196static void xen_set_debugreg(int reg, unsigned long val) 233static void xen_set_debugreg(int reg, unsigned long val)
197{ 234{
198 HYPERVISOR_set_debugreg(reg, val); 235 HYPERVISOR_set_debugreg(reg, val);
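
[The rewrite above splits CPUID feature filtering into a one-time xen_init_cpuid_mask() that builds the leaf-1 ECX/EDX masks, and a xen_cpuid() that merely applies them. A minimal user-space sketch of the same build-once, mask-on-read pattern follows; the bit positions and helper names are illustrative stand-ins, not the kernel's X86_FEATURE_* machinery.]

#include <cpuid.h>   /* GCC/Clang CPUID wrapper */
#include <stdio.h>

/* Illustrative leaf-1 EDX bit positions (mirroring X86_FEATURE_* % 32). */
#define BIT_MCE  7
#define BIT_MCA 14
#define BIT_ACC 29

static unsigned leaf1_edx_mask = ~0u;

static void init_mask(void)
{
	/* Built once, as xen_init_cpuid_mask() does at boot. */
	leaf1_edx_mask = ~((1u << BIT_MCE) | (1u << BIT_MCA) | (1u << BIT_ACC));
}

int main(void)
{
	unsigned a, b, c, d;

	init_mask();
	if (__get_cpuid(1, &a, &b, &c, &d)) {
		d &= leaf1_edx_mask;   /* filtered view, like xen_cpuid()'s *dx &= maskedx */
		printf("leaf1 edx (masked): %#x\n", d);
	}
	return 0;
}
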
@@ -284,12 +321,11 @@ static void xen_set_ldt(const void *addr, unsigned entries)
284 321
285static void xen_load_gdt(const struct desc_ptr *dtr) 322static void xen_load_gdt(const struct desc_ptr *dtr)
286{ 323{
287 unsigned long *frames;
288 unsigned long va = dtr->address; 324 unsigned long va = dtr->address;
289 unsigned int size = dtr->size + 1; 325 unsigned int size = dtr->size + 1;
290 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 326 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
327 unsigned long frames[pages];
291 int f; 328 int f;
292 struct multicall_space mcs;
293 329
294 /* A GDT can be up to 64k in size, which corresponds to 8192 330 /* A GDT can be up to 64k in size, which corresponds to 8192
295 8-byte entries, or 16 4k pages.. */ 331 8-byte entries, or 16 4k pages.. */
@@ -297,19 +333,26 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
297 BUG_ON(size > 65536); 333 BUG_ON(size > 65536);
298 BUG_ON(va & ~PAGE_MASK); 334 BUG_ON(va & ~PAGE_MASK);
299 335
300 mcs = xen_mc_entry(sizeof(*frames) * pages);
301 frames = mcs.args;
302
303 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { 336 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
304 frames[f] = arbitrary_virt_to_mfn((void *)va); 337 int level;
338 pte_t *ptep = lookup_address(va, &level);
339 unsigned long pfn, mfn;
340 void *virt;
341
342 BUG_ON(ptep == NULL);
343
344 pfn = pte_pfn(*ptep);
345 mfn = pfn_to_mfn(pfn);
346 virt = __va(PFN_PHYS(pfn));
347
348 frames[f] = mfn;
305 349
306 make_lowmem_page_readonly((void *)va); 350 make_lowmem_page_readonly((void *)va);
307 make_lowmem_page_readonly(mfn_to_virt(frames[f])); 351 make_lowmem_page_readonly(virt);
308 } 352 }
309 353
310 MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct)); 354 if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
311 355 BUG();
312 xen_mc_issue(PARAVIRT_LAZY_CPU);
313} 356}
314 357
315static void load_TLS_descriptor(struct thread_struct *t, 358static void load_TLS_descriptor(struct thread_struct *t,
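
[The new xen_load_gdt() drops the multicall and instead walks the GDT one page at a time, translating each virtual address to a machine frame before issuing a single HYPERVISOR_set_gdt(). A rough user-space sketch of that per-page walk, with a placeholder virt_to_frame() standing in for lookup_address() plus pfn_to_mfn().]

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Placeholder translation; the patch uses lookup_address() + pfn_to_mfn(). */
static unsigned long virt_to_frame(unsigned long va)
{
	return va >> 12;   /* identity pseudo-translation, for illustration only */
}

static void load_gdt_like(unsigned long base, unsigned int size)
{
	unsigned int pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];   /* on-stack, at most 16 entries for a 64k GDT */
	unsigned long va = base;
	unsigned int f;

	for (f = 0; va < base + size; va += PAGE_SIZE, f++)
		frames[f] = virt_to_frame(va);

	for (f = 0; f < pages; f++)
		printf("frame[%u] = %#lx\n", f, frames[f]);
}

int main(void)
{
	static char gdt[2 * 4096] __attribute__((aligned(4096)));

	load_gdt_like((unsigned long)gdt, sizeof(gdt));
	return 0;
}
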
@@ -385,7 +428,7 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
385static int cvt_gate_to_trap(int vector, const gate_desc *val, 428static int cvt_gate_to_trap(int vector, const gate_desc *val,
386 struct trap_info *info) 429 struct trap_info *info)
387{ 430{
388 if (val->type != 0xf && val->type != 0xe) 431 if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
389 return 0; 432 return 0;
390 433
391 info->vector = vector; 434 info->vector = vector;
@@ -393,8 +436,8 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
393 info->cs = gate_segment(*val); 436 info->cs = gate_segment(*val);
394 info->flags = val->dpl; 437 info->flags = val->dpl;
395 /* interrupt gates clear IF */ 438 /* interrupt gates clear IF */
396 if (val->type == 0xe) 439 if (val->type == GATE_INTERRUPT)
397 info->flags |= 4; 440 info->flags |= 1 << 2;
398 441
399 return 1; 442 return 1;
400} 443}
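
[Replacing the 0xf/0xe literals with GATE_TRAP/GATE_INTERRUPT and the bare 4 with 1 << 2 makes the trap_info flags encoding explicit: the low two bits carry the gate's DPL, and bit 2 marks a gate that clears IF, as the comment above notes for interrupt gates. A small sketch of that encoding; the macro names are illustrative, not taken from the Xen interface headers.]

#include <stdio.h>

#define FLAGS_DPL(dpl)   ((dpl) & 0x3)   /* bits 0-1: privilege level */
#define FLAGS_CLEAR_IF   (1u << 2)       /* bit 2: gate disables interrupts */

static unsigned cvt_flags(unsigned dpl, int is_interrupt_gate)
{
	unsigned flags = FLAGS_DPL(dpl);

	if (is_interrupt_gate)
		flags |= FLAGS_CLEAR_IF;
	return flags;
}

int main(void)
{
	printf("trap gate, dpl 0:      %#x\n", cvt_flags(0, 0));
	printf("interrupt gate, dpl 3: %#x\n", cvt_flags(3, 1));
	return 0;
}
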
@@ -872,7 +915,6 @@ static const struct machine_ops __initdata xen_machine_ops = {
872 .emergency_restart = xen_emergency_restart, 915 .emergency_restart = xen_emergency_restart,
873}; 916};
874 917
875
876/* First C function to be called on Xen boot */ 918/* First C function to be called on Xen boot */
877asmlinkage void __init xen_start_kernel(void) 919asmlinkage void __init xen_start_kernel(void)
878{ 920{
@@ -897,6 +939,8 @@ asmlinkage void __init xen_start_kernel(void)
897 939
898 xen_init_irq_ops(); 940 xen_init_irq_ops();
899 941
942 xen_init_cpuid_mask();
943
900#ifdef CONFIG_X86_LOCAL_APIC 944#ifdef CONFIG_X86_LOCAL_APIC
901 /* 945 /*
902 * set up the basic apic ops. 946 * set up the basic apic ops.
@@ -938,6 +982,11 @@ asmlinkage void __init xen_start_kernel(void)
938 if (!xen_initial_domain()) 982 if (!xen_initial_domain())
939 __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); 983 __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
940 984
985#ifdef CONFIG_X86_64
986 /* Work out if we support NX */
987 check_efer();
988#endif
989
941 /* Don't do the full vcpu_info placement stuff until we have a 990 /* Don't do the full vcpu_info placement stuff until we have a
942 possible map and a non-dummy shared_info. */ 991 possible map and a non-dummy shared_info. */
943 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 992 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index db3802fb7b84..9842b1212407 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -184,7 +184,7 @@ static inline unsigned p2m_index(unsigned long pfn)
184} 184}
185 185
186/* Build the parallel p2m_top_mfn structures */ 186/* Build the parallel p2m_top_mfn structures */
187void xen_setup_mfn_list_list(void) 187static void __init xen_build_mfn_list_list(void)
188{ 188{
189 unsigned pfn, idx; 189 unsigned pfn, idx;
190 190
@@ -198,7 +198,10 @@ void xen_setup_mfn_list_list(void)
198 unsigned topidx = idx * P2M_ENTRIES_PER_PAGE; 198 unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
199 p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]); 199 p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
200 } 200 }
201}
201 202
203void xen_setup_mfn_list_list(void)
204{
202 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); 205 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
203 206
204 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = 207 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
@@ -218,6 +221,8 @@ void __init xen_build_dynamic_phys_to_machine(void)
218 221
219 p2m_top[topidx] = &mfn_list[pfn]; 222 p2m_top[topidx] = &mfn_list[pfn];
220 } 223 }
224
225 xen_build_mfn_list_list();
221} 226}
222 227
223unsigned long get_phys_to_machine(unsigned long pfn) 228unsigned long get_phys_to_machine(unsigned long pfn)
@@ -233,47 +238,74 @@ unsigned long get_phys_to_machine(unsigned long pfn)
233} 238}
234EXPORT_SYMBOL_GPL(get_phys_to_machine); 239EXPORT_SYMBOL_GPL(get_phys_to_machine);
235 240
236static void alloc_p2m(unsigned long **pp, unsigned long *mfnp) 241/* install a new p2m_top page */
242bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
237{ 243{
238 unsigned long *p; 244 unsigned topidx = p2m_top_index(pfn);
245 unsigned long **pfnp, *mfnp;
239 unsigned i; 246 unsigned i;
240 247
241 p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL); 248 pfnp = &p2m_top[topidx];
242 BUG_ON(p == NULL); 249 mfnp = &p2m_top_mfn[topidx];
243 250
244 for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++) 251 for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
245 p[i] = INVALID_P2M_ENTRY; 252 p[i] = INVALID_P2M_ENTRY;
246 253
247 if (cmpxchg(pp, p2m_missing, p) != p2m_missing) 254 if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
248 free_page((unsigned long)p);
249 else
250 *mfnp = virt_to_mfn(p); 255 *mfnp = virt_to_mfn(p);
256 return true;
257 }
258
259 return false;
251} 260}
252 261
253void set_phys_to_machine(unsigned long pfn, unsigned long mfn) 262static void alloc_p2m(unsigned long pfn)
254{ 263{
255 unsigned topidx, idx; 264 unsigned long *p;
256 265
257 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { 266 p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
258 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); 267 BUG_ON(p == NULL);
259 return; 268
260 } 269 if (!install_p2mtop_page(pfn, p))
270 free_page((unsigned long)p);
271}
272
273/* Try to install p2m mapping; fail if intermediate bits missing */
274bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
275{
276 unsigned topidx, idx;
261 277
262 if (unlikely(pfn >= MAX_DOMAIN_PAGES)) { 278 if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
263 BUG_ON(mfn != INVALID_P2M_ENTRY); 279 BUG_ON(mfn != INVALID_P2M_ENTRY);
264 return; 280 return true;
265 } 281 }
266 282
267 topidx = p2m_top_index(pfn); 283 topidx = p2m_top_index(pfn);
268 if (p2m_top[topidx] == p2m_missing) { 284 if (p2m_top[topidx] == p2m_missing) {
269 /* no need to allocate a page to store an invalid entry */
270 if (mfn == INVALID_P2M_ENTRY) 285 if (mfn == INVALID_P2M_ENTRY)
271 return; 286 return true;
272 alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]); 287 return false;
273 } 288 }
274 289
275 idx = p2m_index(pfn); 290 idx = p2m_index(pfn);
276 p2m_top[topidx][idx] = mfn; 291 p2m_top[topidx][idx] = mfn;
292
293 return true;
294}
295
296void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
297{
298 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
299 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
300 return;
301 }
302
303 if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
304 alloc_p2m(pfn);
305
306 if (!__set_phys_to_machine(pfn, mfn))
307 BUG();
308 }
277} 309}
278 310
279unsigned long arbitrary_virt_to_mfn(void *vaddr) 311unsigned long arbitrary_virt_to_mfn(void *vaddr)
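
[The p2m update path is now split three ways: __set_phys_to_machine() is a fast path that fails when the leaf page is missing, install_p2mtop_page() installs a caller-supplied page with cmpxchg so a racing allocator frees its losing copy, and set_phys_to_machine() allocates and retries. A condensed user-space model of that allocate-and-retry shape; table sizes and names below are illustrative, not the kernel's.]

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_PAGE 1024     /* illustrative; derived from PAGE_SIZE in the kernel */
#define TOP_ENTRIES        64
#define INVALID_ENTRY    (~0UL)

static unsigned long missing_page[ENTRIES_PER_PAGE];   /* shared "missing" leaf */
static unsigned long *top[TOP_ENTRIES];                 /* like p2m_top */

static bool install_top_page(unsigned long pfn, unsigned long *p)
{
	unsigned idx = pfn / ENTRIES_PER_PAGE, i;

	for (i = 0; i < ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_ENTRY;

	/* Only one installer wins; losers free their copy (cf. cmpxchg in the patch). */
	return __sync_bool_compare_and_swap(&top[idx], missing_page, p);
}

static bool set_entry(unsigned long pfn, unsigned long mfn)   /* __set_phys_to_machine */
{
	unsigned idx = pfn / ENTRIES_PER_PAGE;

	if (top[idx] == missing_page) {
		if (mfn == INVALID_ENTRY)
			return true;    /* nothing worth recording */
		return false;           /* leaf missing: caller must allocate */
	}
	top[idx][pfn % ENTRIES_PER_PAGE] = mfn;
	return true;
}

static void set_phys_to_machine_like(unsigned long pfn, unsigned long mfn)
{
	if (!set_entry(pfn, mfn)) {
		unsigned long *p = malloc(ENTRIES_PER_PAGE * sizeof(*p));

		if (!p)
			abort();
		if (!install_top_page(pfn, p))
			free(p);            /* someone else installed a leaf first */
		if (!set_entry(pfn, mfn))
			abort();            /* the second attempt must succeed */
	}
}

int main(void)
{
	unsigned i;

	for (i = 0; i < TOP_ENTRIES; i++)
		top[i] = missing_page;

	set_phys_to_machine_like(5000, 0x1234);
	printf("p2m[5000] = %#lx\n", top[5000 / ENTRIES_PER_PAGE][5000 % ENTRIES_PER_PAGE]);
	return 0;
}
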
@@ -987,7 +1019,7 @@ static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
987 return 0; 1019 return 0;
988} 1020}
989 1021
990void __init xen_mark_init_mm_pinned(void) 1022static void __init xen_mark_init_mm_pinned(void)
991{ 1023{
992 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); 1024 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
993} 1025}
@@ -1270,8 +1302,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
1270 } *args; 1302 } *args;
1271 struct multicall_space mcs; 1303 struct multicall_space mcs;
1272 1304
1273 BUG_ON(cpumask_empty(cpus)); 1305 if (cpumask_empty(cpus))
1274 BUG_ON(!mm); 1306 return; /* nothing to do */
1275 1307
1276 mcs = xen_mc_entry(sizeof(*args)); 1308 mcs = xen_mc_entry(sizeof(*args));
1277 args = mcs.args; 1309 args = mcs.args;
@@ -1438,6 +1470,15 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1438} 1470}
1439#endif 1471#endif
1440 1472
1473static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1474{
1475 struct mmuext_op op;
1476 op.cmd = cmd;
1477 op.arg1.mfn = pfn_to_mfn(pfn);
1478 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1479 BUG();
1480}
1481
1441/* Early in boot, while setting up the initial pagetable, assume 1482/* Early in boot, while setting up the initial pagetable, assume
1442 everything is pinned. */ 1483 everything is pinned. */
1443static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) 1484static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
@@ -1446,22 +1487,29 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1446 BUG_ON(mem_map); /* should only be used early */ 1487 BUG_ON(mem_map); /* should only be used early */
1447#endif 1488#endif
1448 make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); 1489 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1490 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1491}
1492
1493/* Used for pmd and pud */
1494static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1495{
1496#ifdef CONFIG_FLATMEM
1497 BUG_ON(mem_map); /* should only be used early */
1498#endif
1499 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1449} 1500}
1450 1501
1451/* Early release_pte assumes that all pts are pinned, since there's 1502/* Early release_pte assumes that all pts are pinned, since there's
1452 only init_mm and anything attached to that is pinned. */ 1503 only init_mm and anything attached to that is pinned. */
1453static void xen_release_pte_init(unsigned long pfn) 1504static __init void xen_release_pte_init(unsigned long pfn)
1454{ 1505{
1506 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1455 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 1507 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1456} 1508}
1457 1509
1458static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) 1510static __init void xen_release_pmd_init(unsigned long pfn)
1459{ 1511{
1460 struct mmuext_op op; 1512 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1461 op.cmd = cmd;
1462 op.arg1.mfn = pfn_to_mfn(pfn);
1463 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1464 BUG();
1465} 1513}
1466 1514
1467/* This needs to make sure the new pte page is pinned iff its being 1515/* This needs to make sure the new pte page is pinned iff its being
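
[Splitting xen_alloc_pte_init()/xen_release_pte_init() from the new pmd/pud variants encodes an ordering rule: a pte page is made read-only and then pinned as an L1 table, and must be unpinned before it is made writable again, while pmd/pud pages are only flipped read-only/read-write. A toy user-space model of that ordering, using mprotect() for the permission flip and a stub pin flag in place of the MMUEXT hypercalls.]

#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static bool pinned;   /* stand-in for the hypervisor's per-frame pin state */

static void pin(void *page)   { pinned = true;  printf("pin   %p\n", page); }
static void unpin(void *page) { pinned = false; printf("unpin %p\n", page); }

/* alloc_pte-style: read-only first, then pin (cf. xen_alloc_pte_init). */
static void alloc_pte_like(void *page, size_t len)
{
	mprotect(page, len, PROT_READ);
	pin(page);
}

/* release_pte-style: unpin before making the page writable again. */
static void release_pte_like(void *page, size_t len)
{
	unpin(page);
	mprotect(page, len, PROT_READ | PROT_WRITE);
}

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *page = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	alloc_pte_like(page, psz);
	release_pte_like(page, psz);
	munmap(page, psz);
	return pinned ? 1 : 0;   /* 0 when the page ended up unpinned */
}
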
@@ -1750,7 +1798,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1750} 1798}
1751#endif /* CONFIG_X86_64 */ 1799#endif /* CONFIG_X86_64 */
1752 1800
1753static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot) 1801static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1754{ 1802{
1755 pte_t pte; 1803 pte_t pte;
1756 1804
@@ -1773,6 +1821,9 @@ static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
1773#ifdef CONFIG_X86_LOCAL_APIC 1821#ifdef CONFIG_X86_LOCAL_APIC
1774 case FIX_APIC_BASE: /* maps dummy local APIC */ 1822 case FIX_APIC_BASE: /* maps dummy local APIC */
1775#endif 1823#endif
1824 case FIX_TEXT_POKE0:
1825 case FIX_TEXT_POKE1:
1826 /* All local page mappings */
1776 pte = pfn_pte(phys, prot); 1827 pte = pfn_pte(phys, prot);
1777 break; 1828 break;
1778 1829
@@ -1819,7 +1870,6 @@ __init void xen_post_allocator_init(void)
1819 xen_mark_init_mm_pinned(); 1870 xen_mark_init_mm_pinned();
1820} 1871}
1821 1872
1822
1823const struct pv_mmu_ops xen_mmu_ops __initdata = { 1873const struct pv_mmu_ops xen_mmu_ops __initdata = {
1824 .pagetable_setup_start = xen_pagetable_setup_start, 1874 .pagetable_setup_start = xen_pagetable_setup_start,
1825 .pagetable_setup_done = xen_pagetable_setup_done, 1875 .pagetable_setup_done = xen_pagetable_setup_done,
@@ -1843,9 +1893,9 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
1843 1893
1844 .alloc_pte = xen_alloc_pte_init, 1894 .alloc_pte = xen_alloc_pte_init,
1845 .release_pte = xen_release_pte_init, 1895 .release_pte = xen_release_pte_init,
1846 .alloc_pmd = xen_alloc_pte_init, 1896 .alloc_pmd = xen_alloc_pmd_init,
1847 .alloc_pmd_clone = paravirt_nop, 1897 .alloc_pmd_clone = paravirt_nop,
1848 .release_pmd = xen_release_pte_init, 1898 .release_pmd = xen_release_pmd_init,
1849 1899
1850#ifdef CONFIG_HIGHPTE 1900#ifdef CONFIG_HIGHPTE
1851 .kmap_atomic_pte = xen_kmap_atomic_pte, 1901 .kmap_atomic_pte = xen_kmap_atomic_pte,
@@ -1883,8 +1933,8 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
1883 .make_pud = PV_CALLEE_SAVE(xen_make_pud), 1933 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
1884 .set_pgd = xen_set_pgd_hyper, 1934 .set_pgd = xen_set_pgd_hyper,
1885 1935
1886 .alloc_pud = xen_alloc_pte_init, 1936 .alloc_pud = xen_alloc_pmd_init,
1887 .release_pud = xen_release_pte_init, 1937 .release_pud = xen_release_pmd_init,
1888#endif /* PAGETABLE_LEVELS == 4 */ 1938#endif /* PAGETABLE_LEVELS == 4 */
1889 1939
1890 .activate_mm = xen_activate_mm, 1940 .activate_mm = xen_activate_mm,
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 24d1b44a337d..da7302624897 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -11,6 +11,9 @@ enum pt_level {
11}; 11};
12 12
13 13
14bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
15bool install_p2mtop_page(unsigned long pfn, unsigned long *p);
16
14void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); 17void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
15 18
16 19
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 585a6e330837..429834ec1687 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -317,7 +317,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
317 BUG_ON(rc); 317 BUG_ON(rc);
318 318
319 while(per_cpu(cpu_state, cpu) != CPU_ONLINE) { 319 while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
320 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 320 HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
321 barrier(); 321 barrier();
322 } 322 }
323 323
@@ -422,7 +422,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
422 /* Make sure other vcpus get a chance to run if they need to. */ 422 /* Make sure other vcpus get a chance to run if they need to. */
423 for_each_cpu(cpu, mask) { 423 for_each_cpu(cpu, mask) {
424 if (xen_vcpu_stolen(cpu)) { 424 if (xen_vcpu_stolen(cpu)) {
425 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 425 HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
426 break; 426 break;
427 } 427 }
428 } 428 }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 2f5ef2632ea2..20139464943c 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -57,8 +57,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
57 57
58bool xen_vcpu_stolen(int vcpu); 58bool xen_vcpu_stolen(int vcpu);
59 59
60void xen_mark_init_mm_pinned(void);
61
62void xen_setup_vcpu_info_placement(void); 60void xen_setup_vcpu_info_placement(void);
63 61
64#ifdef CONFIG_SMP 62#ifdef CONFIG_SMP