Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 16
-rw-r--r--  arch/x86/Kconfig.cpu | 1
-rw-r--r--  arch/x86/Makefile | 6
-rw-r--r--  arch/x86/boot/memory.c | 29
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 19
-rw-r--r--  arch/x86/include/asm/desc.h | 2
-rw-r--r--  arch/x86/include/asm/fixmap.h | 4
-rw-r--r--  arch/x86/include/asm/hardirq.h | 2
-rw-r--r--  arch/x86/include/asm/io.h | 6
-rw-r--r--  arch/x86/include/asm/lguest_hcall.h | 2
-rw-r--r--  arch/x86/include/asm/mce.h | 1
-rw-r--r--  arch/x86/include/asm/paravirt.h | 6
-rw-r--r--  arch/x86/include/asm/pat.h | 4
-rw-r--r--  arch/x86/include/asm/percpu.h | 10
-rw-r--r--  arch/x86/include/asm/processor.h | 12
-rw-r--r--  arch/x86/include/asm/ptrace.h | 7
-rw-r--r--  arch/x86/include/asm/required-features.h | 2
-rw-r--r--  arch/x86/include/asm/sigcontext.h | 6
-rw-r--r--  arch/x86/include/asm/spinlock.h | 4
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 2
-rw-r--r--  arch/x86/include/asm/topology.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 5
-rw-r--r--  arch/x86/include/asm/xen/page.h | 3
-rw-r--r--  arch/x86/include/asm/xsave.h | 3
-rw-r--r--  arch/x86/kernel/Makefile | 3
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 16
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c | 8
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 7
-rw-r--r--  arch/x86/kernel/apic/nmi.c | 5
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 31
-rw-r--r--  arch/x86/kernel/bios_uv.c | 3
-rw-r--r--  arch/x86/kernel/cpu/common.c | 9
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 58
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c | 33
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 10
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 6
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 2
-rw-r--r--  arch/x86/kernel/e820.c | 11
-rw-r--r--  arch/x86/kernel/entry_64.S | 3
-rw-r--r--  arch/x86/kernel/ftrace.c | 2
-rw-r--r--  arch/x86/kernel/hpet.c | 24
-rw-r--r--  arch/x86/kernel/i8253.c | 2
-rw-r--r--  arch/x86/kernel/kgdb.c | 3
-rw-r--r--  arch/x86/kernel/kvmclock.c | 7
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c | 4
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c | 4
-rw-r--r--  arch/x86/kernel/microcode_core.c | 35
-rw-r--r--  arch/x86/kernel/mpparse.c | 7
-rw-r--r--  arch/x86/kernel/paravirt.c | 2
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c | 2
-rw-r--r--  arch/x86/kernel/quirks.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 8
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 189
-rw-r--r--  arch/x86/kernel/tsc.c | 2
-rw-r--r--  arch/x86/kernel/uv_sysfs.c | 4
-rw-r--r--  arch/x86/kernel/uv_time.c | 10
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 2
-rw-r--r--  arch/x86/kernel/xsave.c | 6
-rw-r--r--  arch/x86/kvm/mmu.c | 2
-rw-r--r--  arch/x86/kvm/svm.c | 8
-rw-r--r--  arch/x86/kvm/x86.c | 14
-rw-r--r--  arch/x86/lguest/boot.c | 18
-rw-r--r--  arch/x86/mm/gup.c | 16
-rw-r--r--  arch/x86/mm/init.c | 18
-rw-r--r--  arch/x86/mm/ioremap.c | 33
-rw-r--r--  arch/x86/mm/kmmio.c | 2
-rw-r--r--  arch/x86/mm/numa_32.c | 2
-rw-r--r--  arch/x86/mm/numa_64.c | 3
-rw-r--r--  arch/x86/mm/pageattr.c | 139
-rw-r--r--  arch/x86/mm/pat.c | 191
-rw-r--r--  arch/x86/mm/pgtable.c | 3
-rw-r--r--  arch/x86/mm/srat_32.c | 2
-rw-r--r--  arch/x86/mm/srat_64.c | 7
-rw-r--r--  arch/x86/oprofile/backtrace.c | 2
-rw-r--r--  arch/x86/pci/amd_bus.c | 6
-rw-r--r--  arch/x86/pci/common.c | 5
-rw-r--r--  arch/x86/pci/i386.c | 4
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 6
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 12
-rw-r--r--  arch/x86/xen/Makefile | 5
-rw-r--r--  arch/x86/xen/enlighten.c | 89
-rw-r--r--  arch/x86/xen/mmu.c | 124
-rw-r--r--  arch/x86/xen/mmu.h | 3
-rw-r--r--  arch/x86/xen/smp.c | 4
-rw-r--r--  arch/x86/xen/time.c | 7
-rw-r--r--  arch/x86/xen/xen-ops.h | 21
86 files changed, 849 insertions, 571 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bc25b9f5e4cd..a6efe0a2e9ae 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -277,6 +277,7 @@ config SPARSE_IRQ
 config NUMA_MIGRATE_IRQ_DESC
 	bool "Move irq desc when changing irq smp_affinity"
 	depends on SPARSE_IRQ && NUMA
+	depends on BROKEN
 	default n
 	---help---
 	  This enables moving irq_desc to cpu/node that irq will use handled.
@@ -353,6 +354,7 @@ config X86_UV
 	bool "SGI Ultraviolet"
 	depends on X86_64
 	depends on X86_EXTENDED_PLATFORM
+	depends on NUMA
 	select X86_X2APIC
 	---help---
 	  This option is needed in order to support SGI Ultraviolet systems.
@@ -496,6 +498,19 @@ config PARAVIRT
 	  over full virtualization. However, when run without a hypervisor
 	  the kernel is theoretically slower and slightly larger.
 
+config PARAVIRT_SPINLOCKS
+	bool "Paravirtualization layer for spinlocks"
+	depends on PARAVIRT && SMP && EXPERIMENTAL
+	---help---
+	  Paravirtualized spinlocks allow a pvops backend to replace the
+	  spinlock implementation with something virtualization-friendly
+	  (for example, block the virtual CPU rather than spinning).
+
+	  Unfortunately the downside is an up to 5% performance hit on
+	  native kernels, with various workloads.
+
+	  If you are unsure how to answer this question, answer N.
+
 config PARAVIRT_CLOCK
 	bool
 	default n
@@ -663,6 +678,7 @@ config MAXSMP
 
 config NR_CPUS
 	int "Maximum number of CPUs" if SMP && !MAXSMP
+	range 2 8 if SMP && X86_32 && !X86_BIGSMP
 	range 2 512 if SMP && !MAXSMP
 	default "1" if !SMP
 	default "4096" if MAXSMP
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 924e156a85ab..8130334329c0 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -506,6 +506,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
+	depends on BROKEN
 	---help---
 	  This adds a ptrace interface to the hardware's branch trace store.
 
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index f05d8c91d9e5..8c86b72afdc2 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -153,7 +153,7 @@ endif
 
 boot := arch/x86/boot
 
-BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage install
+BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage
 
 PHONY += bzImage $(BOOT_TARGETS)
 
@@ -171,6 +171,10 @@ bzImage: vmlinux
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $@
 
+PHONY += install
+install:
+	$(Q)$(MAKE) $(build)=$(boot) $@
+
 PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index d989de810cac..cae3feb1035e 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -17,17 +17,12 @@
 
 #define SMAP 0x534d4150 /* ASCII "SMAP" */
 
-struct e820_ext_entry {
-	struct e820entry std;
-	u32 ext_flags;
-} __attribute__((packed));
-
 static int detect_memory_e820(void)
 {
 	int count = 0;
 	struct biosregs ireg, oreg;
 	struct e820entry *desc = boot_params.e820_map;
-	static struct e820_ext_entry buf; /* static so it is zeroed */
+	static struct e820entry buf; /* static so it is zeroed */
 
 	initregs(&ireg);
 	ireg.ax = 0xe820;
@@ -36,10 +31,18 @@ static int detect_memory_e820(void)
 	ireg.di = (size_t)&buf;
 
 	/*
-	 * Set this here so that if the BIOS doesn't change this field
-	 * but still doesn't change %ecx, we're still okay...
+	 * Note: at least one BIOS is known which assumes that the
+	 * buffer pointed to by one e820 call is the same one as
+	 * the previous call, and only changes modified fields. Therefore,
+	 * we use a temporary buffer and copy the results entry by entry.
+	 *
+	 * This routine deliberately does not try to account for
+	 * ACPI 3+ extended attributes. This is because there are
+	 * BIOSes in the field which report zero for the valid bit for
+	 * all ranges, and we don't currently make any use of the
+	 * other attribute bits. Revisit this if we see the extended
+	 * attribute bits deployed in a meaningful way in the future.
 	 */
-	buf.ext_flags = 1;
 
 	do {
 		intcall(0x15, &ireg, &oreg);
@@ -61,13 +64,7 @@ static int detect_memory_e820(void)
 			break;
 		}
 
-		/* ACPI 3.0 added the extended flags support. If bit 0
-		   in the extended flags is zero, we're supposed to simply
-		   ignore the entry -- a backwards incompatible change! */
-		if (oreg.cx > 20 && !(buf.ext_flags & 1))
-			continue;
-
-		*desc++ = buf.std;
+		*desc++ = buf;
 		count++;
 	} while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_map));
 
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index efac92fd1efb..085a8c35f149 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -129,21 +129,12 @@ asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
 			      struct stat64 __user *statbuf, int flag)
 {
 	struct kstat stat;
-	int error = -EINVAL;
+	int error;
 
-	if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
-		goto out;
-
-	if (flag & AT_SYMLINK_NOFOLLOW)
-		error = vfs_lstat_fd(dfd, filename, &stat);
-	else
-		error = vfs_stat_fd(dfd, filename, &stat);
-
-	if (!error)
-		error = cp_stat64(statbuf, &stat);
-
-out:
-	return error;
+	error = vfs_fstatat(dfd, filename, &stat, flag);
+	if (error)
+		return error;
+	return cp_stat64(statbuf, &stat);
 }
 
 /*
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 5623c50d67b2..c45f415ce315 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -37,7 +37,7 @@ extern gate_desc idt_table[];
 struct gdt_page {
 	struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU(struct gdt_page, gdt_page);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 81937a5dc77c..2d81af3974a0 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -151,11 +151,11 @@ extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
-		       unsigned long phys, pgprot_t flags);
+		       phys_addr_t phys, pgprot_t flags);
 
 #ifndef CONFIG_PARAVIRT
 static inline void __set_fixmap(enum fixed_addresses idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	native_set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 039db6aa8e02..37555e52f980 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -26,7 +26,7 @@ typedef struct {
 #endif
 } ____cacheline_aligned irq_cpustat_t;
 
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e5383e3d2f8c..73739322b6d0 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -193,8 +193,10 @@ extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  */
 extern void early_ioremap_init(void);
 extern void early_ioremap_reset(void);
-extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
-extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+				   unsigned long size);
+extern void __iomem *early_memremap(resource_size_t phys_addr,
+				    unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 
 #define IO_SPACE_LIMIT 0xffff
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index 0f4ee7148afe..faae1996487b 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -5,7 +5,6 @@
 #define LHCALL_FLUSH_ASYNC 0
 #define LHCALL_LGUEST_INIT 1
 #define LHCALL_SHUTDOWN 2
-#define LHCALL_LOAD_GDT 3
 #define LHCALL_NEW_PGTABLE 4
 #define LHCALL_FLUSH_TLB 5
 #define LHCALL_LOAD_IDT_ENTRY 6
@@ -17,6 +16,7 @@
 #define LHCALL_SET_PMD 15
 #define LHCALL_LOAD_TLS 16
 #define LHCALL_NOTIFY 17
+#define LHCALL_LOAD_GDT_ENTRY 18
 
 #define LGUEST_TRAP_ENTRY 0x1F
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 563933e06a35..4f8c199584e7 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -137,6 +137,7 @@ DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
 enum mcp_flags {
 	MCP_TIMESTAMP = (1 << 0), /* log time stamp */
 	MCP_UC = (1 << 1), /* log uncorrected errors */
+	MCP_DONTLOG = (1 << 2), /* only clear, don't log */
 };
 extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7727aa8b7dda..a53da004e08e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -347,7 +347,7 @@ struct pv_mmu_ops {
 	/* Sometimes the physical address is a pfn, and sometimes its
 	   an mfn. We can tell which is which from the index. */
 	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
-			   unsigned long phys, pgprot_t flags);
+			   phys_addr_t phys, pgprot_t flags);
 };
 
 struct raw_spinlock;
@@ -1432,7 +1432,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 void arch_flush_lazy_mmu_mode(void);
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	pv_mmu_ops.set_fixmap(idx, phys, flags);
 }
@@ -1443,7 +1443,7 @@ u64 _paravirt_ident_64(u64);
 
 #define paravirt_nop ((void *)_paravirt_nop)
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
 {
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 2cd07b9422f4..7af14e512f97 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -18,9 +18,5 @@ extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
 		unsigned long flag);
-extern void map_devmem(unsigned long pfn, unsigned long size,
-		struct pgprot vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-		struct pgprot vma_prot);
 
 #endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b26d01..02ecb30982a3 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -82,22 +82,22 @@ do { \
 	case 1: \
 		asm(op "b %1,"__percpu_arg(0) \
 		    : "+m" (var) \
-		    : "ri" ((T__)val)); \
+		    : "qi" ((T__)(val))); \
 		break; \
 	case 2: \
 		asm(op "w %1,"__percpu_arg(0) \
 		    : "+m" (var) \
-		    : "ri" ((T__)val)); \
+		    : "ri" ((T__)(val))); \
 		break; \
 	case 4: \
 		asm(op "l %1,"__percpu_arg(0) \
 		    : "+m" (var) \
-		    : "ri" ((T__)val)); \
+		    : "ri" ((T__)(val))); \
 		break; \
 	case 8: \
 		asm(op "q %1,"__percpu_arg(0) \
 		    : "+m" (var) \
-		    : "re" ((T__)val)); \
+		    : "re" ((T__)(val))); \
 		break; \
 	default: __bad_percpu_size(); \
 	} \
@@ -109,7 +109,7 @@ do { \
 	switch (sizeof(var)) { \
 	case 1: \
 		asm(op "b "__percpu_arg(1)",%0" \
-		    : "=r" (ret__) \
+		    : "=q" (ret__) \
 		    : "m" (var)); \
 		break; \
 	case 2: \
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 34c52370f2fe..c2cceae709c8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -138,7 +138,7 @@ extern struct tss_struct doublefault_tss;
 extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu) per_cpu(cpu_info, cpu)
 #define current_cpu_data __get_cpu_var(cpu_info)
 #else
@@ -270,7 +270,7 @@ struct tss_struct {
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
 /*
  * Save the original ist values for checking stack pointers during debugging
@@ -352,6 +352,11 @@ struct i387_soft_struct {
 	u32 entry_eip;
 };
 
+struct ymmh_struct {
+	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
+	u32 ymmh_space[64];
+};
+
 struct xsave_hdr_struct {
 	u64 xstate_bv;
 	u64 reserved1[2];
@@ -361,6 +366,7 @@ struct xsave_hdr_struct {
 struct xsave_struct {
 	struct i387_fxsave_struct i387;
 	struct xsave_hdr_struct xsave_hdr;
+	struct ymmh_struct ymmh;
 	/* new processor state extensions will go here */
 } __attribute__ ((packed, aligned (64)));
 
@@ -387,7 +393,7 @@ union irq_stack_union {
 	};
 };
 
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index e304b66abeea..624f133943ed 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -187,14 +187,15 @@ static inline int v8086_mode(struct pt_regs *regs)
 
 /*
  * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps. So regs will be the current sp.
+ * when it traps. The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
  *
  * This is valid only for kernel mode traps.
  */
-static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-	return (unsigned long)regs;
+	return (unsigned long)(&regs->sp);
 #else
 	return regs->sp;
 #endif
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index d5cd6c586881..a4737dddfd58 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -50,7 +50,7 @@
 #ifdef CONFIG_X86_64
 #define NEED_PSE 0
 #define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
-#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
+#define NEED_PGE 0
 #define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
 #define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
 #define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index ec666491aaa4..72e5a4491661 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -269,6 +269,11 @@ struct _xsave_hdr {
 	__u64 reserved2[5];
 };
 
+struct _ymmh_state {
+	/* 16 * 16 bytes for each YMMH-reg */
+	__u32 ymmh_space[64];
+};
+
 /*
  * Extended state pointed by the fpstate pointer in the sigcontext.
  * In addition to the fpstate, information encoded in the xstate_hdr
@@ -278,6 +283,7 @@ struct _xsave_hdr {
 struct _xstate {
 	struct _fpstate fpstate;
 	struct _xsave_hdr xstate_hdr;
+	struct _ymmh_state ymmh;
 	/* new processor state extensions go here */
 };
 
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index e5e6caffec87..b7e5db876399 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,7 +172,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
@@ -206,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 	__raw_spin_lock(lock);
 }
 
-#endif
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d3539f998f88..16a5c84b0329 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -152,7 +152,7 @@ struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
 };
-DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 892b119dba6f..f44b49abca49 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -200,7 +200,7 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
 }
 
 struct pci_bus;
-void set_pci_bus_resources_arch_default(struct pci_bus *b);
+void x86_pci_root_bus_res_quirks(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
 #define mc_capable() (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index db68ac8a5ac2..2cae46c7c8a2 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -17,6 +17,11 @@
 /* ========================================================================= */
 /*                               UVH_BAU_DATA_CONFIG                         */
 /* ========================================================================= */
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
+/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
 #define UVH_BAU_DATA_CONFIG 0x61680UL
 #define UVH_BAU_DATA_CONFIG_32 0x0438
 
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 1a918dde46b5..018a0a400799 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -124,7 +124,8 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_mfn(v)		(pfn_to_mfn(PFN_DOWN(__pa(v))))
+#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
+#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 static inline unsigned long pte_mfn(pte_t pte)
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 08e9a1ac07a9..727acc152344 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -7,6 +7,7 @@
 
 #define XSTATE_FP 0x1
 #define XSTATE_SSE 0x2
+#define XSTATE_YMM 0x4
 
 #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
 
@@ -15,7 +16,7 @@
 /*
  * These are the features that the OS can handle currently.
  */
-#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE)
+#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX "0x48, "
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 145cce75cda7..88d1bfc847d3 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -89,7 +89,8 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
 obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST) += kvm.o
 obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42c33cebf00f..8c0be0902dac 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -49,10 +49,10 @@
 #define IVHD_DEV_EXT_SELECT 0x46
 #define IVHD_DEV_EXT_SELECT_RANGE 0x47
 
-#define IVHD_FLAG_HT_TUN_EN 0x00
-#define IVHD_FLAG_PASSPW_EN 0x01
-#define IVHD_FLAG_RESPASSPW_EN 0x02
-#define IVHD_FLAG_ISOC_EN 0x03
+#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
+#define IVHD_FLAG_PASSPW_EN_MASK 0x02
+#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
+#define IVHD_FLAG_ISOC_EN_MASK 0x08
 
 #define IVMD_FLAG_EXCL_RANGE 0x08
 #define IVMD_FLAG_UNITY_MAP 0x01
@@ -569,19 +569,19 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 	 * First set the recommended feature enable bits from ACPI
 	 * into the IOMMU control registers
 	 */
-	h->flags & IVHD_FLAG_HT_TUN_EN ?
+	h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
 		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
 
-	h->flags & IVHD_FLAG_PASSPW_EN ?
+	h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
 		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
 
-	h->flags & IVHD_FLAG_RESPASSPW_EN ?
+	h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
 		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
 
-	h->flags & IVHD_FLAG_ISOC_EN ?
+	h->flags & IVHD_FLAG_ISOC_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
 		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
 
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 1c11b819f245..302947775575 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -254,7 +254,7 @@ static int parse_unisys_oem(char *oemptr)
 }
 
 #ifdef CONFIG_ACPI
-static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
+static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
 	struct acpi_table_header *header = NULL;
 	struct es7000_oem_table *table;
@@ -285,7 +285,7 @@ static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
 	return 0;
 }
 
-static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
+static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
 	if (!oem_addr)
 		return;
@@ -306,7 +306,7 @@ static int es7000_check_dsdt(void)
 static int es7000_acpi_ret;
 
 /* Hook from generic ACPI tables.c */
-static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	unsigned long oem_addr = 0;
 	int check_dsdt;
@@ -717,7 +717,7 @@ struct apic apic_es7000_cluster = {
 	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
 };
 
-struct apic apic_es7000 = {
+struct apic __refdata apic_es7000 = {
 
 	.name = "es7000",
 	.probe = probe_es7000,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 767fe7e46d68..30da617d18e4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2524,7 +2524,6 @@ static void irq_complete_move(struct irq_desc **descp)
 static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
 
-#ifdef CONFIG_X86_X2APIC
 static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
 {
 	int apic, pin;
@@ -2558,6 +2557,7 @@ eoi_ioapic_irq(struct irq_desc *desc)
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+#ifdef CONFIG_X86_X2APIC
 static void ack_x2apic_level(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -2634,6 +2634,9 @@ static void ack_apic_level(unsigned int irq)
 	 */
 	ack_APIC_irq();
 
+	if (irq_remapped(irq))
+		eoi_ioapic_irq(desc);
+
 	/* Now we can move and renable the irq */
 	if (unlikely(do_unmask_irq)) {
 		/* Only migrate the irq if the ack has been received.
@@ -3667,12 +3670,14 @@ int arch_setup_hpet_msi(unsigned int irq)
 {
 	int ret;
 	struct msi_msg msg;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	ret = msi_compose_msg(NULL, irq, &msg);
 	if (ret < 0)
 		return ret;
 
 	hpet_msi_write(irq, &msg);
+	desc->status |= IRQ_MOVE_PCNTXT;
 	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
 		"edge");
 
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index d6bd62407152..ce4fbfa315a1 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -138,7 +138,7 @@ int __init check_nmi_watchdog(void)
 	if (!prev_nmi_count)
 		goto error;
 
-	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
+	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -414,7 +414,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		touched = 1;
 	}
 
-	if (cpumask_test_cpu(cpu, backtrace_mask)) {
+	/* We can be called before check_nmi_watchdog, hence NULL check. */
+	if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
 		static DEFINE_SPINLOCK(lock); /* Serialise the printks */
 
 		spin_lock(&lock);
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1248318436e8..2bda69352976 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -19,6 +19,7 @@
 #include <linux/timer.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
+#include <linux/io.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -34,6 +35,17 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 
+static int early_get_nodeid(void)
+{
+	union uvh_node_id_u node_id;
+	unsigned long *mmr;
+
+	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
+	node_id.v = *mmr;
+	early_iounmap(mmr, sizeof(*mmr));
+	return node_id.s.node_id;
+}
+
 static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strcmp(oem_id, "SGI")) {
@@ -42,6 +54,8 @@ static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	else if (!strcmp(oem_table_id, "UVX"))
 		uv_system_type = UV_X2APIC;
 	else if (!strcmp(oem_table_id, "UVH")) {
+		__get_cpu_var(x2apic_extra_bits) =
+			early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
 		uv_system_type = UV_NON_UNIQUE_APIC;
 		return 1;
 	}
@@ -549,7 +563,8 @@ void __init uv_system_init(void)
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
 	int max_pnode = 0;
-	unsigned long mmr_base, present;
+	unsigned long mmr_base, present, paddr;
+	unsigned short pnode_mask;
 
 	map_low_mmrs();
 
@@ -592,6 +607,7 @@ void __init uv_system_init(void)
 		}
 	}
 
+	pnode_mask = (1 << n_val) - 1;
 	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 	gnode_upper = (((unsigned long)node_id.s.node_id) &
 		       ~((1 << n_val) - 1)) << m_val;
@@ -615,7 +631,7 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
 		uv_cpu_hub_info(cpu)->pnode = pnode;
-		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
@@ -631,6 +647,17 @@ void __init uv_system_init(void)
 			lcpu, blade);
 	}
 
+	/* Add blade/pnode info for nodes without cpus */
+	for_each_online_node(nid) {
+		if (uv_node_to_blade[nid] >= 0)
+			continue;
+		paddr = node_start_pfn(nid) << PAGE_SHIFT;
+		paddr = uv_soc_phys_ram_to_gpa(paddr);
+		pnode = (paddr >> m_val) & pnode_mask;
+		blade = boot_pnode_to_blade(pnode);
+		uv_node_to_blade[nid] = blade;
+	}
+
 	map_gru_high(max_pnode);
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f63882728d91..63a88e1f987d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -182,7 +182,8 @@ void uv_bios_init(void)
 	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
 	iounmap(tab);
 
-	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
+	printk(KERN_INFO "EFI UV System Table Revision %d\n",
+					uv_systab.revision);
 }
 #else /* !CONFIG_EFI */
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c4f667896c28..77848d9fca68 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -114,6 +114,13 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
+static int __init x86_xsave_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+	return 1;
+}
+__setup("noxsave", x86_xsave_setup);
+
 #ifdef CONFIG_X86_32
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
@@ -1203,6 +1210,8 @@ void __cpuinit cpu_init(void)
 	load_TR_desc();
 	load_LDT(&init_mm.context);
 
+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
 #ifdef CONFIG_DOUBLEFAULT
 	/* Set up doublefault TSS pointer in the GDT */
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 9d3af380c6bd..208ecf6643df 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -65,14 +65,18 @@ enum {
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
-	unsigned int max_freq;
 	unsigned int resume;
 	unsigned int cpu_feature;
-	u64 saved_aperf, saved_mperf;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+struct acpi_msr_data {
+	u64 saved_aperf, saved_mperf;
+};
+
+static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+
 DEFINE_TRACE(power_mark);
 
 /* acpi_perf_data is a pointer to percpu data. */
@@ -153,7 +157,8 @@ struct drv_cmd {
 	u32 val;
 };
 
-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 h;
@@ -170,10 +175,10 @@ static long do_drv_read(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
@@ -192,23 +197,24 @@ static long do_drv_write(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
 	cmd->val = 0;
 
-	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	unsigned int i;
+	int this_cpu;
 
-	for_each_cpu(i, cmd->mask) {
-		work_on_cpu(i, do_drv_write, cmd);
-	}
+	this_cpu = get_cpu();
+	if (cpumask_test_cpu(this_cpu, cmd->mask))
+		do_drv_write(cmd);
+	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+	put_cpu();
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -252,15 +258,13 @@ struct perf_pair {
 	} aperf, mperf;
 };
 
-
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
 {
 	struct perf_pair *cur = _cur;
 
 	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
 	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
-
-	return 0;
 }
 
 /*
@@ -283,15 +287,15 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &readin))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
 		return 0;
 
 	cur.aperf.whole = readin.aperf.whole -
-				per_cpu(drv_data, cpu)->saved_aperf;
+				per_cpu(msr_data, cpu).saved_aperf;
 	cur.mperf.whole = readin.mperf.whole -
-				per_cpu(drv_data, cpu)->saved_mperf;
-	per_cpu(drv_data, cpu)->saved_aperf = readin.aperf.whole;
-	per_cpu(drv_data, cpu)->saved_mperf = readin.mperf.whole;
+				per_cpu(msr_data, cpu).saved_mperf;
+	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
+	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
 
 #ifdef __i386__
 	/*
@@ -335,7 +339,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 #endif
 
-	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
+	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
 
 	return retval;
 }
@@ -688,16 +692,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
 	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
 	    policy->cpuinfo.transition_latency > 20 * 1000) {
-		static int print_once;
 		policy->cpuinfo.transition_latency = 20 * 1000;
-		if (!print_once) {
-			print_once = 1;
-			printk(KERN_INFO "Capping off P-state tranision latency"
-				" at 20 uS\n");
-		}
+		printk_once(KERN_INFO "Capping off P-state tranision"
+			    " latency at 20 uS\n");
 	}
 
-	data->max_freq = perf->states[0].core_frequency * 1000;
 	/* table init */
 	for (i = 0; i < perf->state_count; i++) {
 		if (i > 0 && perf->states[i].core_frequency >=
@@ -716,6 +715,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (result)
 		goto err_freqfree;
 
+	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
 		/* Current speed is unknown and not detectable by IO port */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 863f89568b1a..6fb0b359d2a5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -239,9 +239,10 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		 * Don't get the IP here because it's unlikely to
 		 * have anything to do with the actual error location.
 		 */
-
+		if (!(flags & MCP_DONTLOG)) {
 		mce_log(&m);
 		add_taint(TAINT_MACHINE_CHECK);
+		}
 
 		/*
 		 * Clear state for this bank.
@@ -452,13 +453,14 @@ void mce_log_therm_throt_event(__u64 status)
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static int next_interval; /* in jiffies */
+static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
 static void mcheck_timer(unsigned long);
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
 {
 	struct timer_list *t = &per_cpu(mce_timer, data);
+	int *n;
 
 	WARN_ON(smp_processor_id() != data);
 
@@ -470,14 +472,14 @@ static void mcheck_timer(unsigned long data)
 	 * Alert userspace if needed. If we logged an MCE, reduce the
 	 * polling interval, otherwise increase the polling interval.
 	 */
+	n = &__get_cpu_var(next_interval);
 	if (mce_notify_user()) {
-		next_interval = max(next_interval/2, HZ/100);
+		*n = max(*n/2, HZ/100);
 	} else {
-		next_interval = min(next_interval * 2,
-				(int)round_jiffies_relative(check_interval*HZ));
+		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
 	}
 
-	t->expires = jiffies + next_interval;
+	t->expires = jiffies + *n;
 	add_timer(t);
 }
 
@@ -584,7 +586,7 @@ static void mce_init(void *dummy)
 	 * Log the machine checks left over from the previous reset.
 	 */
 	bitmap_fill(all_banks, MAX_NR_BANKS);
-	machine_check_poll(MCP_UC, &all_banks);
+	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);
 
 	set_in_cr4(X86_CR4_MCE);
 
@@ -632,14 +634,13 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
+	int *n = &__get_cpu_var(next_interval);
 
-	/* data race harmless because everyone sets to the same value */
-	if (!next_interval)
-		next_interval = check_interval * HZ;
-	if (!next_interval)
+	*n = check_interval * HZ;
+	if (!*n)
 		return;
 	setup_timer(t, mcheck_timer, smp_processor_id());
-	t->expires = round_jiffies(jiffies + next_interval);
+	t->expires = round_jiffies(jiffies + *n);
 	add_timer(t);
 }
 
@@ -907,7 +908,6 @@ static void mce_cpu_restart(void *data)
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
-	next_interval = check_interval * HZ;
 	on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
@@ -1110,7 +1110,8 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		t->expires = round_jiffies(jiffies + next_interval);
+		t->expires = round_jiffies(jiffies +
+					__get_cpu_var(next_interval));
 		add_timer_on(t, cpu);
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index d6b72df89d69..cef3ee30744b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -151,10 +151,11 @@ static void print_update(char *type, int *hdr, int num)
 static void cmci_discover(int banks, int boot)
 {
 	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+	unsigned long flags;
 	int hdr = 0;
 	int i;
 
-	spin_lock(&cmci_discover_lock);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		u64 val;
 
@@ -184,7 +185,7 @@ static void cmci_discover(int banks, int boot)
 			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
 		}
 	}
-	spin_unlock(&cmci_discover_lock);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 	if (hdr)
 		printk(KERN_CONT "\n");
 }
@@ -211,13 +212,14 @@ void cmci_recheck(void)
  */
 void cmci_clear(void)
 {
+	unsigned long flags;
 	int i;
 	int banks;
 	u64 val;
 
 	if (!cmci_supported(&banks))
 		return;
-	spin_lock(&cmci_discover_lock);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
 			continue;
@@ -227,7 +229,7 @@ void cmci_clear(void)
227 wrmsrl(MSR_IA32_MC0_CTL2 + i, val); 229 wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
228 __clear_bit(i, __get_cpu_var(mce_banks_owned)); 230 __clear_bit(i, __get_cpu_var(mce_banks_owned));
229 } 231 }
230 spin_unlock(&cmci_discover_lock); 232 spin_unlock_irqrestore(&cmci_discover_lock, flags);
231} 233}
232 234
233/* 235/*
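
/*
 * cmci_discover_lock can also be taken from the CMCI interrupt path, so the
 * mce_intel_64.c hunks above switch from spin_lock() to spin_lock_irqsave()
 * to avoid a self-deadlock when the interrupt arrives on the CPU that
 * already holds the lock. Generic shape of that fix, with hypothetical lock
 * and data names (illustrative sketch only):
 */
#include <linux/spinlock.h>
#include <linux/bitops.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_bitmap;

static void touch_from_process_context(int bit)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* also disables local IRQs */
	__set_bit(bit, &demo_bitmap);
	spin_unlock_irqrestore(&demo_lock, flags);
}
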
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0b776c09aff3..d21d4fb161f7 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -275,7 +275,11 @@ static void __init print_mtrr_state(void)
275 } 275 }
276 printk(KERN_DEBUG "MTRR variable ranges %sabled:\n", 276 printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
277 mtrr_state.enabled & 2 ? "en" : "dis"); 277 mtrr_state.enabled & 2 ? "en" : "dis");
278 high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4; 278 if (size_or_mask & 0xffffffffUL)
279 high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
280 else
281 high_width = ffs(size_or_mask>>32) + 32 - 1;
282 high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
279 for (i = 0; i < num_var_ranges; ++i) { 283 for (i = 0; i < num_var_ranges; ++i) {
280 if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) 284 if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
281 printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n", 285 printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n",
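
/*
 * ffs() works on an int, so the old print_mtrr_state() expression returned 0
 * whenever the low 32 bits of size_or_mask were clear, and the printed mask
 * width came out wrong. The hunk above checks the low word first and falls
 * back to the high word. The same arithmetic as a standalone helper
 * (illustrative only; page_shift stands in for PAGE_SHIFT):
 */
#include <strings.h>	/* ffs() */

static int mask_high_width(unsigned long long size_or_mask, int page_shift)
{
	int high_width;

	if (size_or_mask & 0xffffffffULL)
		high_width = ffs((int)(size_or_mask & 0xffffffffULL)) - 1;
	else
		high_width = ffs((int)(size_or_mask >> 32)) + 32 - 1;

	/* turn the bit position into the number of hex digits to print */
	return (high_width - (32 - page_shift) + 3) / 4;
}
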
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index f93047fed791..d5e30397246b 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
14 if (c->x86_max_cores * smp_num_siblings > 1) { 14 if (c->x86_max_cores * smp_num_siblings > 1) {
15 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); 15 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
16 seq_printf(m, "siblings\t: %d\n", 16 seq_printf(m, "siblings\t: %d\n",
17 cpumask_weight(cpu_sibling_mask(cpu))); 17 cpumask_weight(cpu_core_mask(cpu)));
18 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); 18 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
19 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 19 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
20 seq_printf(m, "apicid\t\t: %d\n", c->apicid); 20 seq_printf(m, "apicid\t\t: %d\n", c->apicid);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ef2c3563357d..006281302925 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1074,12 +1074,13 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
1074 u64 addr; 1074 u64 addr;
1075 u64 start; 1075 u64 start;
1076 1076
1077 start = startt; 1077 for (start = startt; ; start += size) {
1078 while (size < sizet && (start + 1))
1079 start = find_e820_area_size(start, &size, align); 1078 start = find_e820_area_size(start, &size, align);
1080 1079 if (!(start + 1))
1081 if (size < sizet) 1080 return 0;
1082 return 0; 1081 if (size >= sizet)
1082 break;
1083 }
1083 1084
1084#ifdef CONFIG_X86_32 1085#ifdef CONFIG_X86_32
1085 if (start >= MAXMEM) 1086 if (start >= MAXMEM)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a331ec38af9e..38946c6e8433 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1410,7 +1410,10 @@ ENTRY(paranoid_exit)
1410paranoid_swapgs: 1410paranoid_swapgs:
1411 TRACE_IRQS_IRETQ 0 1411 TRACE_IRQS_IRETQ 0
1412 SWAPGS_UNSAFE_STACK 1412 SWAPGS_UNSAFE_STACK
1413 RESTORE_ALL 8
1414 jmp irq_return
1413paranoid_restore: 1415paranoid_restore:
1416 TRACE_IRQS_IRETQ 0
1414 RESTORE_ALL 8 1417 RESTORE_ALL 8
1415 jmp irq_return 1418 jmp irq_return
1416paranoid_userspace: 1419paranoid_userspace:
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 18dfa30795c9..b79c5533c421 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -442,7 +442,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
442 _ASM_EXTABLE(1b, 4b) 442 _ASM_EXTABLE(1b, 4b)
443 _ASM_EXTABLE(2b, 4b) 443 _ASM_EXTABLE(2b, 4b)
444 444
445 : [old] "=r" (old), [faulted] "=r" (faulted) 445 : [old] "=&r" (old), [faulted] "=r" (faulted)
446 : [parent] "r" (parent), [return_hooker] "r" (return_hooker) 446 : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
447 : "memory" 447 : "memory"
448 ); 448 );
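
/*
 * The ftrace.c change adds an earlyclobber ("&") to the [old] output: the
 * asm writes [old] before it has finished reading its inputs, so GCC must
 * not let the output share a register with [parent] or [return_hooker].
 * A minimal x86-64 illustration of the modifier (hypothetical helper, not
 * part of this commit):
 */
static inline unsigned long swap_word(unsigned long *slot, unsigned long newval)
{
	unsigned long old;

	asm volatile("movq (%[p]), %[old]\n\t"	/* output written early ...   */
		     "movq %[new], (%[p])\n\t"	/* ... while inputs still live */
		     : [old] "=&r" (old)	/* "&" = earlyclobber */
		     : [p] "r" (slot), [new] "r" (newval)
		     : "memory");
	return old;
}
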
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 648b3a2a3a44..81408b93f887 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -236,6 +236,10 @@ static void hpet_stop_counter(void)
236 unsigned long cfg = hpet_readl(HPET_CFG); 236 unsigned long cfg = hpet_readl(HPET_CFG);
237 cfg &= ~HPET_CFG_ENABLE; 237 cfg &= ~HPET_CFG_ENABLE;
238 hpet_writel(cfg, HPET_CFG); 238 hpet_writel(cfg, HPET_CFG);
239}
240
241static void hpet_reset_counter(void)
242{
239 hpet_writel(0, HPET_COUNTER); 243 hpet_writel(0, HPET_COUNTER);
240 hpet_writel(0, HPET_COUNTER + 4); 244 hpet_writel(0, HPET_COUNTER + 4);
241} 245}
@@ -250,6 +254,7 @@ static void hpet_start_counter(void)
250static void hpet_restart_counter(void) 254static void hpet_restart_counter(void)
251{ 255{
252 hpet_stop_counter(); 256 hpet_stop_counter();
257 hpet_reset_counter();
253 hpet_start_counter(); 258 hpet_start_counter();
254} 259}
255 260
@@ -309,7 +314,7 @@ static int hpet_setup_msi_irq(unsigned int irq);
309static void hpet_set_mode(enum clock_event_mode mode, 314static void hpet_set_mode(enum clock_event_mode mode,
310 struct clock_event_device *evt, int timer) 315 struct clock_event_device *evt, int timer)
311{ 316{
312 unsigned long cfg; 317 unsigned long cfg, cmp, now;
313 uint64_t delta; 318 uint64_t delta;
314 319
315 switch (mode) { 320 switch (mode) {
@@ -317,12 +322,23 @@ static void hpet_set_mode(enum clock_event_mode mode,
317 hpet_stop_counter(); 322 hpet_stop_counter();
318 delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult; 323 delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
319 delta >>= evt->shift; 324 delta >>= evt->shift;
325 now = hpet_readl(HPET_COUNTER);
326 cmp = now + (unsigned long) delta;
320 cfg = hpet_readl(HPET_Tn_CFG(timer)); 327 cfg = hpet_readl(HPET_Tn_CFG(timer));
321 /* Make sure we use edge triggered interrupts */ 328 /* Make sure we use edge triggered interrupts */
322 cfg &= ~HPET_TN_LEVEL; 329 cfg &= ~HPET_TN_LEVEL;
323 cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | 330 cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
324 HPET_TN_SETVAL | HPET_TN_32BIT; 331 HPET_TN_SETVAL | HPET_TN_32BIT;
325 hpet_writel(cfg, HPET_Tn_CFG(timer)); 332 hpet_writel(cfg, HPET_Tn_CFG(timer));
333 hpet_writel(cmp, HPET_Tn_CMP(timer));
334 udelay(1);
335 /*
336 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
337 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
338 * bit is automatically cleared after the first write.
339 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
340 * Publication # 24674)
341 */
326 hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); 342 hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
327 hpet_start_counter(); 343 hpet_start_counter();
328 hpet_print_config(); 344 hpet_print_config();
@@ -722,7 +738,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
722/* 738/*
723 * Clock source related code 739 * Clock source related code
724 */ 740 */
725static cycle_t read_hpet(void) 741static cycle_t read_hpet(struct clocksource *cs)
726{ 742{
727 return (cycle_t)hpet_readl(HPET_COUNTER); 743 return (cycle_t)hpet_readl(HPET_COUNTER);
728} 744}
@@ -756,7 +772,7 @@ static int hpet_clocksource_register(void)
756 hpet_restart_counter(); 772 hpet_restart_counter();
757 773
758 /* Verify whether hpet counter works */ 774 /* Verify whether hpet counter works */
759 t1 = read_hpet(); 775 t1 = hpet_readl(HPET_COUNTER);
760 rdtscll(start); 776 rdtscll(start);
761 777
762 /* 778 /*
@@ -770,7 +786,7 @@ static int hpet_clocksource_register(void)
770 rdtscll(now); 786 rdtscll(now);
771 } while ((now - start) < 200000UL); 787 } while ((now - start) < 200000UL);
772 788
773 if (t1 == read_hpet()) { 789 if (t1 == hpet_readl(HPET_COUNTER)) {
774 printk(KERN_WARNING 790 printk(KERN_WARNING
775 "HPET counter not counting. HPET disabled\n"); 791 "HPET counter not counting. HPET disabled\n");
776 return -ENODEV; 792 return -ENODEV;
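
/*
 * read_hpet() gains a struct clocksource argument because the clocksource
 * ->read() callback was changed tree-wide to receive the clocksource being
 * read (the same change shows up below for the PIT, kvm-clock, TSC, UV RTC,
 * VMI and lguest clocks). Sketch of a clocksource using the new prototype,
 * assuming the 2.6.30-era API and a hypothetical counter:
 */
#include <linux/clocksource.h>

static cycle_t demo_counter_read(struct clocksource *cs)
{
	return (cycle_t)0;	/* a real driver reads its hardware counter here */
}

static struct clocksource demo_clocksource = {
	.name	= "demo",
	.rating	= 100,
	.read	= demo_counter_read,
	.mask	= CLOCKSOURCE_MASK(32),
};
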
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 3475440baa54..c2e0bb0890d4 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
129 * to just read by itself. So use jiffies to emulate a free 129 * to just read by itself. So use jiffies to emulate a free
130 * running counter: 130 * running counter:
131 */ 131 */
132static cycle_t pit_read(void) 132static cycle_t pit_read(struct clocksource *cs)
133{ 133{
134 static int old_count; 134 static int old_count;
135 static u32 old_jifs; 135 static u32 old_jifs;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index eedfaebe1063..b1f4dffb919e 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -88,6 +88,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
88 gdb_regs[GDB_SS] = __KERNEL_DS; 88 gdb_regs[GDB_SS] = __KERNEL_DS;
89 gdb_regs[GDB_FS] = 0xFFFF; 89 gdb_regs[GDB_FS] = 0xFFFF;
90 gdb_regs[GDB_GS] = 0xFFFF; 90 gdb_regs[GDB_GS] = 0xFFFF;
91 gdb_regs[GDB_SP] = (int)&regs->sp;
91#else 92#else
92 gdb_regs[GDB_R8] = regs->r8; 93 gdb_regs[GDB_R8] = regs->r8;
93 gdb_regs[GDB_R9] = regs->r9; 94 gdb_regs[GDB_R9] = regs->r9;
@@ -100,8 +101,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
100 gdb_regs32[GDB_PS] = regs->flags; 101 gdb_regs32[GDB_PS] = regs->flags;
101 gdb_regs32[GDB_CS] = regs->cs; 102 gdb_regs32[GDB_CS] = regs->cs;
102 gdb_regs32[GDB_SS] = regs->ss; 103 gdb_regs32[GDB_SS] = regs->ss;
103#endif
104 gdb_regs[GDB_SP] = regs->sp; 104 gdb_regs[GDB_SP] = regs->sp;
105#endif
105} 106}
106 107
107/** 108/**
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 137f2e8132df..223af43f1526 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
77 return ret; 77 return ret;
78} 78}
79 79
80static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
81{
82 return kvm_clock_read();
83}
84
80/* 85/*
81 * If we don't do that, there is the possibility that the guest 86 * If we don't do that, there is the possibility that the guest
82 * will calibrate under heavy load - thus, getting a lower lpj - 87 * will calibrate under heavy load - thus, getting a lower lpj -
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
107 112
108static struct clocksource kvm_clock = { 113static struct clocksource kvm_clock = {
109 .name = "kvm-clock", 114 .name = "kvm-clock",
110 .read = kvm_clock_read, 115 .read = kvm_clock_get_cycles,
111 .rating = 400, 116 .rating = 400,
112 .mask = CLOCKSOURCE_MASK(64), 117 .mask = CLOCKSOURCE_MASK(64),
113 .mult = 1 << KVM_SCALE, 118 .mult = 1 << KVM_SCALE,
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index e7368c1da01d..c1c429d00130 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -194,7 +194,7 @@ void machine_kexec(struct kimage *image)
194 unsigned int preserve_context); 194 unsigned int preserve_context);
195 195
196#ifdef CONFIG_KEXEC_JUMP 196#ifdef CONFIG_KEXEC_JUMP
197 if (kexec_image->preserve_context) 197 if (image->preserve_context)
198 save_processor_state(); 198 save_processor_state();
199#endif 199#endif
200 200
@@ -253,7 +253,7 @@ void machine_kexec(struct kimage *image)
253 image->preserve_context); 253 image->preserve_context);
254 254
255#ifdef CONFIG_KEXEC_JUMP 255#ifdef CONFIG_KEXEC_JUMP
256 if (kexec_image->preserve_context) 256 if (image->preserve_context)
257 restore_processor_state(); 257 restore_processor_state();
258#endif 258#endif
259 259
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 89cea4d44679..84c3bf209e98 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -274,7 +274,7 @@ void machine_kexec(struct kimage *image)
274 int save_ftrace_enabled; 274 int save_ftrace_enabled;
275 275
276#ifdef CONFIG_KEXEC_JUMP 276#ifdef CONFIG_KEXEC_JUMP
277 if (kexec_image->preserve_context) 277 if (image->preserve_context)
278 save_processor_state(); 278 save_processor_state();
279#endif 279#endif
280 280
@@ -333,7 +333,7 @@ void machine_kexec(struct kimage *image)
333 image->preserve_context); 333 image->preserve_context);
334 334
335#ifdef CONFIG_KEXEC_JUMP 335#ifdef CONFIG_KEXEC_JUMP
336 if (kexec_image->preserve_context) 336 if (image->preserve_context)
337 restore_processor_state(); 337 restore_processor_state();
338#endif 338#endif
339 339
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef310..98c470c069d1 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
108EXPORT_SYMBOL_GPL(ucode_cpu_info); 108EXPORT_SYMBOL_GPL(ucode_cpu_info);
109 109
110#ifdef CONFIG_MICROCODE_OLD_INTERFACE 110#ifdef CONFIG_MICROCODE_OLD_INTERFACE
111struct update_for_cpu {
112 const void __user *buf;
113 size_t size;
114};
115
116static long update_for_cpu(void *_ufc)
117{
118 struct update_for_cpu *ufc = _ufc;
119 int error;
120
121 error = microcode_ops->request_microcode_user(smp_processor_id(),
122 ufc->buf, ufc->size);
123 if (error < 0)
124 return error;
125 if (!error)
126 microcode_ops->apply_microcode(smp_processor_id());
127 return error;
128}
129
130static int do_microcode_update(const void __user *buf, size_t size) 111static int do_microcode_update(const void __user *buf, size_t size)
131{ 112{
113 cpumask_t old;
132 int error = 0; 114 int error = 0;
133 int cpu; 115 int cpu;
134 struct update_for_cpu ufc = { .buf = buf, .size = size }; 116
117 old = current->cpus_allowed;
135 118
136 for_each_online_cpu(cpu) { 119 for_each_online_cpu(cpu) {
137 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 120 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
138 121
139 if (!uci->valid) 122 if (!uci->valid)
140 continue; 123 continue;
141 error = work_on_cpu(cpu, update_for_cpu, &ufc); 124
125 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
126 error = microcode_ops->request_microcode_user(cpu, buf, size);
142 if (error < 0) 127 if (error < 0)
143 break; 128 goto out;
129 if (!error)
130 microcode_ops->apply_microcode(cpu);
144 } 131 }
132out:
133 set_cpus_allowed_ptr(current, &old);
145 return error; 134 return error;
146} 135}
147 136
@@ -391,8 +380,6 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
391 return err; 380 return err;
392 381
393 err = microcode_init_cpu(cpu); 382 err = microcode_init_cpu(cpu);
394 if (err)
395 sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
396 383
397 return err; 384 return err;
398} 385}
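
/*
 * The microcode_core.c hunk goes back to binding the calling task to each
 * CPU with set_cpus_allowed_ptr() so request_microcode_user() runs on the
 * CPU being updated, instead of going through work_on_cpu(). Generic shape
 * of that pattern, with error handling trimmed (illustrative sketch only):
 */
#include <linux/cpumask.h>
#include <linux/sched.h>

static int run_on_each_online_cpu(int (*fn)(int cpu))
{
	cpumask_t old = current->cpus_allowed;	/* remember old affinity */
	int cpu, err = 0;

	for_each_online_cpu(cpu) {
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		err = fn(cpu);
		if (err < 0)
			break;
	}
	set_cpus_allowed_ptr(current, &old);	/* always restore affinity */
	return err;
}
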
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index dce99dca6cf8..70fd7e414c15 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -679,7 +679,7 @@ void __init get_smp_config(void)
679 __get_smp_config(0); 679 __get_smp_config(0);
680} 680}
681 681
682static void smp_reserve_bootmem(struct mpf_intel *mpf) 682static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
683{ 683{
684 unsigned long size = get_mpc_size(mpf->physptr); 684 unsigned long size = get_mpc_size(mpf->physptr);
685#ifdef CONFIG_X86_32 685#ifdef CONFIG_X86_32
@@ -838,7 +838,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
838 838
839static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; 839static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
840 840
841static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) 841static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
842{ 842{
843 int i; 843 int i;
844 844
@@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
866 } 866 }
867} 867}
868#else /* CONFIG_X86_IO_APIC */ 868#else /* CONFIG_X86_IO_APIC */
869static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} 869static
870inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
870#endif /* CONFIG_X86_IO_APIC */ 871#endif /* CONFIG_X86_IO_APIC */
871 872
872static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, 873static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
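
/*
 * The __init annotations added to the mpparse.c helpers keep the section
 * checker happy: these functions reference __initdata and are only called
 * during boot, so they belong in .init.text as well. Minimal example of the
 * rule (hypothetical names, not part of this commit):
 */
#include <linux/init.h>

static int boot_only_value __initdata = 42;

static int __init read_boot_only_value(void)
{
	return boot_only_value;	/* __init code may use __initdata */
}
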
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 8e45f4464880..9faf43bea336 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -134,7 +134,9 @@ static void *get_call_destination(u8 type)
134 .pv_irq_ops = pv_irq_ops, 134 .pv_irq_ops = pv_irq_ops,
135 .pv_apic_ops = pv_apic_ops, 135 .pv_apic_ops = pv_apic_ops,
136 .pv_mmu_ops = pv_mmu_ops, 136 .pv_mmu_ops = pv_mmu_ops,
137#ifdef CONFIG_PARAVIRT_SPINLOCKS
137 .pv_lock_ops = pv_lock_ops, 138 .pv_lock_ops = pv_lock_ops,
139#endif
138 }; 140 };
139 return *((void **)&tmpl + type); 141 return *((void **)&tmpl + type);
140} 142}
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 34f12e9996ed..221a3853e268 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -50,7 +50,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
50 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); 50 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
51} 51}
52 52
53struct dma_map_ops swiotlb_dma_ops = { 53static struct dma_map_ops swiotlb_dma_ops = {
54 .mapping_error = swiotlb_dma_mapping_error, 54 .mapping_error = swiotlb_dma_mapping_error,
55 .alloc_coherent = x86_swiotlb_alloc_coherent, 55 .alloc_coherent = x86_swiotlb_alloc_coherent,
56 .free_coherent = swiotlb_free_coherent, 56 .free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index e95022e4f5d5..7563b31b4f03 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -261,8 +261,6 @@ static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
261{ 261{
262 if (hpet_force_user) 262 if (hpet_force_user)
263 old_ich_force_enable_hpet(dev); 263 old_ich_force_enable_hpet(dev);
264 else
265 hpet_print_force_info();
266} 264}
267 265
268DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, 266DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 1340dad417f4..667188e0b5a0 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -232,6 +232,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
232 DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"), 232 DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
233 }, 233 },
234 }, 234 },
235 { /* Handle problems with rebooting on Sony VGN-Z540N */
236 .callback = set_bios_reboot,
237 .ident = "Sony VGN-Z540N",
238 .matches = {
239 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
240 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
241 },
242 },
235 { } 243 { }
236}; 244};
237 245
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index deb5ebb32c3b..ed0c33761e6d 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,6 +25,8 @@ static int uv_bau_retry_limit __read_mostly;
25 25
26/* position of pnode (which is nasid>>1): */ 26/* position of pnode (which is nasid>>1): */
27static int uv_nshift __read_mostly; 27static int uv_nshift __read_mostly;
28/* base pnode in this partition */
29static int uv_partition_base_pnode __read_mostly;
28 30
29static unsigned long uv_mmask __read_mostly; 31static unsigned long uv_mmask __read_mostly;
30 32
@@ -32,6 +34,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
32static DEFINE_PER_CPU(struct bau_control, bau_control); 34static DEFINE_PER_CPU(struct bau_control, bau_control);
33 35
34/* 36/*
37 * Determine the first node on a blade.
38 */
39static int __init blade_to_first_node(int blade)
40{
41 int node, b;
42
43 for_each_online_node(node) {
44 b = uv_node_to_blade_id(node);
45 if (blade == b)
46 return node;
47 }
48 return -1; /* shouldn't happen */
49}
50
51/*
52 * Determine the apicid of the first cpu on a blade.
53 */
54static int __init blade_to_first_apicid(int blade)
55{
56 int cpu;
57
58 for_each_present_cpu(cpu)
59 if (blade == uv_cpu_to_blade_id(cpu))
60 return per_cpu(x86_cpu_to_apicid, cpu);
61 return -1;
62}
63
64/*
35 * Free a software acknowledge hardware resource by clearing its Pending 65 * Free a software acknowledge hardware resource by clearing its Pending
36 * bit. This will return a reply to the sender. 66 * bit. This will return a reply to the sender.
37 * If the message has timed out, a reply has already been sent by the 67 * If the message has timed out, a reply has already been sent by the
@@ -67,7 +97,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
67 msp = __get_cpu_var(bau_control).msg_statuses + msg_slot; 97 msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
68 cpu = uv_blade_processor_id(); 98 cpu = uv_blade_processor_id();
69 msg->number_of_cpus = 99 msg->number_of_cpus =
70 uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id())); 100 uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
71 this_cpu_mask = 1UL << cpu; 101 this_cpu_mask = 1UL << cpu;
72 if (msp->seen_by.bits & this_cpu_mask) 102 if (msp->seen_by.bits & this_cpu_mask)
73 return; 103 return;
@@ -215,14 +245,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
215 * Returns @flush_mask if some remote flushing remains to be done. The 245 * Returns @flush_mask if some remote flushing remains to be done. The
216 * mask will have some bits still set. 246 * mask will have some bits still set.
217 */ 247 */
218const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade, 248const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
219 struct bau_desc *bau_desc, 249 struct bau_desc *bau_desc,
220 struct cpumask *flush_mask) 250 struct cpumask *flush_mask)
221{ 251{
222 int completion_status = 0; 252 int completion_status = 0;
223 int right_shift; 253 int right_shift;
224 int tries = 0; 254 int tries = 0;
225 int blade; 255 int pnode;
226 int bit; 256 int bit;
227 unsigned long mmr_offset; 257 unsigned long mmr_offset;
228 unsigned long index; 258 unsigned long index;
@@ -265,8 +295,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
265 * use the IPI method of shootdown on them. 295 * use the IPI method of shootdown on them.
266 */ 296 */
267 for_each_cpu(bit, flush_mask) { 297 for_each_cpu(bit, flush_mask) {
268 blade = uv_cpu_to_blade_id(bit); 298 pnode = uv_cpu_to_pnode(bit);
269 if (blade == this_blade) 299 if (pnode == this_pnode)
270 continue; 300 continue;
271 cpumask_clear_cpu(bit, flush_mask); 301 cpumask_clear_cpu(bit, flush_mask);
272 } 302 }
@@ -309,16 +339,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
309 struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); 339 struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
310 int i; 340 int i;
311 int bit; 341 int bit;
312 int blade; 342 int pnode;
313 int uv_cpu; 343 int uv_cpu;
314 int this_blade; 344 int this_pnode;
315 int locals = 0; 345 int locals = 0;
316 struct bau_desc *bau_desc; 346 struct bau_desc *bau_desc;
317 347
318 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); 348 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
319 349
320 uv_cpu = uv_blade_processor_id(); 350 uv_cpu = uv_blade_processor_id();
321 this_blade = uv_numa_blade_id(); 351 this_pnode = uv_hub_info->pnode;
322 bau_desc = __get_cpu_var(bau_control).descriptor_base; 352 bau_desc = __get_cpu_var(bau_control).descriptor_base;
323 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu; 353 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
324 354
@@ -326,13 +356,14 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
326 356
327 i = 0; 357 i = 0;
328 for_each_cpu(bit, flush_mask) { 358 for_each_cpu(bit, flush_mask) {
329 blade = uv_cpu_to_blade_id(bit); 359 pnode = uv_cpu_to_pnode(bit);
330 BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1)); 360 BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
331 if (blade == this_blade) { 361 if (pnode == this_pnode) {
332 locals++; 362 locals++;
333 continue; 363 continue;
334 } 364 }
335 bau_node_set(blade, &bau_desc->distribution); 365 bau_node_set(pnode - uv_partition_base_pnode,
366 &bau_desc->distribution);
336 i++; 367 i++;
337 } 368 }
338 if (i == 0) { 369 if (i == 0) {
@@ -350,7 +381,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
350 bau_desc->payload.address = va; 381 bau_desc->payload.address = va;
351 bau_desc->payload.sending_cpu = cpu; 382 bau_desc->payload.sending_cpu = cpu;
352 383
353 return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask); 384 return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
354} 385}
355 386
356/* 387/*
@@ -418,24 +449,58 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
418 set_irq_regs(old_regs); 449 set_irq_regs(old_regs);
419} 450}
420 451
452/*
453 * uv_enable_timeouts
454 *
455 * Each target blade (i.e. blades that have cpu's) needs to have
456 * shootdown message timeouts enabled. The timeout does not cause
457 * an interrupt, but causes an error message to be returned to
458 * the sender.
459 */
421static void uv_enable_timeouts(void) 460static void uv_enable_timeouts(void)
422{ 461{
423 int i;
424 int blade; 462 int blade;
425 int last_blade; 463 int nblades;
426 int pnode; 464 int pnode;
427 int cur_cpu = 0; 465 unsigned long mmr_image;
428 unsigned long apicid;
429 466
430 last_blade = -1; 467 nblades = uv_num_possible_blades();
431 for_each_online_node(i) { 468
432 blade = uv_node_to_blade_id(i); 469 for (blade = 0; blade < nblades; blade++) {
433 if (blade == last_blade) 470 if (!uv_blade_nr_possible_cpus(blade))
434 continue; 471 continue;
435 last_blade = blade; 472
436 apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
437 pnode = uv_blade_to_pnode(blade); 473 pnode = uv_blade_to_pnode(blade);
438 cur_cpu += uv_blade_nr_possible_cpus(i); 474 mmr_image =
475 uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
476 /*
477 * Set the timeout period and then lock it in, in three
478 * steps; captures and locks in the period.
479 *
480 * To program the period, the SOFT_ACK_MODE must be off.
481 */
482 mmr_image &= ~((unsigned long)1 <<
483 UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
484 uv_write_global_mmr64
485 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
486 /*
487 * Set the 4-bit period.
488 */
489 mmr_image &= ~((unsigned long)0xf <<
490 UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
491 mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
492 UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
493 uv_write_global_mmr64
494 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
495 /*
496 * Subsequent reversals of the timebase bit (3) cause an
497 * immediate timeout of one or all INTD resources as
498 * indicated in bits 2:0 (7 causes all of them to timeout).
499 */
500 mmr_image |= ((unsigned long)1 <<
501 UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
502 uv_write_global_mmr64
503 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
439 } 504 }
440} 505}
441 506
@@ -482,8 +547,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
482 stat->requestee, stat->onetlb, stat->alltlb, 547 stat->requestee, stat->onetlb, stat->alltlb,
483 stat->s_retry, stat->d_retry, stat->ptc_i); 548 stat->s_retry, stat->d_retry, stat->ptc_i);
484 seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n", 549 seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
485 uv_read_global_mmr64(uv_blade_to_pnode 550 uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
486 (uv_cpu_to_blade_id(cpu)),
487 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), 551 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
488 stat->sflush, stat->dflush, 552 stat->sflush, stat->dflush,
489 stat->retriesok, stat->nomsg, 553 stat->retriesok, stat->nomsg,
@@ -617,16 +681,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
617 * finish the initialization of the per-blade control structures 681 * finish the initialization of the per-blade control structures
618 */ 682 */
619static void __init 683static void __init
620uv_table_bases_finish(int blade, int node, int cur_cpu, 684uv_table_bases_finish(int blade,
621 struct bau_control *bau_tablesp, 685 struct bau_control *bau_tablesp,
622 struct bau_desc *adp) 686 struct bau_desc *adp)
623{ 687{
624 struct bau_control *bcp; 688 struct bau_control *bcp;
625 int i; 689 int cpu;
626 690
627 for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) { 691 for_each_present_cpu(cpu) {
628 bcp = (struct bau_control *)&per_cpu(bau_control, i); 692 if (blade != uv_cpu_to_blade_id(cpu))
693 continue;
629 694
695 bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
630 bcp->bau_msg_head = bau_tablesp->va_queue_first; 696 bcp->bau_msg_head = bau_tablesp->va_queue_first;
631 bcp->va_queue_first = bau_tablesp->va_queue_first; 697 bcp->va_queue_first = bau_tablesp->va_queue_first;
632 bcp->va_queue_last = bau_tablesp->va_queue_last; 698 bcp->va_queue_last = bau_tablesp->va_queue_last;
@@ -649,11 +715,10 @@ uv_activation_descriptor_init(int node, int pnode)
649 struct bau_desc *adp; 715 struct bau_desc *adp;
650 struct bau_desc *ad2; 716 struct bau_desc *ad2;
651 717
652 adp = (struct bau_desc *) 718 adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
653 kmalloc_node(16384, GFP_KERNEL, node);
654 BUG_ON(!adp); 719 BUG_ON(!adp);
655 720
656 pa = __pa((unsigned long)adp); 721 pa = uv_gpa(adp); /* need the real nasid*/
657 n = pa >> uv_nshift; 722 n = pa >> uv_nshift;
658 m = pa & uv_mmask; 723 m = pa & uv_mmask;
659 724
@@ -667,8 +732,12 @@ uv_activation_descriptor_init(int node, int pnode)
667 for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) { 732 for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
668 memset(ad2, 0, sizeof(struct bau_desc)); 733 memset(ad2, 0, sizeof(struct bau_desc));
669 ad2->header.sw_ack_flag = 1; 734 ad2->header.sw_ack_flag = 1;
670 ad2->header.base_dest_nodeid = 735 /*
671 uv_blade_to_pnode(uv_cpu_to_blade_id(0)); 736 * base_dest_nodeid is the first node in the partition, so
737 * the bit map will indicate partition-relative node numbers.
738 * note that base_dest_nodeid is actually a nasid.
739 */
740 ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
672 ad2->header.command = UV_NET_ENDPOINT_INTD; 741 ad2->header.command = UV_NET_ENDPOINT_INTD;
673 ad2->header.int_both = 1; 742 ad2->header.int_both = 1;
674 /* 743 /*
@@ -686,6 +755,8 @@ static struct bau_payload_queue_entry * __init
686uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) 755uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
687{ 756{
688 struct bau_payload_queue_entry *pqp; 757 struct bau_payload_queue_entry *pqp;
758 unsigned long pa;
759 int pn;
689 char *cp; 760 char *cp;
690 761
691 pqp = (struct bau_payload_queue_entry *) kmalloc_node( 762 pqp = (struct bau_payload_queue_entry *) kmalloc_node(
@@ -696,10 +767,14 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
696 cp = (char *)pqp + 31; 767 cp = (char *)pqp + 31;
697 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); 768 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
698 bau_tablesp->va_queue_first = pqp; 769 bau_tablesp->va_queue_first = pqp;
770 /*
771 * need the pnode of where the memory was really allocated
772 */
773 pa = uv_gpa(pqp);
774 pn = pa >> uv_nshift;
699 uv_write_global_mmr64(pnode, 775 uv_write_global_mmr64(pnode,
700 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, 776 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
701 ((unsigned long)pnode << 777 ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
702 UV_PAYLOADQ_PNODE_SHIFT) |
703 uv_physnodeaddr(pqp)); 778 uv_physnodeaddr(pqp));
704 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, 779 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
705 uv_physnodeaddr(pqp)); 780 uv_physnodeaddr(pqp));
@@ -715,8 +790,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
715/* 790/*
716 * Initialization of each UV blade's structures 791 * Initialization of each UV blade's structures
717 */ 792 */
718static int __init uv_init_blade(int blade, int node, int cur_cpu) 793static int __init uv_init_blade(int blade)
719{ 794{
795 int node;
720 int pnode; 796 int pnode;
721 unsigned long pa; 797 unsigned long pa;
722 unsigned long apicid; 798 unsigned long apicid;
@@ -724,16 +800,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
724 struct bau_payload_queue_entry *pqp; 800 struct bau_payload_queue_entry *pqp;
725 struct bau_control *bau_tablesp; 801 struct bau_control *bau_tablesp;
726 802
803 node = blade_to_first_node(blade);
727 bau_tablesp = uv_table_bases_init(blade, node); 804 bau_tablesp = uv_table_bases_init(blade, node);
728 pnode = uv_blade_to_pnode(blade); 805 pnode = uv_blade_to_pnode(blade);
729 adp = uv_activation_descriptor_init(node, pnode); 806 adp = uv_activation_descriptor_init(node, pnode);
730 pqp = uv_payload_queue_init(node, pnode, bau_tablesp); 807 pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
731 uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp); 808 uv_table_bases_finish(blade, bau_tablesp, adp);
732 /* 809 /*
733 * the below initialization can't be in firmware because the 810 * the below initialization can't be in firmware because the
734 * messaging IRQ will be determined by the OS 811 * messaging IRQ will be determined by the OS
735 */ 812 */
736 apicid = per_cpu(x86_cpu_to_apicid, cur_cpu); 813 apicid = blade_to_first_apicid(blade);
737 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); 814 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
738 if ((pa & 0xff) != UV_BAU_MESSAGE) { 815 if ((pa & 0xff) != UV_BAU_MESSAGE) {
739 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 816 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -748,9 +825,7 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
748static int __init uv_bau_init(void) 825static int __init uv_bau_init(void)
749{ 826{
750 int blade; 827 int blade;
751 int node;
752 int nblades; 828 int nblades;
753 int last_blade;
754 int cur_cpu; 829 int cur_cpu;
755 830
756 if (!is_uv_system()) 831 if (!is_uv_system())
@@ -763,29 +838,21 @@ static int __init uv_bau_init(void)
763 uv_bau_retry_limit = 1; 838 uv_bau_retry_limit = 1;
764 uv_nshift = uv_hub_info->n_val; 839 uv_nshift = uv_hub_info->n_val;
765 uv_mmask = (1UL << uv_hub_info->n_val) - 1; 840 uv_mmask = (1UL << uv_hub_info->n_val) - 1;
766 nblades = 0; 841 nblades = uv_num_possible_blades();
767 last_blade = -1; 842
768 cur_cpu = 0;
769 for_each_online_node(node) {
770 blade = uv_node_to_blade_id(node);
771 if (blade == last_blade)
772 continue;
773 last_blade = blade;
774 nblades++;
775 }
776 uv_bau_table_bases = (struct bau_control **) 843 uv_bau_table_bases = (struct bau_control **)
777 kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL); 844 kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
778 BUG_ON(!uv_bau_table_bases); 845 BUG_ON(!uv_bau_table_bases);
779 846
780 last_blade = -1; 847 uv_partition_base_pnode = 0x7fffffff;
781 for_each_online_node(node) { 848 for (blade = 0; blade < nblades; blade++)
782 blade = uv_node_to_blade_id(node); 849 if (uv_blade_nr_possible_cpus(blade) &&
783 if (blade == last_blade) 850 (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
784 continue; 851 uv_partition_base_pnode = uv_blade_to_pnode(blade);
785 last_blade = blade; 852 for (blade = 0; blade < nblades; blade++)
786 uv_init_blade(blade, node, cur_cpu); 853 if (uv_blade_nr_possible_cpus(blade))
787 cur_cpu += uv_blade_nr_possible_cpus(blade); 854 uv_init_blade(blade);
788 } 855
789 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); 856 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
790 uv_enable_timeouts(); 857 uv_enable_timeouts();
791 858
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a567ebe6361..d57de05dc430 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
699 * code, which is necessary to support wrapping clocksources like pm 699 * code, which is necessary to support wrapping clocksources like pm
700 * timer. 700 * timer.
701 */ 701 */
702static cycle_t read_tsc(void) 702static cycle_t read_tsc(struct clocksource *cs)
703{ 703{
704 cycle_t ret = (cycle_t)get_cycles(); 704 cycle_t ret = (cycle_t)get_cycles();
705 705
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
index 67f9b9dbf800..36afb98675a4 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/sysdev.h> 22#include <linux/sysdev.h>
23#include <asm/uv/bios.h> 23#include <asm/uv/bios.h>
24#include <asm/uv/uv.h>
24 25
25struct kobject *sgi_uv_kobj; 26struct kobject *sgi_uv_kobj;
26 27
@@ -47,6 +48,9 @@ static int __init sgi_uv_sysfs_init(void)
47{ 48{
48 unsigned long ret; 49 unsigned long ret;
49 50
51 if (!is_uv_system())
52 return -ENODEV;
53
50 if (!sgi_uv_kobj) 54 if (!sgi_uv_kobj)
51 sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj); 55 sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
52 if (!sgi_uv_kobj) { 56 if (!sgi_uv_kobj) {
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
index 2ffb6c53326e..583f11d5c480 100644
--- a/arch/x86/kernel/uv_time.c
+++ b/arch/x86/kernel/uv_time.c
@@ -29,7 +29,7 @@
29 29
30#define RTC_NAME "sgi_rtc" 30#define RTC_NAME "sgi_rtc"
31 31
32static cycle_t uv_read_rtc(void); 32static cycle_t uv_read_rtc(struct clocksource *cs);
33static int uv_rtc_next_event(unsigned long, struct clock_event_device *); 33static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
34static void uv_rtc_timer_setup(enum clock_event_mode, 34static void uv_rtc_timer_setup(enum clock_event_mode,
35 struct clock_event_device *); 35 struct clock_event_device *);
@@ -123,7 +123,7 @@ static int uv_setup_intr(int cpu, u64 expires)
123 /* Initialize comparator value */ 123 /* Initialize comparator value */
124 uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires); 124 uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
125 125
126 return (expires < uv_read_rtc() && !uv_intr_pending(pnode)); 126 return (expires < uv_read_rtc(NULL) && !uv_intr_pending(pnode));
127} 127}
128 128
129/* 129/*
@@ -256,7 +256,7 @@ static int uv_rtc_unset_timer(int cpu)
256 256
257 spin_lock_irqsave(&head->lock, flags); 257 spin_lock_irqsave(&head->lock, flags);
258 258
259 if (head->next_cpu == bcpu && uv_read_rtc() >= *t) 259 if (head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t)
260 rc = 1; 260 rc = 1;
261 261
262 *t = ULLONG_MAX; 262 *t = ULLONG_MAX;
@@ -278,7 +278,7 @@ static int uv_rtc_unset_timer(int cpu)
278/* 278/*
279 * Read the RTC. 279 * Read the RTC.
280 */ 280 */
281static cycle_t uv_read_rtc(void) 281static cycle_t uv_read_rtc(struct clocksource *cs)
282{ 282{
283 return (cycle_t)uv_read_local_mmr(UVH_RTC); 283 return (cycle_t)uv_read_local_mmr(UVH_RTC);
284} 284}
@@ -291,7 +291,7 @@ static int uv_rtc_next_event(unsigned long delta,
291{ 291{
292 int ced_cpu = cpumask_first(ced->cpumask); 292 int ced_cpu = cpumask_first(ced->cpumask);
293 293
294 return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc()); 294 return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
295} 295}
296 296
297/* 297/*
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index d303369a7bad..2b3eb82efeeb 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
283/** vmi clocksource */ 283/** vmi clocksource */
284static struct clocksource clocksource_vmi; 284static struct clocksource clocksource_vmi;
285 285
286static cycle_t read_real_cycles(void) 286static cycle_t read_real_cycles(struct clocksource *cs)
287{ 287{
288 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); 288 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
289 return max(ret, clocksource_vmi.cycle_last); 289 return max(ret, clocksource_vmi.cycle_last);
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 2b54fe002e94..c5ee17e8c6d9 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -89,7 +89,7 @@ int save_i387_xstate(void __user *buf)
89 89
90 if (!used_math()) 90 if (!used_math())
91 return 0; 91 return 0;
92 clear_used_math(); /* trigger finit */ 92
93 if (task_thread_info(tsk)->status & TS_USEDFPU) { 93 if (task_thread_info(tsk)->status & TS_USEDFPU) {
94 /* 94 /*
95 * Start with clearing the user buffer. This will present a 95 * Start with clearing the user buffer. This will present a
@@ -114,6 +114,8 @@ int save_i387_xstate(void __user *buf)
114 return -1; 114 return -1;
115 } 115 }
116 116
117 clear_used_math(); /* trigger finit */
118
117 if (task_thread_info(tsk)->status & TS_XSAVE) { 119 if (task_thread_info(tsk)->status & TS_XSAVE) {
118 struct _fpstate __user *fx = buf; 120 struct _fpstate __user *fx = buf;
119 struct _xstate __user *x = buf; 121 struct _xstate __user *x = buf;
@@ -324,7 +326,7 @@ void __ref xsave_cntxt_init(void)
324 } 326 }
325 327
326 /* 328 /*
327 * for now OS knows only about FP/SSE 329 * Support only the state known to OS.
328 */ 330 */
329 pcntxt_mask = pcntxt_mask & XCNTXT_MASK; 331 pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
330 xsave_init(); 332 xsave_init();
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a36f7f7c4c7..b6caf1329b1b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1248,7 +1248,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1248 pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word); 1248 pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
1249 sp->gfn = gfn; 1249 sp->gfn = gfn;
1250 sp->role = role; 1250 sp->role = role;
1251 sp->global = role.cr4_pge; 1251 sp->global = 0;
1252 hlist_add_head(&sp->hash_link, bucket); 1252 hlist_add_head(&sp->hash_link, bucket);
1253 if (!direct) { 1253 if (!direct) {
1254 if (rmap_write_protect(vcpu->kvm, gfn)) 1254 if (rmap_write_protect(vcpu->kvm, gfn))
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1821c2078199..1f8510c51d6e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -411,7 +411,6 @@ static __init int svm_hardware_setup(void)
411 411
412 iopm_va = page_address(iopm_pages); 412 iopm_va = page_address(iopm_pages);
413 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER)); 413 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
414 clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
415 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; 414 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
416 415
417 if (boot_cpu_has(X86_FEATURE_NX)) 416 if (boot_cpu_has(X86_FEATURE_NX))
@@ -796,6 +795,11 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
796 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; 795 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
797 var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; 796 var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
798 797
798 /* AMD's VMCB does not have an explicit unusable field, so emulate it
799 * for cross vendor migration purposes by "not present"
800 */
801 var->unusable = !var->present || (var->type == 0);
802
799 switch (seg) { 803 switch (seg) {
800 case VCPU_SREG_CS: 804 case VCPU_SREG_CS:
801 /* 805 /*
@@ -827,8 +831,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
827 var->type |= 0x1; 831 var->type |= 0x1;
828 break; 832 break;
829 } 833 }
830
831 var->unusable = !var->present;
832} 834}
833 835
834static int svm_get_cpl(struct kvm_vcpu *vcpu) 836static int svm_get_cpl(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8ca100a9ecac..49079a46687b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1121,9 +1121,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1121 1121
1122static int is_efer_nx(void) 1122static int is_efer_nx(void)
1123{ 1123{
1124 u64 efer; 1124 unsigned long long efer = 0;
1125 1125
1126 rdmsrl(MSR_EFER, efer); 1126 rdmsrl_safe(MSR_EFER, &efer);
1127 return efer & EFER_NX; 1127 return efer & EFER_NX;
1128} 1128}
1129 1129
@@ -1259,7 +1259,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1259 bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) | 1259 bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
1260 bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) | 1260 bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
1261 bit(X86_FEATURE_SYSCALL) | 1261 bit(X86_FEATURE_SYSCALL) |
1262 (bit(X86_FEATURE_NX) && is_efer_nx()) | 1262 (is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
1263#ifdef CONFIG_X86_64 1263#ifdef CONFIG_X86_64
1264 bit(X86_FEATURE_LM) | 1264 bit(X86_FEATURE_LM) |
1265#endif 1265#endif
@@ -2775,6 +2775,9 @@ out:
2775 2775
2776void kvm_arch_exit(void) 2776void kvm_arch_exit(void)
2777{ 2777{
2778 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
2779 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
2780 CPUFREQ_TRANSITION_NOTIFIER);
2778 kvm_x86_ops = NULL; 2781 kvm_x86_ops = NULL;
2779 kvm_mmu_module_exit(); 2782 kvm_mmu_module_exit();
2780} 2783}
@@ -4159,6 +4162,11 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
4159 4162
4160void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 4163void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
4161{ 4164{
4165 if (vcpu->arch.time_page) {
4166 kvm_release_page_dirty(vcpu->arch.time_page);
4167 vcpu->arch.time_page = NULL;
4168 }
4169
4162 kvm_x86_ops->vcpu_free(vcpu); 4170 kvm_x86_ops->vcpu_free(vcpu);
4163} 4171}
4164 4172
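
/*
 * is_efer_nx() above switches from rdmsrl() to rdmsrl_safe() so that reading
 * MSR_EFER on a CPU without that MSR returns an error instead of faulting;
 * efer is pre-initialised to 0 to cover that case. The same pattern for any
 * possibly-absent MSR (illustrative sketch, hypothetical helper):
 */
#include <linux/types.h>
#include <asm/msr.h>

static u64 read_msr_or_zero(unsigned int msr)
{
	unsigned long long val = 0;

	if (rdmsrl_safe(msr, &val))	/* non-zero: the read faulted */
		val = 0;
	return val;
}
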
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e94a11e42f98..ca7ec44bafc3 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -273,15 +273,15 @@ static void lguest_load_idt(const struct desc_ptr *desc)
273 * controls the entire thing and the Guest asks it to make changes using the 273 * controls the entire thing and the Guest asks it to make changes using the
274 * LOAD_GDT hypercall. 274 * LOAD_GDT hypercall.
275 * 275 *
276 * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY 276 * This is the exactly like the IDT code.
276 * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY 276 * This is exactly like the IDT code.
277 * hypercall and use that repeatedly to load a new IDT. I don't think it
278 * really matters, but wouldn't it be nice if they were the same? Wouldn't
279 * it be even better if you were the one to send the patch to fix it?
280 */ 277 */
281static void lguest_load_gdt(const struct desc_ptr *desc) 278static void lguest_load_gdt(const struct desc_ptr *desc)
282{ 279{
283 BUG_ON((desc->size + 1) / 8 != GDT_ENTRIES); 280 unsigned int i;
284 kvm_hypercall2(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES); 281 struct desc_struct *gdt = (void *)desc->address;
282
283 for (i = 0; i < (desc->size+1)/8; i++)
284 kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
285} 285}
286 286
287/* For a single GDT entry which changes, we do the lazy thing: alter our GDT, 287/* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
@@ -291,7 +291,9 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
291 const void *desc, int type) 291 const void *desc, int type)
292{ 292{
293 native_write_gdt_entry(dt, entrynum, desc, type); 293 native_write_gdt_entry(dt, entrynum, desc, type);
294 kvm_hypercall2(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES); 294 /* Tell Host about this new entry. */
295 kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
296 dt[entrynum].a, dt[entrynum].b);
295} 297}
296 298
297/* OK, I lied. There are three "thread local storage" GDT entries which change 299/* OK, I lied. There are three "thread local storage" GDT entries which change
@@ -661,7 +663,7 @@ static unsigned long lguest_tsc_khz(void)
661 663
662/* If we can't use the TSC, the kernel falls back to our lower-priority 664/* If we can't use the TSC, the kernel falls back to our lower-priority
663 * "lguest_clock", where we read the time value given to us by the Host. */ 665 * "lguest_clock", where we read the time value given to us by the Host. */
664static cycle_t lguest_clock_read(void) 666static cycle_t lguest_clock_read(struct clocksource *cs)
665{ 667{
666 unsigned long sec, nsec; 668 unsigned long sec, nsec;
667 669
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index be54176e9eb2..6340cef6798a 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -219,6 +219,22 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
219 return 1; 219 return 1;
220} 220}
221 221
222/**
223 * get_user_pages_fast() - pin user pages in memory
224 * @start: starting user address
225 * @nr_pages: number of pages from start to pin
226 * @write: whether pages will be written to
227 * @pages: array that receives pointers to the pages pinned.
228 * Should be at least nr_pages long.
229 *
230 * Attempt to pin user pages in memory without taking mm->mmap_sem.
231 * If not successful, it will fall back to taking the lock and
232 * calling get_user_pages().
233 *
234 * Returns number of pages pinned. This may be fewer than the number
235 * requested. If nr_pages is 0 or negative, returns 0. If no pages
236 * were pinned, returns -errno.
237 */
222int get_user_pages_fast(unsigned long start, int nr_pages, int write, 238int get_user_pages_fast(unsigned long start, int nr_pages, int write,
223 struct page **pages) 239 struct page **pages)
224{ 240{
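
/*
 * The gup.c hunk only adds kerneldoc for get_user_pages_fast(). A typical
 * caller pins a small batch of pages and drops the references with
 * put_page() when done (illustrative sketch, error handling trimmed):
 */
#include <linux/mm.h>

static int pin_two_pages(unsigned long uaddr, struct page *pages[2])
{
	int got = get_user_pages_fast(uaddr, 2, 1 /* write */, pages);

	if (got < 0)
		return got;		/* nothing was pinned */
	/* ... use pages[0..got-1], then release them: */
	while (got--)
		put_page(pages[got]);
	return 0;
}
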
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fd3da1dda1c9..ae4f7b5d7104 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -7,6 +7,7 @@
7#include <asm/page.h> 7#include <asm/page.h>
8#include <asm/page_types.h> 8#include <asm/page_types.h>
9#include <asm/sections.h> 9#include <asm/sections.h>
10#include <asm/setup.h>
10#include <asm/system.h> 11#include <asm/system.h>
11#include <asm/tlbflush.h> 12#include <asm/tlbflush.h>
12 13
@@ -304,8 +305,23 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
304#endif 305#endif
305 306
306#ifdef CONFIG_X86_64 307#ifdef CONFIG_X86_64
307 if (!after_bootmem) 308 if (!after_bootmem && !start) {
309 pud_t *pud;
310 pmd_t *pmd;
311
308 mmu_cr4_features = read_cr4(); 312 mmu_cr4_features = read_cr4();
313
314 /*
315 * _brk_end cannot change anymore, but it and _end may be
316 * located on different 2M pages. cleanup_highmap(), however,
317 * can only consider _end when it runs, so destroy any
318 * mappings beyond _brk_end here.
319 */
320 pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
321 pmd = pmd_offset(pud, _brk_end - 1);
322 while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
323 pmd_clear(pmd);
324 }
309#endif 325#endif
310 __flush_tlb_all(); 326 __flush_tlb_all();
311 327
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0dfa09d69e80..8a450930834f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -280,15 +280,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
280 return NULL; 280 return NULL;
281 area->phys_addr = phys_addr; 281 area->phys_addr = phys_addr;
282 vaddr = (unsigned long) area->addr; 282 vaddr = (unsigned long) area->addr;
283 if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) { 283
284 if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
284 free_memtype(phys_addr, phys_addr + size); 285 free_memtype(phys_addr, phys_addr + size);
285 free_vm_area(area); 286 free_vm_area(area);
286 return NULL; 287 return NULL;
287 } 288 }
288 289
289 if (ioremap_change_attr(vaddr, size, prot_val) < 0) { 290 if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
290 free_memtype(phys_addr, phys_addr + size); 291 free_memtype(phys_addr, phys_addr + size);
291 vunmap(area->addr); 292 free_vm_area(area);
292 return NULL; 293 return NULL;
293 } 294 }
294 295
@@ -374,7 +375,8 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
374 * - UC_MINUS for non-WB-able memory with no other conflicting mappings 375 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
375 * - Inherit from conflicting mappings otherwise 376 * - Inherit from conflicting mappings otherwise
376 */ 377 */
377 err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags); 378 err = reserve_memtype(phys_addr, phys_addr + size,
379 _PAGE_CACHE_WB, &flags);
378 if (err < 0) 380 if (err < 0)
379 return NULL; 381 return NULL;
380 382
@@ -547,7 +549,7 @@ void __init early_ioremap_reset(void)
547} 549}
548 550
549static void __init __early_set_fixmap(enum fixed_addresses idx, 551static void __init __early_set_fixmap(enum fixed_addresses idx,
550 unsigned long phys, pgprot_t flags) 552 phys_addr_t phys, pgprot_t flags)
551{ 553{
552 unsigned long addr = __fix_to_virt(idx); 554 unsigned long addr = __fix_to_virt(idx);
553 pte_t *pte; 555 pte_t *pte;
@@ -566,7 +568,7 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
566} 568}
567 569
568static inline void __init early_set_fixmap(enum fixed_addresses idx, 570static inline void __init early_set_fixmap(enum fixed_addresses idx,
569 unsigned long phys, pgprot_t prot) 571 phys_addr_t phys, pgprot_t prot)
570{ 572{
571 if (after_paging_init) 573 if (after_paging_init)
572 __set_fixmap(idx, phys, prot); 574 __set_fixmap(idx, phys, prot);
@@ -607,9 +609,10 @@ static int __init check_early_ioremap_leak(void)
607late_initcall(check_early_ioremap_leak); 609late_initcall(check_early_ioremap_leak);
608 610
609static void __init __iomem * 611static void __init __iomem *
610__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) 612__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
611{ 613{
612 unsigned long offset, last_addr; 614 unsigned long offset;
615 resource_size_t last_addr;
613 unsigned int nrpages; 616 unsigned int nrpages;
614 enum fixed_addresses idx0, idx; 617 enum fixed_addresses idx0, idx;
615 int i, slot; 618 int i, slot;
@@ -625,15 +628,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
625 } 628 }
626 629
627 if (slot < 0) { 630 if (slot < 0) {
628 printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n", 631 printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
629 phys_addr, size); 632 (u64)phys_addr, size);
630 WARN_ON(1); 633 WARN_ON(1);
631 return NULL; 634 return NULL;
632 } 635 }
633 636
634 if (early_ioremap_debug) { 637 if (early_ioremap_debug) {
635 printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ", 638 printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
636 phys_addr, size, slot); 639 (u64)phys_addr, size, slot);
637 dump_stack(); 640 dump_stack();
638 } 641 }
639 642
@@ -680,13 +683,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
680} 683}
681 684
682/* Remap an IO device */ 685/* Remap an IO device */
683void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size) 686void __init __iomem *
687early_ioremap(resource_size_t phys_addr, unsigned long size)
684{ 688{
685 return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO); 689 return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
686} 690}
687 691
688/* Remap memory */ 692/* Remap memory */
689void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size) 693void __init __iomem *
694early_memremap(resource_size_t phys_addr, unsigned long size)
690{ 695{
691 return __early_ioremap(phys_addr, size, PAGE_KERNEL); 696 return __early_ioremap(phys_addr, size, PAGE_KERNEL);
692} 697}
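
/*
 * phys_addr_t/resource_size_t can be 64-bit even on a 32-bit kernel (PAE),
 * which is why the early_ioremap() hunks above widen the address argument
 * and print it with %llx plus an explicit u64 cast. Minimal example of the
 * printing convention (illustrative only):
 */
#include <linux/kernel.h>
#include <linux/types.h>

static void report_region(resource_size_t phys, unsigned long size)
{
	printk(KERN_INFO "region %08llx + %08lx\n", (u64)phys, size);
}
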
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 4f115e00486b..50dc802a1c46 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -87,7 +87,7 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
87{ 87{
88 struct kmmio_probe *p; 88 struct kmmio_probe *p;
89 list_for_each_entry_rcu(p, &kmmio_probes, list) { 89 list_for_each_entry_rcu(p, &kmmio_probes, list) {
90 if (addr >= p->addr && addr <= (p->addr + p->len)) 90 if (addr >= p->addr && addr < (p->addr + p->len))
91 return p; 91 return p;
92 } 92 }
93 return NULL; 93 return NULL;
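
/*
 * The kmmio.c fix makes the probe lookup treat [addr, addr + len) as a
 * half-open interval; the old "<=" also matched the first byte past the
 * mapping. Generic form of the range test (illustrative only):
 */
static inline int addr_in_range(unsigned long addr, unsigned long start,
				unsigned long len)
{
	return addr >= start && addr < start + len;	/* end is exclusive */
}
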
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3daefa04ace5..d2530062fe00 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -257,7 +257,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
257} 257}
258#endif 258#endif
259 259
260static unsigned long calculate_numa_remap_pages(void) 260static __init unsigned long calculate_numa_remap_pages(void)
261{ 261{
262 int nid; 262 int nid;
263 unsigned long size, reserve_pages = 0; 263 unsigned long size, reserve_pages = 0;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index d73aaa892371..2d05a12029dc 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -188,6 +188,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
188 const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); 188 const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
189 int nid; 189 int nid;
190 190
191 if (!end)
192 return;
193
191 start = roundup(start, ZONE_ALIGN); 194 start = roundup(start, ZONE_ALIGN);
192 195
193 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, 196 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index d71e1b636ce6..0f9052bcec4b 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -153,7 +153,7 @@ static void __cpa_flush_all(void *arg)
153 */ 153 */
154 __flush_tlb_all(); 154 __flush_tlb_all();
155 155
156 if (cache && boot_cpu_data.x86_model >= 4) 156 if (cache && boot_cpu_data.x86 >= 4)
157 wbinvd(); 157 wbinvd();
158} 158}
159 159
@@ -204,6 +204,11 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
204 } 204 }
205} 205}
206 206
207static void wbinvd_local(void *unused)
208{
209 wbinvd();
210}
211
207static void cpa_flush_array(unsigned long *start, int numpages, int cache, 212static void cpa_flush_array(unsigned long *start, int numpages, int cache,
208 int in_flags, struct page **pages) 213 int in_flags, struct page **pages)
209{ 214{
@@ -218,8 +223,9 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
218 223
219 /* 4M threshold */ 224 /* 4M threshold */
220 if (numpages >= 1024) { 225 if (numpages >= 1024) {
221 if (boot_cpu_data.x86_model >= 4) 226 if (boot_cpu_data.x86 >= 4)
222 wbinvd(); 227 on_each_cpu(wbinvd_local, NULL, 1);
228
223 return; 229 return;
224 } 230 }
225 /* 231 /*
@@ -945,71 +951,94 @@ int _set_memory_uc(unsigned long addr, int numpages)
945 951
946int set_memory_uc(unsigned long addr, int numpages) 952int set_memory_uc(unsigned long addr, int numpages)
947{ 953{
954 int ret;
955
948 /* 956 /*
949 * for now UC MINUS. see comments in ioremap_nocache() 957 * for now UC MINUS. see comments in ioremap_nocache()
950 */ 958 */
951 if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 959 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
952 _PAGE_CACHE_UC_MINUS, NULL)) 960 _PAGE_CACHE_UC_MINUS, NULL);
953 return -EINVAL; 961 if (ret)
962 goto out_err;
963
964 ret = _set_memory_uc(addr, numpages);
965 if (ret)
966 goto out_free;
954 967
955 return _set_memory_uc(addr, numpages); 968 return 0;
969
970out_free:
971 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
972out_err:
973 return ret;
956} 974}
957EXPORT_SYMBOL(set_memory_uc); 975EXPORT_SYMBOL(set_memory_uc);
958 976
959int set_memory_array_uc(unsigned long *addr, int addrinarray) 977int set_memory_array_uc(unsigned long *addr, int addrinarray)
960{ 978{
961 unsigned long start; 979 int i, j;
962 unsigned long end; 980 int ret;
963 int i; 981
964 /* 982 /*
965 * for now UC MINUS. see comments in ioremap_nocache() 983 * for now UC MINUS. see comments in ioremap_nocache()
966 */ 984 */
967 for (i = 0; i < addrinarray; i++) { 985 for (i = 0; i < addrinarray; i++) {
968 start = __pa(addr[i]); 986 ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
969 for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) { 987 _PAGE_CACHE_UC_MINUS, NULL);
970 if (end != __pa(addr[i + 1])) 988 if (ret)
971 break; 989 goto out_free;
972 i++;
973 }
974 if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
975 goto out;
976 } 990 }
977 991
978 return change_page_attr_set(addr, addrinarray, 992 ret = change_page_attr_set(addr, addrinarray,
979 __pgprot(_PAGE_CACHE_UC_MINUS), 1); 993 __pgprot(_PAGE_CACHE_UC_MINUS), 1);
980out: 994 if (ret)
981 for (i = 0; i < addrinarray; i++) { 995 goto out_free;
982 unsigned long tmp = __pa(addr[i]); 996
983 997 return 0;
984 if (tmp == start) 998
985 break; 999out_free:
986 for (end = tmp + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) { 1000 for (j = 0; j < i; j++)
987 if (end != __pa(addr[i + 1])) 1001 free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
988 break; 1002
989 i++; 1003 return ret;
990 }
991 free_memtype(tmp, end);
992 }
993 return -EINVAL;
994} 1004}
995EXPORT_SYMBOL(set_memory_array_uc); 1005EXPORT_SYMBOL(set_memory_array_uc);
996 1006
997int _set_memory_wc(unsigned long addr, int numpages) 1007int _set_memory_wc(unsigned long addr, int numpages)
998{ 1008{
999 return change_page_attr_set(&addr, numpages, 1009 int ret;
1010 ret = change_page_attr_set(&addr, numpages,
1011 __pgprot(_PAGE_CACHE_UC_MINUS), 0);
1012
1013 if (!ret) {
1014 ret = change_page_attr_set(&addr, numpages,
1000 __pgprot(_PAGE_CACHE_WC), 0); 1015 __pgprot(_PAGE_CACHE_WC), 0);
1016 }
1017 return ret;
1001} 1018}
1002 1019
1003int set_memory_wc(unsigned long addr, int numpages) 1020int set_memory_wc(unsigned long addr, int numpages)
1004{ 1021{
1022 int ret;
1023
1005 if (!pat_enabled) 1024 if (!pat_enabled)
1006 return set_memory_uc(addr, numpages); 1025 return set_memory_uc(addr, numpages);
1007 1026
1008 if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 1027 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1009 _PAGE_CACHE_WC, NULL)) 1028 _PAGE_CACHE_WC, NULL);
1010 return -EINVAL; 1029 if (ret)
1030 goto out_err;
1011 1031
1012 return _set_memory_wc(addr, numpages); 1032 ret = _set_memory_wc(addr, numpages);
1033 if (ret)
1034 goto out_free;
1035
1036 return 0;
1037
1038out_free:
1039 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1040out_err:
1041 return ret;
1013} 1042}
1014EXPORT_SYMBOL(set_memory_wc); 1043EXPORT_SYMBOL(set_memory_wc);
1015 1044
@@ -1021,29 +1050,31 @@ int _set_memory_wb(unsigned long addr, int numpages)
1021 1050
1022int set_memory_wb(unsigned long addr, int numpages) 1051int set_memory_wb(unsigned long addr, int numpages)
1023{ 1052{
1024 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); 1053 int ret;
1054
1055 ret = _set_memory_wb(addr, numpages);
1056 if (ret)
1057 return ret;
1025 1058
1026 return _set_memory_wb(addr, numpages); 1059 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1060 return 0;
1027} 1061}
1028EXPORT_SYMBOL(set_memory_wb); 1062EXPORT_SYMBOL(set_memory_wb);
1029 1063
1030int set_memory_array_wb(unsigned long *addr, int addrinarray) 1064int set_memory_array_wb(unsigned long *addr, int addrinarray)
1031{ 1065{
1032 int i; 1066 int i;
1067 int ret;
1033 1068
1034 for (i = 0; i < addrinarray; i++) { 1069 ret = change_page_attr_clear(addr, addrinarray,
1035 unsigned long start = __pa(addr[i]);
1036 unsigned long end;
1037
1038 for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
1039 if (end != __pa(addr[i + 1]))
1040 break;
1041 i++;
1042 }
1043 free_memtype(start, end);
1044 }
1045 return change_page_attr_clear(addr, addrinarray,
1046 __pgprot(_PAGE_CACHE_MASK), 1); 1070 __pgprot(_PAGE_CACHE_MASK), 1);
1071 if (ret)
1072 return ret;
1073
1074 for (i = 0; i < addrinarray; i++)
1075 free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
1076
1077 return 0;
1047} 1078}
1048EXPORT_SYMBOL(set_memory_array_wb); 1079EXPORT_SYMBOL(set_memory_array_wb);
1049 1080
@@ -1136,6 +1167,8 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
1136 1167
1137 retval = cpa_clear_pages_array(pages, addrinarray, 1168 retval = cpa_clear_pages_array(pages, addrinarray,
1138 __pgprot(_PAGE_CACHE_MASK)); 1169 __pgprot(_PAGE_CACHE_MASK));
1170 if (retval)
1171 return retval;
1139 1172
1140 for (i = 0; i < addrinarray; i++) { 1173 for (i = 0; i < addrinarray; i++) {
1141 start = (unsigned long)page_address(pages[i]); 1174 start = (unsigned long)page_address(pages[i]);
@@ -1143,7 +1176,7 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
1143 free_memtype(start, end); 1176 free_memtype(start, end);
1144 } 1177 }
1145 1178
1146 return retval; 1179 return 0;
1147} 1180}
1148EXPORT_SYMBOL(set_pages_array_wb); 1181EXPORT_SYMBOL(set_pages_array_wb);
1149 1182
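The pageattr.c changes above share one theme: reserve the memtype first, attempt the attribute change, and undo the reservation if the change fails, instead of returning with the reservation leaked. set_memory_uc/wc/wb and their array and page-array variants all pick up this ordering, and the 4M-threshold flush path now runs wbinvd on every CPU via on_each_cpu(). A minimal userspace sketch of the reserve-then-rollback pattern, with hypothetical reserve()/release() stand-ins for reserve_memtype()/free_memtype():

#include <stdio.h>

/* Hypothetical stand-ins: reservation fails on page 3 to exercise rollback. */
static int reserve(unsigned long page) { return page == 3 ? -1 : 0; }
static void release(unsigned long page) { printf("released page %lu\n", page); }

/* Reserve 'count' pages; on failure undo only what was already reserved. */
static int reserve_array(unsigned long count)
{
    unsigned long i, j;
    int ret = 0;

    for (i = 0; i < count; i++) {
        ret = reserve(i);
        if (ret)
            goto out_free;
    }
    return 0;

out_free:
    for (j = 0; j < i; j++)
        release(j);
    return ret;
}

int main(void)
{
    printf("reserve_array(5) = %d\n", reserve_array(5));
    return 0;
}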
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 640339ee4fb2..e6718bb28065 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -31,7 +31,7 @@
31#ifdef CONFIG_X86_PAT 31#ifdef CONFIG_X86_PAT
32int __read_mostly pat_enabled = 1; 32int __read_mostly pat_enabled = 1;
33 33
34void __cpuinit pat_disable(const char *reason) 34static inline void pat_disable(const char *reason)
35{ 35{
36 pat_enabled = 0; 36 pat_enabled = 0;
37 printk(KERN_INFO "%s\n", reason); 37 printk(KERN_INFO "%s\n", reason);
@@ -182,10 +182,10 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
182 u8 mtrr_type; 182 u8 mtrr_type;
183 183
184 mtrr_type = mtrr_type_lookup(start, end); 184 mtrr_type = mtrr_type_lookup(start, end);
185 if (mtrr_type == MTRR_TYPE_UNCACHABLE) 185 if (mtrr_type != MTRR_TYPE_WRBACK)
186 return _PAGE_CACHE_UC; 186 return _PAGE_CACHE_UC_MINUS;
187 if (mtrr_type == MTRR_TYPE_WRCOMB) 187
188 return _PAGE_CACHE_WC; 188 return _PAGE_CACHE_WB;
189 } 189 }
190 190
191 return req_type; 191 return req_type;
@@ -352,23 +352,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
352 return 0; 352 return 0;
353 } 353 }
354 354
355 if (req_type == -1) { 355 /*
356 /* 356 * Call mtrr_lookup to get the type hint. This is an
357 * Call mtrr_lookup to get the type hint. This is an 357 * optimization for /dev/mem mmap'ers into WB memory (BIOS
358 * optimization for /dev/mem mmap'ers into WB memory (BIOS 358 * tools and ACPI tools). Use WB request for WB memory and use
359 * tools and ACPI tools). Use WB request for WB memory and use 359 * UC_MINUS otherwise.
360 * UC_MINUS otherwise. 360 */
361 */ 361 actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
362 u8 mtrr_type = mtrr_type_lookup(start, end);
363
364 if (mtrr_type == MTRR_TYPE_WRBACK)
365 actual_type = _PAGE_CACHE_WB;
366 else
367 actual_type = _PAGE_CACHE_UC_MINUS;
368 } else {
369 actual_type = pat_x_mtrr_type(start, end,
370 req_type & _PAGE_CACHE_MASK);
371 }
372 362
373 if (new_type) 363 if (new_type)
374 *new_type = actual_type; 364 *new_type = actual_type;
@@ -546,9 +536,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
546int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, 536int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
547 unsigned long size, pgprot_t *vma_prot) 537 unsigned long size, pgprot_t *vma_prot)
548{ 538{
549 u64 offset = ((u64) pfn) << PAGE_SHIFT; 539 unsigned long flags = _PAGE_CACHE_WB;
550 unsigned long flags = -1;
551 int retval;
552 540
553 if (!range_is_allowed(pfn, size)) 541 if (!range_is_allowed(pfn, size))
554 return 0; 542 return 0;
@@ -576,64 +564,11 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
576 } 564 }
577#endif 565#endif
578 566
579 /*
580 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
581 *
582 * Without O_SYNC, we want to get
583 * - WB for WB-able memory and no other conflicting mappings
584 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
585 * - Inherit from confliting mappings otherwise
586 */
587 if (flags != -1) {
588 retval = reserve_memtype(offset, offset + size, flags, NULL);
589 } else {
590 retval = reserve_memtype(offset, offset + size, -1, &flags);
591 }
592
593 if (retval < 0)
594 return 0;
595
596 if (((pfn < max_low_pfn_mapped) ||
597 (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
598 ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
599 free_memtype(offset, offset + size);
600 printk(KERN_INFO
601 "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
602 current->comm, current->pid,
603 cattr_name(flags),
604 offset, (unsigned long long)(offset + size));
605 return 0;
606 }
607
608 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | 567 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
609 flags); 568 flags);
610 return 1; 569 return 1;
611} 570}
612 571
613void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
614{
615 unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
616 u64 addr = (u64)pfn << PAGE_SHIFT;
617 unsigned long flags;
618
619 reserve_memtype(addr, addr + size, want_flags, &flags);
620 if (flags != want_flags) {
621 printk(KERN_INFO
622 "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
623 current->comm, current->pid,
624 cattr_name(want_flags),
625 addr, (unsigned long long)(addr + size),
626 cattr_name(flags));
627 }
628}
629
630void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
631{
632 u64 addr = (u64)pfn << PAGE_SHIFT;
633
634 free_memtype(addr, addr + size);
635}
636
637/* 572/*
638 * Change the memory type for the physial address range in kernel identity 573 * Change the memory type for the physial address range in kernel identity
639 * mapping space if that range is a part of identity map. 574 * mapping space if that range is a part of identity map.
@@ -671,8 +606,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
671{ 606{
672 int is_ram = 0; 607 int is_ram = 0;
673 int ret; 608 int ret;
674 unsigned long flags;
675 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); 609 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
610 unsigned long flags = want_flags;
676 611
677 is_ram = pat_pagerange_is_ram(paddr, paddr + size); 612 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
678 613
@@ -734,29 +669,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
734 * 669 *
735 * If the vma has a linear pfn mapping for the entire range, we get the prot 670 * If the vma has a linear pfn mapping for the entire range, we get the prot
736 * from pte and reserve the entire vma range with single reserve_pfn_range call. 671 * from pte and reserve the entire vma range with single reserve_pfn_range call.
737 * Otherwise, we reserve the entire vma range, my ging through the PTEs page
738 * by page to get physical address and protection.
739 */ 672 */
740int track_pfn_vma_copy(struct vm_area_struct *vma) 673int track_pfn_vma_copy(struct vm_area_struct *vma)
741{ 674{
742 int retval = 0;
743 unsigned long i, j;
744 resource_size_t paddr; 675 resource_size_t paddr;
745 unsigned long prot; 676 unsigned long prot;
746 unsigned long vma_start = vma->vm_start; 677 unsigned long vma_size = vma->vm_end - vma->vm_start;
747 unsigned long vma_end = vma->vm_end;
748 unsigned long vma_size = vma_end - vma_start;
749 pgprot_t pgprot; 678 pgprot_t pgprot;
750 679
751 if (!pat_enabled) 680 if (!pat_enabled)
752 return 0; 681 return 0;
753 682
683 /*
684 * For now, only handle remap_pfn_range() vmas where
685 * is_linear_pfn_mapping() == TRUE. Handling of
686 * vm_insert_pfn() is TBD.
687 */
754 if (is_linear_pfn_mapping(vma)) { 688 if (is_linear_pfn_mapping(vma)) {
755 /* 689 /*
756 * reserve the whole chunk covered by vma. We need the 690 * reserve the whole chunk covered by vma. We need the
757 * starting address and protection from pte. 691 * starting address and protection from pte.
758 */ 692 */
759 if (follow_phys(vma, vma_start, 0, &prot, &paddr)) { 693 if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
760 WARN_ON_ONCE(1); 694 WARN_ON_ONCE(1);
761 return -EINVAL; 695 return -EINVAL;
762 } 696 }
@@ -764,28 +698,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
764 return reserve_pfn_range(paddr, vma_size, &pgprot, 1); 698 return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
765 } 699 }
766 700
767 /* reserve entire vma page by page, using pfn and prot from pte */
768 for (i = 0; i < vma_size; i += PAGE_SIZE) {
769 if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
770 continue;
771
772 pgprot = __pgprot(prot);
773 retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
774 if (retval)
775 goto cleanup_ret;
776 }
777 return 0; 701 return 0;
778
779cleanup_ret:
780 /* Reserve error: Cleanup partial reservation and return error */
781 for (j = 0; j < i; j += PAGE_SIZE) {
782 if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
783 continue;
784
785 free_pfn_range(paddr, PAGE_SIZE);
786 }
787
788 return retval;
789} 702}
790 703
791/* 704/*
@@ -795,50 +708,28 @@ cleanup_ret:
795 * prot is passed in as a parameter for the new mapping. If the vma has a 708 * prot is passed in as a parameter for the new mapping. If the vma has a
796 * linear pfn mapping for the entire range reserve the entire vma range with 709 * linear pfn mapping for the entire range reserve the entire vma range with
797 * single reserve_pfn_range call. 710 * single reserve_pfn_range call.
798 * Otherwise, we look t the pfn and size and reserve only the specified range
799 * page by page.
800 *
801 * Note that this function can be called with caller trying to map only a
802 * subrange/page inside the vma.
803 */ 711 */
804int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, 712int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
805 unsigned long pfn, unsigned long size) 713 unsigned long pfn, unsigned long size)
806{ 714{
807 int retval = 0;
808 unsigned long i, j;
809 resource_size_t base_paddr;
810 resource_size_t paddr; 715 resource_size_t paddr;
811 unsigned long vma_start = vma->vm_start; 716 unsigned long vma_size = vma->vm_end - vma->vm_start;
812 unsigned long vma_end = vma->vm_end;
813 unsigned long vma_size = vma_end - vma_start;
814 717
815 if (!pat_enabled) 718 if (!pat_enabled)
816 return 0; 719 return 0;
817 720
721 /*
722 * For now, only handle remap_pfn_range() vmas where
723 * is_linear_pfn_mapping() == TRUE. Handling of
724 * vm_insert_pfn() is TBD.
725 */
818 if (is_linear_pfn_mapping(vma)) { 726 if (is_linear_pfn_mapping(vma)) {
819 /* reserve the whole chunk starting from vm_pgoff */ 727 /* reserve the whole chunk starting from vm_pgoff */
820 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; 728 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
821 return reserve_pfn_range(paddr, vma_size, prot, 0); 729 return reserve_pfn_range(paddr, vma_size, prot, 0);
822 } 730 }
823 731
824 /* reserve page by page using pfn and size */
825 base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
826 for (i = 0; i < size; i += PAGE_SIZE) {
827 paddr = base_paddr + i;
828 retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
829 if (retval)
830 goto cleanup_ret;
831 }
832 return 0; 732 return 0;
833
834cleanup_ret:
835 /* Reserve error: Cleanup partial reservation and return error */
836 for (j = 0; j < i; j += PAGE_SIZE) {
837 paddr = base_paddr + j;
838 free_pfn_range(paddr, PAGE_SIZE);
839 }
840
841 return retval;
842} 733}
843 734
844/* 735/*
@@ -849,39 +740,23 @@ cleanup_ret:
849void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, 740void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
850 unsigned long size) 741 unsigned long size)
851{ 742{
852 unsigned long i;
853 resource_size_t paddr; 743 resource_size_t paddr;
854 unsigned long prot; 744 unsigned long vma_size = vma->vm_end - vma->vm_start;
855 unsigned long vma_start = vma->vm_start;
856 unsigned long vma_end = vma->vm_end;
857 unsigned long vma_size = vma_end - vma_start;
858 745
859 if (!pat_enabled) 746 if (!pat_enabled)
860 return; 747 return;
861 748
749 /*
750 * For now, only handle remap_pfn_range() vmas where
751 * is_linear_pfn_mapping() == TRUE. Handling of
752 * vm_insert_pfn() is TBD.
753 */
862 if (is_linear_pfn_mapping(vma)) { 754 if (is_linear_pfn_mapping(vma)) {
863 /* free the whole chunk starting from vm_pgoff */ 755 /* free the whole chunk starting from vm_pgoff */
864 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; 756 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
865 free_pfn_range(paddr, vma_size); 757 free_pfn_range(paddr, vma_size);
866 return; 758 return;
867 } 759 }
868
869 if (size != 0 && size != vma_size) {
870 /* free page by page, using pfn and size */
871 paddr = (resource_size_t)pfn << PAGE_SHIFT;
872 for (i = 0; i < size; i += PAGE_SIZE) {
873 paddr = paddr + i;
874 free_pfn_range(paddr, PAGE_SIZE);
875 }
876 } else {
877 /* free entire vma, page by page, using the pfn from pte */
878 for (i = 0; i < vma_size; i += PAGE_SIZE) {
879 if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
880 continue;
881
882 free_pfn_range(paddr, PAGE_SIZE);
883 }
884 }
885} 760}
886 761
887pgprot_t pgprot_writecombine(pgprot_t prot) 762pgprot_t pgprot_writecombine(pgprot_t prot)
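The pat.c rework above drops the page-by-page reservation paths: track_pfn_vma_new(), track_pfn_vma_copy() and untrack_pfn_vma() now act only on remap_pfn_range()-style linear pfn mappings, the /dev/mem tracking helpers map_devmem()/unmap_devmem() go away, and pat_x_mtrr_type() collapses to "WB if the MTRRs say write-back, UC- otherwise". A tiny sketch of that last rule, with constants invented for illustration (the real code uses MTRR_TYPE_* and _PAGE_CACHE_*):

#include <stdio.h>

enum { MTRR_WRBACK = 6, MTRR_WRCOMB = 1, MTRR_UNCACHABLE = 0 };   /* sketch values */
enum { CACHE_WB, CACHE_WC, CACHE_UC_MINUS, CACHE_UC };

/* After the change: anything the MTRRs do not mark write-back becomes UC-;
 * only genuine WB memory keeps a WB request. */
static int pat_type_from_mtrr(int mtrr_type)
{
    return (mtrr_type == MTRR_WRBACK) ? CACHE_WB : CACHE_UC_MINUS;
}

int main(void)
{
    printf("WRBACK   -> %d\n", pat_type_from_mtrr(MTRR_WRBACK));
    printf("WRCOMB   -> %d\n", pat_type_from_mtrr(MTRR_WRCOMB));
    printf("UNCACHED -> %d\n", pat_type_from_mtrr(MTRR_UNCACHABLE));
    return 0;
}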
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 5b7c7c8464fe..7aa03a5389f5 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -345,7 +345,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
345 fixmaps_set++; 345 fixmaps_set++;
346} 346}
347 347
348void native_set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags) 348void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
349 pgprot_t flags)
349{ 350{
350 __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags)); 351 __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
351} 352}
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 16ae70fc57e7..29a0e37114f8 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -216,7 +216,7 @@ int __init get_memcfg_from_srat(void)
216 216
217 if (num_memory_chunks == 0) { 217 if (num_memory_chunks == 0) {
218 printk(KERN_WARNING 218 printk(KERN_WARNING
219 "could not finy any ACPI SRAT memory areas.\n"); 219 "could not find any ACPI SRAT memory areas.\n");
220 goto out_fail; 220 goto out_fail;
221 } 221 }
222 222
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index c7d272b8574c..01765955baaf 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -28,6 +28,7 @@ int acpi_numa __initdata;
28static struct acpi_table_slit *acpi_slit; 28static struct acpi_table_slit *acpi_slit;
29 29
30static nodemask_t nodes_parsed __initdata; 30static nodemask_t nodes_parsed __initdata;
31static nodemask_t cpu_nodes_parsed __initdata;
31static struct bootnode nodes[MAX_NUMNODES] __initdata; 32static struct bootnode nodes[MAX_NUMNODES] __initdata;
32static struct bootnode nodes_add[MAX_NUMNODES]; 33static struct bootnode nodes_add[MAX_NUMNODES];
33static int found_add_area __initdata; 34static int found_add_area __initdata;
@@ -141,6 +142,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
141 142
142 apic_id = pa->apic_id; 143 apic_id = pa->apic_id;
143 apicid_to_node[apic_id] = node; 144 apicid_to_node[apic_id] = node;
145 node_set(node, cpu_nodes_parsed);
144 acpi_numa = 1; 146 acpi_numa = 1;
145 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", 147 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
146 pxm, apic_id, node); 148 pxm, apic_id, node);
@@ -174,6 +176,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
174 else 176 else
175 apic_id = pa->apic_id; 177 apic_id = pa->apic_id;
176 apicid_to_node[apic_id] = node; 178 apicid_to_node[apic_id] = node;
179 node_set(node, cpu_nodes_parsed);
177 acpi_numa = 1; 180 acpi_numa = 1;
178 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", 181 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
179 pxm, apic_id, node); 182 pxm, apic_id, node);
@@ -358,6 +361,7 @@ static void __init unparse_node(int node)
358{ 361{
359 int i; 362 int i;
360 node_clear(node, nodes_parsed); 363 node_clear(node, nodes_parsed);
364 node_clear(node, cpu_nodes_parsed);
361 for (i = 0; i < MAX_LOCAL_APIC; i++) { 365 for (i = 0; i < MAX_LOCAL_APIC; i++) {
362 if (apicid_to_node[i] == node) 366 if (apicid_to_node[i] == node)
363 apicid_to_node[i] = NUMA_NO_NODE; 367 apicid_to_node[i] = NUMA_NO_NODE;
@@ -402,7 +406,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
402 return -1; 406 return -1;
403 } 407 }
404 408
405 node_possible_map = nodes_parsed; 409 /* Account for nodes with cpus and no memory */
410 nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);
406 411
407 /* Finally register nodes */ 412 /* Finally register nodes */
408 for_each_node_mask(i, node_possible_map) 413 for_each_node_mask(i, node_possible_map)
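Together with the numa_64.c hunk earlier (which returns early from setup_node_bootmem() when a node has no memory), the srat_64.c change records CPU-only nodes in a separate cpu_nodes_parsed mask and ORs it into node_possible_map, so memoryless nodes still come online. The nodes_or() step is just a bitwise union of the two masks; a toy sketch with one bit per node:

#include <stdio.h>

int main(void)
{
    unsigned long mem_nodes = 0x3;  /* nodes 0 and 1 have memory */
    unsigned long cpu_nodes = 0x5;  /* nodes 0 and 2 have CPUs   */
    unsigned long possible  = mem_nodes | cpu_nodes;

    printf("possible nodes mask = 0x%lx\n", possible);  /* 0x7 */
    return 0;
}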
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 04df67f8a7ba..044897be021f 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -76,9 +76,9 @@ void
76x86_backtrace(struct pt_regs * const regs, unsigned int depth) 76x86_backtrace(struct pt_regs * const regs, unsigned int depth)
77{ 77{
78 struct frame_head *head = (struct frame_head *)frame_pointer(regs); 78 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
79 unsigned long stack = kernel_trap_sp(regs);
80 79
81 if (!user_mode_vm(regs)) { 80 if (!user_mode_vm(regs)) {
81 unsigned long stack = kernel_stack_pointer(regs);
82 if (depth) 82 if (depth)
83 dump_trace(NULL, regs, (unsigned long *)stack, 0, 83 dump_trace(NULL, regs, (unsigned long *)stack, 0,
84 &backtrace_ops, &depth); 84 &backtrace_ops, &depth);
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 9bb09823b362..f893d6a6e803 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -94,12 +94,16 @@ struct pci_root_info {
94static int pci_root_num; 94static int pci_root_num;
95static struct pci_root_info pci_root_info[PCI_ROOT_NR]; 95static struct pci_root_info pci_root_info[PCI_ROOT_NR];
96 96
97void set_pci_bus_resources_arch_default(struct pci_bus *b) 97void x86_pci_root_bus_res_quirks(struct pci_bus *b)
98{ 98{
99 int i; 99 int i;
100 int j; 100 int j;
101 struct pci_root_info *info; 101 struct pci_root_info *info;
102 102
103 /* don't go for it if _CRS is used */
104 if (pci_probe & PCI_USE__CRS)
105 return;
106
103 /* if only one root bus, don't need to anything */ 107 /* if only one root bus, don't need to anything */
104 if (pci_root_num < 2) 108 if (pci_root_num < 2)
105 return; 109 return;
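set_pci_bus_resources_arch_default() is renamed to x86_pci_root_bus_res_quirks() and now backs off when the kernel is taking root-bus windows from ACPI _CRS, so the AMD-northbridge-derived resources no longer override firmware-provided ones. A sketch of the guard (the PCI_USE__CRS value below is made up; only the flag test matters):

#include <stdio.h>

#define PCI_USE__CRS 0x10000              /* hypothetical value for the sketch */
static unsigned int pci_probe = PCI_USE__CRS;

static void root_bus_res_quirks(int busnum)
{
    /* When _CRS supplies the windows, skip the hand-built quirk tables. */
    if (pci_probe & PCI_USE__CRS)
        return;
    printf("applying quirk resources to bus %d\n", busnum);
}

int main(void)
{
    root_bus_res_quirks(0);               /* prints nothing: _CRS wins   */
    pci_probe = 0;
    root_bus_res_quirks(0);               /* quirk path taken            */
    return 0;
}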
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 8c362b96b644..2202b6257b82 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -147,10 +147,13 @@ static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
147 * are examined. 147 * are examined.
148 */ 148 */
149 149
150void __devinit pcibios_fixup_bus(struct pci_bus *b) 150void __devinit pcibios_fixup_bus(struct pci_bus *b)
151{ 151{
152 struct pci_dev *dev; 152 struct pci_dev *dev;
153 153
154 /* root bus? */
155 if (!b->parent)
156 x86_pci_root_bus_res_quirks(b);
154 pci_read_bridge_bases(b); 157 pci_read_bridge_bases(b);
155 list_for_each_entry(dev, &b->devices, bus_list) 158 list_for_each_entry(dev, &b->devices, bus_list)
156 pcibios_fixup_device_resources(dev); 159 pcibios_fixup_device_resources(dev);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index f1817f71e009..a85bef20a3b9 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -238,6 +238,10 @@ void __init pcibios_resource_survey(void)
238 */ 238 */
239fs_initcall(pcibios_assign_resources); 239fs_initcall(pcibios_assign_resources);
240 240
241void __weak x86_pci_root_bus_res_quirks(struct pci_bus *b)
242{
243}
244
241/* 245/*
242 * If we set up a device for bus mastering, we need to check the latency 246 * If we set up a device for bus mastering, we need to check the latency
243 * timer as certain crappy BIOSes forget to set it properly. 247 * timer as certain crappy BIOSes forget to set it properly.
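The empty x86_pci_root_bus_res_quirks() added to i386.c is a __weak default: pcibios_fixup_bus() can call it unconditionally, and the strong definition in amd_bus.c (when that file is built) replaces it at link time. A minimal sketch of the weak/strong pattern, written with the plain GCC attribute that the kernel's __weak macro expands to:

#include <stdio.h>

/* Weak default: does nothing unless another object file provides a
 * strong definition of the same symbol. */
void __attribute__((weak)) root_bus_quirks(void)
{
}

int main(void)
{
    root_bus_quirks();   /* uses the weak stub here; an override would win */
    printf("done\n");
    return 0;
}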
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 905bb526b133..5fa10bb9604f 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -375,7 +375,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
375 if (!fixmem32) 375 if (!fixmem32)
376 return AE_OK; 376 return AE_OK;
377 if ((mcfg_res->start >= fixmem32->address) && 377 if ((mcfg_res->start >= fixmem32->address) &&
378 (mcfg_res->end < (fixmem32->address + 378 (mcfg_res->end <= (fixmem32->address +
379 fixmem32->address_length))) { 379 fixmem32->address_length))) {
380 mcfg_res->flags = 1; 380 mcfg_res->flags = 1;
381 return AE_CTRL_TERMINATE; 381 return AE_CTRL_TERMINATE;
@@ -392,7 +392,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
392 return AE_OK; 392 return AE_OK;
393 393
394 if ((mcfg_res->start >= address.minimum) && 394 if ((mcfg_res->start >= address.minimum) &&
395 (mcfg_res->end < (address.minimum + address.address_length))) { 395 (mcfg_res->end <= (address.minimum + address.address_length))) {
396 mcfg_res->flags = 1; 396 mcfg_res->flags = 1;
397 return AE_CTRL_TERMINATE; 397 return AE_CTRL_TERMINATE;
398 } 398 }
@@ -439,7 +439,7 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
439 u64 old_size = size; 439 u64 old_size = size;
440 int valid = 0; 440 int valid = 0;
441 441
442 while (!is_reserved(addr, addr + size - 1, E820_RESERVED)) { 442 while (!is_reserved(addr, addr + size, E820_RESERVED)) {
443 size >>= 1; 443 size >>= 1;
444 if (size < (16UL<<20)) 444 if (size < (16UL<<20))
445 break; 445 break;
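The mmconfig-shared.c comparisons are loosened from < to <=, and is_mmconf_reserved() now tests addr + size rather than addr + size - 1, so an MMCONFIG region that exactly fills the reserved window is accepted instead of being shrunk. Assuming the region's end is computed as start + length (one past the last byte), the containment test looks like this sketch (names are illustrative):

#include <stdio.h>

/* With end = start + size, a window [base, base + len) contains the region
 * when end <= base + len; a strict '<' wrongly rejects an exact fit. */
static int contained(unsigned long start, unsigned long end,
                     unsigned long base, unsigned long len)
{
    return start >= base && end <= base + len;
}

int main(void)
{
    /* region filling the whole window: rejected by '<', accepted by '<=' */
    printf("%d\n", contained(0xe0000000UL, 0xf0000000UL,
                             0xe0000000UL, 0x10000000UL));
    return 0;
}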
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index d9d35824c56f..6a40b78b46aa 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -104,11 +104,13 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
104{ 104{
105 long ret; 105 long ret;
106 if (likely(gtod->sysctl_enabled && gtod->clock.vread)) { 106 if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
107 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != 107 if (likely(tv != NULL)) {
108 offsetof(struct timespec, tv_nsec) || 108 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
109 sizeof(*tv) != sizeof(struct timespec)); 109 offsetof(struct timespec, tv_nsec) ||
110 do_realtime((struct timespec *)tv); 110 sizeof(*tv) != sizeof(struct timespec));
111 tv->tv_usec /= 1000; 111 do_realtime((struct timespec *)tv);
112 tv->tv_usec /= 1000;
113 }
112 if (unlikely(tz != NULL)) { 114 if (unlikely(tz != NULL)) {
113 /* Avoid memcpy. Some old compilers fail to inline it */ 115 /* Avoid memcpy. Some old compilers fail to inline it */
114 tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest; 116 tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
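The vclock_gettime.c hunk wraps the timeval conversion in "if (likely(tv != NULL))": gettimeofday(NULL, &tz) is a legal call, and the fast vDSO path previously dereferenced tv unconditionally. The shape of the fix, as a standalone sketch with both out-parameters optional:

#include <stdio.h>
#include <stddef.h>

struct my_tv { long sec, usec; };
struct my_tz { int minuteswest, dsttime; };

/* Both out-pointers are optional, mirroring gettimeofday(NULL, &tz). */
static int my_gettimeofday(struct my_tv *tv, struct my_tz *tz)
{
    if (tv) { tv->sec = 0; tv->usec = 0; }
    if (tz) { tz->minuteswest = 0; tz->dsttime = 0; }
    return 0;
}

int main(void)
{
    struct my_tz tz;
    printf("%d\n", my_gettimeofday(NULL, &tz));  /* no longer faults */
    return 0;
}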
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 3b767d03fd6a..172438f86a02 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -9,5 +9,6 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
9 time.o xen-asm.o xen-asm_$(BITS).o \ 9 time.o xen-asm.o xen-asm_$(BITS).o \
10 grant-table.o suspend.o 10 grant-table.o suspend.o
11 11
12obj-$(CONFIG_SMP) += smp.o spinlock.o 12obj-$(CONFIG_SMP) += smp.o
13obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o 13obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
\ No newline at end of file
14obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 82cd39a6cbd3..f09e8c36ee80 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -42,6 +42,7 @@
42#include <asm/xen/hypervisor.h> 42#include <asm/xen/hypervisor.h>
43#include <asm/fixmap.h> 43#include <asm/fixmap.h>
44#include <asm/processor.h> 44#include <asm/processor.h>
45#include <asm/proto.h>
45#include <asm/msr-index.h> 46#include <asm/msr-index.h>
46#include <asm/setup.h> 47#include <asm/setup.h>
47#include <asm/desc.h> 48#include <asm/desc.h>
@@ -168,21 +169,23 @@ static void __init xen_banner(void)
168 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); 169 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
169} 170}
170 171
172static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
173static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
174
171static void xen_cpuid(unsigned int *ax, unsigned int *bx, 175static void xen_cpuid(unsigned int *ax, unsigned int *bx,
172 unsigned int *cx, unsigned int *dx) 176 unsigned int *cx, unsigned int *dx)
173{ 177{
178 unsigned maskecx = ~0;
174 unsigned maskedx = ~0; 179 unsigned maskedx = ~0;
175 180
176 /* 181 /*
177 * Mask out inconvenient features, to try and disable as many 182 * Mask out inconvenient features, to try and disable as many
178 * unsupported kernel subsystems as possible. 183 * unsupported kernel subsystems as possible.
179 */ 184 */
180 if (*ax == 1) 185 if (*ax == 1) {
181 maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */ 186 maskecx = cpuid_leaf1_ecx_mask;
182 (1 << X86_FEATURE_ACPI) | /* disable ACPI */ 187 maskedx = cpuid_leaf1_edx_mask;
183 (1 << X86_FEATURE_MCE) | /* disable MCE */ 188 }
184 (1 << X86_FEATURE_MCA) | /* disable MCA */
185 (1 << X86_FEATURE_ACC)); /* thermal monitoring */
186 189
187 asm(XEN_EMULATE_PREFIX "cpuid" 190 asm(XEN_EMULATE_PREFIX "cpuid"
188 : "=a" (*ax), 191 : "=a" (*ax),
@@ -190,9 +193,43 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
190 "=c" (*cx), 193 "=c" (*cx),
191 "=d" (*dx) 194 "=d" (*dx)
192 : "0" (*ax), "2" (*cx)); 195 : "0" (*ax), "2" (*cx));
196
197 *cx &= maskecx;
193 *dx &= maskedx; 198 *dx &= maskedx;
194} 199}
195 200
201static __init void xen_init_cpuid_mask(void)
202{
203 unsigned int ax, bx, cx, dx;
204
205 cpuid_leaf1_edx_mask =
206 ~((1 << X86_FEATURE_MCE) | /* disable MCE */
207 (1 << X86_FEATURE_MCA) | /* disable MCA */
208 (1 << X86_FEATURE_ACC)); /* thermal monitoring */
209
210 if (!xen_initial_domain())
211 cpuid_leaf1_edx_mask &=
212 ~((1 << X86_FEATURE_APIC) | /* disable local APIC */
213 (1 << X86_FEATURE_ACPI)); /* disable ACPI */
214
215 ax = 1;
216 xen_cpuid(&ax, &bx, &cx, &dx);
217
218 /* cpuid claims we support xsave; try enabling it to see what happens */
219 if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
220 unsigned long cr4;
221
222 set_in_cr4(X86_CR4_OSXSAVE);
223
224 cr4 = read_cr4();
225
226 if ((cr4 & X86_CR4_OSXSAVE) == 0)
227 cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
228
229 clear_in_cr4(X86_CR4_OSXSAVE);
230 }
231}
232
196static void xen_set_debugreg(int reg, unsigned long val) 233static void xen_set_debugreg(int reg, unsigned long val)
197{ 234{
198 HYPERVISOR_set_debugreg(reg, val); 235 HYPERVISOR_set_debugreg(reg, val);
@@ -284,12 +321,11 @@ static void xen_set_ldt(const void *addr, unsigned entries)
284 321
285static void xen_load_gdt(const struct desc_ptr *dtr) 322static void xen_load_gdt(const struct desc_ptr *dtr)
286{ 323{
287 unsigned long *frames;
288 unsigned long va = dtr->address; 324 unsigned long va = dtr->address;
289 unsigned int size = dtr->size + 1; 325 unsigned int size = dtr->size + 1;
290 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 326 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
327 unsigned long frames[pages];
291 int f; 328 int f;
292 struct multicall_space mcs;
293 329
294 /* A GDT can be up to 64k in size, which corresponds to 8192 330 /* A GDT can be up to 64k in size, which corresponds to 8192
295 8-byte entries, or 16 4k pages.. */ 331 8-byte entries, or 16 4k pages.. */
@@ -297,19 +333,26 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
297 BUG_ON(size > 65536); 333 BUG_ON(size > 65536);
298 BUG_ON(va & ~PAGE_MASK); 334 BUG_ON(va & ~PAGE_MASK);
299 335
300 mcs = xen_mc_entry(sizeof(*frames) * pages);
301 frames = mcs.args;
302
303 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { 336 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
304 frames[f] = arbitrary_virt_to_mfn((void *)va); 337 int level;
338 pte_t *ptep = lookup_address(va, &level);
339 unsigned long pfn, mfn;
340 void *virt;
341
342 BUG_ON(ptep == NULL);
343
344 pfn = pte_pfn(*ptep);
345 mfn = pfn_to_mfn(pfn);
346 virt = __va(PFN_PHYS(pfn));
347
348 frames[f] = mfn;
305 349
306 make_lowmem_page_readonly((void *)va); 350 make_lowmem_page_readonly((void *)va);
307 make_lowmem_page_readonly(mfn_to_virt(frames[f])); 351 make_lowmem_page_readonly(virt);
308 } 352 }
309 353
310 MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct)); 354 if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
311 355 BUG();
312 xen_mc_issue(PARAVIRT_LAZY_CPU);
313} 356}
314 357
315static void load_TLS_descriptor(struct thread_struct *t, 358static void load_TLS_descriptor(struct thread_struct *t,
@@ -385,7 +428,7 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
385static int cvt_gate_to_trap(int vector, const gate_desc *val, 428static int cvt_gate_to_trap(int vector, const gate_desc *val,
386 struct trap_info *info) 429 struct trap_info *info)
387{ 430{
388 if (val->type != 0xf && val->type != 0xe) 431 if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
389 return 0; 432 return 0;
390 433
391 info->vector = vector; 434 info->vector = vector;
@@ -393,8 +436,8 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
393 info->cs = gate_segment(*val); 436 info->cs = gate_segment(*val);
394 info->flags = val->dpl; 437 info->flags = val->dpl;
395 /* interrupt gates clear IF */ 438 /* interrupt gates clear IF */
396 if (val->type == 0xe) 439 if (val->type == GATE_INTERRUPT)
397 info->flags |= 4; 440 info->flags |= 1 << 2;
398 441
399 return 1; 442 return 1;
400} 443}
@@ -872,7 +915,6 @@ static const struct machine_ops __initdata xen_machine_ops = {
872 .emergency_restart = xen_emergency_restart, 915 .emergency_restart = xen_emergency_restart,
873}; 916};
874 917
875
876/* First C function to be called on Xen boot */ 918/* First C function to be called on Xen boot */
877asmlinkage void __init xen_start_kernel(void) 919asmlinkage void __init xen_start_kernel(void)
878{ 920{
@@ -897,6 +939,8 @@ asmlinkage void __init xen_start_kernel(void)
897 939
898 xen_init_irq_ops(); 940 xen_init_irq_ops();
899 941
942 xen_init_cpuid_mask();
943
900#ifdef CONFIG_X86_LOCAL_APIC 944#ifdef CONFIG_X86_LOCAL_APIC
901 /* 945 /*
902 * set up the basic apic ops. 946 * set up the basic apic ops.
@@ -938,6 +982,11 @@ asmlinkage void __init xen_start_kernel(void)
938 if (!xen_initial_domain()) 982 if (!xen_initial_domain())
939 __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); 983 __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
940 984
985#ifdef CONFIG_X86_64
986 /* Work out if we support NX */
987 check_efer();
988#endif
989
941 /* Don't do the full vcpu_info placement stuff until we have a 990 /* Don't do the full vcpu_info placement stuff until we have a
942 possible map and a non-dummy shared_info. */ 991 possible map and a non-dummy shared_info. */
943 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 992 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
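In enlighten.c the leaf-1 CPUID masks are now computed once in xen_init_cpuid_mask() instead of being hard-coded in xen_cpuid(): MCE/MCA/thermal bits are always hidden, APIC/ACPI only for non-initial domains, and XSAVE is kept only if setting CR4.OSXSAVE actually sticks. A rough sketch of the EDX mask construction (the bit positions follow the usual CPUID leaf-1 EDX layout, but treat them as assumptions of the sketch):

#include <stdio.h>
#include <stdint.h>

#define FEAT_MCE   7
#define FEAT_APIC  9
#define FEAT_MCA  14
#define FEAT_ACPI 22
#define FEAT_ACC  29

/* Build the leaf-1 EDX mask once: always hide MCE/MCA/ACC; additionally
 * hide APIC and ACPI unless this is the initial domain. */
static uint32_t build_edx_mask(int initial_domain)
{
    uint32_t mask = ~((1u << FEAT_MCE) | (1u << FEAT_MCA) | (1u << FEAT_ACC));

    if (!initial_domain)
        mask &= ~((1u << FEAT_APIC) | (1u << FEAT_ACPI));
    return mask;
}

int main(void)
{
    printf("domU mask: %08x\n", build_edx_mask(0));
    printf("dom0 mask: %08x\n", build_edx_mask(1));
    return 0;
}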
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index db3802fb7b84..fba55b1a4021 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,6 +42,7 @@
42#include <linux/highmem.h> 42#include <linux/highmem.h>
43#include <linux/debugfs.h> 43#include <linux/debugfs.h>
44#include <linux/bug.h> 44#include <linux/bug.h>
45#include <linux/module.h>
45 46
46#include <asm/pgtable.h> 47#include <asm/pgtable.h>
47#include <asm/tlbflush.h> 48#include <asm/tlbflush.h>
@@ -184,7 +185,7 @@ static inline unsigned p2m_index(unsigned long pfn)
184} 185}
185 186
186/* Build the parallel p2m_top_mfn structures */ 187/* Build the parallel p2m_top_mfn structures */
187void xen_setup_mfn_list_list(void) 188static void __init xen_build_mfn_list_list(void)
188{ 189{
189 unsigned pfn, idx; 190 unsigned pfn, idx;
190 191
@@ -198,7 +199,10 @@ void xen_setup_mfn_list_list(void)
198 unsigned topidx = idx * P2M_ENTRIES_PER_PAGE; 199 unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
199 p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]); 200 p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
200 } 201 }
202}
201 203
204void xen_setup_mfn_list_list(void)
205{
202 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); 206 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
203 207
204 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = 208 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
@@ -218,6 +222,8 @@ void __init xen_build_dynamic_phys_to_machine(void)
218 222
219 p2m_top[topidx] = &mfn_list[pfn]; 223 p2m_top[topidx] = &mfn_list[pfn];
220 } 224 }
225
226 xen_build_mfn_list_list();
221} 227}
222 228
223unsigned long get_phys_to_machine(unsigned long pfn) 229unsigned long get_phys_to_machine(unsigned long pfn)
@@ -233,47 +239,74 @@ unsigned long get_phys_to_machine(unsigned long pfn)
233} 239}
234EXPORT_SYMBOL_GPL(get_phys_to_machine); 240EXPORT_SYMBOL_GPL(get_phys_to_machine);
235 241
236static void alloc_p2m(unsigned long **pp, unsigned long *mfnp) 242/* install a new p2m_top page */
243bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
237{ 244{
238 unsigned long *p; 245 unsigned topidx = p2m_top_index(pfn);
246 unsigned long **pfnp, *mfnp;
239 unsigned i; 247 unsigned i;
240 248
241 p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL); 249 pfnp = &p2m_top[topidx];
242 BUG_ON(p == NULL); 250 mfnp = &p2m_top_mfn[topidx];
243 251
244 for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++) 252 for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
245 p[i] = INVALID_P2M_ENTRY; 253 p[i] = INVALID_P2M_ENTRY;
246 254
247 if (cmpxchg(pp, p2m_missing, p) != p2m_missing) 255 if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
248 free_page((unsigned long)p);
249 else
250 *mfnp = virt_to_mfn(p); 256 *mfnp = virt_to_mfn(p);
257 return true;
258 }
259
260 return false;
251} 261}
252 262
253void set_phys_to_machine(unsigned long pfn, unsigned long mfn) 263static void alloc_p2m(unsigned long pfn)
254{ 264{
255 unsigned topidx, idx; 265 unsigned long *p;
256 266
257 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { 267 p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
258 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); 268 BUG_ON(p == NULL);
259 return; 269
260 } 270 if (!install_p2mtop_page(pfn, p))
271 free_page((unsigned long)p);
272}
273
274/* Try to install p2m mapping; fail if intermediate bits missing */
275bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
276{
277 unsigned topidx, idx;
261 278
262 if (unlikely(pfn >= MAX_DOMAIN_PAGES)) { 279 if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
263 BUG_ON(mfn != INVALID_P2M_ENTRY); 280 BUG_ON(mfn != INVALID_P2M_ENTRY);
264 return; 281 return true;
265 } 282 }
266 283
267 topidx = p2m_top_index(pfn); 284 topidx = p2m_top_index(pfn);
268 if (p2m_top[topidx] == p2m_missing) { 285 if (p2m_top[topidx] == p2m_missing) {
269 /* no need to allocate a page to store an invalid entry */
270 if (mfn == INVALID_P2M_ENTRY) 286 if (mfn == INVALID_P2M_ENTRY)
271 return; 287 return true;
272 alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]); 288 return false;
273 } 289 }
274 290
275 idx = p2m_index(pfn); 291 idx = p2m_index(pfn);
276 p2m_top[topidx][idx] = mfn; 292 p2m_top[topidx][idx] = mfn;
293
294 return true;
295}
296
297void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
298{
299 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
300 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
301 return;
302 }
303
304 if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
305 alloc_p2m(pfn);
306
307 if (!__set_phys_to_machine(pfn, mfn))
308 BUG();
309 }
277} 310}
278 311
279unsigned long arbitrary_virt_to_mfn(void *vaddr) 312unsigned long arbitrary_virt_to_mfn(void *vaddr)
@@ -987,7 +1020,7 @@ static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
987 return 0; 1020 return 0;
988} 1021}
989 1022
990void __init xen_mark_init_mm_pinned(void) 1023static void __init xen_mark_init_mm_pinned(void)
991{ 1024{
992 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); 1025 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
993} 1026}
@@ -1270,8 +1303,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
1270 } *args; 1303 } *args;
1271 struct multicall_space mcs; 1304 struct multicall_space mcs;
1272 1305
1273 BUG_ON(cpumask_empty(cpus)); 1306 if (cpumask_empty(cpus))
1274 BUG_ON(!mm); 1307 return; /* nothing to do */
1275 1308
1276 mcs = xen_mc_entry(sizeof(*args)); 1309 mcs = xen_mc_entry(sizeof(*args));
1277 args = mcs.args; 1310 args = mcs.args;
@@ -1438,6 +1471,15 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1438} 1471}
1439#endif 1472#endif
1440 1473
1474static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1475{
1476 struct mmuext_op op;
1477 op.cmd = cmd;
1478 op.arg1.mfn = pfn_to_mfn(pfn);
1479 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1480 BUG();
1481}
1482
1441/* Early in boot, while setting up the initial pagetable, assume 1483/* Early in boot, while setting up the initial pagetable, assume
1442 everything is pinned. */ 1484 everything is pinned. */
1443static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) 1485static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
@@ -1446,22 +1488,29 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1446 BUG_ON(mem_map); /* should only be used early */ 1488 BUG_ON(mem_map); /* should only be used early */
1447#endif 1489#endif
1448 make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); 1490 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1491 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1492}
1493
1494/* Used for pmd and pud */
1495static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1496{
1497#ifdef CONFIG_FLATMEM
1498 BUG_ON(mem_map); /* should only be used early */
1499#endif
1500 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1449} 1501}
1450 1502
1451/* Early release_pte assumes that all pts are pinned, since there's 1503/* Early release_pte assumes that all pts are pinned, since there's
1452 only init_mm and anything attached to that is pinned. */ 1504 only init_mm and anything attached to that is pinned. */
1453static void xen_release_pte_init(unsigned long pfn) 1505static __init void xen_release_pte_init(unsigned long pfn)
1454{ 1506{
1507 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1455 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 1508 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1456} 1509}
1457 1510
1458static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) 1511static __init void xen_release_pmd_init(unsigned long pfn)
1459{ 1512{
1460 struct mmuext_op op; 1513 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1461 op.cmd = cmd;
1462 op.arg1.mfn = pfn_to_mfn(pfn);
1463 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1464 BUG();
1465} 1514}
1466 1515
1467/* This needs to make sure the new pte page is pinned iff its being 1516/* This needs to make sure the new pte page is pinned iff its being
@@ -1746,11 +1795,16 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1746 1795
1747 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); 1796 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
1748 1797
1798 reserve_early(__pa(xen_start_info->pt_base),
1799 __pa(xen_start_info->pt_base +
1800 xen_start_info->nr_pt_frames * PAGE_SIZE),
1801 "XEN PAGETABLES");
1802
1749 return swapper_pg_dir; 1803 return swapper_pg_dir;
1750} 1804}
1751#endif /* CONFIG_X86_64 */ 1805#endif /* CONFIG_X86_64 */
1752 1806
1753static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot) 1807static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1754{ 1808{
1755 pte_t pte; 1809 pte_t pte;
1756 1810
@@ -1773,6 +1827,9 @@ static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
1773#ifdef CONFIG_X86_LOCAL_APIC 1827#ifdef CONFIG_X86_LOCAL_APIC
1774 case FIX_APIC_BASE: /* maps dummy local APIC */ 1828 case FIX_APIC_BASE: /* maps dummy local APIC */
1775#endif 1829#endif
1830 case FIX_TEXT_POKE0:
1831 case FIX_TEXT_POKE1:
1832 /* All local page mappings */
1776 pte = pfn_pte(phys, prot); 1833 pte = pfn_pte(phys, prot);
1777 break; 1834 break;
1778 1835
@@ -1819,7 +1876,6 @@ __init void xen_post_allocator_init(void)
1819 xen_mark_init_mm_pinned(); 1876 xen_mark_init_mm_pinned();
1820} 1877}
1821 1878
1822
1823const struct pv_mmu_ops xen_mmu_ops __initdata = { 1879const struct pv_mmu_ops xen_mmu_ops __initdata = {
1824 .pagetable_setup_start = xen_pagetable_setup_start, 1880 .pagetable_setup_start = xen_pagetable_setup_start,
1825 .pagetable_setup_done = xen_pagetable_setup_done, 1881 .pagetable_setup_done = xen_pagetable_setup_done,
@@ -1843,9 +1899,9 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
1843 1899
1844 .alloc_pte = xen_alloc_pte_init, 1900 .alloc_pte = xen_alloc_pte_init,
1845 .release_pte = xen_release_pte_init, 1901 .release_pte = xen_release_pte_init,
1846 .alloc_pmd = xen_alloc_pte_init, 1902 .alloc_pmd = xen_alloc_pmd_init,
1847 .alloc_pmd_clone = paravirt_nop, 1903 .alloc_pmd_clone = paravirt_nop,
1848 .release_pmd = xen_release_pte_init, 1904 .release_pmd = xen_release_pmd_init,
1849 1905
1850#ifdef CONFIG_HIGHPTE 1906#ifdef CONFIG_HIGHPTE
1851 .kmap_atomic_pte = xen_kmap_atomic_pte, 1907 .kmap_atomic_pte = xen_kmap_atomic_pte,
@@ -1883,8 +1939,8 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
1883 .make_pud = PV_CALLEE_SAVE(xen_make_pud), 1939 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
1884 .set_pgd = xen_set_pgd_hyper, 1940 .set_pgd = xen_set_pgd_hyper,
1885 1941
1886 .alloc_pud = xen_alloc_pte_init, 1942 .alloc_pud = xen_alloc_pmd_init,
1887 .release_pud = xen_release_pte_init, 1943 .release_pud = xen_release_pmd_init,
1888#endif /* PAGETABLE_LEVELS == 4 */ 1944#endif /* PAGETABLE_LEVELS == 4 */
1889 1945
1890 .activate_mm = xen_activate_mm, 1946 .activate_mm = xen_activate_mm,
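The mmu.c rework above splits the p2m update into a fast path, __set_phys_to_machine(), which fails when the top-level slot still points at the shared "missing" page, and a slow path that allocates (or, via install_p2mtop_page(), accepts a pre-allocated) leaf page and retries. A compressed userspace model of that two-level populate-on-demand update, with sizes and names invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 4
static unsigned long missing_leaf[ENTRIES];      /* shared "missing" page */
static unsigned long *top[ENTRIES];              /* top level of the p2m  */

/* Fast path: refuses (returns 0) while the leaf is still the shared
 * missing page, mirroring __set_phys_to_machine(). */
static int set_entry(unsigned pfn, unsigned long mfn)
{
    unsigned t = pfn / ENTRIES, i = pfn % ENTRIES;
    if (top[t] == missing_leaf)
        return 0;
    top[t][i] = mfn;
    return 1;
}

static void set_entry_slow(unsigned pfn, unsigned long mfn)
{
    unsigned t = pfn / ENTRIES;
    if (!set_entry(pfn, mfn)) {
        top[t] = calloc(ENTRIES, sizeof(unsigned long));  /* alloc_p2m() */
        if (!top[t] || !set_entry(pfn, mfn))
            abort();                                      /* BUG()       */
    }
}

int main(void)
{
    for (int i = 0; i < ENTRIES; i++)
        top[i] = missing_leaf;
    set_entry_slow(5, 1234);
    printf("p2m[5] = %lu\n", top[1][1]);
    return 0;
}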
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 24d1b44a337d..da7302624897 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -11,6 +11,9 @@ enum pt_level {
11}; 11};
12 12
13 13
14bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
15bool install_p2mtop_page(unsigned long pfn, unsigned long *p);
16
14void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); 17void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
15 18
16 19
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 585a6e330837..429834ec1687 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -317,7 +317,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
317 BUG_ON(rc); 317 BUG_ON(rc);
318 318
319 while(per_cpu(cpu_state, cpu) != CPU_ONLINE) { 319 while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
320 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 320 HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
321 barrier(); 321 barrier();
322 } 322 }
323 323
@@ -422,7 +422,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
422 /* Make sure other vcpus get a chance to run if they need to. */ 422 /* Make sure other vcpus get a chance to run if they need to. */
423 for_each_cpu(cpu, mask) { 423 for_each_cpu(cpu, mask) {
424 if (xen_vcpu_stolen(cpu)) { 424 if (xen_vcpu_stolen(cpu)) {
425 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 425 HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
426 break; 426 break;
427 } 427 }
428 } 428 }
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 14f240623497..0a5aa44299a5 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void)
213 return ret; 213 return ret;
214} 214}
215 215
216static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
217{
218 return xen_clocksource_read();
219}
220
216static void xen_read_wallclock(struct timespec *ts) 221static void xen_read_wallclock(struct timespec *ts)
217{ 222{
218 struct shared_info *s = HYPERVISOR_shared_info; 223 struct shared_info *s = HYPERVISOR_shared_info;
@@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now)
241static struct clocksource xen_clocksource __read_mostly = { 246static struct clocksource xen_clocksource __read_mostly = {
242 .name = "xen", 247 .name = "xen",
243 .rating = 400, 248 .rating = 400,
244 .read = xen_clocksource_read, 249 .read = xen_clocksource_get_cycles,
245 .mask = ~0, 250 .mask = ~0,
246 .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */ 251 .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */
247 .shift = XEN_SHIFT, 252 .shift = XEN_SHIFT,
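xen/time.c gains xen_clocksource_get_cycles() because the clocksource framework's .read callback now takes the struct clocksource pointer; the wrapper simply ignores it and calls the existing reader. The adapter pattern, as a tiny sketch:

#include <stdio.h>

typedef unsigned long long cycle_t;
struct clocksource;                                 /* opaque in this sketch */

static cycle_t raw_read(void) { return 42; }

/* Thin adapter: the framework passes the clocksource, which this driver
 * does not need, like xen_clocksource_get_cycles(). */
static cycle_t read_cycles(struct clocksource *cs)
{
    (void)cs;
    return raw_read();
}

int main(void)
{
    printf("%llu\n", read_cycles(NULL));
    return 0;
}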
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 2f5ef2632ea2..ca6596b05d53 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -57,22 +57,31 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
57 57
58bool xen_vcpu_stolen(int vcpu); 58bool xen_vcpu_stolen(int vcpu);
59 59
60void xen_mark_init_mm_pinned(void);
61
62void xen_setup_vcpu_info_placement(void); 60void xen_setup_vcpu_info_placement(void);
63 61
64#ifdef CONFIG_SMP 62#ifdef CONFIG_SMP
65void xen_smp_init(void); 63void xen_smp_init(void);
66 64
67void __init xen_init_spinlocks(void);
68__cpuinit void xen_init_lock_cpu(int cpu);
69void xen_uninit_lock_cpu(int cpu);
70
71extern cpumask_var_t xen_cpu_initialized_map; 65extern cpumask_var_t xen_cpu_initialized_map;
72#else 66#else
73static inline void xen_smp_init(void) {} 67static inline void xen_smp_init(void) {}
74#endif 68#endif
75 69
70#ifdef CONFIG_PARAVIRT_SPINLOCKS
71void __init xen_init_spinlocks(void);
72__cpuinit void xen_init_lock_cpu(int cpu);
73void xen_uninit_lock_cpu(int cpu);
74#else
75static inline void xen_init_spinlocks(void)
76{
77}
78static inline void xen_init_lock_cpu(int cpu)
79{
80}
81static inline void xen_uninit_lock_cpu(int cpu)
82{
83}
84#endif
76 85
77/* Declare an asm function, along with symbols needed to make it 86/* Declare an asm function, along with symbols needed to make it
78 inlineable */ 87 inlineable */