author    Ingo Molnar <mingo@elte.hu>    2009-05-08 04:50:00 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-05-08 04:50:00 -0400
commit    f066a155334642b8a206eec625b1925d88c48aeb (patch)
tree      cb12975e60b70d1dae3b7397bab955de78a4d01e /arch/x86
parent    e7c064889606aab3569669078c69b87b2c527e72 (diff)
parent    33df4db04a79660150e1948e3296eeb451ac121b (diff)
Merge branch 'x86/urgent' into x86/xen
Conflicts:
	arch/frv/include/asm/pgtable.h
	arch/x86/include/asm/required-features.h
	arch/x86/xen/mmu.c

Merge reason: x86/xen was on a .29 base still, move it to a fresher
branch and pick up Xen fixes as well, plus resolve conflicts

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r-- arch/x86/Kconfig | 9
-rw-r--r-- arch/x86/Kconfig.cpu | 1
-rw-r--r-- arch/x86/Makefile | 6
-rw-r--r-- arch/x86/boot/video-vga.c | 33
-rw-r--r-- arch/x86/ia32/sys_ia32.c | 19
-rw-r--r-- [-rwxr-xr-x] arch/x86/include/asm/cpu_debug.h | 0
-rw-r--r-- arch/x86/include/asm/cpufeature.h | 1
-rw-r--r-- arch/x86/include/asm/desc.h | 2
-rw-r--r-- arch/x86/include/asm/fixmap.h | 4
-rw-r--r-- arch/x86/include/asm/hardirq.h | 2
-rw-r--r-- arch/x86/include/asm/io.h | 6
-rw-r--r-- arch/x86/include/asm/lguest_hcall.h | 2
-rw-r--r-- arch/x86/include/asm/mce.h | 1
-rw-r--r-- arch/x86/include/asm/paravirt.h | 4
-rw-r--r-- arch/x86/include/asm/pat.h | 4
-rw-r--r-- arch/x86/include/asm/processor.h | 12
-rw-r--r-- arch/x86/include/asm/sigcontext.h | 6
-rw-r--r-- arch/x86/include/asm/tlbflush.h | 2
-rw-r--r-- arch/x86/include/asm/topology.h | 2
-rw-r--r-- arch/x86/include/asm/uv/uv_mmrs.h | 5
-rw-r--r-- arch/x86/include/asm/xsave.h | 3
-rw-r--r-- arch/x86/kernel/amd_iommu_init.c | 16
-rw-r--r-- arch/x86/kernel/apic/apic.c | 6
-rw-r--r-- arch/x86/kernel/apic/apic_flat_64.c | 4
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 7
-rw-r--r-- arch/x86/kernel/apic/nmi.c | 5
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 31
-rw-r--r-- arch/x86/kernel/bios_uv.c | 3
-rw-r--r-- arch/x86/kernel/cpu/addon_cpuid_features.c | 1
-rw-r--r-- arch/x86/kernel/cpu/common.c | 2
-rw-r--r-- [-rwxr-xr-x] arch/x86/kernel/cpu/cpu_debug.c | 0
-rw-r--r-- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 101
-rw-r--r-- arch/x86/kernel/cpu/cpufreq/longhaul.c | 1
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce_64.c | 33
-rw-r--r-- arch/x86/kernel/cpu/proc.c | 2
-rw-r--r-- arch/x86/kernel/e820.c | 11
-rw-r--r-- arch/x86/kernel/entry_64.S | 3
-rw-r--r-- arch/x86/kernel/ftrace.c | 3
-rw-r--r-- arch/x86/kernel/hpet.c | 24
-rw-r--r-- arch/x86/kernel/i8253.c | 2
-rw-r--r-- arch/x86/kernel/irq.c | 2
-rw-r--r-- arch/x86/kernel/kvmclock.c | 7
-rw-r--r-- arch/x86/kernel/machine_kexec_32.c | 4
-rw-r--r-- arch/x86/kernel/machine_kexec_64.c | 4
-rw-r--r-- arch/x86/kernel/microcode_core.c | 35
-rw-r--r-- arch/x86/kernel/mpparse.c | 7
-rw-r--r-- arch/x86/kernel/pci-swiotlb.c | 2
-rw-r--r-- arch/x86/kernel/ptrace.c | 3
-rw-r--r-- arch/x86/kernel/quirks.c | 2
-rw-r--r-- arch/x86/kernel/reboot.c | 8
-rw-r--r-- arch/x86/kernel/tlb_uv.c | 189
-rw-r--r-- arch/x86/kernel/tsc.c | 2
-rw-r--r-- arch/x86/kernel/uv_sysfs.c | 4
-rw-r--r-- arch/x86/kernel/uv_time.c | 10
-rw-r--r-- arch/x86/kernel/vmiclock_32.c | 2
-rw-r--r-- arch/x86/kernel/xsave.c | 6
-rw-r--r-- arch/x86/kvm/mmu.c | 2
-rw-r--r-- arch/x86/kvm/x86.c | 8
-rw-r--r-- arch/x86/lguest/boot.c | 18
-rw-r--r-- arch/x86/mm/gup.c | 16
-rw-r--r-- arch/x86/mm/init.c | 18
-rw-r--r-- arch/x86/mm/ioremap.c | 33
-rw-r--r-- arch/x86/mm/kmmio.c | 2
-rw-r--r-- arch/x86/mm/numa_32.c | 2
-rw-r--r-- arch/x86/mm/numa_64.c | 3
-rw-r--r-- arch/x86/mm/pageattr.c | 127
-rw-r--r-- arch/x86/mm/pat.c | 191
-rw-r--r-- arch/x86/mm/pgtable.c | 3
-rw-r--r-- arch/x86/mm/srat_32.c | 2
-rw-r--r-- arch/x86/mm/srat_64.c | 7
-rw-r--r-- arch/x86/pci/amd_bus.c | 6
-rw-r--r-- arch/x86/pci/common.c | 5
-rw-r--r-- arch/x86/pci/i386.c | 4
-rw-r--r-- arch/x86/pci/mmconfig-shared.c | 6
-rw-r--r-- arch/x86/vdso/vclock_gettime.c | 12
-rw-r--r-- arch/x86/xen/mmu.c | 7
-rw-r--r-- arch/x86/xen/time.c | 7
77 files changed, 647 insertions, 498 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4b3408206091..df9e885eee14 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -252,17 +252,13 @@ config SMP
 
 config X86_X2APIC
 	bool "Support x2apic"
-	depends on X86_LOCAL_APIC && X86_64
-	select INTR_REMAP
+	depends on X86_LOCAL_APIC && X86_64 && INTR_REMAP
 	---help---
 	  This enables x2apic support on CPUs that have this feature.
 
 	  This allows 32-bit apic IDs (so it can support very large systems),
 	  and accesses the local apic via MSRs not via mmio.
 
-	  ( On certain CPU models you may need to enable INTR_REMAP too,
-	    to get functional x2apic mode. )
-
 	  If you don't know what to do here, say N.
 
 config SPARSE_IRQ
@@ -281,6 +277,7 @@ config SPARSE_IRQ
 config NUMA_MIGRATE_IRQ_DESC
 	bool "Move irq desc when changing irq smp_affinity"
 	depends on SPARSE_IRQ && NUMA
+	depends on BROKEN
 	default n
 	---help---
 	  This enables moving irq_desc to cpu/node that irq will use handled.
@@ -357,6 +354,7 @@ config X86_UV
 	bool "SGI Ultraviolet"
 	depends on X86_64
 	depends on X86_EXTENDED_PLATFORM
+	depends on NUMA
 	select X86_X2APIC
 	---help---
 	  This option is needed in order to support SGI Ultraviolet systems.
@@ -667,6 +665,7 @@ config MAXSMP
 
 config NR_CPUS
 	int "Maximum number of CPUs" if SMP && !MAXSMP
+	range 2 8 if SMP && X86_32 && !X86_BIGSMP
 	range 2 512 if SMP && !MAXSMP
 	default "1" if !SMP
 	default "4096" if MAXSMP
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 924e156a85ab..8130334329c0 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -506,6 +506,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
+	depends on BROKEN
 	---help---
 	  This adds a ptrace interface to the hardware's branch trace store.
 
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index f05d8c91d9e5..8c86b72afdc2 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -153,7 +153,7 @@ endif
 
 boot := arch/x86/boot
 
-BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage install
+BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage
 
 PHONY += bzImage $(BOOT_TARGETS)
 
@@ -171,6 +171,10 @@ bzImage: vmlinux
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $@
 
+PHONY += install
+install:
+	$(Q)$(MAKE) $(build)=$(boot) $@
+
 PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c
index 95d86ce0421c..9e0587a37768 100644
--- a/arch/x86/boot/video-vga.c
+++ b/arch/x86/boot/video-vga.c
@@ -129,22 +129,18 @@ u16 vga_crtc(void)
 	return (inb(0x3cc) & 1) ? 0x3d4 : 0x3b4;
 }
 
-static void vga_set_480_scanlines(int lines)
+static void vga_set_480_scanlines(void)
 {
 	u16 crtc;		/* CRTC base address */
 	u8  csel;		/* CRTC miscellaneous output register */
-	u8  ovfw;		/* CRTC overflow register */
-	int end = lines-1;
 
 	crtc = vga_crtc();
 
-	ovfw = 0x3c | ((end >> (8-1)) & 0x02) | ((end >> (9-6)) & 0x40);
-
 	out_idx(0x0c, crtc, 0x11); /* Vertical sync end, unlock CR0-7 */
 	out_idx(0x0b, crtc, 0x06); /* Vertical total */
-	out_idx(ovfw, crtc, 0x07); /* Vertical overflow */
+	out_idx(0x3e, crtc, 0x07); /* Vertical overflow */
 	out_idx(0xea, crtc, 0x10); /* Vertical sync start */
-	out_idx(end, crtc, 0x12); /* Vertical display end */
+	out_idx(0xdf, crtc, 0x12); /* Vertical display end */
 	out_idx(0xe7, crtc, 0x15); /* Vertical blank start */
 	out_idx(0x04, crtc, 0x16); /* Vertical blank end */
 	csel = inb(0x3cc);
@@ -153,21 +149,38 @@ static void vga_set_480_scanlines(int lines)
 	outb(csel, 0x3c2);
 }
 
+static void vga_set_vertical_end(int lines)
+{
+	u16 crtc;		/* CRTC base address */
+	u8  ovfw;		/* CRTC overflow register */
+	int end = lines-1;
+
+	crtc = vga_crtc();
+
+	ovfw = 0x3c | ((end >> (8-1)) & 0x02) | ((end >> (9-6)) & 0x40);
+
+	out_idx(ovfw, crtc, 0x07); /* Vertical overflow */
+	out_idx(end, crtc, 0x12); /* Vertical display end */
+}
+
 static void vga_set_80x30(void)
 {
-	vga_set_480_scanlines(30*16);
+	vga_set_480_scanlines();
+	vga_set_vertical_end(30*16);
 }
 
 static void vga_set_80x34(void)
 {
+	vga_set_480_scanlines();
 	vga_set_14font();
-	vga_set_480_scanlines(34*14);
+	vga_set_vertical_end(34*14);
 }
 
 static void vga_set_80x60(void)
 {
+	vga_set_480_scanlines();
 	vga_set_8font();
-	vga_set_480_scanlines(60*8);
+	vga_set_vertical_end(60*8);
 }
 
 static int vga_set_mode(struct mode_info *mode)
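A note on the overflow-register math that vga_set_vertical_end() now owns: CRTC register 0x12 holds only the low 8 bits of the vertical display end, so bits 8 and 9 of the scanline count go into bits 1 and 6 of the overflow register (0x07). A minimal userspace sketch of that bit-packing (port I/O replaced by printf; not the kernel code itself):

  #include <stdio.h>

  /* Recompute the CRTC vertical-overflow byte for a given scanline count,
   * mirroring the expression in vga_set_vertical_end(). */
  static unsigned char crtc_overflow(int lines)
  {
  	int end = lines - 1;

  	/* 0x3c keeps the unrelated overflow bits at their mode-3 defaults;
  	 * bit 1 receives bit 8 of 'end', bit 6 receives bit 9 of 'end'. */
  	return 0x3c | ((end >> (8 - 1)) & 0x02) | ((end >> (9 - 6)) & 0x40);
  }

  int main(void)
  {
  	int modes[] = { 30 * 16, 34 * 14, 60 * 8 };	/* 480, 476, 480 lines */

  	for (int i = 0; i < 3; i++)
  		printf("%d lines -> CR07=0x%02x CR12=0x%02x\n",
  		       modes[i], crtc_overflow(modes[i]),
  		       (modes[i] - 1) & 0xff);
  	return 0;
  }

For 480 lines this yields CR07=0x3e and CR12=0xdf, matching the constants now hardcoded in vga_set_480_scanlines().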
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index efac92fd1efb..085a8c35f149 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -129,21 +129,12 @@ asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
 			      struct stat64 __user *statbuf, int flag)
 {
 	struct kstat stat;
-	int error = -EINVAL;
+	int error;
 
-	if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
-		goto out;
-
-	if (flag & AT_SYMLINK_NOFOLLOW)
-		error = vfs_lstat_fd(dfd, filename, &stat);
-	else
-		error = vfs_stat_fd(dfd, filename, &stat);
-
-	if (!error)
-		error = cp_stat64(statbuf, &stat);
-
-out:
-	return error;
+	error = vfs_fstatat(dfd, filename, &stat, flag);
+	if (error)
+		return error;
+	return cp_stat64(statbuf, &stat);
 }
 
 /*
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
index 222802029fa6..222802029fa6 100755..100644
--- a/arch/x86/include/asm/cpu_debug.h
+++ b/arch/x86/include/asm/cpu_debug.h
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0beba0d1468d..bb83b1c397aa 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -154,6 +154,7 @@
  * CPUID levels like 0x6, 0xA etc
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT	(7*32+ 1) /* Always Running APIC Timer */
 
 /* Virtualization flags: Linux defined */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 5623c50d67b2..c45f415ce315 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -37,7 +37,7 @@ extern gate_desc idt_table[];
 struct gdt_page {
 	struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU(struct gdt_page, gdt_page);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 81937a5dc77c..2d81af3974a0 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -151,11 +151,11 @@ extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
-		       unsigned long phys, pgprot_t flags);
+		       phys_addr_t phys, pgprot_t flags);
 
 #ifndef CONFIG_PARAVIRT
 static inline void __set_fixmap(enum fixed_addresses idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	native_set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 039db6aa8e02..37555e52f980 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -26,7 +26,7 @@ typedef struct {
 #endif
 } ____cacheline_aligned irq_cpustat_t;
 
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e5383e3d2f8c..73739322b6d0 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -193,8 +193,10 @@ extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  */
 extern void early_ioremap_init(void);
 extern void early_ioremap_reset(void);
-extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
-extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+				   unsigned long size);
+extern void __iomem *early_memremap(resource_size_t phys_addr,
+				    unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 
 #define IO_SPACE_LIMIT 0xffff
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index 0f4ee7148afe..faae1996487b 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -5,7 +5,6 @@
 #define LHCALL_FLUSH_ASYNC	0
 #define LHCALL_LGUEST_INIT	1
 #define LHCALL_SHUTDOWN		2
-#define LHCALL_LOAD_GDT		3
 #define LHCALL_NEW_PGTABLE	4
 #define LHCALL_FLUSH_TLB	5
 #define LHCALL_LOAD_IDT_ENTRY	6
@@ -17,6 +16,7 @@
 #define LHCALL_SET_PMD		15
 #define LHCALL_LOAD_TLS		16
 #define LHCALL_NOTIFY		17
+#define LHCALL_LOAD_GDT_ENTRY	18
 
 #define LGUEST_TRAP_ENTRY	0x1F
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 563933e06a35..4f8c199584e7 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -137,6 +137,7 @@ DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
 enum mcp_flags {
 	MCP_TIMESTAMP = (1 << 0),	/* log time stamp */
 	MCP_UC = (1 << 1),		/* log uncorrected errors */
+	MCP_DONTLOG = (1 << 2),		/* only clear, don't log */
 };
 extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index bc384be6aa44..1fe583783792 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -349,7 +349,7 @@ struct pv_mmu_ops {
 	/* Sometimes the physical address is a pfn, and sometimes its
 	   an mfn.  We can tell which is which from the index. */
 	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
-			   unsigned long phys, pgprot_t flags);
+			   phys_addr_t phys, pgprot_t flags);
 };
 
 struct raw_spinlock;
@@ -1432,7 +1432,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 void arch_flush_lazy_mmu_mode(void);
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	pv_mmu_ops.set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 2cd07b9422f4..7af14e512f97 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -18,9 +18,5 @@ extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
 		unsigned long flag);
-extern void map_devmem(unsigned long pfn, unsigned long size,
-		struct pgprot vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-		struct pgprot vma_prot);
 
 #endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 34c52370f2fe..c2cceae709c8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -138,7 +138,7 @@ extern struct tss_struct doublefault_tss;
 extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
 #define current_cpu_data	__get_cpu_var(cpu_info)
 #else
@@ -270,7 +270,7 @@ struct tss_struct {
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
 /*
  * Save the original ist values for checking stack pointers during debugging
@@ -352,6 +352,11 @@ struct i387_soft_struct {
 	u32			entry_eip;
 };
 
+struct ymmh_struct {
+	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
+	u32 ymmh_space[64];
+};
+
 struct xsave_hdr_struct {
 	u64 xstate_bv;
 	u64 reserved1[2];
@@ -361,6 +366,7 @@ struct xsave_hdr_struct {
 struct xsave_struct {
 	struct i387_fxsave_struct i387;
 	struct xsave_hdr_struct xsave_hdr;
+	struct ymmh_struct ymmh;
 	/* new processor state extensions will go here */
 } __attribute__ ((packed, aligned (64)));
 
@@ -387,7 +393,7 @@ union irq_stack_union {
 	};
 };
 
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
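The new ymmh_struct slots the upper 128 bits of the sixteen YMM registers into the xsave area directly after the 64-byte xsave header, which itself follows the 512-byte legacy fxsave region. A hedged userspace sketch (stand-in types of the same sizes, not the kernel structs) that checks those offsets:

  #include <stdio.h>
  #include <stddef.h>
  #include <stdint.h>

  /* Stand-ins with the same sizes as the kernel structs. */
  struct fxsave_area { uint8_t bytes[512]; };          /* legacy FXSAVE image */
  struct xsave_hdr   { uint64_t xstate_bv, rsvd[7]; }; /* 64-byte header */
  struct ymmh_area   { uint32_t ymmh_space[64]; };     /* 16 regs x 16 bytes */

  struct xsave_image {
  	struct fxsave_area i387;
  	struct xsave_hdr   hdr;
  	struct ymmh_area   ymmh;
  } __attribute__((packed, aligned(64)));

  int main(void)
  {
  	/* XSAVE places YMM_Hi128 at offset 576 = 512 + 64. */
  	printf("hdr  offset: %zu\n", offsetof(struct xsave_image, hdr));
  	printf("ymmh offset: %zu\n", offsetof(struct xsave_image, ymmh));
  	printf("ymmh size  : %zu\n", sizeof(struct ymmh_area));
  	return 0;
  }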
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index ec666491aaa4..72e5a4491661 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -269,6 +269,11 @@ struct _xsave_hdr {
 	__u64 reserved2[5];
 };
 
+struct _ymmh_state {
+	/* 16 * 16 bytes for each YMMH-reg */
+	__u32 ymmh_space[64];
+};
+
 /*
  * Extended state pointed by the fpstate pointer in the sigcontext.
  * In addition to the fpstate, information encoded in the xstate_hdr
@@ -278,6 +283,7 @@ struct _xsave_hdr {
 struct _xstate {
 	struct _fpstate fpstate;
 	struct _xsave_hdr xstate_hdr;
+	struct _ymmh_state ymmh;
 	/* new processor state extensions go here */
 };
 
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d3539f998f88..16a5c84b0329 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -152,7 +152,7 @@ struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
 };
-DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 892b119dba6f..f44b49abca49 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -200,7 +200,7 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
 }
 
 struct pci_bus;
-void set_pci_bus_resources_arch_default(struct pci_bus *b);
+void x86_pci_root_bus_res_quirks(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
 #define mc_capable()	(cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index db68ac8a5ac2..2cae46c7c8a2 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -17,6 +17,11 @@
 /* ========================================================================= */
 /*                          UVH_BAU_DATA_CONFIG                              */
 /* ========================================================================= */
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
+/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
 #define UVH_BAU_DATA_CONFIG 0x61680UL
 #define UVH_BAU_DATA_CONFIG_32 0x0438
 
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 08e9a1ac07a9..727acc152344 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -7,6 +7,7 @@
 
 #define XSTATE_FP	0x1
 #define XSTATE_SSE	0x2
+#define XSTATE_YMM	0x4
 
 #define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
 
@@ -15,7 +16,7 @@
 /*
  * These are the features that the OS can handle currently.
  */
-#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE)
+#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX	"0x48, "
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42c33cebf00f..8c0be0902dac 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -49,10 +49,10 @@
 #define IVHD_DEV_EXT_SELECT             0x46
 #define IVHD_DEV_EXT_SELECT_RANGE       0x47
 
-#define IVHD_FLAG_HT_TUN_EN             0x00
-#define IVHD_FLAG_PASSPW_EN             0x01
-#define IVHD_FLAG_RESPASSPW_EN          0x02
-#define IVHD_FLAG_ISOC_EN               0x03
+#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
+#define IVHD_FLAG_PASSPW_EN_MASK        0x02
+#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
+#define IVHD_FLAG_ISOC_EN_MASK          0x08
 
 #define IVMD_FLAG_EXCL_RANGE            0x08
 #define IVMD_FLAG_UNITY_MAP             0x01
@@ -569,19 +569,19 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 	 * First set the recommended feature enable bits from ACPI
 	 * into the IOMMU control registers
 	 */
-	h->flags & IVHD_FLAG_HT_TUN_EN ?
+	h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
 		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
 
-	h->flags & IVHD_FLAG_PASSPW_EN ?
+	h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
 		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
 
-	h->flags & IVHD_FLAG_RESPASSPW_EN ?
+	h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
 		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
 
-	h->flags & IVHD_FLAG_ISOC_EN ?
+	h->flags & IVHD_FLAG_ISOC_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
 		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
 
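The bug being fixed here is worth spelling out: the old IVHD_FLAG_* values were bit positions (0 through 3), so a test like h->flags & IVHD_FLAG_HT_TUN_EN masked against 0x00 (always false) and the others tested the wrong bits. The new *_MASK values are the actual single-bit masks. A tiny standalone illustration of the difference, with a hypothetical flags value:

  #include <stdio.h>

  #define IVHD_FLAG_ISOC_EN	0x03	/* old: a bit *position* */
  #define IVHD_FLAG_ISOC_EN_MASK	0x08	/* new: the bit *mask*, i.e. 1 << 3 */

  int main(void)
  {
  	unsigned char flags = 0x08;	/* hypothetical: only ISOC_EN set */

  	/* Old test matches bits 0 and 1 instead of bit 3: wrong answer. */
  	printf("old: %d\n", !!(flags & IVHD_FLAG_ISOC_EN));		/* 0 */
  	printf("new: %d\n", !!(flags & IVHD_FLAG_ISOC_EN_MASK));	/* 1 */
  	return 0;
  }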
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 098ec84b8c00..f2870920f246 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -431,6 +431,12 @@ static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
+	if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
+		/* Make LAPIC timer preferrable over percpu HPET */
+		lapic_clockevent.rating = 150;
+	}
+
 	memcpy(levt, &lapic_clockevent, sizeof(*levt));
 	levt->cpumask = cpumask_of(smp_processor_id());
 
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 0014714ea97b..306e5e88fb6f 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -212,7 +212,7 @@ struct apic apic_flat = {
 	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
 	.wait_for_init_deassert		= NULL,
 	.smp_callin_clear_local_apic	= NULL,
-	.inquire_remote_apic		= NULL,
+	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
 	.write				= native_apic_mem_write,
@@ -362,7 +362,7 @@ struct apic apic_physflat = {
 	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
 	.wait_for_init_deassert		= NULL,
 	.smp_callin_clear_local_apic	= NULL,
-	.inquire_remote_apic		= NULL,
+	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
 	.write				= native_apic_mem_write,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 767fe7e46d68..30da617d18e4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2524,7 +2524,6 @@ static void irq_complete_move(struct irq_desc **descp)
 static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
 
-#ifdef CONFIG_X86_X2APIC
 static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
 {
 	int apic, pin;
@@ -2558,6 +2557,7 @@ eoi_ioapic_irq(struct irq_desc *desc)
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+#ifdef CONFIG_X86_X2APIC
 static void ack_x2apic_level(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -2634,6 +2634,9 @@ static void ack_apic_level(unsigned int irq)
 	 */
 	ack_APIC_irq();
 
+	if (irq_remapped(irq))
+		eoi_ioapic_irq(desc);
+
 	/* Now we can move and renable the irq */
 	if (unlikely(do_unmask_irq)) {
 		/* Only migrate the irq if the ack has been received.
@@ -3667,12 +3670,14 @@ int arch_setup_hpet_msi(unsigned int irq)
 {
 	int ret;
 	struct msi_msg msg;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	ret = msi_compose_msg(NULL, irq, &msg);
 	if (ret < 0)
 		return ret;
 
 	hpet_msi_write(irq, &msg);
+	desc->status |= IRQ_MOVE_PCNTXT;
 	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
 		"edge");
 
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index d6bd62407152..ce4fbfa315a1 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -138,7 +138,7 @@ int __init check_nmi_watchdog(void)
 	if (!prev_nmi_count)
 		goto error;
 
-	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
+	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -414,7 +414,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		touched = 1;
 	}
 
-	if (cpumask_test_cpu(cpu, backtrace_mask)) {
+	/* We can be called before check_nmi_watchdog, hence NULL check. */
+	if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
 		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
 
 		spin_lock(&lock);
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1248318436e8..2bda69352976 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -19,6 +19,7 @@
 #include <linux/timer.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
+#include <linux/io.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -34,6 +35,17 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 
+static int early_get_nodeid(void)
+{
+	union uvh_node_id_u node_id;
+	unsigned long *mmr;
+
+	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
+	node_id.v = *mmr;
+	early_iounmap(mmr, sizeof(*mmr));
+	return node_id.s.node_id;
+}
+
 static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strcmp(oem_id, "SGI")) {
@@ -42,6 +54,8 @@ static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 		else if (!strcmp(oem_table_id, "UVX"))
 			uv_system_type = UV_X2APIC;
 		else if (!strcmp(oem_table_id, "UVH")) {
+			__get_cpu_var(x2apic_extra_bits) =
+				early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
 			uv_system_type = UV_NON_UNIQUE_APIC;
 			return 1;
 		}
@@ -549,7 +563,8 @@ void __init uv_system_init(void)
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
 	int max_pnode = 0;
-	unsigned long mmr_base, present;
+	unsigned long mmr_base, present, paddr;
+	unsigned short pnode_mask;
 
 	map_low_mmrs();
 
@@ -592,6 +607,7 @@ void __init uv_system_init(void)
 		}
 	}
 
+	pnode_mask = (1 << n_val) - 1;
 	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 	gnode_upper = (((unsigned long)node_id.s.node_id) &
 		       ~((1 << n_val) - 1)) << m_val;
@@ -615,7 +631,7 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
 		uv_cpu_hub_info(cpu)->pnode = pnode;
-		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
@@ -631,6 +647,17 @@ void __init uv_system_init(void)
 			lcpu, blade);
 	}
 
+	/* Add blade/pnode info for nodes without cpus */
+	for_each_online_node(nid) {
+		if (uv_node_to_blade[nid] >= 0)
+			continue;
+		paddr = node_start_pfn(nid) << PAGE_SHIFT;
+		paddr = uv_soc_phys_ram_to_gpa(paddr);
+		pnode = (paddr >> m_val) & pnode_mask;
+		blade = boot_pnode_to_blade(pnode);
+		uv_node_to_blade[nid] = blade;
+	}
+
 	map_gru_high(max_pnode);
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f63882728d91..63a88e1f987d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -182,7 +182,8 @@ void uv_bios_init(void)
 	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
 	iounmap(tab);
 
-	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
+	printk(KERN_INFO "EFI UV System Table Revision %d\n",
+					uv_systab.revision);
 }
 #else	/* !CONFIG_EFI */
 
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 8220ae69849d..c965e5212714 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 
 	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+		{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c4f667896c28..c1caefc82e62 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1203,6 +1203,8 @@ void __cpuinit cpu_init(void)
 	load_TR_desc();
 	load_LDT(&init_mm.context);
 
+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
 #ifdef CONFIG_DOUBLEFAULT
 	/* Set up doublefault TSS pointer in the GDT */
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 46e29ab96c6a..46e29ab96c6a 100755..100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 19f6b9d27e83..208ecf6643df 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -65,13 +65,18 @@ enum {
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
-	unsigned int max_freq;
 	unsigned int resume;
 	unsigned int cpu_feature;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+struct acpi_msr_data {
+	u64 saved_aperf, saved_mperf;
+};
+
+static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+
 DEFINE_TRACE(power_mark);
 
 /* acpi_perf_data is a pointer to percpu data. */
@@ -152,7 +157,8 @@ struct drv_cmd {
 	u32 val;
 };
 
-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 h;
@@ -169,10 +175,10 @@ static long do_drv_read(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
@@ -191,23 +197,24 @@ static long do_drv_write(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
 	cmd->val = 0;
 
-	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	unsigned int i;
+	int this_cpu;
 
-	for_each_cpu(i, cmd->mask) {
-		work_on_cpu(i, do_drv_write, cmd);
-	}
+	this_cpu = get_cpu();
+	if (cpumask_test_cpu(this_cpu, cmd->mask))
+		do_drv_write(cmd);
+	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+	put_cpu();
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -241,28 +248,23 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_cur {
+struct perf_pair {
 	union {
 		struct {
 			u32 lo;
 			u32 hi;
 		} split;
 		u64 whole;
-	} aperf_cur, mperf_cur;
+	} aperf, mperf;
 };
 
-
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_cur *cur = _cur;
-
-	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
+	struct perf_pair *cur = _cur;
 
-	wrmsr(MSR_IA32_APERF, 0, 0);
-	wrmsr(MSR_IA32_MPERF, 0, 0);
-
-	return 0;
+	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
 }
 
 /*
@@ -281,58 +283,63 @@ static long read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_cur cur;
+	struct perf_pair readin, cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
 		return 0;
 
+	cur.aperf.whole = readin.aperf.whole -
+				per_cpu(msr_data, cpu).saved_aperf;
+	cur.mperf.whole = readin.mperf.whole -
+				per_cpu(msr_data, cpu).saved_mperf;
+	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
+	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
+
 #ifdef __i386__
 	/*
 	 * We dont want to do 64 bit divide with 32 bit kernel
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
 		shift_count = fls(h);
 
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
 		int shift_count = 7;
-		cur.aperf_cur.split.lo >>= shift_count;
-		cur.mperf_cur.split.lo >>= shift_count;
+		cur.aperf.split.lo >>= shift_count;
+		cur.mperf.split.lo >>= shift_count;
 	}
 
-	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
-		perf_percent = (cur.aperf_cur.split.lo * 100) /
-				cur.mperf_cur.split.lo;
+	if (cur.aperf.split.lo && cur.mperf.split.lo)
+		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
 		int shift_count = 7;
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
-		perf_percent = (cur.aperf_cur.whole * 100) /
-				cur.mperf_cur.whole;
+	if (cur.aperf.whole && cur.mperf.whole)
+		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
 	else
 		perf_percent = 0;
 
 #endif
 
-	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
+	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
 
 	return retval;
 }
@@ -685,16 +692,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
 	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
 	    policy->cpuinfo.transition_latency > 20 * 1000) {
-		static int print_once;
 		policy->cpuinfo.transition_latency = 20 * 1000;
-		if (!print_once) {
-			print_once = 1;
-			printk(KERN_INFO "Capping off P-state tranision latency"
-				" at 20 uS\n");
-		}
+		printk_once(KERN_INFO "Capping off P-state tranision"
+			    " latency at 20 uS\n");
 	}
 
-	data->max_freq = perf->states[0].core_frequency * 1000;
 	/* table init */
 	for (i = 0; i < perf->state_count; i++) {
 		if (i > 0 && perf->states[i].core_frequency >=
@@ -713,6 +715,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (result)
 		goto err_freqfree;
 
+	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
 		/* Current speed is unknown and not detectable by IO port */
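Instead of zeroing APERF/MPERF after every read, the driver now keeps per-CPU snapshots and works with deltas, which is what makes get_measured_perf() safe to call concurrently from any CPU. A hedged userspace sketch of the delta-and-percent arithmetic, including the overflow guard (the MSR reads are simulated values, not real counters):

  #include <stdio.h>
  #include <stdint.h>

  static uint64_t saved_aperf, saved_mperf;

  /* Compute effective-frequency percent from cumulative APERF/MPERF
   * readings, mirroring the 64-bit branch of get_measured_perf(). */
  static unsigned int perf_percent(uint64_t aperf, uint64_t mperf)
  {
  	uint64_t da = aperf - saved_aperf;
  	uint64_t dm = mperf - saved_mperf;

  	saved_aperf = aperf;
  	saved_mperf = mperf;

  	/* Guard the *100 against 64-bit overflow, as the driver does. */
  	if (da > UINT64_MAX / 100) {
  		da >>= 7;
  		dm >>= 7;
  	}
  	return (da && dm) ? (unsigned int)(da * 100 / dm) : 0;
  }

  int main(void)
  {
  	perf_percent(1000000, 1000000);		/* prime the snapshots */
  	/* Simulated next sample: CPU ran at ~80% of its P0 frequency. */
  	printf("%u%%\n", perf_percent(1800000, 2000000));
  	return 0;
  }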
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 0bd48e65a0ca..ce2ed3e4aad9 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -33,7 +33,6 @@
 #include <linux/timex.h>
 #include <linux/io.h>
 #include <linux/acpi.h>
-#include <linux/kernel.h>
 
 #include <asm/msr.h>
 #include <acpi/processor.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 863f89568b1a..6fb0b359d2a5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -239,9 +239,10 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		 * Don't get the IP here because it's unlikely to
 		 * have anything to do with the actual error location.
 		 */
-
-		mce_log(&m);
-		add_taint(TAINT_MACHINE_CHECK);
+		if (!(flags & MCP_DONTLOG)) {
+			mce_log(&m);
+			add_taint(TAINT_MACHINE_CHECK);
+		}
 
 		/*
 		 * Clear state for this bank.
@@ -452,13 +453,14 @@ void mce_log_therm_throt_event(__u64 status)
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static int next_interval; /* in jiffies */
+static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
 static void mcheck_timer(unsigned long);
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
 {
 	struct timer_list *t = &per_cpu(mce_timer, data);
+	int *n;
 
 	WARN_ON(smp_processor_id() != data);
 
@@ -470,14 +472,14 @@ static void mcheck_timer(unsigned long data)
 	 * Alert userspace if needed.  If we logged an MCE, reduce the
 	 * polling interval, otherwise increase the polling interval.
 	 */
+	n = &__get_cpu_var(next_interval);
 	if (mce_notify_user()) {
-		next_interval = max(next_interval/2, HZ/100);
+		*n = max(*n/2, HZ/100);
 	} else {
-		next_interval = min(next_interval * 2,
-				(int)round_jiffies_relative(check_interval*HZ));
+		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
 	}
 
-	t->expires = jiffies + next_interval;
+	t->expires = jiffies + *n;
 	add_timer(t);
 }
 
@@ -584,7 +586,7 @@ static void mce_init(void *dummy)
 	 * Log the machine checks left over from the previous reset.
 	 */
 	bitmap_fill(all_banks, MAX_NR_BANKS);
-	machine_check_poll(MCP_UC, &all_banks);
+	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);
 
 	set_in_cr4(X86_CR4_MCE);
 
@@ -632,14 +634,13 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
+	int *n = &__get_cpu_var(next_interval);
 
-	/* data race harmless because everyone sets to the same value */
-	if (!next_interval)
-		next_interval = check_interval * HZ;
-	if (!next_interval)
+	*n = check_interval * HZ;
+	if (!*n)
 		return;
 	setup_timer(t, mcheck_timer, smp_processor_id());
-	t->expires = round_jiffies(jiffies + next_interval);
+	t->expires = round_jiffies(jiffies + *n);
 	add_timer(t);
 }
 
@@ -907,7 +908,6 @@ static void mce_cpu_restart(void *data)
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
-	next_interval = check_interval * HZ;
 	on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
@@ -1110,7 +1110,8 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		t->expires = round_jiffies(jiffies + next_interval);
+		t->expires = round_jiffies(jiffies +
+					   __get_cpu_var(next_interval));
 		add_timer_on(t, cpu);
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
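The polling policy itself is unchanged here, only made per-CPU: halve the interval (floored at HZ/100) when an event was logged, double it (capped at check_interval*HZ) when quiet. A small standalone sketch of that exponential back-off/ramp-up, with illustrative constants rather than the kernel's:

  #include <stdio.h>

  #define HZ		1000		/* illustrative tick rate */
  #define CHECK_MAX	(5 * 60 * HZ)	/* check_interval * HZ */

  static int next_interval = CHECK_MAX;

  /* Halve on activity, double when idle -- same shape as mcheck_timer(). */
  static void adjust(int event_seen)
  {
  	if (event_seen)
  		next_interval = next_interval / 2 > HZ / 100 ?
  				next_interval / 2 : HZ / 100;
  	else
  		next_interval = next_interval * 2 < CHECK_MAX ?
  				next_interval * 2 : CHECK_MAX;
  }

  int main(void)
  {
  	for (int i = 0; i < 4; i++) {
  		adjust(1);
  		printf("after event: %d jiffies\n", next_interval);
  	}
  	adjust(0);
  	printf("after quiet: %d jiffies\n", next_interval);
  	return 0;
  }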
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index f93047fed791..d5e30397246b 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 	if (c->x86_max_cores * smp_num_siblings > 1) {
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
 		seq_printf(m, "siblings\t: %d\n",
-			   cpumask_weight(cpu_sibling_mask(cpu)));
+			   cpumask_weight(cpu_core_mask(cpu)));
 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ef2c3563357d..006281302925 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1074,12 +1074,13 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 	u64 addr;
 	u64 start;
 
-	start = startt;
-	while (size < sizet && (start + 1))
+	for (start = startt; ; start += size) {
 		start = find_e820_area_size(start, &size, align);
-
-	if (size < sizet)
-		return 0;
+		if (!(start + 1))
+			return 0;
+		if (size >= sizet)
+			break;
+	}
 
 #ifdef CONFIG_X86_32
 	if (start >= MAXMEM)
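The rewritten loop is the classic "search with an all-ones failure sentinel" pattern: find_e820_area_size() returns (u64)-1 when nothing is found, and `!(start + 1)` is the overflow-free way to test for it. A minimal sketch of the control flow with a stubbed-out search function standing in for the e820 lookup:

  #include <stdio.h>
  #include <stdint.h>

  /* Stub standing in for find_e820_area_size(): returns the region start
   * and writes its size, or (uint64_t)-1 when the search is exhausted. */
  static uint64_t find_area(uint64_t start, uint64_t *size)
  {
  	static const struct { uint64_t start, size; } map[] = {
  		{ 0x1000, 0x2000 }, { 0x9000, 0x8000 },
  	};

  	for (unsigned i = 0; i < 2; i++)
  		if (map[i].start >= start) {
  			*size = map[i].size;
  			return map[i].start;
  		}
  	return (uint64_t)-1;
  }

  int main(void)
  {
  	uint64_t start, size = 0, want = 0x4000;

  	for (start = 0; ; start += size) {
  		start = find_area(start, &size);
  		if (!(start + 1))		/* sentinel: start == -1 */
  			return puts("no fit"), 1;
  		if (size >= want)
  			break;			/* found a large-enough hole */
  	}
  	printf("found 0x%llx bytes at 0x%llx\n",
  	       (unsigned long long)size, (unsigned long long)start);
  	return 0;
  }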
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a331ec38af9e..38946c6e8433 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1410,7 +1410,10 @@ ENTRY(paranoid_exit)
 paranoid_swapgs:
 	TRACE_IRQS_IRETQ 0
 	SWAPGS_UNSAFE_STACK
+	RESTORE_ALL 8
+	jmp irq_return
 paranoid_restore:
+	TRACE_IRQS_IRETQ 0
 	RESTORE_ALL 8
 	jmp irq_return
 paranoid_userspace:
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 61df77532120..18dfa30795c9 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,9 +18,10 @@
 #include <linux/init.h>
 #include <linux/list.h>
 
+#include <trace/syscall.h>
+
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
-#include <linux/ftrace.h>
 #include <asm/nops.h>
 #include <asm/nmi.h>
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 648b3a2a3a44..81408b93f887 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -236,6 +236,10 @@ static void hpet_stop_counter(void)
 	unsigned long cfg = hpet_readl(HPET_CFG);
 	cfg &= ~HPET_CFG_ENABLE;
 	hpet_writel(cfg, HPET_CFG);
+}
+
+static void hpet_reset_counter(void)
+{
 	hpet_writel(0, HPET_COUNTER);
 	hpet_writel(0, HPET_COUNTER + 4);
 }
@@ -250,6 +254,7 @@ static void hpet_start_counter(void)
 static void hpet_restart_counter(void)
 {
 	hpet_stop_counter();
+	hpet_reset_counter();
 	hpet_start_counter();
 }
 
@@ -309,7 +314,7 @@ static int hpet_setup_msi_irq(unsigned int irq);
 static void hpet_set_mode(enum clock_event_mode mode,
 			  struct clock_event_device *evt, int timer)
 {
-	unsigned long cfg;
+	unsigned long cfg, cmp, now;
 	uint64_t delta;
 
 	switch (mode) {
@@ -317,12 +322,23 @@ static void hpet_set_mode(enum clock_event_mode mode,
 		hpet_stop_counter();
 		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
 		delta >>= evt->shift;
+		now = hpet_readl(HPET_COUNTER);
+		cmp = now + (unsigned long) delta;
 		cfg = hpet_readl(HPET_Tn_CFG(timer));
 		/* Make sure we use edge triggered interrupts */
 		cfg &= ~HPET_TN_LEVEL;
 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
 		       HPET_TN_SETVAL | HPET_TN_32BIT;
 		hpet_writel(cfg, HPET_Tn_CFG(timer));
+		hpet_writel(cmp, HPET_Tn_CMP(timer));
+		udelay(1);
+		/*
+		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
+		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
+		 * bit is automatically cleared after the first write.
+		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
+		 * Publication # 24674)
+		 */
 		hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
 		hpet_start_counter();
 		hpet_print_config();
@@ -722,7 +738,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 /*
  * Clock source related code
  */
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
 {
 	return (cycle_t)hpet_readl(HPET_COUNTER);
 }
@@ -756,7 +772,7 @@ static int hpet_clocksource_register(void)
 	hpet_restart_counter();
 
 	/* Verify whether hpet counter works */
-	t1 = read_hpet();
+	t1 = hpet_readl(HPET_COUNTER);
 	rdtscll(start);
 
 	/*
@@ -770,7 +786,7 @@ static int hpet_clocksource_register(void)
 		rdtscll(now);
 	} while ((now - start) < 200000UL);
 
-	if (t1 == read_hpet()) {
+	if (t1 == hpet_readl(HPET_COUNTER)) {
 		printk(KERN_WARNING
 			"HPET counter not counting. HPET disabled\n");
 		return -ENODEV;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 3475440baa54..c2e0bb0890d4 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
 	 * to just read by itself. So use jiffies to emulate a free
 	 * running counter:
 	 */
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
 	static int old_count;
 	static u32 old_jifs;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3aaf7b9e3a8b..c3fe010d74c8 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -65,7 +65,7 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 		seq_printf(p, "  Spurious interrupts\n");
 #endif
 	if (generic_interrupt_extension) {
-		seq_printf(p, "PLT: ");
+		seq_printf(p, "%*s: ", prec, "PLT");
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
 		seq_printf(p, "  Platform interrupts\n");
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 137f2e8132df..223af43f1526 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }
 
+static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+{
+	return kvm_clock_read();
+}
+
 /*
  * If we don't do that, there is the possibility that the guest
  * will calibrate under heavy load - thus, getting a lower lpj -
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
 
 static struct clocksource kvm_clock = {
 	.name = "kvm-clock",
-	.read = kvm_clock_read,
+	.read = kvm_clock_get_cycles,
 	.rating = 400,
 	.mask = CLOCKSOURCE_MASK(64),
 	.mult = 1 << KVM_SCALE,
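
Several hunks in this merge (hpet, i8253, kvmclock, tsc, uv_time, vmiclock, lguest) adapt clocksource read routines to the new cycle_t (*read)(struct clocksource *cs) callback signature; kvmclock does it with a thin wrapper so the zero-argument helper survives for its other callers. The adapter pattern in a self-contained sketch (the types and names below mimic, but are not, the kernel's):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

struct clocksource {
	const char *name;
	cycle_t (*read)(struct clocksource *cs);	/* new signature */
};

/* existing zero-argument helper, still used by other code paths */
static cycle_t clock_read(void)
{
	static cycle_t counter;
	return ++counter;
}

/* wrapper adapting the helper to the callback signature */
static cycle_t clock_get_cycles(struct clocksource *cs)
{
	(void)cs;		/* unused, as in kvm_clock_get_cycles() */
	return clock_read();
}

static struct clocksource clock = {
	.name = "sketch-clock",
	.read = clock_get_cycles,
};

int main(void)
{
	printf("%s: %llu\n", clock.name,
	       (unsigned long long)clock.read(&clock));
	return 0;
}
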
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index e7368c1da01d..c1c429d00130 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -194,7 +194,7 @@ void machine_kexec(struct kimage *image)
 				unsigned int preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		save_processor_state();
 #endif
 
@@ -253,7 +253,7 @@ void machine_kexec(struct kimage *image)
 					image->preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		restore_processor_state();
 #endif
 
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 89cea4d44679..84c3bf209e98 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -274,7 +274,7 @@ void machine_kexec(struct kimage *image)
 	int save_ftrace_enabled;
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		save_processor_state();
 #endif
 
@@ -333,7 +333,7 @@ void machine_kexec(struct kimage *image)
 					image->preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		restore_processor_state();
 #endif
 
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef310..98c470c069d1 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
 EXPORT_SYMBOL_GPL(ucode_cpu_info);
 
 #ifdef CONFIG_MICROCODE_OLD_INTERFACE
-struct update_for_cpu {
-	const void __user	*buf;
-	size_t			size;
-};
-
-static long update_for_cpu(void *_ufc)
-{
-	struct update_for_cpu *ufc = _ufc;
-	int error;
-
-	error = microcode_ops->request_microcode_user(smp_processor_id(),
-						      ufc->buf, ufc->size);
-	if (error < 0)
-		return error;
-	if (!error)
-		microcode_ops->apply_microcode(smp_processor_id());
-	return error;
-}
-
 static int do_microcode_update(const void __user *buf, size_t size)
 {
+	cpumask_t old;
 	int error = 0;
 	int cpu;
-	struct update_for_cpu ufc = { .buf = buf, .size = size };
+
+	old = current->cpus_allowed;
 
 	for_each_online_cpu(cpu) {
 		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 		if (!uci->valid)
 			continue;
-		error = work_on_cpu(cpu, update_for_cpu, &ufc);
+
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		error = microcode_ops->request_microcode_user(cpu, buf, size);
 		if (error < 0)
-			break;
+			goto out;
+		if (!error)
+			microcode_ops->apply_microcode(cpu);
 	}
+out:
+	set_cpus_allowed_ptr(current, &old);
 	return error;
 }
 
@@ -391,8 +380,6 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
 		return err;
 
 	err = microcode_init_cpu(cpu);
-	if (err)
-		sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 
 	return err;
 }
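
The do_microcode_update() rewrite above drops the work_on_cpu() helper and instead migrates the calling task to each CPU with set_cpus_allowed_ptr(), restoring the original mask on exit. A userspace sketch of the same save/pin/restore discipline using the Linux sched_setaffinity() syscall (error handling trimmed for brevity):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t old, one;
	int cpu, ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	/* save the current mask, as the patch saves current->cpus_allowed */
	sched_getaffinity(0, sizeof(old), &old);

	for (cpu = 0; cpu < ncpus; cpu++) {
		CPU_ZERO(&one);
		CPU_SET(cpu, &one);
		/* pin to one CPU, then do the per-CPU work there */
		if (sched_setaffinity(0, sizeof(one), &one) == 0)
			printf("running on cpu%d\n", sched_getcpu());
	}

	/* always restore the caller's mask, as the out: label does */
	sched_setaffinity(0, sizeof(old), &old);
	return 0;
}
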
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index dce99dca6cf8..70fd7e414c15 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -679,7 +679,7 @@ void __init get_smp_config(void)
 	__get_smp_config(0);
 }
 
-static void smp_reserve_bootmem(struct mpf_intel *mpf)
+static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
 {
 	unsigned long size = get_mpc_size(mpf->physptr);
 #ifdef CONFIG_X86_32
@@ -838,7 +838,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
 
 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
 
-static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
+static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 {
 	int i;
 
@@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 	}
 }
 #else /* CONFIG_X86_IO_APIC */
-static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
+static
+inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
 #endif /* CONFIG_X86_IO_APIC */
 
 static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 34f12e9996ed..221a3853e268 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -50,7 +50,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
-struct dma_map_ops swiotlb_dma_ops = {
+static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index fe9345c967de..23b7c8f017e2 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -21,7 +21,6 @@
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/signal.h>
-#include <linux/ftrace.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -35,6 +34,8 @@
 #include <asm/proto.h>
 #include <asm/ds.h>
 
+#include <trace/syscall.h>
+
 #include "tls.h"
 
 enum x86_regset {
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index e95022e4f5d5..7563b31b4f03 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -261,8 +261,6 @@ static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
 {
 	if (hpet_force_user)
 		old_ich_force_enable_hpet(dev);
-	else
-		hpet_print_force_info();
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2aef36d8aca2..1340dad417f4 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -224,6 +224,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
 		},
 	},
+	{	/* Handle problems with rebooting on Dell DXP061 */
+		.callback = set_bios_reboot,
+		.ident = "Dell DXP061",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
+		},
+	},
 	{ }
 };
 
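
The reboot quirk above is purely data-driven: each dmi_system_id entry pairs a match on DMI vendor/product strings with a callback (set_bios_reboot) that flips the reboot method. A minimal sketch of the same table-plus-callback dispatch (the structures below imitate the kernel's, simplified):

#include <stdio.h>
#include <string.h>

struct quirk {
	void (*callback)(void);
	const char *ident;
	const char *vendor;	/* stands in for the DMI_SYS_VENDOR match */
	const char *product;	/* stands in for the DMI_PRODUCT_NAME match */
};

static void set_bios_reboot(void)
{
	printf("falling back to BIOS reboot\n");
}

static const struct quirk table[] = {
	{ set_bios_reboot, "Dell DXP061", "Dell Inc.", "Dell DXP061" },
	{ 0 }			/* terminator, like the kernel's { } */
};

static void apply_quirks(const char *vendor, const char *product)
{
	for (const struct quirk *q = table; q->callback; q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
			q->callback();
}

int main(void)
{
	apply_quirks("Dell Inc.", "Dell DXP061");
	return 0;
}
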
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index deb5ebb32c3b..ed0c33761e6d 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,6 +25,8 @@ static int uv_bau_retry_limit __read_mostly;
 
 /* position of pnode (which is nasid>>1): */
 static int uv_nshift __read_mostly;
+/* base pnode in this partition */
+static int uv_partition_base_pnode __read_mostly;
 
 static unsigned long uv_mmask __read_mostly;
 
@@ -32,6 +34,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
 
 /*
+ * Determine the first node on a blade.
+ */
+static int __init blade_to_first_node(int blade)
+{
+	int node, b;
+
+	for_each_online_node(node) {
+		b = uv_node_to_blade_id(node);
+		if (blade == b)
+			return node;
+	}
+	return -1; /* shouldn't happen */
+}
+
+/*
+ * Determine the apicid of the first cpu on a blade.
+ */
+static int __init blade_to_first_apicid(int blade)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu)
+		if (blade == uv_cpu_to_blade_id(cpu))
+			return per_cpu(x86_cpu_to_apicid, cpu);
+	return -1;
+}
+
+/*
  * Free a software acknowledge hardware resource by clearing its Pending
  * bit. This will return a reply to the sender.
  * If the message has timed out, a reply has already been sent by the
@@ -67,7 +97,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
 	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
 	cpu = uv_blade_processor_id();
 	msg->number_of_cpus =
-	    uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
+		uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
 	this_cpu_mask = 1UL << cpu;
 	if (msp->seen_by.bits & this_cpu_mask)
 		return;
@@ -215,14 +245,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  * Returns @flush_mask if some remote flushing remains to be done. The
  * mask will have some bits still set.
  */
-const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
+const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
 					     struct bau_desc *bau_desc,
 					     struct cpumask *flush_mask)
 {
 	int completion_status = 0;
 	int right_shift;
 	int tries = 0;
-	int blade;
+	int pnode;
 	int bit;
 	unsigned long mmr_offset;
 	unsigned long index;
@@ -265,8 +295,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
 	 * use the IPI method of shootdown on them.
 	 */
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		if (blade == this_blade)
+		pnode = uv_cpu_to_pnode(bit);
+		if (pnode == this_pnode)
 			continue;
 		cpumask_clear_cpu(bit, flush_mask);
 	}
@@ -309,16 +339,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
 	int i;
 	int bit;
-	int blade;
+	int pnode;
 	int uv_cpu;
-	int this_blade;
+	int this_pnode;
 	int locals = 0;
 	struct bau_desc *bau_desc;
 
 	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
 	uv_cpu = uv_blade_processor_id();
-	this_blade = uv_numa_blade_id();
+	this_pnode = uv_hub_info->pnode;
 	bau_desc = __get_cpu_var(bau_control).descriptor_base;
 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
 
@@ -326,13 +356,14 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	i = 0;
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
-		if (blade == this_blade) {
+		pnode = uv_cpu_to_pnode(bit);
+		BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
+		if (pnode == this_pnode) {
 			locals++;
 			continue;
 		}
-		bau_node_set(blade, &bau_desc->distribution);
+		bau_node_set(pnode - uv_partition_base_pnode,
+				&bau_desc->distribution);
 		i++;
 	}
 	if (i == 0) {
@@ -350,7 +381,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	bau_desc->payload.address = va;
 	bau_desc->payload.sending_cpu = cpu;
 
-	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
+	return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
 }
 
 /*
@@ -418,24 +449,58 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+/*
+ * uv_enable_timeouts
+ *
+ * Each target blade (i.e. blades that have cpu's) needs to have
+ * shootdown message timeouts enabled.  The timeout does not cause
+ * an interrupt, but causes an error message to be returned to
+ * the sender.
+ */
 static void uv_enable_timeouts(void)
 {
-	int i;
 	int blade;
-	int last_blade;
+	int nblades;
 	int pnode;
-	int cur_cpu = 0;
-	unsigned long apicid;
+	unsigned long mmr_image;
 
-	last_blade = -1;
-	for_each_online_node(i) {
-		blade = uv_node_to_blade_id(i);
-		if (blade == last_blade)
+	nblades = uv_num_possible_blades();
+
+	for (blade = 0; blade < nblades; blade++) {
+		if (!uv_blade_nr_possible_cpus(blade))
 			continue;
-		last_blade = blade;
-		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+
 		pnode = uv_blade_to_pnode(blade);
-		cur_cpu += uv_blade_nr_possible_cpus(i);
+		mmr_image =
+		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
+		/*
+		 * Set the timeout period and then lock it in, in three
+		 * steps; captures and locks in the period.
+		 *
+		 * To program the period, the SOFT_ACK_MODE must be off.
+		 */
+		mmr_image &= ~((unsigned long)1 <<
+			       UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+		/*
+		 * Set the 4-bit period.
+		 */
+		mmr_image &= ~((unsigned long)0xf <<
+			       UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
+			      UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+		/*
+		 * Subsequent reversals of the timebase bit (3) cause an
+		 * immediate timeout of one or all INTD resources as
+		 * indicated in bits 2:0 (7 causes all of them to timeout).
+		 */
+		mmr_image |= ((unsigned long)1 <<
+			      UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
 	}
 }
 
@@ -482,8 +547,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 			   stat->requestee, stat->onetlb, stat->alltlb,
 			   stat->s_retry, stat->d_retry, stat->ptc_i);
 		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
-			   uv_read_global_mmr64(uv_blade_to_pnode
-					(uv_cpu_to_blade_id(cpu)),
+			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
 				UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
 			   stat->sflush, stat->dflush,
 			   stat->retriesok, stat->nomsg,
@@ -617,16 +681,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
  * finish the initialization of the per-blade control structures
  */
 static void __init
-uv_table_bases_finish(int blade, int node, int cur_cpu,
+uv_table_bases_finish(int blade,
 		      struct bau_control *bau_tablesp,
 		      struct bau_desc *adp)
 {
 	struct bau_control *bcp;
-	int i;
+	int cpu;
 
-	for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
-		bcp = (struct bau_control *)&per_cpu(bau_control, i);
+	for_each_present_cpu(cpu) {
+		if (blade != uv_cpu_to_blade_id(cpu))
+			continue;
 
+		bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
 		bcp->bau_msg_head = bau_tablesp->va_queue_first;
 		bcp->va_queue_first = bau_tablesp->va_queue_first;
 		bcp->va_queue_last = bau_tablesp->va_queue_last;
@@ -649,11 +715,10 @@ uv_activation_descriptor_init(int node, int pnode)
 	struct bau_desc *adp;
 	struct bau_desc *ad2;
 
-	adp = (struct bau_desc *)
-	    kmalloc_node(16384, GFP_KERNEL, node);
+	adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
 	BUG_ON(!adp);
 
-	pa = __pa((unsigned long)adp);
+	pa = uv_gpa(adp); /* need the real nasid*/
 	n = pa >> uv_nshift;
 	m = pa & uv_mmask;
 
@@ -667,8 +732,12 @@ uv_activation_descriptor_init(int node, int pnode)
 	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
 		memset(ad2, 0, sizeof(struct bau_desc));
 		ad2->header.sw_ack_flag = 1;
-		ad2->header.base_dest_nodeid =
-		    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
+		/*
+		 * base_dest_nodeid is the first node in the partition, so
+		 * the bit map will indicate partition-relative node numbers.
+		 * note that base_dest_nodeid is actually a nasid.
+		 */
+		ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
 		ad2->header.command = UV_NET_ENDPOINT_INTD;
 		ad2->header.int_both = 1;
 		/*
@@ -686,6 +755,8 @@ static struct bau_payload_queue_entry * __init
 uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 {
 	struct bau_payload_queue_entry *pqp;
+	unsigned long pa;
+	int pn;
 	char *cp;
 
 	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
@@ -696,10 +767,14 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 	cp = (char *)pqp + 31;
 	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
 	bau_tablesp->va_queue_first = pqp;
+	/*
+	 * need the pnode of where the memory was really allocated
+	 */
+	pa = uv_gpa(pqp);
+	pn = pa >> uv_nshift;
 	uv_write_global_mmr64(pnode,
 			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
-			      ((unsigned long)pnode <<
-			       UV_PAYLOADQ_PNODE_SHIFT) |
+			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
 			      uv_physnodeaddr(pqp));
 	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
 			      uv_physnodeaddr(pqp));
@@ -715,8 +790,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 /*
  * Initialization of each UV blade's structures
  */
-static int __init uv_init_blade(int blade, int node, int cur_cpu)
+static int __init uv_init_blade(int blade)
 {
+	int node;
 	int pnode;
 	unsigned long pa;
 	unsigned long apicid;
@@ -724,16 +800,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
 	struct bau_payload_queue_entry *pqp;
 	struct bau_control *bau_tablesp;
 
+	node = blade_to_first_node(blade);
 	bau_tablesp = uv_table_bases_init(blade, node);
 	pnode = uv_blade_to_pnode(blade);
 	adp = uv_activation_descriptor_init(node, pnode);
 	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
-	uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
+	uv_table_bases_finish(blade, bau_tablesp, adp);
 	/*
 	 * the below initialization can't be in firmware because the
 	 * messaging IRQ will be determined by the OS
 	 */
-	apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+	apicid = blade_to_first_apicid(blade);
 	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
 	if ((pa & 0xff) != UV_BAU_MESSAGE) {
 		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -748,9 +825,7 @@ static int __init uv_bau_init(void)
 static int __init uv_bau_init(void)
 {
 	int blade;
-	int node;
 	int nblades;
-	int last_blade;
 	int cur_cpu;
 
 	if (!is_uv_system())
@@ -763,29 +838,21 @@ static int __init uv_bau_init(void)
 	uv_bau_retry_limit = 1;
 	uv_nshift = uv_hub_info->n_val;
 	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
-	nblades = 0;
-	last_blade = -1;
-	cur_cpu = 0;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		nblades++;
-	}
+	nblades = uv_num_possible_blades();
+
 	uv_bau_table_bases = (struct bau_control **)
 	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
 	BUG_ON(!uv_bau_table_bases);
 
-	last_blade = -1;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		uv_init_blade(blade, node, cur_cpu);
-		cur_cpu += uv_blade_nr_possible_cpus(blade);
-	}
+	uv_partition_base_pnode = 0x7fffffff;
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade) &&
+		    (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
+			uv_partition_base_pnode = uv_blade_to_pnode(blade);
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade))
+			uv_init_blade(blade);
+
 	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
 	uv_enable_timeouts();
 
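
uv_bau_init() now computes uv_partition_base_pnode as the minimum pnode over blades that actually have CPUs, so distribution-map bits can be partition-relative. The two passes reduce to a classic guarded-minimum scan; a sketch with stub data in place of the UV accessors (everything below is a stand-in, not a UV API):

#include <limits.h>
#include <stdio.h>

#define NBLADES 4

static int blade_cpus[NBLADES]  = { 0, 2, 4, 0 };	/* CPUs per blade */
static int blade_pnode[NBLADES] = { 3, 7, 5, 1 };	/* pnode per blade */

int main(void)
{
	int blade, base = INT_MAX;	/* the 0x7fffffff sentinel in the patch */

	/* pass 1: minimum pnode among blades that have possible CPUs */
	for (blade = 0; blade < NBLADES; blade++)
		if (blade_cpus[blade] && blade_pnode[blade] < base)
			base = blade_pnode[blade];

	/* pass 2: init only populated blades, using partition-relative ids */
	for (blade = 0; blade < NBLADES; blade++)
		if (blade_cpus[blade])
			printf("blade %d: relative pnode %d\n",
			       blade, blade_pnode[blade] - base);
	return 0;
}
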
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a567ebe6361..d57de05dc430 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
  * code, which is necessary to support wrapping clocksources like pm
  * timer.
  */
-static cycle_t read_tsc(void)
+static cycle_t read_tsc(struct clocksource *cs)
 {
 	cycle_t ret = (cycle_t)get_cycles();
 
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
index 67f9b9dbf800..36afb98675a4 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -21,6 +21,7 @@
 
 #include <linux/sysdev.h>
 #include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
 
 struct kobject *sgi_uv_kobj;
 
@@ -47,6 +48,9 @@ static int __init sgi_uv_sysfs_init(void)
 {
 	unsigned long ret;
 
+	if (!is_uv_system())
+		return -ENODEV;
+
 	if (!sgi_uv_kobj)
 		sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
 	if (!sgi_uv_kobj) {
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
index 2ffb6c53326e..583f11d5c480 100644
--- a/arch/x86/kernel/uv_time.c
+++ b/arch/x86/kernel/uv_time.c
@@ -29,7 +29,7 @@
 
 #define RTC_NAME	"sgi_rtc"
 
-static cycle_t uv_read_rtc(void);
+static cycle_t uv_read_rtc(struct clocksource *cs);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
 static void uv_rtc_timer_setup(enum clock_event_mode,
 	struct clock_event_device *);
@@ -123,7 +123,7 @@ static int uv_setup_intr(int cpu, u64 expires)
 	/* Initialize comparator value */
 	uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
 
-	return (expires < uv_read_rtc() && !uv_intr_pending(pnode));
+	return (expires < uv_read_rtc(NULL) && !uv_intr_pending(pnode));
 }
 
 /*
@@ -256,7 +256,7 @@ static int uv_rtc_unset_timer(int cpu)
 
 	spin_lock_irqsave(&head->lock, flags);
 
-	if (head->next_cpu == bcpu && uv_read_rtc() >= *t)
+	if (head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t)
 		rc = 1;
 
 	*t = ULLONG_MAX;
@@ -278,7 +278,7 @@ static int uv_rtc_unset_timer(int cpu)
 /*
  * Read the RTC.
  */
-static cycle_t uv_read_rtc(void)
+static cycle_t uv_read_rtc(struct clocksource *cs)
 {
 	return (cycle_t)uv_read_local_mmr(UVH_RTC);
 }
@@ -291,7 +291,7 @@ static int uv_rtc_next_event(unsigned long delta,
 {
 	int ced_cpu = cpumask_first(ced->cpumask);
 
-	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc());
+	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
 }
 
 /*
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index d303369a7bad..2b3eb82efeeb 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
 /** vmi clocksource */
 static struct clocksource clocksource_vmi;
 
-static cycle_t read_real_cycles(void)
+static cycle_t read_real_cycles(struct clocksource *cs)
 {
 	cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
 	return max(ret, clocksource_vmi.cycle_last);
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 2b54fe002e94..c5ee17e8c6d9 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -89,7 +89,7 @@ int save_i387_xstate(void __user *buf)
 
 	if (!used_math())
 		return 0;
-	clear_used_math(); /* trigger finit */
+
 	if (task_thread_info(tsk)->status & TS_USEDFPU) {
 		/*
 		 * Start with clearing the user buffer. This will present a
@@ -114,6 +114,8 @@ int save_i387_xstate(void __user *buf)
 			return -1;
 	}
 
+	clear_used_math(); /* trigger finit */
+
 	if (task_thread_info(tsk)->status & TS_XSAVE) {
 		struct _fpstate __user *fx = buf;
 		struct _xstate __user *x = buf;
@@ -324,7 +326,7 @@ void __ref xsave_cntxt_init(void)
 	}
 
 	/*
-	 * for now OS knows only about FP/SSE
+	 * Support only the state known to OS.
 	 */
 	pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
 	xsave_init();
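
The xsave.c hunks move clear_used_math() below the user-buffer copy, so the task's FPU state is only discarded once the fallible copy-out path has succeeded. The general rule, never clobber state before the step that can fail, in a tiny sketch (all names are illustrative):

#include <stdio.h>
#include <string.h>

static int state_valid = 1;

/* fallible step: stands in for the copy-out to user space */
static int copy_out(char *dst, const char *src, int fail)
{
	if (fail)
		return -1;
	strcpy(dst, src);
	return 0;
}

static int save_state(char *buf, int fail)
{
	if (copy_out(buf, "fpu-state", fail))
		return -1;	/* state_valid still set: caller can retry */

	state_valid = 0;	/* clear only after the copy succeeded */
	return 0;
}

int main(void)
{
	char buf[32];

	save_state(buf, 1);
	printf("after failed save, state_valid=%d\n", state_valid);
	save_state(buf, 0);
	printf("after good save, state_valid=%d\n", state_valid);
	return 0;
}
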
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a36f7f7c4c7..b6caf1329b1b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1248,7 +1248,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
-	sp->global = role.cr4_pge;
+	sp->global = 0;
 	hlist_add_head(&sp->hash_link, bucket);
 	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8ca100a9ecac..7c1ce5ac6131 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2775,6 +2775,9 @@ out:
 
 void kvm_arch_exit(void)
 {
+	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
+					    CPUFREQ_TRANSITION_NOTIFIER);
 	kvm_x86_ops = NULL;
 	kvm_mmu_module_exit();
 }
@@ -4159,6 +4162,11 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+	if (vcpu->arch.time_page) {
+		kvm_release_page_dirty(vcpu->arch.time_page);
+		vcpu->arch.time_page = NULL;
+	}
+
 	kvm_x86_ops->vcpu_free(vcpu);
 }
 
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index cfb2d68dc795..8f935c6d5512 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -279,15 +279,15 @@ static void lguest_load_idt(const struct desc_ptr *desc)
  * controls the entire thing and the Guest asks it to make changes using the
  * LOAD_GDT hypercall.
  *
- * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY
- * hypercall and use that repeatedly to load a new IDT.  I don't think it
- * really matters, but wouldn't it be nice if they were the same?  Wouldn't
- * it be even better if you were the one to send the patch to fix it?
+ * This is the exactly like the IDT code.
  */
 static void lguest_load_gdt(const struct desc_ptr *desc)
 {
-	BUG_ON((desc->size + 1) / 8 != GDT_ENTRIES);
-	kvm_hypercall2(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES);
+	unsigned int i;
+	struct desc_struct *gdt = (void *)desc->address;
+
+	for (i = 0; i < (desc->size+1)/8; i++)
+		kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
 }
 
@@ -297,7 +297,9 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
 				   const void *desc, int type)
 {
 	native_write_gdt_entry(dt, entrynum, desc, type);
-	kvm_hypercall2(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES);
+	/* Tell Host about this new entry. */
+	kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
+		       dt[entrynum].a, dt[entrynum].b);
 }
 
 /* OK, I lied.  There are three "thread local storage" GDT entries which change
@@ -667,7 +669,7 @@ static unsigned long lguest_tsc_khz(void)
 
 /* If we can't use the TSC, the kernel falls back to our lower-priority
  * "lguest_clock", where we read the time value given to us by the Host. */
-static cycle_t lguest_clock_read(void)
+static cycle_t lguest_clock_read(struct clocksource *cs)
 {
 	unsigned long sec, nsec;
 
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index be54176e9eb2..6340cef6798a 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -219,6 +219,22 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 	return 1;
 }
 
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start:	starting user address
+ * @nr_pages:	number of pages from start to pin
+ * @write:	whether pages will be written to
+ * @pages:	array that receives pointers to the pages pinned.
+ *		Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages)
 {
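
The new kernel-doc above spells out the contract of get_user_pages_fast(): it may pin fewer pages than requested, and the caller owns a reference on each page returned. A hedged usage sketch in kernel style (not a complete driver; assumes a context where uaddr is a valid user address):

#include <linux/mm.h>

/* illustrative only: pin up to 16 pages of a user buffer, then release */
static int pin_user_buf(unsigned long uaddr)
{
	struct page *pages[16];
	int i, got;

	got = get_user_pages_fast(uaddr, 16, 1, pages);
	if (got < 0)
		return got;		/* nothing pinned: -errno */

	/* ... use the pages; each one holds a reference we must drop ... */
	for (i = 0; i < got; i++)
		put_page(pages[i]);

	return got;			/* may be fewer than 16 */
}
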
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fd3da1dda1c9..ae4f7b5d7104 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -7,6 +7,7 @@
 #include <asm/page.h>
 #include <asm/page_types.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 
@@ -304,8 +305,23 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 #endif
 
 #ifdef CONFIG_X86_64
-	if (!after_bootmem)
+	if (!after_bootmem && !start) {
+		pud_t *pud;
+		pmd_t *pmd;
+
 		mmu_cr4_features = read_cr4();
+
+		/*
+		 * _brk_end cannot change anymore, but it and _end may be
+		 * located on different 2M pages. cleanup_highmap(), however,
+		 * can only consider _end when it runs, so destroy any
+		 * mappings beyond _brk_end here.
+		 */
+		pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+		pmd = pmd_offset(pud, _brk_end - 1);
+		while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+			pmd_clear(pmd);
+	}
 #endif
 	__flush_tlb_all();
 
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0dfa09d69e80..8a450930834f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -280,15 +280,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		return NULL;
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+
+	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
 		free_memtype(phys_addr, phys_addr + size);
 		free_vm_area(area);
 		return NULL;
 	}
 
-	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
+	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 		free_memtype(phys_addr, phys_addr + size);
-		vunmap(area->addr);
+		free_vm_area(area);
 		return NULL;
 	}
 
@@ -374,7 +375,8 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
 	 * - Inherit from confliting mappings otherwise
 	 */
-	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
+	err = reserve_memtype(phys_addr, phys_addr + size,
+				_PAGE_CACHE_WB, &flags);
 	if (err < 0)
 		return NULL;
 
@@ -547,7 +549,7 @@ void __init early_ioremap_reset(void)
 }
 
 static void __init __early_set_fixmap(enum fixed_addresses idx,
-				      unsigned long phys, pgprot_t flags)
+				      phys_addr_t phys, pgprot_t flags)
 {
 	unsigned long addr = __fix_to_virt(idx);
 	pte_t *pte;
@@ -566,7 +568,7 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
 }
 
 static inline void __init early_set_fixmap(enum fixed_addresses idx,
-					   unsigned long phys, pgprot_t prot)
+					   phys_addr_t phys, pgprot_t prot)
 {
 	if (after_paging_init)
 		__set_fixmap(idx, phys, prot);
@@ -607,9 +609,10 @@ static int __init check_early_ioremap_leak(void)
 late_initcall(check_early_ioremap_leak);
 
 static void __init __iomem *
-__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
 {
-	unsigned long offset, last_addr;
+	unsigned long offset;
+	resource_size_t last_addr;
 	unsigned int nrpages;
 	enum fixed_addresses idx0, idx;
 	int i, slot;
@@ -625,15 +628,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 	}
 
 	if (slot < 0) {
-		printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n",
-			 phys_addr, size);
+		printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
+			 (u64)phys_addr, size);
 		WARN_ON(1);
 		return NULL;
 	}
 
 	if (early_ioremap_debug) {
-		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
-		       phys_addr, size, slot);
+		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
+		       (u64)phys_addr, size, slot);
 		dump_stack();
 	}
 
@@ -680,13 +683,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 }
 
 /* Remap an IO device */
-void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *
+early_ioremap(resource_size_t phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
 }
 
 /* Remap memory */
-void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *
+early_memremap(resource_size_t phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }
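
The ioremap.c hunks widen phys_addr to resource_size_t, which may be 32 or 64 bits depending on configuration; the printk format therefore switches to %llx with an explicit (u64) cast so one format string fits both cases. The same portability idiom in plain C:

#include <stdint.h>
#include <stdio.h>

/* resource_size_t may be 32-bit or 64-bit; model the wide case here */
typedef uint64_t resource_size_t;

int main(void)
{
	resource_size_t phys_addr = 0xfee00000;

	/* cast to a fixed 64-bit type so one format string fits all configs */
	printf("early_ioremap(%08llx)\n", (unsigned long long)phys_addr);
	return 0;
}
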
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 4f115e00486b..50dc802a1c46 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -87,7 +87,7 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
 {
 	struct kmmio_probe *p;
 	list_for_each_entry_rcu(p, &kmmio_probes, list) {
-		if (addr >= p->addr && addr <= (p->addr + p->len))
+		if (addr >= p->addr && addr < (p->addr + p->len))
 			return p;
 	}
 	return NULL;
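
The kmmio fix above tightens <= to <: a probe covering len bytes at addr owns the half-open range [addr, addr + len), so the first byte past the mapping must not match. A tiny check that exercises the boundary:

#include <assert.h>
#include <stdio.h>

/* half-open containment test, as in the patched get_kmmio_probe() */
static int in_probe(unsigned long addr, unsigned long base, unsigned long len)
{
	return addr >= base && addr < (base + len);
}

int main(void)
{
	assert(in_probe(0x1000, 0x1000, 0x100));	/* first byte: inside */
	assert(in_probe(0x10ff, 0x1000, 0x100));	/* last byte: inside */
	assert(!in_probe(0x1100, 0x1000, 0x100));	/* one past end: outside */
	printf("boundary checks pass\n");
	return 0;
}
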
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3daefa04ace5..d2530062fe00 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -257,7 +257,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
 }
 #endif
 
-static unsigned long calculate_numa_remap_pages(void)
+static __init unsigned long calculate_numa_remap_pages(void)
 {
 	int nid;
 	unsigned long size, reserve_pages = 0;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index d73aaa892371..2d05a12029dc 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -188,6 +188,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
 	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
 	int nid;
 
+	if (!end)
+		return;
+
 	start = roundup(start, ZONE_ALIGN);
 
 	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 660cac75ae11..b81b41a0481f 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -931,71 +931,94 @@ int _set_memory_uc(unsigned long addr, int numpages)
931 931
932int set_memory_uc(unsigned long addr, int numpages) 932int set_memory_uc(unsigned long addr, int numpages)
933{ 933{
934 int ret;
935
934 /* 936 /*
935 * for now UC MINUS. see comments in ioremap_nocache() 937 * for now UC MINUS. see comments in ioremap_nocache()
936 */ 938 */
937 if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 939 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
938 _PAGE_CACHE_UC_MINUS, NULL)) 940 _PAGE_CACHE_UC_MINUS, NULL);
939 return -EINVAL; 941 if (ret)
942 goto out_err;
943
944 ret = _set_memory_uc(addr, numpages);
945 if (ret)
946 goto out_free;
940 947
941 return _set_memory_uc(addr, numpages); 948 return 0;
949
950out_free:
951 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
952out_err:
953 return ret;
942} 954}
943EXPORT_SYMBOL(set_memory_uc); 955EXPORT_SYMBOL(set_memory_uc);
944 956
945int set_memory_array_uc(unsigned long *addr, int addrinarray) 957int set_memory_array_uc(unsigned long *addr, int addrinarray)
946{ 958{
947 unsigned long start; 959 int i, j;
948 unsigned long end; 960 int ret;
949 int i; 961
950 /* 962 /*
951 * for now UC MINUS. see comments in ioremap_nocache() 963 * for now UC MINUS. see comments in ioremap_nocache()
952 */ 964 */
953 for (i = 0; i < addrinarray; i++) { 965 for (i = 0; i < addrinarray; i++) {
954 start = __pa(addr[i]); 966 ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
955 for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) { 967 _PAGE_CACHE_UC_MINUS, NULL);
956 if (end != __pa(addr[i + 1])) 968 if (ret)
957 break; 969 goto out_free;
958 i++;
959 }
960 if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
961 goto out;
962 } 970 }
963 971
964 return change_page_attr_set(addr, addrinarray, 972 ret = change_page_attr_set(addr, addrinarray,
965 __pgprot(_PAGE_CACHE_UC_MINUS), 1); 973 __pgprot(_PAGE_CACHE_UC_MINUS), 1);
966out: 974 if (ret)
967 for (i = 0; i < addrinarray; i++) { 975 goto out_free;
968 unsigned long tmp = __pa(addr[i]); 976
969 977 return 0;
970 if (tmp == start) 978
971 break; 979out_free:
972 for (end = tmp + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) { 980 for (j = 0; j < i; j++)
973 if (end != __pa(addr[i + 1])) 981 free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
974 break; 982
975 i++; 983 return ret;
976 }
977 free_memtype(tmp, end);
978 }
979 return -EINVAL;
980} 984}
981EXPORT_SYMBOL(set_memory_array_uc); 985EXPORT_SYMBOL(set_memory_array_uc);
982 986
983int _set_memory_wc(unsigned long addr, int numpages) 987int _set_memory_wc(unsigned long addr, int numpages)
984{ 988{
985 return change_page_attr_set(&addr, numpages, 989 int ret;
990 ret = change_page_attr_set(&addr, numpages,
991 __pgprot(_PAGE_CACHE_UC_MINUS), 0);
992
993 if (!ret) {
994 ret = change_page_attr_set(&addr, numpages,
986 __pgprot(_PAGE_CACHE_WC), 0); 995 __pgprot(_PAGE_CACHE_WC), 0);
996 }
997 return ret;
987} 998}
988 999
989int set_memory_wc(unsigned long addr, int numpages) 1000int set_memory_wc(unsigned long addr, int numpages)
990{ 1001{
1002 int ret;
1003
991 if (!pat_enabled) 1004 if (!pat_enabled)
992 return set_memory_uc(addr, numpages); 1005 return set_memory_uc(addr, numpages);
993 1006
994 if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 1007 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
995 _PAGE_CACHE_WC, NULL)) 1008 _PAGE_CACHE_WC, NULL);
996 return -EINVAL; 1009 if (ret)
1010 goto out_err;
1011
1012 ret = _set_memory_wc(addr, numpages);
1013 if (ret)
1014 goto out_free;
1015
1016 return 0;
997 1017
998 return _set_memory_wc(addr, numpages); 1018out_free:
1019 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1020out_err:
1021 return ret;
999} 1022}
1000EXPORT_SYMBOL(set_memory_wc); 1023EXPORT_SYMBOL(set_memory_wc);
1001 1024
@@ -1007,29 +1030,31 @@ int _set_memory_wb(unsigned long addr, int numpages)
1007 1030
1008int set_memory_wb(unsigned long addr, int numpages) 1031int set_memory_wb(unsigned long addr, int numpages)
1009{ 1032{
1010 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); 1033 int ret;
1034
1035 ret = _set_memory_wb(addr, numpages);
1036 if (ret)
1037 return ret;
1011 1038
1012 return _set_memory_wb(addr, numpages); 1039 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1040 return 0;
1013} 1041}
1014EXPORT_SYMBOL(set_memory_wb); 1042EXPORT_SYMBOL(set_memory_wb);
1015 1043
1016int set_memory_array_wb(unsigned long *addr, int addrinarray) 1044int set_memory_array_wb(unsigned long *addr, int addrinarray)
1017{ 1045{
1018 int i; 1046 int i;
1047 int ret;
1019 1048
1020 for (i = 0; i < addrinarray; i++) { 1049 ret = change_page_attr_clear(addr, addrinarray,
1021 unsigned long start = __pa(addr[i]);
1022 unsigned long end;
1023
1024 for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
1025 if (end != __pa(addr[i + 1]))
1026 break;
1027 i++;
1028 }
1029 free_memtype(start, end);
1030 }
1031 return change_page_attr_clear(addr, addrinarray,
1032 __pgprot(_PAGE_CACHE_MASK), 1); 1050 __pgprot(_PAGE_CACHE_MASK), 1);
1051 if (ret)
1052 return ret;
1053
1054 for (i = 0; i < addrinarray; i++)
1055 free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
1056
1057 return 0;
1033} 1058}
1034EXPORT_SYMBOL(set_memory_array_wb); 1059EXPORT_SYMBOL(set_memory_array_wb);
1035 1060
@@ -1122,6 +1147,8 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
1122 1147
1123 retval = cpa_clear_pages_array(pages, addrinarray, 1148 retval = cpa_clear_pages_array(pages, addrinarray,
1124 __pgprot(_PAGE_CACHE_MASK)); 1149 __pgprot(_PAGE_CACHE_MASK));
1150 if (retval)
1151 return retval;
1125 1152
1126 for (i = 0; i < addrinarray; i++) { 1153 for (i = 0; i < addrinarray; i++) {
1127 start = (unsigned long)page_address(pages[i]); 1154 start = (unsigned long)page_address(pages[i]);
@@ -1129,7 +1156,7 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
 		free_memtype(start, end);
 	}
 
-	return retval;
+	return 0;
 }
 EXPORT_SYMBOL(set_pages_array_wb);
 
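Both write-back paths above now share one discipline: perform the page-attribute change first and release the memtype reservation only once it has succeeded, instead of freeing first and hoping the attribute change cannot fail. A condensed sketch of that pattern, assuming the kernel helpers named in the hunks (the wrapper name is illustrative):

/* Hypothetical condensation of the flow in set_memory_wb() above. */
static int example_restore_wb(unsigned long addr, int numpages)
{
	int ret = _set_memory_wb(addr, numpages);	/* PTEs first */

	if (ret)
		return ret;	/* reservation kept: PTEs unchanged */

	/* Safe to drop the bookkeeping only after the PTEs are WB. */
	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
	return 0;
}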
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 640339ee4fb2..e6718bb28065 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -31,7 +31,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
 
-void __cpuinit pat_disable(const char *reason)
+static inline void pat_disable(const char *reason)
 {
 	pat_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
@@ -182,10 +182,10 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 		u8 mtrr_type;
 
 		mtrr_type = mtrr_type_lookup(start, end);
-		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
-			return _PAGE_CACHE_UC;
-		if (mtrr_type == MTRR_TYPE_WRCOMB)
-			return _PAGE_CACHE_WC;
+		if (mtrr_type != MTRR_TYPE_WRBACK)
+			return _PAGE_CACHE_UC_MINUS;
+
+		return _PAGE_CACHE_WB;
 	}
 
 	return req_type;
@@ -352,23 +352,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return 0;
 	}
 
-	if (req_type == -1) {
-		/*
-		 * Call mtrr_lookup to get the type hint. This is an
-		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
-		 * tools and ACPI tools). Use WB request for WB memory and use
-		 * UC_MINUS otherwise.
-		 */
-		u8 mtrr_type = mtrr_type_lookup(start, end);
-
-		if (mtrr_type == MTRR_TYPE_WRBACK)
-			actual_type = _PAGE_CACHE_WB;
-		else
-			actual_type = _PAGE_CACHE_UC_MINUS;
-	} else {
-		actual_type = pat_x_mtrr_type(start, end,
-					      req_type & _PAGE_CACHE_MASK);
-	}
+	/*
+	 * Call mtrr_lookup to get the type hint. This is an
+	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
+	 * tools and ACPI tools). Use WB request for WB memory and use
+	 * UC_MINUS otherwise.
+	 */
+	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
 
 	if (new_type)
 		*new_type = actual_type;
@@ -546,9 +536,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
-	u64 offset = ((u64) pfn) << PAGE_SHIFT;
-	unsigned long flags = -1;
-	int retval;
+	unsigned long flags = _PAGE_CACHE_WB;
 
 	if (!range_is_allowed(pfn, size))
 		return 0;
@@ -576,64 +564,11 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	}
 #endif
 
-	/*
-	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
-	 *
-	 * Without O_SYNC, we want to get
-	 * - WB for WB-able memory and no other conflicting mappings
-	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
-	 * - Inherit from conflicting mappings otherwise
-	 */
-	if (flags != -1) {
-		retval = reserve_memtype(offset, offset + size, flags, NULL);
-	} else {
-		retval = reserve_memtype(offset, offset + size, -1, &flags);
-	}
-
-	if (retval < 0)
-		return 0;
-
-	if (((pfn < max_low_pfn_mapped) ||
-	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
-	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
-		free_memtype(offset, offset + size);
-		printk(KERN_INFO
-		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
-			current->comm, current->pid,
-			cattr_name(flags),
-			offset, (unsigned long long)(offset + size));
-		return 0;
-	}
-
 	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
 			     flags);
 	return 1;
 }
 
-void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
-{
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
-	u64 addr = (u64)pfn << PAGE_SHIFT;
-	unsigned long flags;
-
-	reserve_memtype(addr, addr + size, want_flags, &flags);
-	if (flags != want_flags) {
-		printk(KERN_INFO
-		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
-			current->comm, current->pid,
-			cattr_name(want_flags),
-			addr, (unsigned long long)(addr + size),
-			cattr_name(flags));
-	}
-}
-
-void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
-{
-	u64 addr = (u64)pfn << PAGE_SHIFT;
-
-	free_memtype(addr, addr + size);
-}
-
 /*
  * Change the memory type for the physical address range in kernel identity
  * mapping space if that range is a part of identity map.
@@ -671,8 +606,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 {
 	int is_ram = 0;
 	int ret;
-	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long flags = want_flags;
 
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
@@ -734,29 +669,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
  *
  * If the vma has a linear pfn mapping for the entire range, we get the prot
  * from pte and reserve the entire vma range with single reserve_pfn_range call.
- * Otherwise, we reserve the entire vma range, by going through the PTEs page
- * by page to get physical address and protection.
  */
 int track_pfn_vma_copy(struct vm_area_struct *vma)
 {
-	int retval = 0;
-	unsigned long i, j;
 	resource_size_t paddr;
 	unsigned long prot;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
 		 * starting address and protection from pte.
 		 */
-		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
@@ -764,28 +698,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
-	/* reserve entire vma page by page, using pfn and prot from pte */
-	for (i = 0; i < vma_size; i += PAGE_SIZE) {
-		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
-			continue;
-
-		pgprot = __pgprot(prot);
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
-		if (retval)
-			goto cleanup_ret;
-	}
 	return 0;
-
-cleanup_ret:
-	/* Reserve error: Cleanup partial reservation and return error */
-	for (j = 0; j < i; j += PAGE_SIZE) {
-		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
-			continue;
-
-		free_pfn_range(paddr, PAGE_SIZE);
-	}
-
-	return retval;
 }
 
 /*
@@ -795,50 +708,28 @@ cleanup_ret:
  * prot is passed in as a parameter for the new mapping. If the vma has a
  * linear pfn mapping for the entire range reserve the entire vma range with
  * single reserve_pfn_range call.
- * Otherwise, we look at the pfn and size and reserve only the specified range
- * page by page.
- *
- * Note that this function can be called with caller trying to map only a
- * subrange/page inside the vma.
  */
 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
-	int retval = 0;
-	unsigned long i, j;
-	resource_size_t base_paddr;
 	resource_size_t paddr;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
 	if (!pat_enabled)
 		return 0;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
-	/* reserve page by page using pfn and size */
-	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
-	for (i = 0; i < size; i += PAGE_SIZE) {
-		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
-		if (retval)
-			goto cleanup_ret;
-	}
 	return 0;
-
-cleanup_ret:
-	/* Reserve error: Cleanup partial reservation and return error */
-	for (j = 0; j < i; j += PAGE_SIZE) {
-		paddr = base_paddr + j;
-		free_pfn_range(paddr, PAGE_SIZE);
-	}
-
-	return retval;
 }
 
 /*
@@ -849,39 +740,23 @@ cleanup_ret:
 void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 			unsigned long size)
 {
-	unsigned long i;
 	resource_size_t paddr;
-	unsigned long prot;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
 	if (!pat_enabled)
 		return;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		free_pfn_range(paddr, vma_size);
 		return;
 	}
-
-	if (size != 0 && size != vma_size) {
-		/* free page by page, using pfn and size */
-		paddr = (resource_size_t)pfn << PAGE_SHIFT;
-		for (i = 0; i < size; i += PAGE_SIZE) {
-			paddr = paddr + i;
-			free_pfn_range(paddr, PAGE_SIZE);
-		}
-	} else {
-		/* free entire vma, page by page, using the pfn from pte */
-		for (i = 0; i < vma_size; i += PAGE_SIZE) {
-			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
-				continue;
-
-			free_pfn_range(paddr, PAGE_SIZE);
-		}
-	}
 }
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
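After this patch the three track/untrack helpers act only on remap_pfn_range()-style vmas; the page-by-page fallback and its cleanup_ret unwinding are gone. A condensed sketch of the remaining control flow, assuming the kernel helpers used above (the function name is made up, not a verbatim copy):

/* Hypothetical condensation of track_pfn_vma_new() after this patch. */
static int example_track(struct vm_area_struct *vma, pgprot_t *prot)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (!pat_enabled)
		return 0;

	/* vm_insert_pfn() vmas are deliberately not handled yet */
	if (!is_linear_pfn_mapping(vma))
		return 0;

	/* one reservation covers the whole linear chunk */
	paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
	return reserve_pfn_range(paddr, vma_size, prot, 0);
}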
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 5b7c7c8464fe..7aa03a5389f5 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -345,7 +345,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
 	fixmaps_set++;
 }
 
-void native_set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
+		       pgprot_t flags)
 {
 	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
 }
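Widening phys from unsigned long to phys_addr_t matters on 32-bit PAE kernels, where physical addresses are 64-bit while unsigned long is 32-bit: a fixmap target above 4 GiB would previously have been truncated before the pfn was computed. A standalone userspace illustration of that truncation, with made-up values (uint32_t models the old 32-bit unsigned long):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys = 0x100000000ULL;	/* 4 GiB: representable with PAE */
	uint32_t narrow = (uint32_t)phys;	/* old parameter type: becomes 0 */

	/* The pfn from the truncated value points at page 0,
	 * not at the intended device page. */
	printf("pfn(phys_addr_t) = %llu\n",
	       (unsigned long long)(phys >> 12));
	printf("pfn(truncated)   = %lu\n", (unsigned long)(narrow >> 12));
	return 0;
}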
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 16ae70fc57e7..29a0e37114f8 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -216,7 +216,7 @@ int __init get_memcfg_from_srat(void)
 
 	if (num_memory_chunks == 0) {
 		printk(KERN_WARNING
-			"could not finy any ACPI SRAT memory areas.\n");
+			"could not find any ACPI SRAT memory areas.\n");
 		goto out_fail;
 	}
 
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index c7d272b8574c..01765955baaf 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -28,6 +28,7 @@ int acpi_numa __initdata;
 static struct acpi_table_slit *acpi_slit;
 
 static nodemask_t nodes_parsed __initdata;
+static nodemask_t cpu_nodes_parsed __initdata;
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES];
 static int found_add_area __initdata;
@@ -141,6 +142,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
 
 	apic_id = pa->apic_id;
 	apicid_to_node[apic_id] = node;
+	node_set(node, cpu_nodes_parsed);
 	acpi_numa = 1;
 	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
 	       pxm, apic_id, node);
@@ -174,6 +176,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 	else
 		apic_id = pa->apic_id;
 	apicid_to_node[apic_id] = node;
+	node_set(node, cpu_nodes_parsed);
 	acpi_numa = 1;
 	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
 	       pxm, apic_id, node);
@@ -358,6 +361,7 @@ static void __init unparse_node(int node)
 {
 	int i;
 	node_clear(node, nodes_parsed);
+	node_clear(node, cpu_nodes_parsed);
 	for (i = 0; i < MAX_LOCAL_APIC; i++) {
 		if (apicid_to_node[i] == node)
 			apicid_to_node[i] = NUMA_NO_NODE;
@@ -402,7 +406,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 		return -1;
 	}
 
-	node_possible_map = nodes_parsed;
+	/* Account for nodes with cpus and no memory */
+	nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);
 
 	/* Finally register nodes */
 	for_each_node_mask(i, node_possible_map)
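cpu_nodes_parsed exists so that a node contributing CPUs but no memory still lands in node_possible_map; nodes_or() is a set union over node masks. A small userspace model of that union using plain bitmasks (values made up; the kernel uses nodemask_t):

#include <stdio.h>

int main(void)
{
	/* bit n set = node n was seen in the SRAT */
	unsigned long nodes_parsed     = 0x3;	/* nodes 0,1: memory */
	unsigned long cpu_nodes_parsed = 0x6;	/* nodes 1,2: cpus   */

	/* models nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed) */
	unsigned long node_possible_map = nodes_parsed | cpu_nodes_parsed;

	/* node 2 (cpu-only) is now possible too */
	printf("node_possible_map = 0x%lx\n", node_possible_map); /* 0x7 */
	return 0;
}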
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 9bb09823b362..f893d6a6e803 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -94,12 +94,16 @@ struct pci_root_info {
 static int pci_root_num;
 static struct pci_root_info pci_root_info[PCI_ROOT_NR];
 
-void set_pci_bus_resources_arch_default(struct pci_bus *b)
+void x86_pci_root_bus_res_quirks(struct pci_bus *b)
 {
 	int i;
 	int j;
 	struct pci_root_info *info;
 
+	/* don't go for it if _CRS is used */
+	if (pci_probe & PCI_USE__CRS)
+		return;
+
 	/* if only one root bus, don't need to do anything */
 	if (pci_root_num < 2)
 		return;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 8c362b96b644..2202b6257b82 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -147,10 +147,13 @@ static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
  * are examined.
  */
 
 void __devinit pcibios_fixup_bus(struct pci_bus *b)
 {
 	struct pci_dev *dev;
 
+	/* root bus? */
+	if (!b->parent)
+		x86_pci_root_bus_res_quirks(b);
 	pci_read_bridge_bases(b);
 	list_for_each_entry(dev, &b->devices, bus_list)
 		pcibios_fixup_device_resources(dev);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index f1817f71e009..a85bef20a3b9 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -238,6 +238,10 @@ void __init pcibios_resource_survey(void)
  */
 fs_initcall(pcibios_assign_resources);
 
+void __weak x86_pci_root_bus_res_quirks(struct pci_bus *b)
+{
+}
+
 /*
  * If we set up a device for bus mastering, we need to check the latency
  * timer as certain crappy BIOSes forget to set it properly.
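The empty __weak definition is the standard pattern for an optional per-platform hook: generic code links against a no-op default, and amd_bus.c overrides it merely by providing a strong definition of the same symbol. A freestanding sketch of the mechanism (GCC/Clang attribute syntax; the hook name here is made up):

#include <stdio.h>

/* generic code: overridable no-op default */
__attribute__((weak)) void example_root_bus_quirks(int busnum)
{
	printf("no quirks for root bus %d\n", busnum);
}

int main(void)
{
	/* If another object file in the link provides a strong
	 * example_root_bus_quirks(), the linker picks it over this
	 * weak default; otherwise the no-op runs. */
	example_root_bus_quirks(0);
	return 0;
}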
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 905bb526b133..5fa10bb9604f 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -375,7 +375,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
 	if (!fixmem32)
 		return AE_OK;
 	if ((mcfg_res->start >= fixmem32->address) &&
-	    (mcfg_res->end < (fixmem32->address +
+	    (mcfg_res->end <= (fixmem32->address +
 			      fixmem32->address_length))) {
 		mcfg_res->flags = 1;
 		return AE_CTRL_TERMINATE;
@@ -392,7 +392,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
 		return AE_OK;
 
 	if ((mcfg_res->start >= address.minimum) &&
-	    (mcfg_res->end < (address.minimum + address.address_length))) {
+	    (mcfg_res->end <= (address.minimum + address.address_length))) {
 		mcfg_res->flags = 1;
 		return AE_CTRL_TERMINATE;
 	}
@@ -439,7 +439,7 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
 	u64 old_size = size;
 	int valid = 0;
 
-	while (!is_reserved(addr, addr + size - 1, E820_RESERVED)) {
+	while (!is_reserved(addr, addr + size, E820_RESERVED)) {
 		size >>= 1;
 		if (size < (16UL<<20))
 			break;
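All three hunks correct the same off-by-one: with end values computed as start + length (an exclusive bound), a containment test written with < rejects a range that exactly fills its window. A concrete demonstration with made-up numbers, assuming the exclusive-end convention these fixes settle on:

#include <stdio.h>

int main(void)
{
	unsigned long win_start = 0x100, win_len = 0x100;
	unsigned long res_end = win_start + win_len;	/* exclusive: 0x200 */

	/* '<' wrongly rejects the exact-fit case ... */
	printf("with '<':  %d\n", res_end <  win_start + win_len); /* 0 */
	/* ... '<=' accepts it, matching the fixed checks above */
	printf("with '<=': %d\n", res_end <= win_start + win_len); /* 1 */
	return 0;
}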
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index d9d35824c56f..6a40b78b46aa 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -104,11 +104,13 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
 	if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
-		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
-			     offsetof(struct timespec, tv_nsec) ||
-			     sizeof(*tv) != sizeof(struct timespec));
-		do_realtime((struct timespec *)tv);
-		tv->tv_usec /= 1000;
+		if (likely(tv != NULL)) {
+			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
+				     offsetof(struct timespec, tv_nsec) ||
+				     sizeof(*tv) != sizeof(struct timespec));
+			do_realtime((struct timespec *)tv);
+			tv->tv_usec /= 1000;
+		}
 		if (unlikely(tz != NULL)) {
 			/* Avoid memcpy. Some old compilers fail to inline it */
 			tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
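gettimeofday(2) permits a NULL tv when the caller only wants the timezone; before this hunk the vDSO fast path dereferenced tv unconditionally and such callers faulted. A minimal userspace program exercising the now-safe call pattern:

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timezone tz;

	/* tv == NULL: ask only for the timezone; with the fix the
	 * vDSO fast path no longer dereferences the NULL pointer. */
	if (gettimeofday(NULL, &tz) == 0)
		printf("tz_minuteswest = %d\n", tz.tz_minuteswest);
	return 0;
}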
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index a96f5b9393ea..760e3a512059 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1785,11 +1785,16 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 
 	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
 
+	reserve_early(__pa(xen_start_info->pt_base),
+		      __pa(xen_start_info->pt_base +
+			   xen_start_info->nr_pt_frames * PAGE_SIZE),
+		      "XEN PAGETABLES");
+
 	return swapper_pg_dir;
 }
 #endif	/* CONFIG_X86_64 */
 
-static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
+static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 {
 	pte_t pte;
 
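reserve_early() marks a half-open physical range [start, end) as in use with a label, which here keeps the early allocator from recycling the live pagetable frames the Xen domain builder placed at pt_base. A small model of the range arithmetic, with made-up values standing in for the xen_start_info fields:

#include <stdio.h>

int main(void)
{
	/* stand-ins for __pa(xen_start_info->pt_base) and
	 * xen_start_info->nr_pt_frames */
	unsigned long pt_base = 0x1000000UL;
	unsigned long nr_pt_frames = 8, page_size = 4096;

	/* the call above reserves exactly this [start, end) range */
	printf("reserve_early(%#lx, %#lx, \"XEN PAGETABLES\")\n",
	       pt_base, pt_base + nr_pt_frames * page_size);
	return 0;
}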
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 14f240623497..0a5aa44299a5 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void)
 	return ret;
 }
 
+static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
+{
+	return xen_clocksource_read();
+}
+
 static void xen_read_wallclock(struct timespec *ts)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
@@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now)
 static struct clocksource xen_clocksource __read_mostly = {
 	.name = "xen",
 	.rating = 400,
-	.read = xen_clocksource_read,
+	.read = xen_clocksource_get_cycles,
 	.mask = ~0,
 	.mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */
 	.shift = XEN_SHIFT,
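The clocksource core's ->read callback now takes the struct clocksource pointer, so the zero-argument xen_clocksource_read() is kept (it is called directly elsewhere) and a thin adapter satisfies the new signature. A freestanding model of that adapter pattern, with simplified types (cycle_t and the opaque struct stand in for the kernel's):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;
struct clocksource;	/* opaque here, as in the wrapper above */

/* existing helper keeping the old zero-argument signature */
static cycle_t xen_clocksource_read(void)
{
	return 42;	/* stand-in for the real hypervisor read */
}

/* thin adapter matching the new callback type */
static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	(void)cs;	/* this source needs no per-instance state */
	return xen_clocksource_read();
}

int main(void)
{
	printf("%llu\n",
	       (unsigned long long)xen_clocksource_get_cycles(NULL));
	return 0;
}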