Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 9
-rw-r--r--  arch/x86/Kconfig.cpu | 1
-rw-r--r--  arch/x86/Makefile | 6
-rw-r--r--  arch/x86/boot/video-vga.c | 33
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 19
-rw-r--r-- (was -rwxr-xr-x)  arch/x86/include/asm/cpu_debug.h | 0
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 1
-rw-r--r--  arch/x86/include/asm/desc.h | 2
-rw-r--r--  arch/x86/include/asm/fixmap.h | 4
-rw-r--r--  arch/x86/include/asm/hardirq.h | 2
-rw-r--r--  arch/x86/include/asm/io.h | 6
-rw-r--r--  arch/x86/include/asm/lguest_hcall.h | 2
-rw-r--r--  arch/x86/include/asm/paravirt.h | 4
-rw-r--r--  arch/x86/include/asm/pat.h | 4
-rw-r--r--  arch/x86/include/asm/processor.h | 12
-rw-r--r--  arch/x86/include/asm/required-features.h | 2
-rw-r--r--  arch/x86/include/asm/sigcontext.h | 6
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 2
-rw-r--r--  arch/x86/include/asm/topology.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 5
-rw-r--r--  arch/x86/include/asm/xen/page.h | 3
-rw-r--r--  arch/x86/include/asm/xsave.h | 3
-rw-r--r--  arch/x86/kernel/apic/apic.c | 6
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c | 4
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 7
-rw-r--r--  arch/x86/kernel/apic/nmi.c | 5
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 31
-rw-r--r--  arch/x86/kernel/bios_uv.c | 3
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c | 1
-rw-r--r-- (was -rwxr-xr-x)  arch/x86/kernel/cpu/cpu_debug.c | 0
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 101
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c | 1
-rw-r--r--  arch/x86/kernel/entry_64.S | 3
-rw-r--r--  arch/x86/kernel/ftrace.c | 2
-rw-r--r--  arch/x86/kernel/hpet.c | 24
-rw-r--r--  arch/x86/kernel/i8253.c | 2
-rw-r--r--  arch/x86/kernel/irq.c | 2
-rw-r--r--  arch/x86/kernel/kvmclock.c | 7
-rw-r--r--  arch/x86/kernel/microcode_core.c | 35
-rw-r--r--  arch/x86/kernel/mpparse.c | 7
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c | 2
-rw-r--r--  arch/x86/kernel/ptrace.c | 3
-rw-r--r--  arch/x86/kernel/quirks.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 8
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 189
-rw-r--r--  arch/x86/kernel/tsc.c | 2
-rw-r--r--  arch/x86/kernel/uv_sysfs.c | 4
-rw-r--r--  arch/x86/kernel/uv_time.c | 10
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 2
-rw-r--r--  arch/x86/kernel/xsave.c | 6
-rw-r--r--  arch/x86/kvm/mmu.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 8
-rw-r--r--  arch/x86/lguest/boot.c | 18
-rw-r--r--  arch/x86/mm/gup.c | 16
-rw-r--r--  arch/x86/mm/ioremap.c | 33
-rw-r--r--  arch/x86/mm/numa_32.c | 2
-rw-r--r--  arch/x86/mm/numa_64.c | 3
-rw-r--r--  arch/x86/mm/pageattr.c | 127
-rw-r--r--  arch/x86/mm/pat.c | 191
-rw-r--r--  arch/x86/mm/pgtable.c | 3
-rw-r--r--  arch/x86/mm/srat_64.c | 6
-rw-r--r--  arch/x86/pci/amd_bus.c | 6
-rw-r--r--  arch/x86/pci/common.c | 5
-rw-r--r--  arch/x86/pci/i386.c | 4
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 6
-rw-r--r--  arch/x86/xen/enlighten.c | 89
-rw-r--r--  arch/x86/xen/mmu.c | 118
-rw-r--r--  arch/x86/xen/mmu.h | 3
-rw-r--r--  arch/x86/xen/smp.c | 4
-rw-r--r--  arch/x86/xen/time.c | 7
-rw-r--r--  arch/x86/xen/xen-ops.h | 2
71 files changed, 736 insertions(+), 514 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6da24fc6a09e..32ada97c964d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -252,17 +252,13 @@ config SMP
 
 config X86_X2APIC
 	bool "Support x2apic"
-	depends on X86_LOCAL_APIC && X86_64
-	select INTR_REMAP
+	depends on X86_LOCAL_APIC && X86_64 && INTR_REMAP
 	---help---
 	  This enables x2apic support on CPUs that have this feature.
 
 	  This allows 32-bit apic IDs (so it can support very large systems),
 	  and accesses the local apic via MSRs not via mmio.
 
-	  ( On certain CPU models you may need to enable INTR_REMAP too,
-	    to get functional x2apic mode. )
-
 	  If you don't know what to do here, say N.
 
 config SPARSE_IRQ
@@ -281,6 +277,7 @@ config SPARSE_IRQ
 config NUMA_MIGRATE_IRQ_DESC
 	bool "Move irq desc when changing irq smp_affinity"
 	depends on SPARSE_IRQ && NUMA
+	depends on BROKEN
 	default n
 	---help---
 	  This enables moving irq_desc to cpu/node that irq will use handled.
@@ -357,6 +354,7 @@ config X86_UV
 	bool "SGI Ultraviolet"
 	depends on X86_64
 	depends on X86_EXTENDED_PLATFORM
+	depends on NUMA
 	select X86_X2APIC
 	---help---
 	  This option is needed in order to support SGI Ultraviolet systems.
@@ -667,6 +665,7 @@ config MAXSMP
 
 config NR_CPUS
 	int "Maximum number of CPUs" if SMP && !MAXSMP
+	range 2 8 if SMP && X86_32 && !X86_BIGSMP
 	range 2 512 if SMP && !MAXSMP
 	default "1" if !SMP
 	default "4096" if MAXSMP
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 924e156a85ab..8130334329c0 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -506,6 +506,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
+	depends on BROKEN
 	---help---
 	  This adds a ptrace interface to the hardware's branch trace store.
 
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index f05d8c91d9e5..8c86b72afdc2 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -153,7 +153,7 @@ endif
 
 boot := arch/x86/boot
 
-BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage install
+BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage
 
 PHONY += bzImage $(BOOT_TARGETS)
 
@@ -171,6 +171,10 @@ bzImage: vmlinux
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $@
 
+PHONY += install
+install:
+	$(Q)$(MAKE) $(build)=$(boot) $@
+
 PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c
index 95d86ce0421c..9e0587a37768 100644
--- a/arch/x86/boot/video-vga.c
+++ b/arch/x86/boot/video-vga.c
@@ -129,22 +129,18 @@ u16 vga_crtc(void)
 	return (inb(0x3cc) & 1) ? 0x3d4 : 0x3b4;
 }
 
-static void vga_set_480_scanlines(int lines)
+static void vga_set_480_scanlines(void)
 {
 	u16 crtc;		/* CRTC base address */
 	u8  csel;		/* CRTC miscellaneous output register */
-	u8  ovfw;		/* CRTC overflow register */
-	int end = lines-1;
 
 	crtc = vga_crtc();
 
-	ovfw = 0x3c | ((end >> (8-1)) & 0x02) | ((end >> (9-6)) & 0x40);
-
 	out_idx(0x0c, crtc, 0x11); /* Vertical sync end, unlock CR0-7 */
 	out_idx(0x0b, crtc, 0x06); /* Vertical total */
-	out_idx(ovfw, crtc, 0x07); /* Vertical overflow */
+	out_idx(0x3e, crtc, 0x07); /* Vertical overflow */
 	out_idx(0xea, crtc, 0x10); /* Vertical sync start */
-	out_idx(end, crtc, 0x12); /* Vertical display end */
+	out_idx(0xdf, crtc, 0x12); /* Vertical display end */
 	out_idx(0xe7, crtc, 0x15); /* Vertical blank start */
 	out_idx(0x04, crtc, 0x16); /* Vertical blank end */
 	csel = inb(0x3cc);
@@ -153,21 +149,38 @@ static void vga_set_480_scanlines(int lines)
 	outb(csel, 0x3c2);
 }
 
+static void vga_set_vertical_end(int lines)
+{
+	u16 crtc;		/* CRTC base address */
+	u8  ovfw;		/* CRTC overflow register */
+	int end = lines-1;
+
+	crtc = vga_crtc();
+
+	ovfw = 0x3c | ((end >> (8-1)) & 0x02) | ((end >> (9-6)) & 0x40);
+
+	out_idx(ovfw, crtc, 0x07); /* Vertical overflow */
+	out_idx(end, crtc, 0x12); /* Vertical display end */
+}
+
 static void vga_set_80x30(void)
 {
-	vga_set_480_scanlines(30*16);
+	vga_set_480_scanlines();
+	vga_set_vertical_end(30*16);
 }
 
 static void vga_set_80x34(void)
 {
+	vga_set_480_scanlines();
 	vga_set_14font();
-	vga_set_480_scanlines(34*14);
+	vga_set_vertical_end(34*14);
 }
 
 static void vga_set_80x60(void)
 {
+	vga_set_480_scanlines();
 	vga_set_8font();
-	vga_set_480_scanlines(60*8);
+	vga_set_vertical_end(60*8);
 }
 
 static int vga_set_mode(struct mode_info *mode)
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index efac92fd1efb..085a8c35f149 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -129,21 +129,12 @@ asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
 			  struct stat64 __user *statbuf, int flag)
 {
 	struct kstat stat;
-	int error = -EINVAL;
+	int error;
 
-	if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
-		goto out;
-
-	if (flag & AT_SYMLINK_NOFOLLOW)
-		error = vfs_lstat_fd(dfd, filename, &stat);
-	else
-		error = vfs_stat_fd(dfd, filename, &stat);
-
-	if (!error)
-		error = cp_stat64(statbuf, &stat);
-
-out:
-	return error;
+	error = vfs_fstatat(dfd, filename, &stat, flag);
+	if (error)
+		return error;
+	return cp_stat64(statbuf, &stat);
 }
 
 /*
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
index 222802029fa6..222802029fa6 100755..100644
--- a/arch/x86/include/asm/cpu_debug.h
+++ b/arch/x86/include/asm/cpu_debug.h
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0beba0d1468d..bb83b1c397aa 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -154,6 +154,7 @@
  * CPUID levels like 0x6, 0xA etc
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT	(7*32+ 1) /* Always Running APIC Timer */
 
 /* Virtualization flags: Linux defined */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 5623c50d67b2..c45f415ce315 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -37,7 +37,7 @@ extern gate_desc idt_table[];
 struct gdt_page {
 	struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU(struct gdt_page, gdt_page);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 81937a5dc77c..2d81af3974a0 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -151,11 +151,11 @@ extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
-		       unsigned long phys, pgprot_t flags);
+		       phys_addr_t phys, pgprot_t flags);
 
 #ifndef CONFIG_PARAVIRT
 static inline void __set_fixmap(enum fixed_addresses idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	native_set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index f5ebe2aaca4b..9ebc5c255032 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -28,7 +28,7 @@ typedef struct {
 #endif
 } ____cacheline_aligned irq_cpustat_t;
 
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e5383e3d2f8c..73739322b6d0 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -193,8 +193,10 @@ extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  */
 extern void early_ioremap_init(void);
 extern void early_ioremap_reset(void);
-extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
-extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+				   unsigned long size);
+extern void __iomem *early_memremap(resource_size_t phys_addr,
+				    unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 
 #define IO_SPACE_LIMIT 0xffff
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index 0f4ee7148afe..faae1996487b 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -5,7 +5,6 @@
 #define LHCALL_FLUSH_ASYNC	0
 #define LHCALL_LGUEST_INIT	1
 #define LHCALL_SHUTDOWN		2
-#define LHCALL_LOAD_GDT		3
 #define LHCALL_NEW_PGTABLE	4
 #define LHCALL_FLUSH_TLB	5
 #define LHCALL_LOAD_IDT_ENTRY	6
@@ -17,6 +16,7 @@
 #define LHCALL_SET_PMD		15
 #define LHCALL_LOAD_TLS		16
 #define LHCALL_NOTIFY		17
+#define LHCALL_LOAD_GDT_ENTRY	18
 
 #define LGUEST_TRAP_ENTRY	0x1F
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7727aa8b7dda..378e3691c08c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -347,7 +347,7 @@ struct pv_mmu_ops {
 	/* Sometimes the physical address is a pfn, and sometimes its
 	   an mfn.  We can tell which is which from the index. */
 	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
-			   unsigned long phys, pgprot_t flags);
+			   phys_addr_t phys, pgprot_t flags);
 };
 
 struct raw_spinlock;
@@ -1432,7 +1432,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 void arch_flush_lazy_mmu_mode(void);
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	pv_mmu_ops.set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 2cd07b9422f4..7af14e512f97 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -18,9 +18,5 @@ extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
 		unsigned long flag);
-extern void map_devmem(unsigned long pfn, unsigned long size,
-		struct pgprot vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-		struct pgprot vma_prot);
 
 #endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 34c52370f2fe..c2cceae709c8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -138,7 +138,7 @@ extern struct tss_struct doublefault_tss;
 extern __u32			cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
 #define current_cpu_data	__get_cpu_var(cpu_info)
 #else
@@ -270,7 +270,7 @@ struct tss_struct {
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
 /*
  * Save the original ist values for checking stack pointers during debugging
@@ -352,6 +352,11 @@ struct i387_soft_struct {
 	u32			entry_eip;
 };
 
+struct ymmh_struct {
+	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
+	u32 ymmh_space[64];
+};
+
 struct xsave_hdr_struct {
 	u64 xstate_bv;
 	u64 reserved1[2];
@@ -361,6 +366,7 @@ struct xsave_hdr_struct {
 struct xsave_struct {
 	struct i387_fxsave_struct i387;
 	struct xsave_hdr_struct xsave_hdr;
+	struct ymmh_struct ymmh;
 	/* new processor state extensions will go here */
 } __attribute__ ((packed, aligned (64)));
 
@@ -387,7 +393,7 @@ union irq_stack_union {
 	};
 };
 
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index d5cd6c586881..a4737dddfd58 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -50,7 +50,7 @@
 #ifdef CONFIG_X86_64
 #define NEED_PSE	0
 #define NEED_MSR	(1<<(X86_FEATURE_MSR & 31))
-#define NEED_PGE	(1<<(X86_FEATURE_PGE & 31))
+#define NEED_PGE	0
 #define NEED_FXSR	(1<<(X86_FEATURE_FXSR & 31))
 #define NEED_XMM	(1<<(X86_FEATURE_XMM & 31))
 #define NEED_XMM2	(1<<(X86_FEATURE_XMM2 & 31))
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index ec666491aaa4..72e5a4491661 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -269,6 +269,11 @@ struct _xsave_hdr {
 	__u64 reserved2[5];
 };
 
+struct _ymmh_state {
+	/* 16 * 16 bytes for each YMMH-reg */
+	__u32 ymmh_space[64];
+};
+
 /*
  * Extended state pointed by the fpstate pointer in the sigcontext.
  * In addition to the fpstate, information encoded in the xstate_hdr
@@ -278,6 +283,7 @@ struct _xsave_hdr {
 struct _xstate {
 	struct _fpstate fpstate;
 	struct _xsave_hdr xstate_hdr;
+	struct _ymmh_state ymmh;
 	/* new processor state extensions go here */
 };
 
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d3539f998f88..16a5c84b0329 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -152,7 +152,7 @@ struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
 };
-DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 892b119dba6f..f44b49abca49 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -200,7 +200,7 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
 }
 
 struct pci_bus;
-void set_pci_bus_resources_arch_default(struct pci_bus *b);
+void x86_pci_root_bus_res_quirks(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
 #define mc_capable()	(cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index db68ac8a5ac2..2cae46c7c8a2 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -17,6 +17,11 @@
 /* ========================================================================= */
 /*                            UVH_BAU_DATA_CONFIG                            */
 /* ========================================================================= */
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
+/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
 #define UVH_BAU_DATA_CONFIG 0x61680UL
 #define UVH_BAU_DATA_CONFIG_32 0x0438
 
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 1a918dde46b5..018a0a400799 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -124,7 +124,8 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_mfn(v)		(pfn_to_mfn(PFN_DOWN(__pa(v))))
+#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
+#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 static inline unsigned long pte_mfn(pte_t pte)
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 08e9a1ac07a9..727acc152344 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -7,6 +7,7 @@
 
 #define XSTATE_FP	0x1
 #define XSTATE_SSE	0x2
+#define XSTATE_YMM	0x4
 
 #define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
 
@@ -15,7 +16,7 @@
 /*
  * These are the features that the OS can handle currently.
  */
-#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE)
+#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX	"0x48, "
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index fb504f843e58..e9021a908020 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -432,6 +432,12 @@ static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
+	if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
+		/* Make LAPIC timer preferrable over percpu HPET */
+		lapic_clockevent.rating = 150;
+	}
+
 	memcpy(levt, &lapic_clockevent, sizeof(*levt));
 	levt->cpumask = cpumask_of(smp_processor_id());
 
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 0014714ea97b..306e5e88fb6f 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -212,7 +212,7 @@ struct apic apic_flat = {
 	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
 	.wait_for_init_deassert		= NULL,
 	.smp_callin_clear_local_apic	= NULL,
-	.inquire_remote_apic		= NULL,
+	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
 	.write				= native_apic_mem_write,
@@ -362,7 +362,7 @@ struct apic apic_physflat = {
 	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
 	.wait_for_init_deassert		= NULL,
 	.smp_callin_clear_local_apic	= NULL,
-	.inquire_remote_apic		= NULL,
+	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
 	.write				= native_apic_mem_write,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 767fe7e46d68..30da617d18e4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2524,7 +2524,6 @@ static void irq_complete_move(struct irq_desc **descp)
 static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
 
-#ifdef CONFIG_X86_X2APIC
 static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
 {
 	int apic, pin;
@@ -2558,6 +2557,7 @@ eoi_ioapic_irq(struct irq_desc *desc)
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+#ifdef CONFIG_X86_X2APIC
 static void ack_x2apic_level(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -2634,6 +2634,9 @@ static void ack_apic_level(unsigned int irq)
 	 */
 	ack_APIC_irq();
 
+	if (irq_remapped(irq))
+		eoi_ioapic_irq(desc);
+
 	/* Now we can move and renable the irq */
 	if (unlikely(do_unmask_irq)) {
 		/* Only migrate the irq if the ack has been received.
@@ -3667,12 +3670,14 @@ int arch_setup_hpet_msi(unsigned int irq)
 {
 	int ret;
 	struct msi_msg msg;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	ret = msi_compose_msg(NULL, irq, &msg);
 	if (ret < 0)
 		return ret;
 
 	hpet_msi_write(irq, &msg);
+	desc->status |= IRQ_MOVE_PCNTXT;
 	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
 				      "edge");
 
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index d6bd62407152..ce4fbfa315a1 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -138,7 +138,7 @@ int __init check_nmi_watchdog(void)
 	if (!prev_nmi_count)
 		goto error;
 
-	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
+	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -414,7 +414,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		touched = 1;
 	}
 
-	if (cpumask_test_cpu(cpu, backtrace_mask)) {
+	/* We can be called before check_nmi_watchdog, hence NULL check. */
+	if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
 		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
 
 		spin_lock(&lock);
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1248318436e8..2bda69352976 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -19,6 +19,7 @@
 #include <linux/timer.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
+#include <linux/io.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -34,6 +35,17 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 
+static int early_get_nodeid(void)
+{
+	union uvh_node_id_u node_id;
+	unsigned long *mmr;
+
+	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
+	node_id.v = *mmr;
+	early_iounmap(mmr, sizeof(*mmr));
+	return node_id.s.node_id;
+}
+
 static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strcmp(oem_id, "SGI")) {
@@ -42,6 +54,8 @@ static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	else if (!strcmp(oem_table_id, "UVX"))
 		uv_system_type = UV_X2APIC;
 	else if (!strcmp(oem_table_id, "UVH")) {
+		__get_cpu_var(x2apic_extra_bits) =
+			early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
 		uv_system_type = UV_NON_UNIQUE_APIC;
 		return 1;
 	}
@@ -549,7 +563,8 @@ void __init uv_system_init(void)
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
 	int max_pnode = 0;
-	unsigned long mmr_base, present;
+	unsigned long mmr_base, present, paddr;
+	unsigned short pnode_mask;
 
 	map_low_mmrs();
 
@@ -592,6 +607,7 @@ void __init uv_system_init(void)
 		}
 	}
 
+	pnode_mask = (1 << n_val) - 1;
 	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 	gnode_upper = (((unsigned long)node_id.s.node_id) &
 		       ~((1 << n_val) - 1)) << m_val;
@@ -615,7 +631,7 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
 		uv_cpu_hub_info(cpu)->pnode = pnode;
-		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
@@ -631,6 +647,17 @@ void __init uv_system_init(void)
 			lcpu, blade);
 	}
 
+	/* Add blade/pnode info for nodes without cpus */
+	for_each_online_node(nid) {
+		if (uv_node_to_blade[nid] >= 0)
+			continue;
+		paddr = node_start_pfn(nid) << PAGE_SHIFT;
+		paddr = uv_soc_phys_ram_to_gpa(paddr);
+		pnode = (paddr >> m_val) & pnode_mask;
+		blade = boot_pnode_to_blade(pnode);
+		uv_node_to_blade[nid] = blade;
+	}
+
 	map_gru_high(max_pnode);
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f63882728d91..63a88e1f987d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -182,7 +182,8 @@ void uv_bios_init(void)
 	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
 	iounmap(tab);
 
-	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
+	printk(KERN_INFO "EFI UV System Table Revision %d\n",
+					uv_systab.revision);
 }
 #else	/* !CONFIG_EFI */
 
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 8220ae69849d..c965e5212714 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 
 	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+		{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
 
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 46e29ab96c6a..46e29ab96c6a 100755..100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 19f6b9d27e83..208ecf6643df 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -65,13 +65,18 @@ enum {
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
-	unsigned int max_freq;
 	unsigned int resume;
 	unsigned int cpu_feature;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+struct acpi_msr_data {
+	u64 saved_aperf, saved_mperf;
+};
+
+static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+
 DEFINE_TRACE(power_mark);
 
 /* acpi_perf_data is a pointer to percpu data. */
@@ -152,7 +157,8 @@ struct drv_cmd {
 	u32 val;
 };
 
-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 h;
@@ -169,10 +175,10 @@ static long do_drv_read(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
@@ -191,23 +197,24 @@ static long do_drv_write(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
 	cmd->val = 0;
 
-	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	unsigned int i;
+	int this_cpu;
 
-	for_each_cpu(i, cmd->mask) {
-		work_on_cpu(i, do_drv_write, cmd);
-	}
+	this_cpu = get_cpu();
+	if (cpumask_test_cpu(this_cpu, cmd->mask))
+		do_drv_write(cmd);
+	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+	put_cpu();
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -241,28 +248,23 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_cur {
+struct perf_pair {
 	union {
 		struct {
 			u32 lo;
 			u32 hi;
 		} split;
 		u64 whole;
-	} aperf_cur, mperf_cur;
+	} aperf, mperf;
 };
 
-
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_cur *cur = _cur;
-
-	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
+	struct perf_pair *cur = _cur;
 
-	wrmsr(MSR_IA32_APERF, 0, 0);
-	wrmsr(MSR_IA32_MPERF, 0, 0);
-
-	return 0;
+	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
 }
 
 /*
@@ -281,58 +283,63 @@ static long read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_cur cur;
+	struct perf_pair readin, cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
 		return 0;
 
+	cur.aperf.whole = readin.aperf.whole -
+				per_cpu(msr_data, cpu).saved_aperf;
+	cur.mperf.whole = readin.mperf.whole -
+				per_cpu(msr_data, cpu).saved_mperf;
+	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
+	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
+
 #ifdef __i386__
 	/*
 	 * We dont want to do 64 bit divide with 32 bit kernel
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
 		shift_count = fls(h);
 
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
 		int shift_count = 7;
-		cur.aperf_cur.split.lo >>= shift_count;
-		cur.mperf_cur.split.lo >>= shift_count;
+		cur.aperf.split.lo >>= shift_count;
+		cur.mperf.split.lo >>= shift_count;
 	}
 
-	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
-		perf_percent = (cur.aperf_cur.split.lo * 100) /
-				cur.mperf_cur.split.lo;
+	if (cur.aperf.split.lo && cur.mperf.split.lo)
+		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
 		int shift_count = 7;
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
-		perf_percent = (cur.aperf_cur.whole * 100) /
-				cur.mperf_cur.whole;
+	if (cur.aperf.whole && cur.mperf.whole)
+		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
 	else
 		perf_percent = 0;
 
 #endif
 
-	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
+	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
 
 	return retval;
 }
@@ -685,16 +692,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
 	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
 	    policy->cpuinfo.transition_latency > 20 * 1000) {
-		static int print_once;
 		policy->cpuinfo.transition_latency = 20 * 1000;
-		if (!print_once) {
-			print_once = 1;
-			printk(KERN_INFO "Capping off P-state tranision latency"
-				" at 20 uS\n");
-		}
+		printk_once(KERN_INFO "Capping off P-state tranision"
+			    " latency at 20 uS\n");
 	}
 
-	data->max_freq = perf->states[0].core_frequency * 1000;
 	/* table init */
 	for (i = 0; i < perf->state_count; i++) {
 		if (i > 0 && perf->states[i].core_frequency >=
@@ -713,6 +715,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (result)
 		goto err_freqfree;
 
+	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
 		/* Current speed is unknown and not detectable by IO port */
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 0bd48e65a0ca..ce2ed3e4aad9 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -33,7 +33,6 @@
 #include <linux/timex.h>
 #include <linux/io.h>
 #include <linux/acpi.h>
-#include <linux/kernel.h>
 
 #include <asm/msr.h>
 #include <acpi/processor.h>
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1d46cba56fd8..891004619142 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1417,7 +1417,10 @@ ENTRY(paranoid_exit)
 paranoid_swapgs:
 	TRACE_IRQS_IRETQ 0
 	SWAPGS_UNSAFE_STACK
+	RESTORE_ALL 8
+	jmp irq_return
 paranoid_restore:
+	TRACE_IRQS_IRETQ 0
 	RESTORE_ALL 8
 	jmp irq_return
 paranoid_userspace:
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 70a10ca100f6..18dfa30795c9 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,6 +18,8 @@
 #include <linux/init.h>
 #include <linux/list.h>
 
+#include <trace/syscall.h>
+
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 #include <asm/nops.h>
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 648b3a2a3a44..81408b93f887 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -236,6 +236,10 @@ static void hpet_stop_counter(void)
 	unsigned long cfg = hpet_readl(HPET_CFG);
 	cfg &= ~HPET_CFG_ENABLE;
 	hpet_writel(cfg, HPET_CFG);
+}
+
+static void hpet_reset_counter(void)
+{
 	hpet_writel(0, HPET_COUNTER);
 	hpet_writel(0, HPET_COUNTER + 4);
 }
@@ -250,6 +254,7 @@ static void hpet_start_counter(void)
 static void hpet_restart_counter(void)
 {
 	hpet_stop_counter();
+	hpet_reset_counter();
 	hpet_start_counter();
 }
 
@@ -309,7 +314,7 @@ static int hpet_setup_msi_irq(unsigned int irq);
 static void hpet_set_mode(enum clock_event_mode mode,
 			  struct clock_event_device *evt, int timer)
 {
-	unsigned long cfg;
+	unsigned long cfg, cmp, now;
 	uint64_t delta;
 
 	switch (mode) {
@@ -317,12 +322,23 @@ static void hpet_set_mode(enum clock_event_mode mode,
 		hpet_stop_counter();
 		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
 		delta >>= evt->shift;
+		now = hpet_readl(HPET_COUNTER);
+		cmp = now + (unsigned long) delta;
 		cfg = hpet_readl(HPET_Tn_CFG(timer));
 		/* Make sure we use edge triggered interrupts */
 		cfg &= ~HPET_TN_LEVEL;
 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
 		       HPET_TN_SETVAL | HPET_TN_32BIT;
 		hpet_writel(cfg, HPET_Tn_CFG(timer));
+		hpet_writel(cmp, HPET_Tn_CMP(timer));
+		udelay(1);
+		/*
+		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
+		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
+		 * bit is automatically cleared after the first write.
+		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
+		 * Publication # 24674)
+		 */
 		hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
 		hpet_start_counter();
 		hpet_print_config();
@@ -722,7 +738,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 /*
  * Clock source related code
  */
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
 {
 	return (cycle_t)hpet_readl(HPET_COUNTER);
 }
@@ -756,7 +772,7 @@ static int hpet_clocksource_register(void)
 	hpet_restart_counter();
 
 	/* Verify whether hpet counter works */
-	t1 = read_hpet();
+	t1 = hpet_readl(HPET_COUNTER);
 	rdtscll(start);
 
 	/*
@@ -770,7 +786,7 @@ static int hpet_clocksource_register(void)
 		rdtscll(now);
 	} while ((now - start) < 200000UL);
 
-	if (t1 == read_hpet()) {
+	if (t1 == hpet_readl(HPET_COUNTER)) {
 		printk(KERN_WARNING
 		       "HPET counter not counting. HPET disabled\n");
 		return -ENODEV;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 3475440baa54..c2e0bb0890d4 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
 	static int old_count;
 	static u32 old_jifs;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 849cfabb1fdc..8279fb8df17f 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -73,7 +73,7 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "  Performance pending work\n");
 #endif
 	if (generic_interrupt_extension) {
-		seq_printf(p, "PLT: ");
+		seq_printf(p, "%*s: ", prec, "PLT");
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
 		seq_printf(p, "  Platform interrupts\n");
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 137f2e8132df..223af43f1526 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }
 
+static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+{
+	return kvm_clock_read();
+}
+
 /*
  * If we don't do that, there is the possibility that the guest
  * will calibrate under heavy load - thus, getting a lower lpj -
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
 
 static struct clocksource kvm_clock = {
 	.name = "kvm-clock",
-	.read = kvm_clock_read,
+	.read = kvm_clock_get_cycles,
 	.rating = 400,
 	.mask = CLOCKSOURCE_MASK(64),
 	.mult = 1 << KVM_SCALE,
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef310..98c470c069d1 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
 EXPORT_SYMBOL_GPL(ucode_cpu_info);
 
 #ifdef CONFIG_MICROCODE_OLD_INTERFACE
-struct update_for_cpu {
-	const void __user	*buf;
-	size_t			size;
-};
-
-static long update_for_cpu(void *_ufc)
-{
-	struct update_for_cpu *ufc = _ufc;
-	int error;
-
-	error = microcode_ops->request_microcode_user(smp_processor_id(),
-						      ufc->buf, ufc->size);
-	if (error < 0)
-		return error;
-	if (!error)
-		microcode_ops->apply_microcode(smp_processor_id());
-	return error;
-}
-
 static int do_microcode_update(const void __user *buf, size_t size)
 {
+	cpumask_t old;
 	int error = 0;
 	int cpu;
-	struct update_for_cpu ufc = { .buf = buf, .size = size };
+
+	old = current->cpus_allowed;
 
 	for_each_online_cpu(cpu) {
 		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 		if (!uci->valid)
 			continue;
-		error = work_on_cpu(cpu, update_for_cpu, &ufc);
+
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		error = microcode_ops->request_microcode_user(cpu, buf, size);
 		if (error < 0)
-			break;
+			goto out;
+		if (!error)
+			microcode_ops->apply_microcode(cpu);
 	}
+out:
+	set_cpus_allowed_ptr(current, &old);
 	return error;
 }
 
@@ -391,8 +380,6 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
 		return err;
 
 	err = microcode_init_cpu(cpu);
-	if (err)
-		sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 
 	return err;
 }
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index dce99dca6cf8..70fd7e414c15 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -679,7 +679,7 @@ void __init get_smp_config(void)
 	__get_smp_config(0);
 }
 
-static void smp_reserve_bootmem(struct mpf_intel *mpf)
+static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
 {
 	unsigned long size = get_mpc_size(mpf->physptr);
 #ifdef CONFIG_X86_32
@@ -838,7 +838,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
 
 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
 
-static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
+static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 {
 	int i;
 
@@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
866 } 866 }
867} 867}
868#else /* CONFIG_X86_IO_APIC */ 868#else /* CONFIG_X86_IO_APIC */
869static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} 869static
870inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
870#endif /* CONFIG_X86_IO_APIC */ 871#endif /* CONFIG_X86_IO_APIC */
871 872
872static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, 873static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 34f12e9996ed..221a3853e268 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -50,7 +50,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
50 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); 50 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
51} 51}
52 52
53struct dma_map_ops swiotlb_dma_ops = { 53static struct dma_map_ops swiotlb_dma_ops = {
54 .mapping_error = swiotlb_dma_mapping_error, 54 .mapping_error = swiotlb_dma_mapping_error,
55 .alloc_coherent = x86_swiotlb_alloc_coherent, 55 .alloc_coherent = x86_swiotlb_alloc_coherent,
56 .free_coherent = swiotlb_free_coherent, 56 .free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index fe9345c967de..23b7c8f017e2 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -21,7 +21,6 @@
21#include <linux/audit.h> 21#include <linux/audit.h>
22#include <linux/seccomp.h> 22#include <linux/seccomp.h>
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/ftrace.h>
25 24
26#include <asm/uaccess.h> 25#include <asm/uaccess.h>
27#include <asm/pgtable.h> 26#include <asm/pgtable.h>
@@ -35,6 +34,8 @@
35#include <asm/proto.h> 34#include <asm/proto.h>
36#include <asm/ds.h> 35#include <asm/ds.h>
37 36
37#include <trace/syscall.h>
38
38#include "tls.h" 39#include "tls.h"
39 40
40enum x86_regset { 41enum x86_regset {
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index e95022e4f5d5..7563b31b4f03 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -261,8 +261,6 @@ static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
261{ 261{
262 if (hpet_force_user) 262 if (hpet_force_user)
263 old_ich_force_enable_hpet(dev); 263 old_ich_force_enable_hpet(dev);
264 else
265 hpet_print_force_info();
266} 264}
267 265
268DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, 266DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2aef36d8aca2..1340dad417f4 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -224,6 +224,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
224 DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"), 224 DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
225 }, 225 },
226 }, 226 },
227 { /* Handle problems with rebooting on Dell DXP061 */
228 .callback = set_bios_reboot,
229 .ident = "Dell DXP061",
230 .matches = {
231 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
232 DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
233 },
234 },
227 { } 235 { }
228}; 236};
229 237
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index deb5ebb32c3b..ed0c33761e6d 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,6 +25,8 @@ static int uv_bau_retry_limit __read_mostly;
25 25
26/* position of pnode (which is nasid>>1): */ 26/* position of pnode (which is nasid>>1): */
27static int uv_nshift __read_mostly; 27static int uv_nshift __read_mostly;
28/* base pnode in this partition */
29static int uv_partition_base_pnode __read_mostly;
28 30
29static unsigned long uv_mmask __read_mostly; 31static unsigned long uv_mmask __read_mostly;
30 32
@@ -32,6 +34,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
32static DEFINE_PER_CPU(struct bau_control, bau_control); 34static DEFINE_PER_CPU(struct bau_control, bau_control);
33 35
34/* 36/*
37 * Determine the first node on a blade.
38 */
39static int __init blade_to_first_node(int blade)
40{
41 int node, b;
42
43 for_each_online_node(node) {
44 b = uv_node_to_blade_id(node);
45 if (blade == b)
46 return node;
47 }
48 return -1; /* shouldn't happen */
49}
50
51/*
52 * Determine the apicid of the first cpu on a blade.
53 */
54static int __init blade_to_first_apicid(int blade)
55{
56 int cpu;
57
58 for_each_present_cpu(cpu)
59 if (blade == uv_cpu_to_blade_id(cpu))
60 return per_cpu(x86_cpu_to_apicid, cpu);
61 return -1;
62}
63
64/*
35 * Free a software acknowledge hardware resource by clearing its Pending 65 * Free a software acknowledge hardware resource by clearing its Pending
36 * bit. This will return a reply to the sender. 66 * bit. This will return a reply to the sender.
37 * If the message has timed out, a reply has already been sent by the 67 * If the message has timed out, a reply has already been sent by the
@@ -67,7 +97,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
67 msp = __get_cpu_var(bau_control).msg_statuses + msg_slot; 97 msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
68 cpu = uv_blade_processor_id(); 98 cpu = uv_blade_processor_id();
69 msg->number_of_cpus = 99 msg->number_of_cpus =
70 uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id())); 100 uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
71 this_cpu_mask = 1UL << cpu; 101 this_cpu_mask = 1UL << cpu;
72 if (msp->seen_by.bits & this_cpu_mask) 102 if (msp->seen_by.bits & this_cpu_mask)
73 return; 103 return;
@@ -215,14 +245,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
215 * Returns @flush_mask if some remote flushing remains to be done. The 245 * Returns @flush_mask if some remote flushing remains to be done. The
216 * mask will have some bits still set. 246 * mask will have some bits still set.
217 */ 247 */
218const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade, 248const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
219 struct bau_desc *bau_desc, 249 struct bau_desc *bau_desc,
220 struct cpumask *flush_mask) 250 struct cpumask *flush_mask)
221{ 251{
222 int completion_status = 0; 252 int completion_status = 0;
223 int right_shift; 253 int right_shift;
224 int tries = 0; 254 int tries = 0;
225 int blade; 255 int pnode;
226 int bit; 256 int bit;
227 unsigned long mmr_offset; 257 unsigned long mmr_offset;
228 unsigned long index; 258 unsigned long index;
@@ -265,8 +295,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
265 * use the IPI method of shootdown on them. 295 * use the IPI method of shootdown on them.
266 */ 296 */
267 for_each_cpu(bit, flush_mask) { 297 for_each_cpu(bit, flush_mask) {
268 blade = uv_cpu_to_blade_id(bit); 298 pnode = uv_cpu_to_pnode(bit);
269 if (blade == this_blade) 299 if (pnode == this_pnode)
270 continue; 300 continue;
271 cpumask_clear_cpu(bit, flush_mask); 301 cpumask_clear_cpu(bit, flush_mask);
272 } 302 }
@@ -309,16 +339,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
309 struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); 339 struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
310 int i; 340 int i;
311 int bit; 341 int bit;
312 int blade; 342 int pnode;
313 int uv_cpu; 343 int uv_cpu;
314 int this_blade; 344 int this_pnode;
315 int locals = 0; 345 int locals = 0;
316 struct bau_desc *bau_desc; 346 struct bau_desc *bau_desc;
317 347
318 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); 348 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
319 349
320 uv_cpu = uv_blade_processor_id(); 350 uv_cpu = uv_blade_processor_id();
321 this_blade = uv_numa_blade_id(); 351 this_pnode = uv_hub_info->pnode;
322 bau_desc = __get_cpu_var(bau_control).descriptor_base; 352 bau_desc = __get_cpu_var(bau_control).descriptor_base;
323 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu; 353 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
324 354
@@ -326,13 +356,14 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
326 356
327 i = 0; 357 i = 0;
328 for_each_cpu(bit, flush_mask) { 358 for_each_cpu(bit, flush_mask) {
329 blade = uv_cpu_to_blade_id(bit); 359 pnode = uv_cpu_to_pnode(bit);
330 BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1)); 360 BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
331 if (blade == this_blade) { 361 if (pnode == this_pnode) {
332 locals++; 362 locals++;
333 continue; 363 continue;
334 } 364 }
335 bau_node_set(blade, &bau_desc->distribution); 365 bau_node_set(pnode - uv_partition_base_pnode,
366 &bau_desc->distribution);
336 i++; 367 i++;
337 } 368 }
338 if (i == 0) { 369 if (i == 0) {
@@ -350,7 +381,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
350 bau_desc->payload.address = va; 381 bau_desc->payload.address = va;
351 bau_desc->payload.sending_cpu = cpu; 382 bau_desc->payload.sending_cpu = cpu;
352 383
353 return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask); 384 return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
354} 385}
355 386
356/* 387/*
@@ -418,24 +449,58 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
418 set_irq_regs(old_regs); 449 set_irq_regs(old_regs);
419} 450}
420 451
452/*
453 * uv_enable_timeouts
454 *
455 * Each target blade (i.e. blades that have cpu's) needs to have
456 * shootdown message timeouts enabled. The timeout does not cause
457 * an interrupt, but causes an error message to be returned to
458 * the sender.
459 */
421static void uv_enable_timeouts(void) 460static void uv_enable_timeouts(void)
422{ 461{
423 int i;
424 int blade; 462 int blade;
425 int last_blade; 463 int nblades;
426 int pnode; 464 int pnode;
427 int cur_cpu = 0; 465 unsigned long mmr_image;
428 unsigned long apicid;
429 466
430 last_blade = -1; 467 nblades = uv_num_possible_blades();
431 for_each_online_node(i) { 468
432 blade = uv_node_to_blade_id(i); 469 for (blade = 0; blade < nblades; blade++) {
433 if (blade == last_blade) 470 if (!uv_blade_nr_possible_cpus(blade))
434 continue; 471 continue;
435 last_blade = blade; 472
436 apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
437 pnode = uv_blade_to_pnode(blade); 473 pnode = uv_blade_to_pnode(blade);
438 cur_cpu += uv_blade_nr_possible_cpus(i); 474 mmr_image =
475 uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
476 /*
477 * Set the timeout period and then lock it in, in three
478 * steps; captures and locks in the period.
479 *
480 * To program the period, the SOFT_ACK_MODE must be off.
481 */
482 mmr_image &= ~((unsigned long)1 <<
483 UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
484 uv_write_global_mmr64
485 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
486 /*
487 * Set the 4-bit period.
488 */
489 mmr_image &= ~((unsigned long)0xf <<
490 UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
491 mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
492 UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
493 uv_write_global_mmr64
494 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
495 /*
496 * Subsequent reversals of the timebase bit (3) cause an
497 * immediate timeout of one or all INTD resources as
498 * indicated in bits 2:0 (7 causes all of them to timeout).
499 */
500 mmr_image |= ((unsigned long)1 <<
501 UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
502 uv_write_global_mmr64
503 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
439 } 504 }
440} 505}
441 506
@@ -482,8 +547,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
482 stat->requestee, stat->onetlb, stat->alltlb, 547 stat->requestee, stat->onetlb, stat->alltlb,
483 stat->s_retry, stat->d_retry, stat->ptc_i); 548 stat->s_retry, stat->d_retry, stat->ptc_i);
484 seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n", 549 seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
485 uv_read_global_mmr64(uv_blade_to_pnode 550 uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
486 (uv_cpu_to_blade_id(cpu)),
487 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), 551 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
488 stat->sflush, stat->dflush, 552 stat->sflush, stat->dflush,
489 stat->retriesok, stat->nomsg, 553 stat->retriesok, stat->nomsg,
@@ -617,16 +681,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
617 * finish the initialization of the per-blade control structures 681 * finish the initialization of the per-blade control structures
618 */ 682 */
619static void __init 683static void __init
620uv_table_bases_finish(int blade, int node, int cur_cpu, 684uv_table_bases_finish(int blade,
621 struct bau_control *bau_tablesp, 685 struct bau_control *bau_tablesp,
622 struct bau_desc *adp) 686 struct bau_desc *adp)
623{ 687{
624 struct bau_control *bcp; 688 struct bau_control *bcp;
625 int i; 689 int cpu;
626 690
627 for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) { 691 for_each_present_cpu(cpu) {
628 bcp = (struct bau_control *)&per_cpu(bau_control, i); 692 if (blade != uv_cpu_to_blade_id(cpu))
693 continue;
629 694
695 bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
630 bcp->bau_msg_head = bau_tablesp->va_queue_first; 696 bcp->bau_msg_head = bau_tablesp->va_queue_first;
631 bcp->va_queue_first = bau_tablesp->va_queue_first; 697 bcp->va_queue_first = bau_tablesp->va_queue_first;
632 bcp->va_queue_last = bau_tablesp->va_queue_last; 698 bcp->va_queue_last = bau_tablesp->va_queue_last;
@@ -649,11 +715,10 @@ uv_activation_descriptor_init(int node, int pnode)
649 struct bau_desc *adp; 715 struct bau_desc *adp;
650 struct bau_desc *ad2; 716 struct bau_desc *ad2;
651 717
652 adp = (struct bau_desc *) 718 adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
653 kmalloc_node(16384, GFP_KERNEL, node);
654 BUG_ON(!adp); 719 BUG_ON(!adp);
655 720
 656 pa = __pa((unsigned long)adp); 721 pa = uv_gpa(adp); /* need the real nasid */
657 n = pa >> uv_nshift; 722 n = pa >> uv_nshift;
658 m = pa & uv_mmask; 723 m = pa & uv_mmask;
659 724
@@ -667,8 +732,12 @@ uv_activation_descriptor_init(int node, int pnode)
667 for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) { 732 for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
668 memset(ad2, 0, sizeof(struct bau_desc)); 733 memset(ad2, 0, sizeof(struct bau_desc));
669 ad2->header.sw_ack_flag = 1; 734 ad2->header.sw_ack_flag = 1;
670 ad2->header.base_dest_nodeid = 735 /*
671 uv_blade_to_pnode(uv_cpu_to_blade_id(0)); 736 * base_dest_nodeid is the first node in the partition, so
737 * the bit map will indicate partition-relative node numbers.
738 * note that base_dest_nodeid is actually a nasid.
739 */
740 ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
672 ad2->header.command = UV_NET_ENDPOINT_INTD; 741 ad2->header.command = UV_NET_ENDPOINT_INTD;
673 ad2->header.int_both = 1; 742 ad2->header.int_both = 1;
674 /* 743 /*
@@ -686,6 +755,8 @@ static struct bau_payload_queue_entry * __init
686uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) 755uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
687{ 756{
688 struct bau_payload_queue_entry *pqp; 757 struct bau_payload_queue_entry *pqp;
758 unsigned long pa;
759 int pn;
689 char *cp; 760 char *cp;
690 761
691 pqp = (struct bau_payload_queue_entry *) kmalloc_node( 762 pqp = (struct bau_payload_queue_entry *) kmalloc_node(
@@ -696,10 +767,14 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
696 cp = (char *)pqp + 31; 767 cp = (char *)pqp + 31;
697 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); 768 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
698 bau_tablesp->va_queue_first = pqp; 769 bau_tablesp->va_queue_first = pqp;
770 /*
771 * need the pnode of where the memory was really allocated
772 */
773 pa = uv_gpa(pqp);
774 pn = pa >> uv_nshift;
699 uv_write_global_mmr64(pnode, 775 uv_write_global_mmr64(pnode,
700 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, 776 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
701 ((unsigned long)pnode << 777 ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
702 UV_PAYLOADQ_PNODE_SHIFT) |
703 uv_physnodeaddr(pqp)); 778 uv_physnodeaddr(pqp));
704 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, 779 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
705 uv_physnodeaddr(pqp)); 780 uv_physnodeaddr(pqp));
@@ -715,8 +790,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
715/* 790/*
716 * Initialization of each UV blade's structures 791 * Initialization of each UV blade's structures
717 */ 792 */
718static int __init uv_init_blade(int blade, int node, int cur_cpu) 793static int __init uv_init_blade(int blade)
719{ 794{
795 int node;
720 int pnode; 796 int pnode;
721 unsigned long pa; 797 unsigned long pa;
722 unsigned long apicid; 798 unsigned long apicid;
@@ -724,16 +800,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
724 struct bau_payload_queue_entry *pqp; 800 struct bau_payload_queue_entry *pqp;
725 struct bau_control *bau_tablesp; 801 struct bau_control *bau_tablesp;
726 802
803 node = blade_to_first_node(blade);
727 bau_tablesp = uv_table_bases_init(blade, node); 804 bau_tablesp = uv_table_bases_init(blade, node);
728 pnode = uv_blade_to_pnode(blade); 805 pnode = uv_blade_to_pnode(blade);
729 adp = uv_activation_descriptor_init(node, pnode); 806 adp = uv_activation_descriptor_init(node, pnode);
730 pqp = uv_payload_queue_init(node, pnode, bau_tablesp); 807 pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
731 uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp); 808 uv_table_bases_finish(blade, bau_tablesp, adp);
732 /* 809 /*
733 * the below initialization can't be in firmware because the 810 * the below initialization can't be in firmware because the
734 * messaging IRQ will be determined by the OS 811 * messaging IRQ will be determined by the OS
735 */ 812 */
736 apicid = per_cpu(x86_cpu_to_apicid, cur_cpu); 813 apicid = blade_to_first_apicid(blade);
737 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); 814 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
738 if ((pa & 0xff) != UV_BAU_MESSAGE) { 815 if ((pa & 0xff) != UV_BAU_MESSAGE) {
739 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 816 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -748,9 +825,7 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
748static int __init uv_bau_init(void) 825static int __init uv_bau_init(void)
749{ 826{
750 int blade; 827 int blade;
751 int node;
752 int nblades; 828 int nblades;
753 int last_blade;
754 int cur_cpu; 829 int cur_cpu;
755 830
756 if (!is_uv_system()) 831 if (!is_uv_system())
@@ -763,29 +838,21 @@ static int __init uv_bau_init(void)
763 uv_bau_retry_limit = 1; 838 uv_bau_retry_limit = 1;
764 uv_nshift = uv_hub_info->n_val; 839 uv_nshift = uv_hub_info->n_val;
765 uv_mmask = (1UL << uv_hub_info->n_val) - 1; 840 uv_mmask = (1UL << uv_hub_info->n_val) - 1;
766 nblades = 0; 841 nblades = uv_num_possible_blades();
767 last_blade = -1; 842
768 cur_cpu = 0;
769 for_each_online_node(node) {
770 blade = uv_node_to_blade_id(node);
771 if (blade == last_blade)
772 continue;
773 last_blade = blade;
774 nblades++;
775 }
776 uv_bau_table_bases = (struct bau_control **) 843 uv_bau_table_bases = (struct bau_control **)
777 kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL); 844 kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
778 BUG_ON(!uv_bau_table_bases); 845 BUG_ON(!uv_bau_table_bases);
779 846
780 last_blade = -1; 847 uv_partition_base_pnode = 0x7fffffff;
781 for_each_online_node(node) { 848 for (blade = 0; blade < nblades; blade++)
782 blade = uv_node_to_blade_id(node); 849 if (uv_blade_nr_possible_cpus(blade) &&
783 if (blade == last_blade) 850 (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
784 continue; 851 uv_partition_base_pnode = uv_blade_to_pnode(blade);
785 last_blade = blade; 852 for (blade = 0; blade < nblades; blade++)
786 uv_init_blade(blade, node, cur_cpu); 853 if (uv_blade_nr_possible_cpus(blade))
787 cur_cpu += uv_blade_nr_possible_cpus(blade); 854 uv_init_blade(blade);
788 } 855
789 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); 856 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
790 uv_enable_timeouts(); 857 uv_enable_timeouts();
791 858
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a567ebe6361..d57de05dc430 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
699 * code, which is necessary to support wrapping clocksources like pm 699 * code, which is necessary to support wrapping clocksources like pm
700 * timer. 700 * timer.
701 */ 701 */
702static cycle_t read_tsc(void) 702static cycle_t read_tsc(struct clocksource *cs)
703{ 703{
704 cycle_t ret = (cycle_t)get_cycles(); 704 cycle_t ret = (cycle_t)get_cycles();
705 705
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
index 67f9b9dbf800..36afb98675a4 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/sysdev.h> 22#include <linux/sysdev.h>
23#include <asm/uv/bios.h> 23#include <asm/uv/bios.h>
24#include <asm/uv/uv.h>
24 25
25struct kobject *sgi_uv_kobj; 26struct kobject *sgi_uv_kobj;
26 27
@@ -47,6 +48,9 @@ static int __init sgi_uv_sysfs_init(void)
47{ 48{
48 unsigned long ret; 49 unsigned long ret;
49 50
51 if (!is_uv_system())
52 return -ENODEV;
53
50 if (!sgi_uv_kobj) 54 if (!sgi_uv_kobj)
51 sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj); 55 sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
52 if (!sgi_uv_kobj) { 56 if (!sgi_uv_kobj) {
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
index 2ffb6c53326e..583f11d5c480 100644
--- a/arch/x86/kernel/uv_time.c
+++ b/arch/x86/kernel/uv_time.c
@@ -29,7 +29,7 @@
29 29
30#define RTC_NAME "sgi_rtc" 30#define RTC_NAME "sgi_rtc"
31 31
32static cycle_t uv_read_rtc(void); 32static cycle_t uv_read_rtc(struct clocksource *cs);
33static int uv_rtc_next_event(unsigned long, struct clock_event_device *); 33static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
34static void uv_rtc_timer_setup(enum clock_event_mode, 34static void uv_rtc_timer_setup(enum clock_event_mode,
35 struct clock_event_device *); 35 struct clock_event_device *);
@@ -123,7 +123,7 @@ static int uv_setup_intr(int cpu, u64 expires)
123 /* Initialize comparator value */ 123 /* Initialize comparator value */
124 uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires); 124 uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
125 125
126 return (expires < uv_read_rtc() && !uv_intr_pending(pnode)); 126 return (expires < uv_read_rtc(NULL) && !uv_intr_pending(pnode));
127} 127}
128 128
129/* 129/*
@@ -256,7 +256,7 @@ static int uv_rtc_unset_timer(int cpu)
256 256
257 spin_lock_irqsave(&head->lock, flags); 257 spin_lock_irqsave(&head->lock, flags);
258 258
259 if (head->next_cpu == bcpu && uv_read_rtc() >= *t) 259 if (head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t)
260 rc = 1; 260 rc = 1;
261 261
262 *t = ULLONG_MAX; 262 *t = ULLONG_MAX;
@@ -278,7 +278,7 @@ static int uv_rtc_unset_timer(int cpu)
278/* 278/*
279 * Read the RTC. 279 * Read the RTC.
280 */ 280 */
281static cycle_t uv_read_rtc(void) 281static cycle_t uv_read_rtc(struct clocksource *cs)
282{ 282{
283 return (cycle_t)uv_read_local_mmr(UVH_RTC); 283 return (cycle_t)uv_read_local_mmr(UVH_RTC);
284} 284}
@@ -291,7 +291,7 @@ static int uv_rtc_next_event(unsigned long delta,
291{ 291{
292 int ced_cpu = cpumask_first(ced->cpumask); 292 int ced_cpu = cpumask_first(ced->cpumask);
293 293
294 return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc()); 294 return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
295} 295}
296 296
297/* 297/*
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index d303369a7bad..2b3eb82efeeb 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
283/** vmi clocksource */ 283/** vmi clocksource */
284static struct clocksource clocksource_vmi; 284static struct clocksource clocksource_vmi;
285 285
286static cycle_t read_real_cycles(void) 286static cycle_t read_real_cycles(struct clocksource *cs)
287{ 287{
288 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); 288 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
289 return max(ret, clocksource_vmi.cycle_last); 289 return max(ret, clocksource_vmi.cycle_last);
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 2b54fe002e94..c5ee17e8c6d9 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -89,7 +89,7 @@ int save_i387_xstate(void __user *buf)
89 89
90 if (!used_math()) 90 if (!used_math())
91 return 0; 91 return 0;
92 clear_used_math(); /* trigger finit */ 92
93 if (task_thread_info(tsk)->status & TS_USEDFPU) { 93 if (task_thread_info(tsk)->status & TS_USEDFPU) {
94 /* 94 /*
95 * Start with clearing the user buffer. This will present a 95 * Start with clearing the user buffer. This will present a
@@ -114,6 +114,8 @@ int save_i387_xstate(void __user *buf)
114 return -1; 114 return -1;
115 } 115 }
116 116
117 clear_used_math(); /* trigger finit */
118
117 if (task_thread_info(tsk)->status & TS_XSAVE) { 119 if (task_thread_info(tsk)->status & TS_XSAVE) {
118 struct _fpstate __user *fx = buf; 120 struct _fpstate __user *fx = buf;
119 struct _xstate __user *x = buf; 121 struct _xstate __user *x = buf;
@@ -324,7 +326,7 @@ void __ref xsave_cntxt_init(void)
324 } 326 }
325 327
326 /* 328 /*
327 * for now OS knows only about FP/SSE 329 * Support only the state known to OS.
328 */ 330 */
329 pcntxt_mask = pcntxt_mask & XCNTXT_MASK; 331 pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
330 xsave_init(); 332 xsave_init();
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a36f7f7c4c7..b6caf1329b1b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1248,7 +1248,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1248 pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word); 1248 pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
1249 sp->gfn = gfn; 1249 sp->gfn = gfn;
1250 sp->role = role; 1250 sp->role = role;
1251 sp->global = role.cr4_pge; 1251 sp->global = 0;
1252 hlist_add_head(&sp->hash_link, bucket); 1252 hlist_add_head(&sp->hash_link, bucket);
1253 if (!direct) { 1253 if (!direct) {
1254 if (rmap_write_protect(vcpu->kvm, gfn)) 1254 if (rmap_write_protect(vcpu->kvm, gfn))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8ca100a9ecac..7c1ce5ac6131 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2775,6 +2775,9 @@ out:
2775 2775
2776void kvm_arch_exit(void) 2776void kvm_arch_exit(void)
2777{ 2777{
2778 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
2779 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
2780 CPUFREQ_TRANSITION_NOTIFIER);
2778 kvm_x86_ops = NULL; 2781 kvm_x86_ops = NULL;
2779 kvm_mmu_module_exit(); 2782 kvm_mmu_module_exit();
2780} 2783}
@@ -4159,6 +4162,11 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
4159 4162
4160void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 4163void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
4161{ 4164{
4165 if (vcpu->arch.time_page) {
4166 kvm_release_page_dirty(vcpu->arch.time_page);
4167 vcpu->arch.time_page = NULL;
4168 }
4169
4162 kvm_x86_ops->vcpu_free(vcpu); 4170 kvm_x86_ops->vcpu_free(vcpu);
4163} 4171}
4164 4172
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e94a11e42f98..ca7ec44bafc3 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -273,15 +273,15 @@ static void lguest_load_idt(const struct desc_ptr *desc)
273 * controls the entire thing and the Guest asks it to make changes using the 273 * controls the entire thing and the Guest asks it to make changes using the
274 * LOAD_GDT hypercall. 274 * LOAD_GDT hypercall.
275 * 275 *
 276 * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY 276 * This is exactly like the IDT code.
277 * hypercall and use that repeatedly to load a new IDT. I don't think it
278 * really matters, but wouldn't it be nice if they were the same? Wouldn't
279 * it be even better if you were the one to send the patch to fix it?
280 */ 277 */
281static void lguest_load_gdt(const struct desc_ptr *desc) 278static void lguest_load_gdt(const struct desc_ptr *desc)
282{ 279{
283 BUG_ON((desc->size + 1) / 8 != GDT_ENTRIES); 280 unsigned int i;
284 kvm_hypercall2(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES); 281 struct desc_struct *gdt = (void *)desc->address;
282
283 for (i = 0; i < (desc->size+1)/8; i++)
284 kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
285} 285}
286 286
287/* For a single GDT entry which changes, we do the lazy thing: alter our GDT, 287/* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
@@ -291,7 +291,9 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
291 const void *desc, int type) 291 const void *desc, int type)
292{ 292{
293 native_write_gdt_entry(dt, entrynum, desc, type); 293 native_write_gdt_entry(dt, entrynum, desc, type);
294 kvm_hypercall2(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES); 294 /* Tell Host about this new entry. */
295 kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
296 dt[entrynum].a, dt[entrynum].b);
295} 297}
296 298
297/* OK, I lied. There are three "thread local storage" GDT entries which change 299/* OK, I lied. There are three "thread local storage" GDT entries which change
@@ -661,7 +663,7 @@ static unsigned long lguest_tsc_khz(void)
661 663
662/* If we can't use the TSC, the kernel falls back to our lower-priority 664/* If we can't use the TSC, the kernel falls back to our lower-priority
663 * "lguest_clock", where we read the time value given to us by the Host. */ 665 * "lguest_clock", where we read the time value given to us by the Host. */
664static cycle_t lguest_clock_read(void) 666static cycle_t lguest_clock_read(struct clocksource *cs)
665{ 667{
666 unsigned long sec, nsec; 668 unsigned long sec, nsec;
667 669
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index be54176e9eb2..6340cef6798a 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -219,6 +219,22 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
219 return 1; 219 return 1;
220} 220}
221 221
222/**
223 * get_user_pages_fast() - pin user pages in memory
224 * @start: starting user address
225 * @nr_pages: number of pages from start to pin
226 * @write: whether pages will be written to
227 * @pages: array that receives pointers to the pages pinned.
228 * Should be at least nr_pages long.
229 *
230 * Attempt to pin user pages in memory without taking mm->mmap_sem.
231 * If not successful, it will fall back to taking the lock and
232 * calling get_user_pages().
233 *
234 * Returns number of pages pinned. This may be fewer than the number
235 * requested. If nr_pages is 0 or negative, returns 0. If no pages
236 * were pinned, returns -errno.
237 */
222int get_user_pages_fast(unsigned long start, int nr_pages, int write, 238int get_user_pages_fast(unsigned long start, int nr_pages, int write,
223 struct page **pages) 239 struct page **pages)
224{ 240{
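Note: the kerneldoc added above spells out the get_user_pages_fast() contract: it avoids mmap_sem when it can and may pin fewer pages than requested. A minimal caller sketch, not part of the patch (hypothetical user address start, error handling abbreviated), assuming pinned pages are released with put_page():

struct page *pages[16];
int nr, i;

/* Fast path: no mmap_sem taken; may return fewer pages than requested. */
nr = get_user_pages_fast(start, 16, 1 /* write */, pages);
if (nr > 0) {
	/* ... use the pinned pages ... */
	for (i = 0; i < nr; i++)
		put_page(pages[i]);
}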
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0dfa09d69e80..8a450930834f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -280,15 +280,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
280 return NULL; 280 return NULL;
281 area->phys_addr = phys_addr; 281 area->phys_addr = phys_addr;
282 vaddr = (unsigned long) area->addr; 282 vaddr = (unsigned long) area->addr;
283 if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) { 283
284 if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
284 free_memtype(phys_addr, phys_addr + size); 285 free_memtype(phys_addr, phys_addr + size);
285 free_vm_area(area); 286 free_vm_area(area);
286 return NULL; 287 return NULL;
287 } 288 }
288 289
289 if (ioremap_change_attr(vaddr, size, prot_val) < 0) { 290 if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
290 free_memtype(phys_addr, phys_addr + size); 291 free_memtype(phys_addr, phys_addr + size);
291 vunmap(area->addr); 292 free_vm_area(area);
292 return NULL; 293 return NULL;
293 } 294 }
294 295
@@ -374,7 +375,8 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
374 * - UC_MINUS for non-WB-able memory with no other conflicting mappings 375 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
 375 * - Inherit from conflicting mappings otherwise 376
376 */ 377 */
377 err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags); 378 err = reserve_memtype(phys_addr, phys_addr + size,
379 _PAGE_CACHE_WB, &flags);
378 if (err < 0) 380 if (err < 0)
379 return NULL; 381 return NULL;
380 382
@@ -547,7 +549,7 @@ void __init early_ioremap_reset(void)
547} 549}
548 550
549static void __init __early_set_fixmap(enum fixed_addresses idx, 551static void __init __early_set_fixmap(enum fixed_addresses idx,
550 unsigned long phys, pgprot_t flags) 552 phys_addr_t phys, pgprot_t flags)
551{ 553{
552 unsigned long addr = __fix_to_virt(idx); 554 unsigned long addr = __fix_to_virt(idx);
553 pte_t *pte; 555 pte_t *pte;
@@ -566,7 +568,7 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
566} 568}
567 569
568static inline void __init early_set_fixmap(enum fixed_addresses idx, 570static inline void __init early_set_fixmap(enum fixed_addresses idx,
569 unsigned long phys, pgprot_t prot) 571 phys_addr_t phys, pgprot_t prot)
570{ 572{
571 if (after_paging_init) 573 if (after_paging_init)
572 __set_fixmap(idx, phys, prot); 574 __set_fixmap(idx, phys, prot);
@@ -607,9 +609,10 @@ static int __init check_early_ioremap_leak(void)
607late_initcall(check_early_ioremap_leak); 609late_initcall(check_early_ioremap_leak);
608 610
609static void __init __iomem * 611static void __init __iomem *
610__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) 612__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
611{ 613{
612 unsigned long offset, last_addr; 614 unsigned long offset;
615 resource_size_t last_addr;
613 unsigned int nrpages; 616 unsigned int nrpages;
614 enum fixed_addresses idx0, idx; 617 enum fixed_addresses idx0, idx;
615 int i, slot; 618 int i, slot;
@@ -625,15 +628,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
625 } 628 }
626 629
627 if (slot < 0) { 630 if (slot < 0) {
628 printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n", 631 printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
629 phys_addr, size); 632 (u64)phys_addr, size);
630 WARN_ON(1); 633 WARN_ON(1);
631 return NULL; 634 return NULL;
632 } 635 }
633 636
634 if (early_ioremap_debug) { 637 if (early_ioremap_debug) {
635 printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ", 638 printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
636 phys_addr, size, slot); 639 (u64)phys_addr, size, slot);
637 dump_stack(); 640 dump_stack();
638 } 641 }
639 642
@@ -680,13 +683,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
680} 683}
681 684
682/* Remap an IO device */ 685/* Remap an IO device */
683void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size) 686void __init __iomem *
687early_ioremap(resource_size_t phys_addr, unsigned long size)
684{ 688{
685 return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO); 689 return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
686} 690}
687 691
688/* Remap memory */ 692/* Remap memory */
689void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size) 693void __init __iomem *
694early_memremap(resource_size_t phys_addr, unsigned long size)
690{ 695{
691 return __early_ioremap(phys_addr, size, PAGE_KERNEL); 696 return __early_ioremap(phys_addr, size, PAGE_KERNEL);
692} 697}
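Note: the early_ioremap()/early_memremap() hunks above widen the physical-address argument to resource_size_t so addresses above 4GB can be passed on 32-bit PAE. A hedged usage sketch, not part of the patch (hypothetical address; assumes early_iounmap() keeps taking the returned pointer and the same size):

void __iomem *p;

/* A physical address above 4GB now fits the resource_size_t argument. */
p = early_ioremap(0x100000000ULL, PAGE_SIZE);
if (p) {
	pr_info("probe read %#x\n", readl(p));
	early_iounmap(p, PAGE_SIZE);
}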
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3daefa04ace5..d2530062fe00 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -257,7 +257,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
257} 257}
258#endif 258#endif
259 259
260static unsigned long calculate_numa_remap_pages(void) 260static __init unsigned long calculate_numa_remap_pages(void)
261{ 261{
262 int nid; 262 int nid;
263 unsigned long size, reserve_pages = 0; 263 unsigned long size, reserve_pages = 0;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index d73aaa892371..2d05a12029dc 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -188,6 +188,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
188 const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); 188 const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
189 int nid; 189 int nid;
190 190
191 if (!end)
192 return;
193
191 start = roundup(start, ZONE_ALIGN); 194 start = roundup(start, ZONE_ALIGN);
192 195
193 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, 196 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index d71e1b636ce6..797f9f107cb6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -945,71 +945,94 @@ int _set_memory_uc(unsigned long addr, int numpages)
945 945
946int set_memory_uc(unsigned long addr, int numpages) 946int set_memory_uc(unsigned long addr, int numpages)
947{ 947{
948 int ret;
949
948 /* 950 /*
949 * for now UC MINUS. see comments in ioremap_nocache() 951 * for now UC MINUS. see comments in ioremap_nocache()
950 */ 952 */
951 if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 953 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
952 _PAGE_CACHE_UC_MINUS, NULL)) 954 _PAGE_CACHE_UC_MINUS, NULL);
953 return -EINVAL; 955 if (ret)
956 goto out_err;
957
958 ret = _set_memory_uc(addr, numpages);
959 if (ret)
960 goto out_free;
954 961
955 return _set_memory_uc(addr, numpages); 962 return 0;
963
964out_free:
965 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
966out_err:
967 return ret;
956} 968}
957EXPORT_SYMBOL(set_memory_uc); 969EXPORT_SYMBOL(set_memory_uc);
958 970
959int set_memory_array_uc(unsigned long *addr, int addrinarray) 971int set_memory_array_uc(unsigned long *addr, int addrinarray)
960{ 972{
961 unsigned long start; 973 int i, j;
962 unsigned long end; 974 int ret;
963 int i; 975
964 /* 976 /*
965 * for now UC MINUS. see comments in ioremap_nocache() 977 * for now UC MINUS. see comments in ioremap_nocache()
966 */ 978 */
967 for (i = 0; i < addrinarray; i++) { 979 for (i = 0; i < addrinarray; i++) {
968 start = __pa(addr[i]); 980 ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
969 for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) { 981 _PAGE_CACHE_UC_MINUS, NULL);
970 if (end != __pa(addr[i + 1])) 982 if (ret)
971 break; 983 goto out_free;
972 i++;
973 }
974 if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
975 goto out;
976 } 984 }
977 985
978 return change_page_attr_set(addr, addrinarray, 986 ret = change_page_attr_set(addr, addrinarray,
979 __pgprot(_PAGE_CACHE_UC_MINUS), 1); 987 __pgprot(_PAGE_CACHE_UC_MINUS), 1);
980out: 988 if (ret)
981 for (i = 0; i < addrinarray; i++) { 989 goto out_free;
982 unsigned long tmp = __pa(addr[i]); 990
983 991 return 0;
984 if (tmp == start) 992
985 break; 993out_free:
986 for (end = tmp + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) { 994 for (j = 0; j < i; j++)
987 if (end != __pa(addr[i + 1])) 995 free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
988 break; 996
989 i++; 997 return ret;
990 }
991 free_memtype(tmp, end);
992 }
993 return -EINVAL;
994} 998}
995EXPORT_SYMBOL(set_memory_array_uc); 999EXPORT_SYMBOL(set_memory_array_uc);
996 1000
997int _set_memory_wc(unsigned long addr, int numpages) 1001int _set_memory_wc(unsigned long addr, int numpages)
998{ 1002{
999 return change_page_attr_set(&addr, numpages, 1003 int ret;
1004 ret = change_page_attr_set(&addr, numpages,
1005 __pgprot(_PAGE_CACHE_UC_MINUS), 0);
1006
1007 if (!ret) {
1008 ret = change_page_attr_set(&addr, numpages,
1000 __pgprot(_PAGE_CACHE_WC), 0); 1009 __pgprot(_PAGE_CACHE_WC), 0);
1010 }
1011 return ret;
1001} 1012}
1002 1013
1003int set_memory_wc(unsigned long addr, int numpages) 1014int set_memory_wc(unsigned long addr, int numpages)
1004{ 1015{
1016 int ret;
1017
1005 if (!pat_enabled) 1018 if (!pat_enabled)
1006 return set_memory_uc(addr, numpages); 1019 return set_memory_uc(addr, numpages);
1007 1020
1008 if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 1021 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1009 _PAGE_CACHE_WC, NULL)) 1022 _PAGE_CACHE_WC, NULL);
1010 return -EINVAL; 1023 if (ret)
1024 goto out_err;
1025
1026 ret = _set_memory_wc(addr, numpages);
1027 if (ret)
1028 goto out_free;
1029
1030 return 0;
1011 1031
1012 return _set_memory_wc(addr, numpages); 1032out_free:
1033 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1034out_err:
1035 return ret;
1013} 1036}
1014EXPORT_SYMBOL(set_memory_wc); 1037EXPORT_SYMBOL(set_memory_wc);
1015 1038
@@ -1021,29 +1044,31 @@ int _set_memory_wb(unsigned long addr, int numpages)
1021 1044
1022int set_memory_wb(unsigned long addr, int numpages) 1045int set_memory_wb(unsigned long addr, int numpages)
1023{ 1046{
1024 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); 1047 int ret;
1048
1049 ret = _set_memory_wb(addr, numpages);
1050 if (ret)
1051 return ret;
1025 1052
1026 return _set_memory_wb(addr, numpages); 1053 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1054 return 0;
1027} 1055}
1028EXPORT_SYMBOL(set_memory_wb); 1056EXPORT_SYMBOL(set_memory_wb);
1029 1057
1030int set_memory_array_wb(unsigned long *addr, int addrinarray) 1058int set_memory_array_wb(unsigned long *addr, int addrinarray)
1031{ 1059{
1032 int i; 1060 int i;
1061 int ret;
1033 1062
1034 for (i = 0; i < addrinarray; i++) { 1063 ret = change_page_attr_clear(addr, addrinarray,
1035 unsigned long start = __pa(addr[i]);
1036 unsigned long end;
1037
1038 for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
1039 if (end != __pa(addr[i + 1]))
1040 break;
1041 i++;
1042 }
1043 free_memtype(start, end);
1044 }
1045 return change_page_attr_clear(addr, addrinarray,
1046 __pgprot(_PAGE_CACHE_MASK), 1); 1064 __pgprot(_PAGE_CACHE_MASK), 1);
1065 if (ret)
1066 return ret;
1067
1068 for (i = 0; i < addrinarray; i++)
1069 free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
1070
1071 return 0;
1047} 1072}
1048EXPORT_SYMBOL(set_memory_array_wb); 1073EXPORT_SYMBOL(set_memory_array_wb);
1049 1074
@@ -1136,6 +1161,8 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
1136 1161
1137 retval = cpa_clear_pages_array(pages, addrinarray, 1162 retval = cpa_clear_pages_array(pages, addrinarray,
1138 __pgprot(_PAGE_CACHE_MASK)); 1163 __pgprot(_PAGE_CACHE_MASK));
1164 if (retval)
1165 return retval;
1139 1166
1140 for (i = 0; i < addrinarray; i++) { 1167 for (i = 0; i < addrinarray; i++) {
1141 start = (unsigned long)page_address(pages[i]); 1168 start = (unsigned long)page_address(pages[i]);
@@ -1143,7 +1170,7 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
1143 free_memtype(start, end); 1170 free_memtype(start, end);
1144 } 1171 }
1145 1172
1146 return retval; 1173 return 0;
1147} 1174}
1148EXPORT_SYMBOL(set_pages_array_wb); 1175EXPORT_SYMBOL(set_pages_array_wb);
1149 1176
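Note: the reworked set_memory_uc()/set_memory_wc() above follow a reserve-then-apply shape and drop the memtype reservation again when the attribute change fails. A minimal sketch of that error-handling pattern, not part of the patch, as a hypothetical wrapper:

/* Hypothetical wrapper mirroring the reserve -> apply -> roll back shape. */
static int set_range_uc_minus(unsigned long addr, int numpages)
{
	int ret;

	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			      _PAGE_CACHE_UC_MINUS, NULL);
	if (ret)
		return ret;

	ret = _set_memory_uc(addr, numpages);
	if (ret)	/* undo the reservation if the attribute change failed */
		free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

	return ret;
}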
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 640339ee4fb2..e6718bb28065 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -31,7 +31,7 @@
31#ifdef CONFIG_X86_PAT 31#ifdef CONFIG_X86_PAT
32int __read_mostly pat_enabled = 1; 32int __read_mostly pat_enabled = 1;
33 33
34void __cpuinit pat_disable(const char *reason) 34static inline void pat_disable(const char *reason)
35{ 35{
36 pat_enabled = 0; 36 pat_enabled = 0;
37 printk(KERN_INFO "%s\n", reason); 37 printk(KERN_INFO "%s\n", reason);
@@ -182,10 +182,10 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
182 u8 mtrr_type; 182 u8 mtrr_type;
183 183
184 mtrr_type = mtrr_type_lookup(start, end); 184 mtrr_type = mtrr_type_lookup(start, end);
185 if (mtrr_type == MTRR_TYPE_UNCACHABLE) 185 if (mtrr_type != MTRR_TYPE_WRBACK)
186 return _PAGE_CACHE_UC; 186 return _PAGE_CACHE_UC_MINUS;
187 if (mtrr_type == MTRR_TYPE_WRCOMB) 187
188 return _PAGE_CACHE_WC; 188 return _PAGE_CACHE_WB;
189 } 189 }
190 190
191 return req_type; 191 return req_type;
@@ -352,23 +352,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
352 return 0; 352 return 0;
353 } 353 }
354 354
355 if (req_type == -1) { 355 /*
356 /* 356 * Call mtrr_lookup to get the type hint. This is an
357 * Call mtrr_lookup to get the type hint. This is an 357 * optimization for /dev/mem mmap'ers into WB memory (BIOS
358 * optimization for /dev/mem mmap'ers into WB memory (BIOS 358 * tools and ACPI tools). Use WB request for WB memory and use
359 * tools and ACPI tools). Use WB request for WB memory and use 359 * UC_MINUS otherwise.
360 * UC_MINUS otherwise. 360 */
361 */ 361 actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
362 u8 mtrr_type = mtrr_type_lookup(start, end);
363
364 if (mtrr_type == MTRR_TYPE_WRBACK)
365 actual_type = _PAGE_CACHE_WB;
366 else
367 actual_type = _PAGE_CACHE_UC_MINUS;
368 } else {
369 actual_type = pat_x_mtrr_type(start, end,
370 req_type & _PAGE_CACHE_MASK);
371 }
372 362
373 if (new_type) 363 if (new_type)
374 *new_type = actual_type; 364 *new_type = actual_type;
@@ -546,9 +536,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
546int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, 536int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
547 unsigned long size, pgprot_t *vma_prot) 537 unsigned long size, pgprot_t *vma_prot)
548{ 538{
549 u64 offset = ((u64) pfn) << PAGE_SHIFT; 539 unsigned long flags = _PAGE_CACHE_WB;
550 unsigned long flags = -1;
551 int retval;
552 540
553 if (!range_is_allowed(pfn, size)) 541 if (!range_is_allowed(pfn, size))
554 return 0; 542 return 0;
@@ -576,64 +564,11 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
576 } 564 }
577#endif 565#endif
578 566
579 /*
580 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
581 *
582 * Without O_SYNC, we want to get
583 * - WB for WB-able memory and no other conflicting mappings
584 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
 585 * - Inherit from conflicting mappings otherwise
586 */
587 if (flags != -1) {
588 retval = reserve_memtype(offset, offset + size, flags, NULL);
589 } else {
590 retval = reserve_memtype(offset, offset + size, -1, &flags);
591 }
592
593 if (retval < 0)
594 return 0;
595
596 if (((pfn < max_low_pfn_mapped) ||
597 (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
598 ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
599 free_memtype(offset, offset + size);
600 printk(KERN_INFO
601 "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
602 current->comm, current->pid,
603 cattr_name(flags),
604 offset, (unsigned long long)(offset + size));
605 return 0;
606 }
607
608 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | 567 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
609 flags); 568 flags);
610 return 1; 569 return 1;
611} 570}
612 571
613void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
614{
615 unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
616 u64 addr = (u64)pfn << PAGE_SHIFT;
617 unsigned long flags;
618
619 reserve_memtype(addr, addr + size, want_flags, &flags);
620 if (flags != want_flags) {
621 printk(KERN_INFO
622 "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
623 current->comm, current->pid,
624 cattr_name(want_flags),
625 addr, (unsigned long long)(addr + size),
626 cattr_name(flags));
627 }
628}
629
630void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
631{
632 u64 addr = (u64)pfn << PAGE_SHIFT;
633
634 free_memtype(addr, addr + size);
635}
636
637/* 572/*
 638 * Change the memory type for the physical address range in kernel identity 573
639 * mapping space if that range is a part of identity map. 574 * mapping space if that range is a part of identity map.
@@ -671,8 +606,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
671{ 606{
672 int is_ram = 0; 607 int is_ram = 0;
673 int ret; 608 int ret;
674 unsigned long flags;
675 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); 609 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
610 unsigned long flags = want_flags;
676 611
677 is_ram = pat_pagerange_is_ram(paddr, paddr + size); 612 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
678 613
@@ -734,29 +669,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
734 * 669 *
735 * If the vma has a linear pfn mapping for the entire range, we get the prot 670 * If the vma has a linear pfn mapping for the entire range, we get the prot
736 * from pte and reserve the entire vma range with single reserve_pfn_range call. 671 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 737 * Otherwise, we reserve the entire vma range, by going through the PTEs page
738 * by page to get physical address and protection.
739 */ 672 */
740int track_pfn_vma_copy(struct vm_area_struct *vma) 673int track_pfn_vma_copy(struct vm_area_struct *vma)
741{ 674{
742 int retval = 0;
743 unsigned long i, j;
744 resource_size_t paddr; 675 resource_size_t paddr;
745 unsigned long prot; 676 unsigned long prot;
746 unsigned long vma_start = vma->vm_start; 677 unsigned long vma_size = vma->vm_end - vma->vm_start;
747 unsigned long vma_end = vma->vm_end;
748 unsigned long vma_size = vma_end - vma_start;
749 pgprot_t pgprot; 678 pgprot_t pgprot;
750 679
751 if (!pat_enabled) 680 if (!pat_enabled)
752 return 0; 681 return 0;
753 682
683 /*
684 * For now, only handle remap_pfn_range() vmas where
685 * is_linear_pfn_mapping() == TRUE. Handling of
686 * vm_insert_pfn() is TBD.
687 */
754 if (is_linear_pfn_mapping(vma)) { 688 if (is_linear_pfn_mapping(vma)) {
755 /* 689 /*
756 * reserve the whole chunk covered by vma. We need the 690 * reserve the whole chunk covered by vma. We need the
757 * starting address and protection from pte. 691 * starting address and protection from pte.
758 */ 692 */
759 if (follow_phys(vma, vma_start, 0, &prot, &paddr)) { 693 if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
760 WARN_ON_ONCE(1); 694 WARN_ON_ONCE(1);
761 return -EINVAL; 695 return -EINVAL;
762 } 696 }
@@ -764,28 +698,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
764 return reserve_pfn_range(paddr, vma_size, &pgprot, 1); 698 return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
765 } 699 }
766 700
767 /* reserve entire vma page by page, using pfn and prot from pte */
768 for (i = 0; i < vma_size; i += PAGE_SIZE) {
769 if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
770 continue;
771
772 pgprot = __pgprot(prot);
773 retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
774 if (retval)
775 goto cleanup_ret;
776 }
777 return 0; 701 return 0;
778
779cleanup_ret:
780 /* Reserve error: Cleanup partial reservation and return error */
781 for (j = 0; j < i; j += PAGE_SIZE) {
782 if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
783 continue;
784
785 free_pfn_range(paddr, PAGE_SIZE);
786 }
787
788 return retval;
789} 702}
790 703
791/* 704/*
@@ -795,50 +708,28 @@ cleanup_ret:
795 * prot is passed in as a parameter for the new mapping. If the vma has a 708 * prot is passed in as a parameter for the new mapping. If the vma has a
796 * linear pfn mapping for the entire range reserve the entire vma range with 709 * linear pfn mapping for the entire range reserve the entire vma range with
797 * single reserve_pfn_range call. 710 * single reserve_pfn_range call.
 798 * Otherwise, we look at the pfn and size and reserve only the specified range
799 * page by page.
800 *
801 * Note that this function can be called with caller trying to map only a
802 * subrange/page inside the vma.
803 */ 711 */
804int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, 712int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
805 unsigned long pfn, unsigned long size) 713 unsigned long pfn, unsigned long size)
806{ 714{
807 int retval = 0;
808 unsigned long i, j;
809 resource_size_t base_paddr;
810 resource_size_t paddr; 715 resource_size_t paddr;
811 unsigned long vma_start = vma->vm_start; 716 unsigned long vma_size = vma->vm_end - vma->vm_start;
812 unsigned long vma_end = vma->vm_end;
813 unsigned long vma_size = vma_end - vma_start;
814 717
815 if (!pat_enabled) 718 if (!pat_enabled)
816 return 0; 719 return 0;
817 720
721 /*
722 * For now, only handle remap_pfn_range() vmas where
723 * is_linear_pfn_mapping() == TRUE. Handling of
724 * vm_insert_pfn() is TBD.
725 */
818 if (is_linear_pfn_mapping(vma)) { 726 if (is_linear_pfn_mapping(vma)) {
819 /* reserve the whole chunk starting from vm_pgoff */ 727 /* reserve the whole chunk starting from vm_pgoff */
820 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; 728 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
821 return reserve_pfn_range(paddr, vma_size, prot, 0); 729 return reserve_pfn_range(paddr, vma_size, prot, 0);
822 } 730 }
823 731
824 /* reserve page by page using pfn and size */
825 base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
826 for (i = 0; i < size; i += PAGE_SIZE) {
827 paddr = base_paddr + i;
828 retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
829 if (retval)
830 goto cleanup_ret;
831 }
832 return 0; 732 return 0;
833
834cleanup_ret:
835 /* Reserve error: Cleanup partial reservation and return error */
836 for (j = 0; j < i; j += PAGE_SIZE) {
837 paddr = base_paddr + j;
838 free_pfn_range(paddr, PAGE_SIZE);
839 }
840
841 return retval;
842} 733}
843 734
844/* 735/*
@@ -849,39 +740,23 @@ cleanup_ret:
849void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, 740void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
850 unsigned long size) 741 unsigned long size)
851{ 742{
852 unsigned long i;
853 resource_size_t paddr; 743 resource_size_t paddr;
854 unsigned long prot; 744 unsigned long vma_size = vma->vm_end - vma->vm_start;
855 unsigned long vma_start = vma->vm_start;
856 unsigned long vma_end = vma->vm_end;
857 unsigned long vma_size = vma_end - vma_start;
858 745
859 if (!pat_enabled) 746 if (!pat_enabled)
860 return; 747 return;
861 748
749 /*
750 * For now, only handle remap_pfn_range() vmas where
751 * is_linear_pfn_mapping() == TRUE. Handling of
752 * vm_insert_pfn() is TBD.
753 */
862 if (is_linear_pfn_mapping(vma)) { 754 if (is_linear_pfn_mapping(vma)) {
863 /* free the whole chunk starting from vm_pgoff */ 755 /* free the whole chunk starting from vm_pgoff */
864 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; 756 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
865 free_pfn_range(paddr, vma_size); 757 free_pfn_range(paddr, vma_size);
866 return; 758 return;
867 } 759 }
868
869 if (size != 0 && size != vma_size) {
870 /* free page by page, using pfn and size */
871 paddr = (resource_size_t)pfn << PAGE_SHIFT;
872 for (i = 0; i < size; i += PAGE_SIZE) {
873 paddr = paddr + i;
874 free_pfn_range(paddr, PAGE_SIZE);
875 }
876 } else {
877 /* free entire vma, page by page, using the pfn from pte */
878 for (i = 0; i < vma_size; i += PAGE_SIZE) {
879 if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
880 continue;
881
882 free_pfn_range(paddr, PAGE_SIZE);
883 }
884 }
885} 760}
886 761
887pgprot_t pgprot_writecombine(pgprot_t prot) 762pgprot_t pgprot_writecombine(pgprot_t prot)
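Note: the track_pfn_vma_new()/track_pfn_vma_copy()/untrack_pfn_vma() hunks above now handle only vmas where is_linear_pfn_mapping() is true, i.e. a single remap_pfn_range() covering the whole vma; the old page-by-page fallback is removed. A hedged sketch of a driver mmap handler that produces such a mapping, not part of the patch (hypothetical device, pfn taken from vm_pgoff):

/* Hypothetical driver mmap: one contiguous physical region, mapped linearly. */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* remap_pfn_range() over the full vma makes is_linear_pfn_mapping()
	 * true, so PAT reserves and frees the range as a single chunk. */
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}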
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 5b7c7c8464fe..7aa03a5389f5 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -345,7 +345,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
345 fixmaps_set++; 345 fixmaps_set++;
346} 346}
347 347
348void native_set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags) 348void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
349 pgprot_t flags)
349{ 350{
350 __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags)); 351 __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
351} 352}
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index c7d272b8574c..33c5fa57e43d 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -28,6 +28,7 @@ int acpi_numa __initdata;
28static struct acpi_table_slit *acpi_slit; 28static struct acpi_table_slit *acpi_slit;
29 29
30static nodemask_t nodes_parsed __initdata; 30static nodemask_t nodes_parsed __initdata;
31static nodemask_t cpu_nodes_parsed __initdata;
31static struct bootnode nodes[MAX_NUMNODES] __initdata; 32static struct bootnode nodes[MAX_NUMNODES] __initdata;
32static struct bootnode nodes_add[MAX_NUMNODES]; 33static struct bootnode nodes_add[MAX_NUMNODES];
33static int found_add_area __initdata; 34static int found_add_area __initdata;
@@ -141,6 +142,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
141 142
142 apic_id = pa->apic_id; 143 apic_id = pa->apic_id;
143 apicid_to_node[apic_id] = node; 144 apicid_to_node[apic_id] = node;
145 node_set(node, cpu_nodes_parsed);
144 acpi_numa = 1; 146 acpi_numa = 1;
145 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", 147 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
146 pxm, apic_id, node); 148 pxm, apic_id, node);
@@ -174,6 +176,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
174 else 176 else
175 apic_id = pa->apic_id; 177 apic_id = pa->apic_id;
176 apicid_to_node[apic_id] = node; 178 apicid_to_node[apic_id] = node;
179 node_set(node, cpu_nodes_parsed);
177 acpi_numa = 1; 180 acpi_numa = 1;
178 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", 181 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
179 pxm, apic_id, node); 182 pxm, apic_id, node);
@@ -402,7 +405,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
402 return -1; 405 return -1;
403 } 406 }
404 407
405 node_possible_map = nodes_parsed; 408 /* Account for nodes with cpus and no memory */
409 nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);
406 410
407 /* Finally register nodes */ 411 /* Finally register nodes */
408 for_each_node_mask(i, node_possible_map) 412 for_each_node_mask(i, node_possible_map)
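Note: cpu_nodes_parsed above exists so that nodes reporting CPUs but no memory still end up in node_possible_map. A toy sketch of that union, with a plain unsigned long standing in for nodemask_t and made-up node assignments.

#include <stdio.h>

int main(void)
{
	unsigned long nodes_parsed     = 0x3;	/* nodes 0,1 have memory   */
	unsigned long cpu_nodes_parsed = 0x4;	/* node 2 has CPUs only    */
	unsigned long node_possible    = nodes_parsed | cpu_nodes_parsed;

	/* before the change node 2 was dropped; now it is registered too */
	for (int n = 0; n < 3; n++)
		printf("node %d possible: %s\n", n,
		       (node_possible >> n) & 1 ? "yes" : "no");
	return 0;
}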
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 9bb09823b362..f893d6a6e803 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -94,12 +94,16 @@ struct pci_root_info {
94static int pci_root_num; 94static int pci_root_num;
95static struct pci_root_info pci_root_info[PCI_ROOT_NR]; 95static struct pci_root_info pci_root_info[PCI_ROOT_NR];
96 96
97void set_pci_bus_resources_arch_default(struct pci_bus *b) 97void x86_pci_root_bus_res_quirks(struct pci_bus *b)
98{ 98{
99 int i; 99 int i;
100 int j; 100 int j;
101 struct pci_root_info *info; 101 struct pci_root_info *info;
102 102
103 /* don't go for it if _CRS is used */
104 if (pci_probe & PCI_USE__CRS)
105 return;
106
103	/* if only one root bus, don't need to do anything */ 107	/* if only one root bus, don't need to do anything */
104 if (pci_root_num < 2) 108 if (pci_root_num < 2)
105 return; 109 return;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 8c362b96b644..2202b6257b82 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -147,10 +147,13 @@ static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
147 * are examined. 147 * are examined.
148 */ 148 */
149 149
150void __devinit pcibios_fixup_bus(struct pci_bus *b) 150void __devinit pcibios_fixup_bus(struct pci_bus *b)
151{ 151{
152 struct pci_dev *dev; 152 struct pci_dev *dev;
153 153
154 /* root bus? */
155 if (!b->parent)
156 x86_pci_root_bus_res_quirks(b);
154 pci_read_bridge_bases(b); 157 pci_read_bridge_bases(b);
155 list_for_each_entry(dev, &b->devices, bus_list) 158 list_for_each_entry(dev, &b->devices, bus_list)
156 pcibios_fixup_device_resources(dev); 159 pcibios_fixup_device_resources(dev);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index f1817f71e009..a85bef20a3b9 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -238,6 +238,10 @@ void __init pcibios_resource_survey(void)
238 */ 238 */
239fs_initcall(pcibios_assign_resources); 239fs_initcall(pcibios_assign_resources);
240 240
241void __weak x86_pci_root_bus_res_quirks(struct pci_bus *b)
242{
243}
244
241/* 245/*
242 * If we set up a device for bus mastering, we need to check the latency 246 * If we set up a device for bus mastering, we need to check the latency
243 * timer as certain crappy BIOSes forget to set it properly. 247 * timer as certain crappy BIOSes forget to set it properly.
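Note: the empty x86_pci_root_bus_res_quirks() added above is a weak default, so generic PCI code can call the hook unconditionally while a bus driver (amd_bus.c above) supplies a strong override when it has per-root-bus resources to install. A self-contained sketch of the pattern, using the GCC attribute that the kernel's __weak macro expands to; the struct and values are illustrative.

#include <stdio.h>

struct pci_bus { int number; };		/* stand-in for the kernel type */

/* weak default: safe to call even when no platform override is linked */
void __attribute__((weak)) x86_pci_root_bus_res_quirks(struct pci_bus *b)
{
	(void)b;			/* nothing to do by default */
}

int main(void)
{
	struct pci_bus b = { .number = 0 };

	/* generic code calls the hook unconditionally; a strong definition
	   in another object file would win at link time and replace this
	   stub */
	x86_pci_root_bus_res_quirks(&b);
	printf("fixed up root bus %d\n", b.number);
	return 0;
}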
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 905bb526b133..5fa10bb9604f 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -375,7 +375,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
375 if (!fixmem32) 375 if (!fixmem32)
376 return AE_OK; 376 return AE_OK;
377 if ((mcfg_res->start >= fixmem32->address) && 377 if ((mcfg_res->start >= fixmem32->address) &&
378 (mcfg_res->end < (fixmem32->address + 378 (mcfg_res->end <= (fixmem32->address +
379 fixmem32->address_length))) { 379 fixmem32->address_length))) {
380 mcfg_res->flags = 1; 380 mcfg_res->flags = 1;
381 return AE_CTRL_TERMINATE; 381 return AE_CTRL_TERMINATE;
@@ -392,7 +392,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
392 return AE_OK; 392 return AE_OK;
393 393
394 if ((mcfg_res->start >= address.minimum) && 394 if ((mcfg_res->start >= address.minimum) &&
395 (mcfg_res->end < (address.minimum + address.address_length))) { 395 (mcfg_res->end <= (address.minimum + address.address_length))) {
396 mcfg_res->flags = 1; 396 mcfg_res->flags = 1;
397 return AE_CTRL_TERMINATE; 397 return AE_CTRL_TERMINATE;
398 } 398 }
@@ -439,7 +439,7 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
439 u64 old_size = size; 439 u64 old_size = size;
440 int valid = 0; 440 int valid = 0;
441 441
442 while (!is_reserved(addr, addr + size - 1, E820_RESERVED)) { 442 while (!is_reserved(addr, addr + size, E820_RESERVED)) {
443 size >>= 1; 443 size >>= 1;
444 if (size < (16UL<<20)) 444 if (size < (16UL<<20))
445 break; 445 break;
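Note: the three hunks above make the range checks treat the end as exclusive consistently (the e820 path is now passed addr + size, and the comparisons become <=). A standalone sketch of the boundary case this is about: with an exclusive end, an MMCONFIG aperture that exactly fills the reserved window passes the <= test but fails the old < test. The addresses are made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t res_start = 0xe0000000ULL;	/* reserved resource    */
	uint64_t res_len   = 256ULL << 20;
	uint64_t addr      = 0xe0000000ULL;	/* MMCONFIG aperture    */
	uint64_t size      = 256ULL << 20;
	uint64_t end       = addr + size;	/* exclusive end        */

	int old_test = addr >= res_start && end <  res_start + res_len;
	int new_test = addr >= res_start && end <= res_start + res_len;

	printf("old '<'  check: %s\n", old_test ? "accepted" : "rejected");
	printf("new '<=' check: %s\n", new_test ? "accepted" : "rejected");
	return 0;
}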
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 82cd39a6cbd3..f09e8c36ee80 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -42,6 +42,7 @@
42#include <asm/xen/hypervisor.h> 42#include <asm/xen/hypervisor.h>
43#include <asm/fixmap.h> 43#include <asm/fixmap.h>
44#include <asm/processor.h> 44#include <asm/processor.h>
45#include <asm/proto.h>
45#include <asm/msr-index.h> 46#include <asm/msr-index.h>
46#include <asm/setup.h> 47#include <asm/setup.h>
47#include <asm/desc.h> 48#include <asm/desc.h>
@@ -168,21 +169,23 @@ static void __init xen_banner(void)
168 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); 169 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
169} 170}
170 171
172static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
173static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
174
171static void xen_cpuid(unsigned int *ax, unsigned int *bx, 175static void xen_cpuid(unsigned int *ax, unsigned int *bx,
172 unsigned int *cx, unsigned int *dx) 176 unsigned int *cx, unsigned int *dx)
173{ 177{
178 unsigned maskecx = ~0;
174 unsigned maskedx = ~0; 179 unsigned maskedx = ~0;
175 180
176 /* 181 /*
177 * Mask out inconvenient features, to try and disable as many 182 * Mask out inconvenient features, to try and disable as many
178 * unsupported kernel subsystems as possible. 183 * unsupported kernel subsystems as possible.
179 */ 184 */
180 if (*ax == 1) 185 if (*ax == 1) {
181 maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */ 186 maskecx = cpuid_leaf1_ecx_mask;
182 (1 << X86_FEATURE_ACPI) | /* disable ACPI */ 187 maskedx = cpuid_leaf1_edx_mask;
183 (1 << X86_FEATURE_MCE) | /* disable MCE */ 188 }
184 (1 << X86_FEATURE_MCA) | /* disable MCA */
185 (1 << X86_FEATURE_ACC)); /* thermal monitoring */
186 189
187 asm(XEN_EMULATE_PREFIX "cpuid" 190 asm(XEN_EMULATE_PREFIX "cpuid"
188 : "=a" (*ax), 191 : "=a" (*ax),
@@ -190,9 +193,43 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
190 "=c" (*cx), 193 "=c" (*cx),
191 "=d" (*dx) 194 "=d" (*dx)
192 : "0" (*ax), "2" (*cx)); 195 : "0" (*ax), "2" (*cx));
196
197 *cx &= maskecx;
193 *dx &= maskedx; 198 *dx &= maskedx;
194} 199}
195 200
201static __init void xen_init_cpuid_mask(void)
202{
203 unsigned int ax, bx, cx, dx;
204
205 cpuid_leaf1_edx_mask =
206 ~((1 << X86_FEATURE_MCE) | /* disable MCE */
207 (1 << X86_FEATURE_MCA) | /* disable MCA */
208 (1 << X86_FEATURE_ACC)); /* thermal monitoring */
209
210 if (!xen_initial_domain())
211 cpuid_leaf1_edx_mask &=
212 ~((1 << X86_FEATURE_APIC) | /* disable local APIC */
213 (1 << X86_FEATURE_ACPI)); /* disable ACPI */
214
215 ax = 1;
216 xen_cpuid(&ax, &bx, &cx, &dx);
217
218 /* cpuid claims we support xsave; try enabling it to see what happens */
219 if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
220 unsigned long cr4;
221
222 set_in_cr4(X86_CR4_OSXSAVE);
223
224 cr4 = read_cr4();
225
226 if ((cr4 & X86_CR4_OSXSAVE) == 0)
227 cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
228
229 clear_in_cr4(X86_CR4_OSXSAVE);
230 }
231}
232
196static void xen_set_debugreg(int reg, unsigned long val) 233static void xen_set_debugreg(int reg, unsigned long val)
197{ 234{
198 HYPERVISOR_set_debugreg(reg, val); 235 HYPERVISOR_set_debugreg(reg, val);
@@ -284,12 +321,11 @@ static void xen_set_ldt(const void *addr, unsigned entries)
284 321
285static void xen_load_gdt(const struct desc_ptr *dtr) 322static void xen_load_gdt(const struct desc_ptr *dtr)
286{ 323{
287 unsigned long *frames;
288 unsigned long va = dtr->address; 324 unsigned long va = dtr->address;
289 unsigned int size = dtr->size + 1; 325 unsigned int size = dtr->size + 1;
290 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 326 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
327 unsigned long frames[pages];
291 int f; 328 int f;
292 struct multicall_space mcs;
293 329
294 /* A GDT can be up to 64k in size, which corresponds to 8192 330 /* A GDT can be up to 64k in size, which corresponds to 8192
295 8-byte entries, or 16 4k pages.. */ 331 8-byte entries, or 16 4k pages.. */
@@ -297,19 +333,26 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
297 BUG_ON(size > 65536); 333 BUG_ON(size > 65536);
298 BUG_ON(va & ~PAGE_MASK); 334 BUG_ON(va & ~PAGE_MASK);
299 335
300 mcs = xen_mc_entry(sizeof(*frames) * pages);
301 frames = mcs.args;
302
303 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { 336 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
304 frames[f] = arbitrary_virt_to_mfn((void *)va); 337 int level;
338 pte_t *ptep = lookup_address(va, &level);
339 unsigned long pfn, mfn;
340 void *virt;
341
342 BUG_ON(ptep == NULL);
343
344 pfn = pte_pfn(*ptep);
345 mfn = pfn_to_mfn(pfn);
346 virt = __va(PFN_PHYS(pfn));
347
348 frames[f] = mfn;
305 349
306 make_lowmem_page_readonly((void *)va); 350 make_lowmem_page_readonly((void *)va);
307 make_lowmem_page_readonly(mfn_to_virt(frames[f])); 351 make_lowmem_page_readonly(virt);
308 } 352 }
309 353
310 MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct)); 354 if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
311 355 BUG();
312 xen_mc_issue(PARAVIRT_LAZY_CPU);
313} 356}
314 357
315static void load_TLS_descriptor(struct thread_struct *t, 358static void load_TLS_descriptor(struct thread_struct *t,
@@ -385,7 +428,7 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
385static int cvt_gate_to_trap(int vector, const gate_desc *val, 428static int cvt_gate_to_trap(int vector, const gate_desc *val,
386 struct trap_info *info) 429 struct trap_info *info)
387{ 430{
388 if (val->type != 0xf && val->type != 0xe) 431 if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
389 return 0; 432 return 0;
390 433
391 info->vector = vector; 434 info->vector = vector;
@@ -393,8 +436,8 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
393 info->cs = gate_segment(*val); 436 info->cs = gate_segment(*val);
394 info->flags = val->dpl; 437 info->flags = val->dpl;
395 /* interrupt gates clear IF */ 438 /* interrupt gates clear IF */
396 if (val->type == 0xe) 439 if (val->type == GATE_INTERRUPT)
397 info->flags |= 4; 440 info->flags |= 1 << 2;
398 441
399 return 1; 442 return 1;
400} 443}
@@ -872,7 +915,6 @@ static const struct machine_ops __initdata xen_machine_ops = {
872 .emergency_restart = xen_emergency_restart, 915 .emergency_restart = xen_emergency_restart,
873}; 916};
874 917
875
876/* First C function to be called on Xen boot */ 918/* First C function to be called on Xen boot */
877asmlinkage void __init xen_start_kernel(void) 919asmlinkage void __init xen_start_kernel(void)
878{ 920{
@@ -897,6 +939,8 @@ asmlinkage void __init xen_start_kernel(void)
897 939
898 xen_init_irq_ops(); 940 xen_init_irq_ops();
899 941
942 xen_init_cpuid_mask();
943
900#ifdef CONFIG_X86_LOCAL_APIC 944#ifdef CONFIG_X86_LOCAL_APIC
901 /* 945 /*
902 * set up the basic apic ops. 946 * set up the basic apic ops.
@@ -938,6 +982,11 @@ asmlinkage void __init xen_start_kernel(void)
938 if (!xen_initial_domain()) 982 if (!xen_initial_domain())
939 __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); 983 __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
940 984
985#ifdef CONFIG_X86_64
986 /* Work out if we support NX */
987 check_efer();
988#endif
989
941 /* Don't do the full vcpu_info placement stuff until we have a 990 /* Don't do the full vcpu_info placement stuff until we have a
942 possible map and a non-dummy shared_info. */ 991 possible map and a non-dummy shared_info. */
943 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 992 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
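Note: xen_init_cpuid_mask() above computes the leaf-1 ecx/edx masks once at boot (dropping XSAVE if setting CR4.OSXSAVE does not stick), and xen_cpuid() applies them on every call. Below is a userspace sketch of that mask-once, apply-per-call structure; the raw register values are invented, the bit positions are only illustrative, and the CR4 probe is reduced to a boolean parameter.

#include <stdint.h>
#include <stdio.h>

static uint32_t leaf1_edx_mask = ~0u;
static uint32_t leaf1_ecx_mask = ~0u;

static void init_cpuid_mask(int initial_domain, int xsave_usable)
{
	leaf1_edx_mask = ~((1u << 7) | (1u << 14));	/* hide MCE, MCA     */
	if (!initial_domain)
		leaf1_edx_mask &= ~((1u << 9) | (1u << 22)); /* APIC, ACPI   */
	if (!xsave_usable)
		leaf1_ecx_mask &= ~(1u << 26);		/* hide XSAVE        */
}

static void masked_cpuid(uint32_t leaf, uint32_t *ecx, uint32_t *edx)
{
	/* pretend these came back from the cpuid instruction */
	uint32_t raw_ecx = 0x04000001, raw_edx = 0x00004281;

	*ecx = raw_ecx;
	*edx = raw_edx;
	if (leaf == 1) {		/* masks only apply to leaf 1 */
		*ecx &= leaf1_ecx_mask;
		*edx &= leaf1_edx_mask;
	}
}

int main(void)
{
	uint32_t ecx, edx;

	init_cpuid_mask(0, 0);		/* domU, XSAVE unusable */
	masked_cpuid(1, &ecx, &edx);
	printf("leaf1 ecx=%#x edx=%#x\n", ecx, edx);
	return 0;
}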
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index db3802fb7b84..9842b1212407 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -184,7 +184,7 @@ static inline unsigned p2m_index(unsigned long pfn)
184} 184}
185 185
186/* Build the parallel p2m_top_mfn structures */ 186/* Build the parallel p2m_top_mfn structures */
187void xen_setup_mfn_list_list(void) 187static void __init xen_build_mfn_list_list(void)
188{ 188{
189 unsigned pfn, idx; 189 unsigned pfn, idx;
190 190
@@ -198,7 +198,10 @@ void xen_setup_mfn_list_list(void)
198 unsigned topidx = idx * P2M_ENTRIES_PER_PAGE; 198 unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
199 p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]); 199 p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
200 } 200 }
201}
201 202
203void xen_setup_mfn_list_list(void)
204{
202 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); 205 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
203 206
204 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = 207 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
@@ -218,6 +221,8 @@ void __init xen_build_dynamic_phys_to_machine(void)
218 221
219 p2m_top[topidx] = &mfn_list[pfn]; 222 p2m_top[topidx] = &mfn_list[pfn];
220 } 223 }
224
225 xen_build_mfn_list_list();
221} 226}
222 227
223unsigned long get_phys_to_machine(unsigned long pfn) 228unsigned long get_phys_to_machine(unsigned long pfn)
@@ -233,47 +238,74 @@ unsigned long get_phys_to_machine(unsigned long pfn)
233} 238}
234EXPORT_SYMBOL_GPL(get_phys_to_machine); 239EXPORT_SYMBOL_GPL(get_phys_to_machine);
235 240
236static void alloc_p2m(unsigned long **pp, unsigned long *mfnp) 241/* install a new p2m_top page */
242bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
237{ 243{
238 unsigned long *p; 244 unsigned topidx = p2m_top_index(pfn);
245 unsigned long **pfnp, *mfnp;
239 unsigned i; 246 unsigned i;
240 247
241 p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL); 248 pfnp = &p2m_top[topidx];
242 BUG_ON(p == NULL); 249 mfnp = &p2m_top_mfn[topidx];
243 250
244 for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++) 251 for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
245 p[i] = INVALID_P2M_ENTRY; 252 p[i] = INVALID_P2M_ENTRY;
246 253
247 if (cmpxchg(pp, p2m_missing, p) != p2m_missing) 254 if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
248 free_page((unsigned long)p);
249 else
250 *mfnp = virt_to_mfn(p); 255 *mfnp = virt_to_mfn(p);
256 return true;
257 }
258
259 return false;
251} 260}
252 261
253void set_phys_to_machine(unsigned long pfn, unsigned long mfn) 262static void alloc_p2m(unsigned long pfn)
254{ 263{
255 unsigned topidx, idx; 264 unsigned long *p;
256 265
257 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { 266 p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
258 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); 267 BUG_ON(p == NULL);
259 return; 268
260 } 269 if (!install_p2mtop_page(pfn, p))
270 free_page((unsigned long)p);
271}
272
273/* Try to install p2m mapping; fail if intermediate bits missing */
274bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
275{
276 unsigned topidx, idx;
261 277
262 if (unlikely(pfn >= MAX_DOMAIN_PAGES)) { 278 if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
263 BUG_ON(mfn != INVALID_P2M_ENTRY); 279 BUG_ON(mfn != INVALID_P2M_ENTRY);
264 return; 280 return true;
265 } 281 }
266 282
267 topidx = p2m_top_index(pfn); 283 topidx = p2m_top_index(pfn);
268 if (p2m_top[topidx] == p2m_missing) { 284 if (p2m_top[topidx] == p2m_missing) {
269 /* no need to allocate a page to store an invalid entry */
270 if (mfn == INVALID_P2M_ENTRY) 285 if (mfn == INVALID_P2M_ENTRY)
271 return; 286 return true;
272 alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]); 287 return false;
273 } 288 }
274 289
275 idx = p2m_index(pfn); 290 idx = p2m_index(pfn);
276 p2m_top[topidx][idx] = mfn; 291 p2m_top[topidx][idx] = mfn;
292
293 return true;
294}
295
296void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
297{
298 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
299 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
300 return;
301 }
302
303 if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
304 alloc_p2m(pfn);
305
306 if (!__set_phys_to_machine(pfn, mfn))
307 BUG();
308 }
277} 309}
278 310
279unsigned long arbitrary_virt_to_mfn(void *vaddr) 311unsigned long arbitrary_virt_to_mfn(void *vaddr)
@@ -987,7 +1019,7 @@ static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
987 return 0; 1019 return 0;
988} 1020}
989 1021
990void __init xen_mark_init_mm_pinned(void) 1022static void __init xen_mark_init_mm_pinned(void)
991{ 1023{
992 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); 1024 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
993} 1025}
@@ -1270,8 +1302,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
1270 } *args; 1302 } *args;
1271 struct multicall_space mcs; 1303 struct multicall_space mcs;
1272 1304
1273 BUG_ON(cpumask_empty(cpus)); 1305 if (cpumask_empty(cpus))
1274 BUG_ON(!mm); 1306 return; /* nothing to do */
1275 1307
1276 mcs = xen_mc_entry(sizeof(*args)); 1308 mcs = xen_mc_entry(sizeof(*args));
1277 args = mcs.args; 1309 args = mcs.args;
@@ -1438,6 +1470,15 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1438} 1470}
1439#endif 1471#endif
1440 1472
1473static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1474{
1475 struct mmuext_op op;
1476 op.cmd = cmd;
1477 op.arg1.mfn = pfn_to_mfn(pfn);
1478 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1479 BUG();
1480}
1481
1441/* Early in boot, while setting up the initial pagetable, assume 1482/* Early in boot, while setting up the initial pagetable, assume
1442 everything is pinned. */ 1483 everything is pinned. */
1443static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) 1484static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
@@ -1446,22 +1487,29 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1446 BUG_ON(mem_map); /* should only be used early */ 1487 BUG_ON(mem_map); /* should only be used early */
1447#endif 1488#endif
1448 make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); 1489 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1490 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1491}
1492
1493/* Used for pmd and pud */
1494static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1495{
1496#ifdef CONFIG_FLATMEM
1497 BUG_ON(mem_map); /* should only be used early */
1498#endif
1499 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1449} 1500}
1450 1501
1451/* Early release_pte assumes that all pts are pinned, since there's 1502/* Early release_pte assumes that all pts are pinned, since there's
1452 only init_mm and anything attached to that is pinned. */ 1503 only init_mm and anything attached to that is pinned. */
1453static void xen_release_pte_init(unsigned long pfn) 1504static __init void xen_release_pte_init(unsigned long pfn)
1454{ 1505{
1506 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1455 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 1507 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1456} 1508}
1457 1509
1458static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) 1510static __init void xen_release_pmd_init(unsigned long pfn)
1459{ 1511{
1460 struct mmuext_op op; 1512 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1461 op.cmd = cmd;
1462 op.arg1.mfn = pfn_to_mfn(pfn);
1463 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1464 BUG();
1465} 1513}
1466 1514
1467/* This needs to make sure the new pte page is pinned iff it's being 1515
@@ -1750,7 +1798,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1750} 1798}
1751#endif /* CONFIG_X86_64 */ 1799#endif /* CONFIG_X86_64 */
1752 1800
1753static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot) 1801static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1754{ 1802{
1755 pte_t pte; 1803 pte_t pte;
1756 1804
@@ -1773,6 +1821,9 @@ static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
1773#ifdef CONFIG_X86_LOCAL_APIC 1821#ifdef CONFIG_X86_LOCAL_APIC
1774 case FIX_APIC_BASE: /* maps dummy local APIC */ 1822 case FIX_APIC_BASE: /* maps dummy local APIC */
1775#endif 1823#endif
1824 case FIX_TEXT_POKE0:
1825 case FIX_TEXT_POKE1:
1826 /* All local page mappings */
1776 pte = pfn_pte(phys, prot); 1827 pte = pfn_pte(phys, prot);
1777 break; 1828 break;
1778 1829
@@ -1819,7 +1870,6 @@ __init void xen_post_allocator_init(void)
1819 xen_mark_init_mm_pinned(); 1870 xen_mark_init_mm_pinned();
1820} 1871}
1821 1872
1822
1823const struct pv_mmu_ops xen_mmu_ops __initdata = { 1873const struct pv_mmu_ops xen_mmu_ops __initdata = {
1824 .pagetable_setup_start = xen_pagetable_setup_start, 1874 .pagetable_setup_start = xen_pagetable_setup_start,
1825 .pagetable_setup_done = xen_pagetable_setup_done, 1875 .pagetable_setup_done = xen_pagetable_setup_done,
@@ -1843,9 +1893,9 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
1843 1893
1844 .alloc_pte = xen_alloc_pte_init, 1894 .alloc_pte = xen_alloc_pte_init,
1845 .release_pte = xen_release_pte_init, 1895 .release_pte = xen_release_pte_init,
1846 .alloc_pmd = xen_alloc_pte_init, 1896 .alloc_pmd = xen_alloc_pmd_init,
1847 .alloc_pmd_clone = paravirt_nop, 1897 .alloc_pmd_clone = paravirt_nop,
1848 .release_pmd = xen_release_pte_init, 1898 .release_pmd = xen_release_pmd_init,
1849 1899
1850#ifdef CONFIG_HIGHPTE 1900#ifdef CONFIG_HIGHPTE
1851 .kmap_atomic_pte = xen_kmap_atomic_pte, 1901 .kmap_atomic_pte = xen_kmap_atomic_pte,
@@ -1883,8 +1933,8 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = {
1883 .make_pud = PV_CALLEE_SAVE(xen_make_pud), 1933 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
1884 .set_pgd = xen_set_pgd_hyper, 1934 .set_pgd = xen_set_pgd_hyper,
1885 1935
1886 .alloc_pud = xen_alloc_pte_init, 1936 .alloc_pud = xen_alloc_pmd_init,
1887 .release_pud = xen_release_pte_init, 1937 .release_pud = xen_release_pmd_init,
1888#endif /* PAGETABLE_LEVELS == 4 */ 1938#endif /* PAGETABLE_LEVELS == 4 */
1889 1939
1890 .activate_mm = xen_activate_mm, 1940 .activate_mm = xen_activate_mm,
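Note: the p2m rework above splits the update into a fast path and a slow path: __set_phys_to_machine() fails when the leaf page for the pfn is still the shared "missing" page, and set_phys_to_machine() then allocates a leaf, installs it (install_p2mtop_page() in the patch) and retries. The toy version below mirrors that flow with tiny tables and userspace allocation; sizes and names are simplifications and the cmpxchg-based race handling is omitted.

#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

#define ENTRIES_PER_PAGE 8
#define TOP_ENTRIES      4
#define INVALID_ENTRY    (~0ul)

static unsigned long missing[ENTRIES_PER_PAGE];	/* shared "missing" leaf */
static unsigned long *top[TOP_ENTRIES] = {
	missing, missing, missing, missing
};

/* fast path: fails if the leaf page has not been allocated yet */
static bool set_entry(unsigned long pfn, unsigned long mfn)
{
	unsigned long topidx = pfn / ENTRIES_PER_PAGE;

	if (top[topidx] == missing) {
		if (mfn == INVALID_ENTRY)
			return true;	/* nothing worth recording anyway */
		return false;		/* caller must allocate a leaf    */
	}
	top[topidx][pfn % ENTRIES_PER_PAGE] = mfn;
	return true;
}

static void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (!set_entry(pfn, mfn)) {
		unsigned long topidx = pfn / ENTRIES_PER_PAGE;
		unsigned long *leaf = calloc(ENTRIES_PER_PAGE, sizeof(*leaf));

		if (!leaf)
			abort();
		for (int i = 0; i < ENTRIES_PER_PAGE; i++)
			leaf[i] = INVALID_ENTRY;
		top[topidx] = leaf;		/* install the new leaf     */

		if (!set_entry(pfn, mfn))	/* retry; BUG() in the kernel */
			abort();
	}
}

int main(void)
{
	set_phys_to_machine(10, 0x1234);
	printf("pfn 10 -> mfn %#lx\n",
	       top[10 / ENTRIES_PER_PAGE][10 % ENTRIES_PER_PAGE]);
	return 0;
}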
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 24d1b44a337d..da7302624897 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -11,6 +11,9 @@ enum pt_level {
11}; 11};
12 12
13 13
14bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
15bool install_p2mtop_page(unsigned long pfn, unsigned long *p);
16
14void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); 17void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
15 18
16 19
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 585a6e330837..429834ec1687 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -317,7 +317,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
317 BUG_ON(rc); 317 BUG_ON(rc);
318 318
319 while(per_cpu(cpu_state, cpu) != CPU_ONLINE) { 319 while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
320 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 320 HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
321 barrier(); 321 barrier();
322 } 322 }
323 323
@@ -422,7 +422,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
422 /* Make sure other vcpus get a chance to run if they need to. */ 422 /* Make sure other vcpus get a chance to run if they need to. */
423 for_each_cpu(cpu, mask) { 423 for_each_cpu(cpu, mask) {
424 if (xen_vcpu_stolen(cpu)) { 424 if (xen_vcpu_stolen(cpu)) {
425 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 425 HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
426 break; 426 break;
427 } 427 }
428 } 428 }
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 14f240623497..0a5aa44299a5 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void)
213 return ret; 213 return ret;
214} 214}
215 215
216static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
217{
218 return xen_clocksource_read();
219}
220
216static void xen_read_wallclock(struct timespec *ts) 221static void xen_read_wallclock(struct timespec *ts)
217{ 222{
218 struct shared_info *s = HYPERVISOR_shared_info; 223 struct shared_info *s = HYPERVISOR_shared_info;
@@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now)
241static struct clocksource xen_clocksource __read_mostly = { 246static struct clocksource xen_clocksource __read_mostly = {
242 .name = "xen", 247 .name = "xen",
243 .rating = 400, 248 .rating = 400,
244 .read = xen_clocksource_read, 249 .read = xen_clocksource_get_cycles,
245 .mask = ~0, 250 .mask = ~0,
246 .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */ 251 .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */
247 .shift = XEN_SHIFT, 252 .shift = XEN_SHIFT,
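Note: xen_clocksource_get_cycles() above is a thin adapter: the clocksource .read callback receives a struct clocksource * argument, while the existing Xen reader takes none, so the old function is kept and wrapped. A simplified, self-contained sketch of the pattern; the struct layout and return value are stand-ins for the kernel's types.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

struct clocksource {
	const char *name;
	cycle_t (*read)(struct clocksource *cs);
};

static cycle_t xen_clocksource_read(void)
{
	return 123456789;		/* would read the shared-info time area */
}

static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	(void)cs;			/* the Xen reader does not need it */
	return xen_clocksource_read();
}

static struct clocksource xen_clocksource = {
	.name = "xen",
	.read = xen_clocksource_get_cycles,
};

int main(void)
{
	printf("%s: %llu\n", xen_clocksource.name,
	       (unsigned long long)xen_clocksource.read(&xen_clocksource));
	return 0;
}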
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 2f5ef2632ea2..20139464943c 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -57,8 +57,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
57 57
58bool xen_vcpu_stolen(int vcpu); 58bool xen_vcpu_stolen(int vcpu);
59 59
60void xen_mark_init_mm_pinned(void);
61
62void xen_setup_vcpu_info_placement(void); 60void xen_setup_vcpu_info_placement(void);
63 61
64#ifdef CONFIG_SMP 62#ifdef CONFIG_SMP