author    Ingo Molnar <mingo@elte.hu>  2009-05-11 03:33:06 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-05-11 03:50:02 -0400
commit    7a309490da98981558a07183786201f02a6341e2 (patch)
tree      204bfd3bc344dbb02be0b1eac29b956f6722e661 /arch/x86
parent    9a8709d44139748fe2e0ab56d20d8c384c8b65ad (diff)
parent    091bf7624d1c90cec9e578a18529f615213ff847 (diff)
Merge commit 'v2.6.30-rc5' into x86/apic
Merge reason: this branch was on a .30-rc2 base - sync it up with all the latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/Kconfig.cpu | 1
-rw-r--r--  arch/x86/Makefile | 6
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 19
-rw-r--r--  arch/x86/include/asm/desc.h | 2
-rw-r--r--  arch/x86/include/asm/hardirq.h | 2
-rw-r--r--  arch/x86/include/asm/lguest_hcall.h | 2
-rw-r--r--  arch/x86/include/asm/mce.h | 1
-rw-r--r--  arch/x86/include/asm/pat.h | 4
-rw-r--r--  arch/x86/include/asm/processor.h | 6
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 2
-rw-r--r--  arch/x86/include/asm/topology.h | 2
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 16
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r--  arch/x86/kernel/apic/nmi.c | 5
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 15
-rw-r--r--  arch/x86/kernel/cpu/common.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 30
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c | 33
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 10
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 2
-rw-r--r--  arch/x86/kernel/e820.c | 11
-rw-r--r--  arch/x86/kernel/entry_64.S | 3
-rw-r--r--  arch/x86/kernel/hpet.c | 24
-rw-r--r--  arch/x86/kernel/i8253.c | 2
-rw-r--r--  arch/x86/kernel/kvmclock.c | 7
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c | 4
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c | 4
-rw-r--r--  arch/x86/kernel/microcode_core.c | 2
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c | 2
-rw-r--r--  arch/x86/kernel/quirks.c | 2
-rw-r--r--  arch/x86/kernel/tsc.c | 2
-rw-r--r--  arch/x86/kernel/uv_time.c | 10
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 2
-rw-r--r--  arch/x86/kernel/xsave.c | 4
-rw-r--r--  arch/x86/kvm/mmu.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 8
-rw-r--r--  arch/x86/lguest/boot.c | 18
-rw-r--r--  arch/x86/mm/init.c | 18
-rw-r--r--  arch/x86/mm/ioremap.c | 10
-rw-r--r--  arch/x86/mm/kmmio.c | 2
-rw-r--r--  arch/x86/mm/numa_32.c | 2
-rw-r--r--  arch/x86/mm/numa_64.c | 3
-rw-r--r--  arch/x86/mm/pageattr.c | 127
-rw-r--r--  arch/x86/mm/pat.c | 189
-rw-r--r--  arch/x86/mm/srat_32.c | 2
-rw-r--r--  arch/x86/mm/srat_64.c | 7
-rw-r--r--  arch/x86/pci/amd_bus.c | 6
-rw-r--r--  arch/x86/pci/common.c | 5
-rw-r--r--  arch/x86/pci/i386.c | 4
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 6
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 12
-rw-r--r--  arch/x86/xen/mmu.c | 5
-rw-r--r--  arch/x86/xen/time.c | 7
54 files changed, 340 insertions, 336 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 58fb7b3bcd1a..3b8890d6409b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -277,6 +277,7 @@ config SPARSE_IRQ
 config NUMA_MIGRATE_IRQ_DESC
 	bool "Move irq desc when changing irq smp_affinity"
 	depends on SPARSE_IRQ && NUMA
+	depends on BROKEN
 	default n
 	---help---
 	  This enables moving irq_desc to cpu/node that irq will use handled.
@@ -664,6 +665,7 @@ config MAXSMP
 
 config NR_CPUS
 	int "Maximum number of CPUs" if SMP && !MAXSMP
+	range 2 8 if SMP && X86_32 && !X86_BIGSMP
 	range 2 512 if SMP && !MAXSMP
 	default "1" if !SMP
 	default "4096" if MAXSMP
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 924e156a85ab..8130334329c0 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -506,6 +506,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
+	depends on BROKEN
 	---help---
 	  This adds a ptrace interface to the hardware's branch trace store.
 
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index f05d8c91d9e5..8c86b72afdc2 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -153,7 +153,7 @@ endif
 
 boot := arch/x86/boot
 
-BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage install
+BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage
 
 PHONY += bzImage $(BOOT_TARGETS)
 
@@ -171,6 +171,10 @@ bzImage: vmlinux
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $@
 
+PHONY += install
+install:
+	$(Q)$(MAKE) $(build)=$(boot) $@
+
 PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index efac92fd1efb..085a8c35f149 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -129,21 +129,12 @@ asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
 			  struct stat64 __user *statbuf, int flag)
 {
 	struct kstat stat;
-	int error = -EINVAL;
+	int error;
 
-	if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
-		goto out;
-
-	if (flag & AT_SYMLINK_NOFOLLOW)
-		error = vfs_lstat_fd(dfd, filename, &stat);
-	else
-		error = vfs_stat_fd(dfd, filename, &stat);
-
-	if (!error)
-		error = cp_stat64(statbuf, &stat);
-
-out:
-	return error;
+	error = vfs_fstatat(dfd, filename, &stat, flag);
+	if (error)
+		return error;
+	return cp_stat64(statbuf, &stat);
 }
 
 /*
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 5623c50d67b2..c45f415ce315 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -37,7 +37,7 @@ extern gate_desc idt_table[];
 struct gdt_page {
 	struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU(struct gdt_page, gdt_page);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 039db6aa8e02..37555e52f980 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -26,7 +26,7 @@ typedef struct {
 #endif
 } ____cacheline_aligned irq_cpustat_t;
 
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index 0f4ee7148afe..faae1996487b 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -5,7 +5,6 @@
 #define LHCALL_FLUSH_ASYNC	0
 #define LHCALL_LGUEST_INIT	1
 #define LHCALL_SHUTDOWN		2
-#define LHCALL_LOAD_GDT		3
 #define LHCALL_NEW_PGTABLE	4
 #define LHCALL_FLUSH_TLB	5
 #define LHCALL_LOAD_IDT_ENTRY	6
@@ -17,6 +16,7 @@
 #define LHCALL_SET_PMD		15
 #define LHCALL_LOAD_TLS		16
 #define LHCALL_NOTIFY		17
+#define LHCALL_LOAD_GDT_ENTRY	18
 
 #define LGUEST_TRAP_ENTRY	0x1F
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 563933e06a35..4f8c199584e7 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -137,6 +137,7 @@ DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
 enum mcp_flags {
 	MCP_TIMESTAMP = (1 << 0),	/* log time stamp */
 	MCP_UC = (1 << 1),		/* log uncorrected errors */
+	MCP_DONTLOG = (1 << 2),		/* only clear, don't log */
 };
 extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
 
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 2cd07b9422f4..7af14e512f97 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -18,9 +18,5 @@ extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
 		unsigned long flag);
-extern void map_devmem(unsigned long pfn, unsigned long size,
-		struct pgprot vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-		struct pgprot vma_prot);
 
 #endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index fcf4d92e7e04..c2cceae709c8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -138,7 +138,7 @@ extern struct tss_struct doublefault_tss;
 extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
 #define current_cpu_data	__get_cpu_var(cpu_info)
 #else
@@ -270,7 +270,7 @@ struct tss_struct {
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
 /*
  * Save the original ist values for checking stack pointers during debugging
@@ -393,7 +393,7 @@ union irq_stack_union {
 	};
 };
 
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d3539f998f88..16a5c84b0329 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -152,7 +152,7 @@ struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
 };
-DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 892b119dba6f..f44b49abca49 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -200,7 +200,7 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
 }
 
 struct pci_bus;
-void set_pci_bus_resources_arch_default(struct pci_bus *b);
+void x86_pci_root_bus_res_quirks(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
 #define mc_capable()	(cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42c33cebf00f..8c0be0902dac 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -49,10 +49,10 @@
 #define IVHD_DEV_EXT_SELECT		0x46
 #define IVHD_DEV_EXT_SELECT_RANGE	0x47
 
-#define IVHD_FLAG_HT_TUN_EN		0x00
-#define IVHD_FLAG_PASSPW_EN		0x01
-#define IVHD_FLAG_RESPASSPW_EN		0x02
-#define IVHD_FLAG_ISOC_EN		0x03
+#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
+#define IVHD_FLAG_PASSPW_EN_MASK	0x02
+#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
+#define IVHD_FLAG_ISOC_EN_MASK		0x08
 
 #define IVMD_FLAG_EXCL_RANGE		0x08
 #define IVMD_FLAG_UNITY_MAP		0x01
@@ -569,19 +569,19 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 	 * First set the recommended feature enable bits from ACPI
 	 * into the IOMMU control registers
 	 */
-	h->flags & IVHD_FLAG_HT_TUN_EN ?
+	h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
 		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
 
-	h->flags & IVHD_FLAG_PASSPW_EN ?
+	h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
 		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
 
-	h->flags & IVHD_FLAG_RESPASSPW_EN ?
+	h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
 		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
 
-	h->flags & IVHD_FLAG_ISOC_EN ?
+	h->flags & IVHD_FLAG_ISOC_EN_MASK ?
 		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
 		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a80335ba12cc..2afe145d277f 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3627,12 +3627,14 @@ int arch_setup_hpet_msi(unsigned int irq)
 {
 	int ret;
 	struct msi_msg msg;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	ret = msi_compose_msg(NULL, irq, &msg);
 	if (ret < 0)
 		return ret;
 
 	hpet_msi_write(irq, &msg);
+	desc->status |= IRQ_MOVE_PCNTXT;
 	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
 				      "edge");
 
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index d6bd62407152..ce4fbfa315a1 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -138,7 +138,7 @@ int __init check_nmi_watchdog(void)
 	if (!prev_nmi_count)
 		goto error;
 
-	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
+	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -414,7 +414,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		touched = 1;
 	}
 
-	if (cpumask_test_cpu(cpu, backtrace_mask)) {
+	/* We can be called before check_nmi_watchdog, hence NULL check. */
+	if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
 		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
 
 		spin_lock(&lock);
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 9d9e2281a829..780a733a5e7a 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -19,6 +19,7 @@
 #include <linux/timer.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
+#include <linux/io.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -34,6 +35,17 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 
+static int early_get_nodeid(void)
+{
+	union uvh_node_id_u node_id;
+	unsigned long *mmr;
+
+	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
+	node_id.v = *mmr;
+	early_iounmap(mmr, sizeof(*mmr));
+	return node_id.s.node_id;
+}
+
 static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strcmp(oem_id, "SGI")) {
@@ -42,6 +54,8 @@ static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	else if (!strcmp(oem_table_id, "UVX"))
 		uv_system_type = UV_X2APIC;
 	else if (!strcmp(oem_table_id, "UVH")) {
+		__get_cpu_var(x2apic_extra_bits) =
+			early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
 		uv_system_type = UV_NON_UNIQUE_APIC;
 		return 1;
 	}
@@ -641,6 +655,7 @@ void __init uv_system_init(void)
 		if (uv_node_to_blade[nid] >= 0)
 			continue;
 		paddr = node_start_pfn(nid) << PAGE_SHIFT;
+		paddr = uv_soc_phys_ram_to_gpa(paddr);
 		pnode = (paddr >> m_val) & pnode_mask;
 		blade = boot_pnode_to_blade(pnode);
 		uv_node_to_blade[nid] = blade;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c4f667896c28..c1caefc82e62 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1203,6 +1203,8 @@ void __cpuinit cpu_init(void)
 	load_TR_desc();
 	load_LDT(&init_mm.context);
 
+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
 #ifdef CONFIG_DOUBLEFAULT
 	/* Set up doublefault TSS pointer in the GDT */
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ecdb682ab516..208ecf6643df 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -65,14 +65,18 @@ enum {
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
-	unsigned int max_freq;
 	unsigned int resume;
 	unsigned int cpu_feature;
-	u64 saved_aperf, saved_mperf;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+struct acpi_msr_data {
+	u64 saved_aperf, saved_mperf;
+};
+
+static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+
 DEFINE_TRACE(power_mark);
 
 /* acpi_perf_data is a pointer to percpu data. */
@@ -287,11 +291,11 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 		return 0;
 
 	cur.aperf.whole = readin.aperf.whole -
-				per_cpu(drv_data, cpu)->saved_aperf;
+				per_cpu(msr_data, cpu).saved_aperf;
 	cur.mperf.whole = readin.mperf.whole -
-				per_cpu(drv_data, cpu)->saved_mperf;
-	per_cpu(drv_data, cpu)->saved_aperf = readin.aperf.whole;
-	per_cpu(drv_data, cpu)->saved_mperf = readin.mperf.whole;
+				per_cpu(msr_data, cpu).saved_mperf;
+	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
+	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
 
 #ifdef __i386__
 	/*
@@ -335,7 +339,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 #endif
 
-	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
+	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
 
 	return retval;
 }
@@ -688,16 +692,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
 	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
 	    policy->cpuinfo.transition_latency > 20 * 1000) {
-		static int print_once;
 		policy->cpuinfo.transition_latency = 20 * 1000;
-		if (!print_once) {
-			print_once = 1;
-			printk(KERN_INFO "Capping off P-state tranision latency"
-			       " at 20 uS\n");
-		}
+		printk_once(KERN_INFO "Capping off P-state tranision"
+			    " latency at 20 uS\n");
 	}
 
-	data->max_freq = perf->states[0].core_frequency * 1000;
 	/* table init */
 	for (i = 0; i < perf->state_count; i++) {
 		if (i > 0 && perf->states[i].core_frequency >=
@@ -716,6 +715,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (result)
 		goto err_freqfree;
 
+	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
 		/* Current speed is unknown and not detectable by IO port */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 863f89568b1a..6fb0b359d2a5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -239,9 +239,10 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		 * Don't get the IP here because it's unlikely to
 		 * have anything to do with the actual error location.
 		 */
-
-		mce_log(&m);
-		add_taint(TAINT_MACHINE_CHECK);
+		if (!(flags & MCP_DONTLOG)) {
+			mce_log(&m);
+			add_taint(TAINT_MACHINE_CHECK);
+		}
 
 		/*
 		 * Clear state for this bank.
@@ -452,13 +453,14 @@ void mce_log_therm_throt_event(__u64 status)
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static int next_interval; /* in jiffies */
+static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
 static void mcheck_timer(unsigned long);
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
 {
 	struct timer_list *t = &per_cpu(mce_timer, data);
+	int *n;
 
 	WARN_ON(smp_processor_id() != data);
 
@@ -470,14 +472,14 @@ static void mcheck_timer(unsigned long data)
 	 * Alert userspace if needed. If we logged an MCE, reduce the
 	 * polling interval, otherwise increase the polling interval.
 	 */
+	n = &__get_cpu_var(next_interval);
 	if (mce_notify_user()) {
-		next_interval = max(next_interval/2, HZ/100);
+		*n = max(*n/2, HZ/100);
 	} else {
-		next_interval = min(next_interval * 2,
-			(int)round_jiffies_relative(check_interval*HZ));
+		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
 	}
 
-	t->expires = jiffies + next_interval;
+	t->expires = jiffies + *n;
 	add_timer(t);
 }
 
@@ -584,7 +586,7 @@ static void mce_init(void *dummy)
 	 * Log the machine checks left over from the previous reset.
 	 */
 	bitmap_fill(all_banks, MAX_NR_BANKS);
-	machine_check_poll(MCP_UC, &all_banks);
+	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);
 
 	set_in_cr4(X86_CR4_MCE);
 
@@ -632,14 +634,13 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
+	int *n = &__get_cpu_var(next_interval);
 
-	/* data race harmless because everyone sets to the same value */
-	if (!next_interval)
-		next_interval = check_interval * HZ;
-	if (!next_interval)
+	*n = check_interval * HZ;
+	if (!*n)
 		return;
 	setup_timer(t, mcheck_timer, smp_processor_id());
-	t->expires = round_jiffies(jiffies + next_interval);
+	t->expires = round_jiffies(jiffies + *n);
 	add_timer(t);
 }
 
@@ -907,7 +908,6 @@ static void mce_cpu_restart(void *data)
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
-	next_interval = check_interval * HZ;
 	on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
@@ -1110,7 +1110,8 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		t->expires = round_jiffies(jiffies + next_interval);
+		t->expires = round_jiffies(jiffies +
+				__get_cpu_var(next_interval));
 		add_timer_on(t, cpu);
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index d6b72df89d69..cef3ee30744b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -151,10 +151,11 @@ static void print_update(char *type, int *hdr, int num)
 static void cmci_discover(int banks, int boot)
 {
 	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+	unsigned long flags;
 	int hdr = 0;
 	int i;
 
-	spin_lock(&cmci_discover_lock);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		u64 val;
 
@@ -184,7 +185,7 @@ static void cmci_discover(int banks, int boot)
 			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
 		}
 	}
-	spin_unlock(&cmci_discover_lock);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 	if (hdr)
 		printk(KERN_CONT "\n");
 }
@@ -211,13 +212,14 @@ void cmci_recheck(void)
  */
 void cmci_clear(void)
 {
+	unsigned long flags;
 	int i;
 	int banks;
 	u64 val;
 
 	if (!cmci_supported(&banks))
 		return;
-	spin_lock(&cmci_discover_lock);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
 			continue;
@@ -227,7 +229,7 @@ void cmci_clear(void)
 		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
 		__clear_bit(i, __get_cpu_var(mce_banks_owned));
 	}
-	spin_unlock(&cmci_discover_lock);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index f93047fed791..d5e30397246b 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 	if (c->x86_max_cores * smp_num_siblings > 1) {
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
 		seq_printf(m, "siblings\t: %d\n",
-			   cpumask_weight(cpu_sibling_mask(cpu)));
+			   cpumask_weight(cpu_core_mask(cpu)));
 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ef2c3563357d..006281302925 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1074,12 +1074,13 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 	u64 addr;
 	u64 start;
 
-	start = startt;
-	while (size < sizet && (start + 1))
+	for (start = startt; ; start += size) {
 		start = find_e820_area_size(start, &size, align);
-
-	if (size < sizet)
-		return 0;
+		if (!(start + 1))
+			return 0;
+		if (size >= sizet)
+			break;
+	}
 
 #ifdef CONFIG_X86_32
 	if (start >= MAXMEM)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a331ec38af9e..38946c6e8433 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1410,7 +1410,10 @@ ENTRY(paranoid_exit)
 paranoid_swapgs:
 	TRACE_IRQS_IRETQ 0
 	SWAPGS_UNSAFE_STACK
+	RESTORE_ALL 8
+	jmp irq_return
 paranoid_restore:
+	TRACE_IRQS_IRETQ 0
 	RESTORE_ALL 8
 	jmp irq_return
 paranoid_userspace:
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 648b3a2a3a44..81408b93f887 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -236,6 +236,10 @@ static void hpet_stop_counter(void)
 	unsigned long cfg = hpet_readl(HPET_CFG);
 	cfg &= ~HPET_CFG_ENABLE;
 	hpet_writel(cfg, HPET_CFG);
+}
+
+static void hpet_reset_counter(void)
+{
 	hpet_writel(0, HPET_COUNTER);
 	hpet_writel(0, HPET_COUNTER + 4);
 }
@@ -250,6 +254,7 @@ static void hpet_start_counter(void)
 static void hpet_restart_counter(void)
 {
 	hpet_stop_counter();
+	hpet_reset_counter();
 	hpet_start_counter();
 }
 
@@ -309,7 +314,7 @@ static int hpet_setup_msi_irq(unsigned int irq);
 static void hpet_set_mode(enum clock_event_mode mode,
 			  struct clock_event_device *evt, int timer)
 {
-	unsigned long cfg;
+	unsigned long cfg, cmp, now;
 	uint64_t delta;
 
 	switch (mode) {
@@ -317,12 +322,23 @@ static void hpet_set_mode(enum clock_event_mode mode,
 		hpet_stop_counter();
 		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
 		delta >>= evt->shift;
+		now = hpet_readl(HPET_COUNTER);
+		cmp = now + (unsigned long) delta;
 		cfg = hpet_readl(HPET_Tn_CFG(timer));
 		/* Make sure we use edge triggered interrupts */
 		cfg &= ~HPET_TN_LEVEL;
 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
 		       HPET_TN_SETVAL | HPET_TN_32BIT;
 		hpet_writel(cfg, HPET_Tn_CFG(timer));
+		hpet_writel(cmp, HPET_Tn_CMP(timer));
+		udelay(1);
+		/*
+		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
+		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
+		 * bit is automatically cleared after the first write.
+		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
+		 * Publication # 24674)
+		 */
 		hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
 		hpet_start_counter();
 		hpet_print_config();
@@ -722,7 +738,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 /*
  * Clock source related code
  */
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
 {
 	return (cycle_t)hpet_readl(HPET_COUNTER);
 }
@@ -756,7 +772,7 @@ static int hpet_clocksource_register(void)
 	hpet_restart_counter();
 
 	/* Verify whether hpet counter works */
-	t1 = read_hpet();
+	t1 = hpet_readl(HPET_COUNTER);
 	rdtscll(start);
 
 	/*
@@ -770,7 +786,7 @@ static int hpet_clocksource_register(void)
 		rdtscll(now);
 	} while ((now - start) < 200000UL);
 
-	if (t1 == read_hpet()) {
+	if (t1 == hpet_readl(HPET_COUNTER)) {
 		printk(KERN_WARNING
 		       "HPET counter not counting. HPET disabled\n");
 		return -ENODEV;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 3475440baa54..c2e0bb0890d4 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
 	static int old_count;
 	static u32 old_jifs;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 137f2e8132df..223af43f1526 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }
 
+static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+{
+	return kvm_clock_read();
+}
+
 /*
  * If we don't do that, there is the possibility that the guest
  * will calibrate under heavy load - thus, getting a lower lpj -
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
 
 static struct clocksource kvm_clock = {
 	.name = "kvm-clock",
-	.read = kvm_clock_read,
+	.read = kvm_clock_get_cycles,
 	.rating = 400,
 	.mask = CLOCKSOURCE_MASK(64),
 	.mult = 1 << KVM_SCALE,
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index e7368c1da01d..c1c429d00130 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -194,7 +194,7 @@ void machine_kexec(struct kimage *image)
 				unsigned int preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		save_processor_state();
 #endif
 
@@ -253,7 +253,7 @@ void machine_kexec(struct kimage *image)
 				image->preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		restore_processor_state();
 #endif
 
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 89cea4d44679..84c3bf209e98 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -274,7 +274,7 @@ void machine_kexec(struct kimage *image)
 	int save_ftrace_enabled;
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		save_processor_state();
 #endif
 
@@ -333,7 +333,7 @@ void machine_kexec(struct kimage *image)
 				image->preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		restore_processor_state();
 #endif
 
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 2e0eb4140951..98c470c069d1 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -380,8 +380,6 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
 		return err;
 
 	err = microcode_init_cpu(cpu);
-	if (err)
-		sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 
 	return err;
 }
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 34f12e9996ed..221a3853e268 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -50,7 +50,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
-struct dma_map_ops swiotlb_dma_ops = {
+static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index e95022e4f5d5..7563b31b4f03 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -261,8 +261,6 @@ static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
 {
 	if (hpet_force_user)
 		old_ich_force_enable_hpet(dev);
-	else
-		hpet_print_force_info();
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a567ebe6361..d57de05dc430 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
  * code, which is necessary to support wrapping clocksources like pm
  * timer.
  */
-static cycle_t read_tsc(void)
+static cycle_t read_tsc(struct clocksource *cs)
 {
 	cycle_t ret = (cycle_t)get_cycles();
 
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
index 2ffb6c53326e..583f11d5c480 100644
--- a/arch/x86/kernel/uv_time.c
+++ b/arch/x86/kernel/uv_time.c
@@ -29,7 +29,7 @@
 
 #define RTC_NAME	"sgi_rtc"
 
-static cycle_t uv_read_rtc(void);
+static cycle_t uv_read_rtc(struct clocksource *cs);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
 static void uv_rtc_timer_setup(enum clock_event_mode,
 	struct clock_event_device *);
@@ -123,7 +123,7 @@ static int uv_setup_intr(int cpu, u64 expires)
 	/* Initialize comparator value */
 	uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
 
-	return (expires < uv_read_rtc() && !uv_intr_pending(pnode));
+	return (expires < uv_read_rtc(NULL) && !uv_intr_pending(pnode));
 }
 
 /*
@@ -256,7 +256,7 @@ static int uv_rtc_unset_timer(int cpu)
 
 	spin_lock_irqsave(&head->lock, flags);
 
-	if (head->next_cpu == bcpu && uv_read_rtc() >= *t)
+	if (head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t)
 		rc = 1;
 
 	*t = ULLONG_MAX;
@@ -278,7 +278,7 @@ static int uv_rtc_unset_timer(int cpu)
 /*
  * Read the RTC.
  */
-static cycle_t uv_read_rtc(void)
+static cycle_t uv_read_rtc(struct clocksource *cs)
 {
 	return (cycle_t)uv_read_local_mmr(UVH_RTC);
 }
@@ -291,7 +291,7 @@ static int uv_rtc_next_event(unsigned long delta,
 {
 	int ced_cpu = cpumask_first(ced->cpumask);
 
-	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc());
+	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
 }
 
 /*
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index d303369a7bad..2b3eb82efeeb 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
 /** vmi clocksource */
 static struct clocksource clocksource_vmi;
 
-static cycle_t read_real_cycles(void)
+static cycle_t read_real_cycles(struct clocksource *cs)
 {
 	cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
 	return max(ret, clocksource_vmi.cycle_last);
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 0a5b04aa98f1..c5ee17e8c6d9 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -89,7 +89,7 @@ int save_i387_xstate(void __user *buf)
 
 	if (!used_math())
 		return 0;
-	clear_used_math(); /* trigger finit */
+
 	if (task_thread_info(tsk)->status & TS_USEDFPU) {
 		/*
 		 * Start with clearing the user buffer. This will present a
@@ -114,6 +114,8 @@ int save_i387_xstate(void __user *buf)
 			return -1;
 	}
 
+	clear_used_math(); /* trigger finit */
+
 	if (task_thread_info(tsk)->status & TS_XSAVE) {
 		struct _fpstate __user *fx = buf;
 		struct _xstate __user *x = buf;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a36f7f7c4c7..b6caf1329b1b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1248,7 +1248,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
-	sp->global = role.cr4_pge;
+	sp->global = 0;
 	hlist_add_head(&sp->hash_link, bucket);
 	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8ca100a9ecac..7c1ce5ac6131 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2775,6 +2775,9 @@ out:
 
 void kvm_arch_exit(void)
 {
+	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
+					    CPUFREQ_TRANSITION_NOTIFIER);
 	kvm_x86_ops = NULL;
 	kvm_mmu_module_exit();
 }
@@ -4159,6 +4162,11 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+	if (vcpu->arch.time_page) {
+		kvm_release_page_dirty(vcpu->arch.time_page);
+		vcpu->arch.time_page = NULL;
+	}
+
 	kvm_x86_ops->vcpu_free(vcpu);
 }
 
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e94a11e42f98..ca7ec44bafc3 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -273,15 +273,15 @@ static void lguest_load_idt(const struct desc_ptr *desc)
  * controls the entire thing and the Guest asks it to make changes using the
  * LOAD_GDT hypercall.
  *
- * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY
- * hypercall and use that repeatedly to load a new IDT.  I don't think it
- * really matters, but wouldn't it be nice if they were the same?  Wouldn't
- * it be even better if you were the one to send the patch to fix it?
+ * This is the exactly like the IDT code.
  */
 static void lguest_load_gdt(const struct desc_ptr *desc)
 {
-	BUG_ON((desc->size + 1) / 8 != GDT_ENTRIES);
-	kvm_hypercall2(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES);
+	unsigned int i;
+	struct desc_struct *gdt = (void *)desc->address;
+
+	for (i = 0; i < (desc->size+1)/8; i++)
+		kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
 }
 
 /* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
@@ -291,7 +291,9 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
 			       const void *desc, int type)
 {
 	native_write_gdt_entry(dt, entrynum, desc, type);
-	kvm_hypercall2(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES);
+	/* Tell Host about this new entry. */
+	kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
+		       dt[entrynum].a, dt[entrynum].b);
 }
 
 /* OK, I lied. There are three "thread local storage" GDT entries which change
@@ -661,7 +663,7 @@ static unsigned long lguest_tsc_khz(void)
 
 /* If we can't use the TSC, the kernel falls back to our lower-priority
  * "lguest_clock", where we read the time value given to us by the Host. */
-static cycle_t lguest_clock_read(void)
+static cycle_t lguest_clock_read(struct clocksource *cs)
 {
 	unsigned long sec, nsec;
 
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fd3da1dda1c9..ae4f7b5d7104 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -7,6 +7,7 @@
 #include <asm/page.h>
 #include <asm/page_types.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 
@@ -304,8 +305,23 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 #endif
 
 #ifdef CONFIG_X86_64
-	if (!after_bootmem)
+	if (!after_bootmem && !start) {
+		pud_t *pud;
+		pmd_t *pmd;
+
 		mmu_cr4_features = read_cr4();
+
+		/*
+		 * _brk_end cannot change anymore, but it and _end may be
+		 * located on different 2M pages. cleanup_highmap(), however,
+		 * can only consider _end when it runs, so destroy any
+		 * mappings beyond _brk_end here.
+		 */
+		pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+		pmd = pmd_offset(pud, _brk_end - 1);
+		while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+			pmd_clear(pmd);
+	}
 #endif
 	__flush_tlb_all();
 
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 09daebfdb11c..8a450930834f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -280,15 +280,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		return NULL;
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+
+	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
 		free_memtype(phys_addr, phys_addr + size);
 		free_vm_area(area);
 		return NULL;
 	}
 
-	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
+	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 		free_memtype(phys_addr, phys_addr + size);
-		vunmap(area->addr);
+		free_vm_area(area);
 		return NULL;
 	}
 
@@ -374,7 +375,8 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
 	 * - Inherit from confliting mappings otherwise
 	 */
-	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
+	err = reserve_memtype(phys_addr, phys_addr + size,
+			      _PAGE_CACHE_WB, &flags);
 	if (err < 0)
 		return NULL;
 
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 4f115e00486b..50dc802a1c46 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -87,7 +87,7 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
 {
 	struct kmmio_probe *p;
 	list_for_each_entry_rcu(p, &kmmio_probes, list) {
-		if (addr >= p->addr && addr <= (p->addr + p->len))
+		if (addr >= p->addr && addr < (p->addr + p->len))
 			return p;
 	}
 	return NULL;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3daefa04ace5..d2530062fe00 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -257,7 +257,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
 }
 #endif
 
-static unsigned long calculate_numa_remap_pages(void)
+static __init unsigned long calculate_numa_remap_pages(void)
 {
 	int nid;
 	unsigned long size, reserve_pages = 0;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index d73aaa892371..2d05a12029dc 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -188,6 +188,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
 	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
 	int nid;
 
+	if (!end)
+		return;
+
 	start = roundup(start, ZONE_ALIGN);
 
 	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index d71e1b636ce6..797f9f107cb6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -945,71 +945,94 @@ int _set_memory_uc(unsigned long addr, int numpages)
 
 int set_memory_uc(unsigned long addr, int numpages)
 {
+	int ret;
+
 	/*
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
-	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-			    _PAGE_CACHE_UC_MINUS, NULL))
-		return -EINVAL;
+	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+			      _PAGE_CACHE_UC_MINUS, NULL);
+	if (ret)
+		goto out_err;
+
+	ret = _set_memory_uc(addr, numpages);
+	if (ret)
+		goto out_free;
 
-	return _set_memory_uc(addr, numpages);
+	return 0;
+
+out_free:
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+out_err:
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_uc);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray)
 {
-	unsigned long start;
-	unsigned long end;
-	int i;
+	int i, j;
+	int ret;
+
 	/*
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
 	for (i = 0; i < addrinarray; i++) {
-		start = __pa(addr[i]);
-		for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
-			if (end != __pa(addr[i + 1]))
-				break;
-			i++;
-		}
-		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
-			goto out;
+		ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
+					_PAGE_CACHE_UC_MINUS, NULL);
+		if (ret)
+			goto out_free;
 	}
 
-	return change_page_attr_set(addr, addrinarray,
+	ret = change_page_attr_set(addr, addrinarray,
 				    __pgprot(_PAGE_CACHE_UC_MINUS), 1);
-out:
-	for (i = 0; i < addrinarray; i++) {
-		unsigned long tmp = __pa(addr[i]);
-
-		if (tmp == start)
-			break;
-		for (end = tmp + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
-			if (end != __pa(addr[i + 1]))
-				break;
-			i++;
-		}
-		free_memtype(tmp, end);
-	}
-	return -EINVAL;
+	if (ret)
+		goto out_free;
+
+	return 0;
+
+out_free:
+	for (j = 0; j < i; j++)
+		free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_array_uc);
 
 int _set_memory_wc(unsigned long addr, int numpages)
 {
-	return change_page_attr_set(&addr, numpages,
+	int ret;
+	ret = change_page_attr_set(&addr, numpages,
+				    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
+
+	if (!ret) {
+		ret = change_page_attr_set(&addr, numpages,
 				    __pgprot(_PAGE_CACHE_WC), 0);
+	}
+	return ret;
 }
 
 int set_memory_wc(unsigned long addr, int numpages)
 {
+	int ret;
+
 	if (!pat_enabled)
 		return set_memory_uc(addr, numpages);
 
-	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-		_PAGE_CACHE_WC, NULL))
-		return -EINVAL;
+	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+		_PAGE_CACHE_WC, NULL);
+	if (ret)
+		goto out_err;
+
+	ret = _set_memory_wc(addr, numpages);
+	if (ret)
+		goto out_free;
+
+	return 0;
 
-	return _set_memory_wc(addr, numpages);
+out_free:
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+out_err:
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_wc);
 
@@ -1021,29 +1044,31 @@ int _set_memory_wb(unsigned long addr, int numpages)
1021 1044
1022int set_memory_wb(unsigned long addr, int numpages) 1045int set_memory_wb(unsigned long addr, int numpages)
1023{ 1046{
1024 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); 1047 int ret;
1048
1049 ret = _set_memory_wb(addr, numpages);
1050 if (ret)
1051 return ret;
1025 1052
1026 return _set_memory_wb(addr, numpages); 1053 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1054 return 0;
1027} 1055}
1028EXPORT_SYMBOL(set_memory_wb); 1056EXPORT_SYMBOL(set_memory_wb);
1029 1057
1030int set_memory_array_wb(unsigned long *addr, int addrinarray) 1058int set_memory_array_wb(unsigned long *addr, int addrinarray)
1031{ 1059{
1032 int i; 1060 int i;
1061 int ret;
1033 1062
1034 for (i = 0; i < addrinarray; i++) { 1063 ret = change_page_attr_clear(addr, addrinarray,
1035 unsigned long start = __pa(addr[i]);
1036 unsigned long end;
1037
1038 for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
1039 if (end != __pa(addr[i + 1]))
1040 break;
1041 i++;
1042 }
1043 free_memtype(start, end);
1044 }
1045 return change_page_attr_clear(addr, addrinarray,
1046 __pgprot(_PAGE_CACHE_MASK), 1); 1064 __pgprot(_PAGE_CACHE_MASK), 1);
1065 if (ret)
1066 return ret;
1067
1068 for (i = 0; i < addrinarray; i++)
1069 free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
1070
1071 return 0;
1047} 1072}
1048EXPORT_SYMBOL(set_memory_array_wb); 1073EXPORT_SYMBOL(set_memory_array_wb);
1049 1074
@@ -1136,6 +1161,8 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
1136 1161
1137 retval = cpa_clear_pages_array(pages, addrinarray, 1162 retval = cpa_clear_pages_array(pages, addrinarray,
1138 __pgprot(_PAGE_CACHE_MASK)); 1163 __pgprot(_PAGE_CACHE_MASK));
1164 if (retval)
1165 return retval;
1139 1166
1140 for (i = 0; i < addrinarray; i++) { 1167 for (i = 0; i < addrinarray; i++) {
1141 start = (unsigned long)page_address(pages[i]); 1168 start = (unsigned long)page_address(pages[i]);
@@ -1143,7 +1170,7 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
1143 free_memtype(start, end); 1170 free_memtype(start, end);
1144 } 1171 }
1145 1172
1146 return retval; 1173 return 0;
1147} 1174}
1148EXPORT_SYMBOL(set_pages_array_wb); 1175EXPORT_SYMBOL(set_pages_array_wb);
1149 1176
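Editorial sketch: the rewritten pageattr helpers above share one error-handling shape. Reserve per page, attempt the attribute change, and on failure release only what was actually reserved (the j < i loop), instead of the old coalescing walk that re-derived ranges during cleanup. A compact, self-contained model of that reserve-then-unwind pattern, with all names invented:

    #include <errno.h>
    #include <stdio.h>

    #define NPAGES 4

    static int reserved[NPAGES];

    /* Toy reservation that fails on entry 2, to exercise the unwind path */
    static int toy_reserve(int i)
    {
        if (i == 2)
            return -EBUSY;
        reserved[i] = 1;
        return 0;
    }

    static void toy_free(int i)
    {
        reserved[i] = 0;
    }

    /*
     * Mirrors the shape of the new set_memory_array_uc(): reserve each
     * entry in turn, and on failure free only entries [0, i) that had
     * already succeeded before returning the error.
     */
    static int reserve_all(int n)
    {
        int i, j, ret = 0;

        for (i = 0; i < n; i++) {
            ret = toy_reserve(i);
            if (ret)
                goto out_free;
        }
        return 0;

    out_free:
        for (j = 0; j < i; j++)
            toy_free(j);
        return ret;
    }

    int main(void)
    {
        int i;

        printf("reserve_all: %d\n", reserve_all(NPAGES)); /* -EBUSY */
        for (i = 0; i < NPAGES; i++)
            printf("entry %d reserved=%d\n", i, reserved[i]); /* all 0 */
        return 0;
    }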
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index c009a241d562..e6718bb28065 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -182,10 +182,10 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
182 u8 mtrr_type; 182 u8 mtrr_type;
183 183
184 mtrr_type = mtrr_type_lookup(start, end); 184 mtrr_type = mtrr_type_lookup(start, end);
185 if (mtrr_type == MTRR_TYPE_UNCACHABLE) 185 if (mtrr_type != MTRR_TYPE_WRBACK)
186 return _PAGE_CACHE_UC; 186 return _PAGE_CACHE_UC_MINUS;
187 if (mtrr_type == MTRR_TYPE_WRCOMB) 187
188 return _PAGE_CACHE_WC; 188 return _PAGE_CACHE_WB;
189 } 189 }
190 190
191 return req_type; 191 return req_type;
@@ -352,23 +352,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
352 return 0; 352 return 0;
353 } 353 }
354 354
355 if (req_type == -1) { 355 /*
356 /* 356 * Call mtrr_lookup to get the type hint. This is an
357 * Call mtrr_lookup to get the type hint. This is an 357 * optimization for /dev/mem mmap'ers into WB memory (BIOS
358 * optimization for /dev/mem mmap'ers into WB memory (BIOS 358 * tools and ACPI tools). Use WB request for WB memory and use
359 * tools and ACPI tools). Use WB request for WB memory and use 359 * UC_MINUS otherwise.
360 * UC_MINUS otherwise. 360 */
361 */ 361 actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
362 u8 mtrr_type = mtrr_type_lookup(start, end);
363
364 if (mtrr_type == MTRR_TYPE_WRBACK)
365 actual_type = _PAGE_CACHE_WB;
366 else
367 actual_type = _PAGE_CACHE_UC_MINUS;
368 } else {
369 actual_type = pat_x_mtrr_type(start, end,
370 req_type & _PAGE_CACHE_MASK);
371 }
372 362
373 if (new_type) 363 if (new_type)
374 *new_type = actual_type; 364 *new_type = actual_type;
@@ -546,9 +536,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
546int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, 536int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
547 unsigned long size, pgprot_t *vma_prot) 537 unsigned long size, pgprot_t *vma_prot)
548{ 538{
549 u64 offset = ((u64) pfn) << PAGE_SHIFT; 539 unsigned long flags = _PAGE_CACHE_WB;
550 unsigned long flags = -1;
551 int retval;
552 540
553 if (!range_is_allowed(pfn, size)) 541 if (!range_is_allowed(pfn, size))
554 return 0; 542 return 0;
@@ -576,64 +564,11 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
576 } 564 }
577#endif 565#endif
578 566
579 /*
580 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
581 *
582 * Without O_SYNC, we want to get
583 * - WB for WB-able memory and no other conflicting mappings
584 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
585 * - Inherit from conflicting mappings otherwise
586 */
587 if (flags != -1) {
588 retval = reserve_memtype(offset, offset + size, flags, NULL);
589 } else {
590 retval = reserve_memtype(offset, offset + size, -1, &flags);
591 }
592
593 if (retval < 0)
594 return 0;
595
596 if (((pfn < max_low_pfn_mapped) ||
597 (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
598 ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
599 free_memtype(offset, offset + size);
600 printk(KERN_INFO
601 "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
602 current->comm, current->pid,
603 cattr_name(flags),
604 offset, (unsigned long long)(offset + size));
605 return 0;
606 }
607
608 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | 567 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
609 flags); 568 flags);
610 return 1; 569 return 1;
611} 570}
612 571
613void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
614{
615 unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
616 u64 addr = (u64)pfn << PAGE_SHIFT;
617 unsigned long flags;
618
619 reserve_memtype(addr, addr + size, want_flags, &flags);
620 if (flags != want_flags) {
621 printk(KERN_INFO
622 "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
623 current->comm, current->pid,
624 cattr_name(want_flags),
625 addr, (unsigned long long)(addr + size),
626 cattr_name(flags));
627 }
628}
629
630void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
631{
632 u64 addr = (u64)pfn << PAGE_SHIFT;
633
634 free_memtype(addr, addr + size);
635}
636
637/* 572/*
638 * Change the memory type for the physical address range in kernel identity 573 * Change the memory type for the physical address range in kernel identity
639 * mapping space if that range is a part of identity map. 574 * mapping space if that range is a part of identity map.
@@ -671,8 +606,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
671{ 606{
672 int is_ram = 0; 607 int is_ram = 0;
673 int ret; 608 int ret;
674 unsigned long flags;
675 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); 609 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
610 unsigned long flags = want_flags;
676 611
677 is_ram = pat_pagerange_is_ram(paddr, paddr + size); 612 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
678 613
@@ -734,29 +669,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
734 * 669 *
735 * If the vma has a linear pfn mapping for the entire range, we get the prot 670 * If the vma has a linear pfn mapping for the entire range, we get the prot
736 * from pte and reserve the entire vma range with single reserve_pfn_range call. 671 * from pte and reserve the entire vma range with single reserve_pfn_range call.
737 * Otherwise, we reserve the entire vma range, by going through the PTEs page
738 * by page to get physical address and protection.
739 */ 672 */
740int track_pfn_vma_copy(struct vm_area_struct *vma) 673int track_pfn_vma_copy(struct vm_area_struct *vma)
741{ 674{
742 int retval = 0;
743 unsigned long i, j;
744 resource_size_t paddr; 675 resource_size_t paddr;
745 unsigned long prot; 676 unsigned long prot;
746 unsigned long vma_start = vma->vm_start; 677 unsigned long vma_size = vma->vm_end - vma->vm_start;
747 unsigned long vma_end = vma->vm_end;
748 unsigned long vma_size = vma_end - vma_start;
749 pgprot_t pgprot; 678 pgprot_t pgprot;
750 679
751 if (!pat_enabled) 680 if (!pat_enabled)
752 return 0; 681 return 0;
753 682
683 /*
684 * For now, only handle remap_pfn_range() vmas where
685 * is_linear_pfn_mapping() == TRUE. Handling of
686 * vm_insert_pfn() is TBD.
687 */
754 if (is_linear_pfn_mapping(vma)) { 688 if (is_linear_pfn_mapping(vma)) {
755 /* 689 /*
756 * reserve the whole chunk covered by vma. We need the 690 * reserve the whole chunk covered by vma. We need the
757 * starting address and protection from pte. 691 * starting address and protection from pte.
758 */ 692 */
759 if (follow_phys(vma, vma_start, 0, &prot, &paddr)) { 693 if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
760 WARN_ON_ONCE(1); 694 WARN_ON_ONCE(1);
761 return -EINVAL; 695 return -EINVAL;
762 } 696 }
@@ -764,28 +698,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
764 return reserve_pfn_range(paddr, vma_size, &pgprot, 1); 698 return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
765 } 699 }
766 700
767 /* reserve entire vma page by page, using pfn and prot from pte */
768 for (i = 0; i < vma_size; i += PAGE_SIZE) {
769 if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
770 continue;
771
772 pgprot = __pgprot(prot);
773 retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
774 if (retval)
775 goto cleanup_ret;
776 }
777 return 0; 701 return 0;
778
779cleanup_ret:
780 /* Reserve error: Cleanup partial reservation and return error */
781 for (j = 0; j < i; j += PAGE_SIZE) {
782 if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
783 continue;
784
785 free_pfn_range(paddr, PAGE_SIZE);
786 }
787
788 return retval;
789} 702}
790 703
791/* 704/*
@@ -795,50 +708,28 @@ cleanup_ret:
795 * prot is passed in as a parameter for the new mapping. If the vma has a 708 * prot is passed in as a parameter for the new mapping. If the vma has a
796 * linear pfn mapping for the entire range reserve the entire vma range with 709 * linear pfn mapping for the entire range reserve the entire vma range with
797 * single reserve_pfn_range call. 710 * single reserve_pfn_range call.
798 * Otherwise, we look at the pfn and size and reserve only the specified range
799 * page by page.
800 *
801 * Note that this function can be called with caller trying to map only a
802 * subrange/page inside the vma.
803 */ 711 */
804int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, 712int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
805 unsigned long pfn, unsigned long size) 713 unsigned long pfn, unsigned long size)
806{ 714{
807 int retval = 0;
808 unsigned long i, j;
809 resource_size_t base_paddr;
810 resource_size_t paddr; 715 resource_size_t paddr;
811 unsigned long vma_start = vma->vm_start; 716 unsigned long vma_size = vma->vm_end - vma->vm_start;
812 unsigned long vma_end = vma->vm_end;
813 unsigned long vma_size = vma_end - vma_start;
814 717
815 if (!pat_enabled) 718 if (!pat_enabled)
816 return 0; 719 return 0;
817 720
721 /*
722 * For now, only handle remap_pfn_range() vmas where
723 * is_linear_pfn_mapping() == TRUE. Handling of
724 * vm_insert_pfn() is TBD.
725 */
818 if (is_linear_pfn_mapping(vma)) { 726 if (is_linear_pfn_mapping(vma)) {
819 /* reserve the whole chunk starting from vm_pgoff */ 727 /* reserve the whole chunk starting from vm_pgoff */
820 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; 728 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
821 return reserve_pfn_range(paddr, vma_size, prot, 0); 729 return reserve_pfn_range(paddr, vma_size, prot, 0);
822 } 730 }
823 731
824 /* reserve page by page using pfn and size */
825 base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
826 for (i = 0; i < size; i += PAGE_SIZE) {
827 paddr = base_paddr + i;
828 retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
829 if (retval)
830 goto cleanup_ret;
831 }
832 return 0; 732 return 0;
833
834cleanup_ret:
835 /* Reserve error: Cleanup partial reservation and return error */
836 for (j = 0; j < i; j += PAGE_SIZE) {
837 paddr = base_paddr + j;
838 free_pfn_range(paddr, PAGE_SIZE);
839 }
840
841 return retval;
842} 733}
843 734
844/* 735/*
@@ -849,39 +740,23 @@ cleanup_ret:
849void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, 740void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
850 unsigned long size) 741 unsigned long size)
851{ 742{
852 unsigned long i;
853 resource_size_t paddr; 743 resource_size_t paddr;
854 unsigned long prot; 744 unsigned long vma_size = vma->vm_end - vma->vm_start;
855 unsigned long vma_start = vma->vm_start;
856 unsigned long vma_end = vma->vm_end;
857 unsigned long vma_size = vma_end - vma_start;
858 745
859 if (!pat_enabled) 746 if (!pat_enabled)
860 return; 747 return;
861 748
749 /*
750 * For now, only handle remap_pfn_range() vmas where
751 * is_linear_pfn_mapping() == TRUE. Handling of
752 * vm_insert_pfn() is TBD.
753 */
862 if (is_linear_pfn_mapping(vma)) { 754 if (is_linear_pfn_mapping(vma)) {
863 /* free the whole chunk starting from vm_pgoff */ 755 /* free the whole chunk starting from vm_pgoff */
864 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; 756 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
865 free_pfn_range(paddr, vma_size); 757 free_pfn_range(paddr, vma_size);
866 return; 758 return;
867 } 759 }
868
869 if (size != 0 && size != vma_size) {
870 /* free page by page, using pfn and size */
871 paddr = (resource_size_t)pfn << PAGE_SHIFT;
872 for (i = 0; i < size; i += PAGE_SIZE) {
873 paddr = paddr + i;
874 free_pfn_range(paddr, PAGE_SIZE);
875 }
876 } else {
877 /* free entire vma, page by page, using the pfn from pte */
878 for (i = 0; i < vma_size; i += PAGE_SIZE) {
879 if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
880 continue;
881
882 free_pfn_range(paddr, PAGE_SIZE);
883 }
884 }
885} 760}
886 761
887pgprot_t pgprot_writecombine(pgprot_t prot) 762pgprot_t pgprot_writecombine(pgprot_t prot)
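Editorial sketch: the pat.c hunks above fold the old per-MTRR-type mapping into a single rule. A WB request survives only where the MTRRs report write-back; anything else degrades to UC_MINUS. With reserve_memtype() now always taking a real type rather than a -1 sentinel, pat_x_mtrr_type() is the one place that consults the MTRRs. A standalone model of that rule, with the MTRR lookup faked:

    #include <stdio.h>

    enum mtrr_type { MTRR_WRBACK, MTRR_WRCOMB, MTRR_UNCACHABLE };
    enum cache_type { CACHE_WB, CACHE_UC_MINUS };

    /* Fake mtrr_type_lookup(): call everything below 1 MiB uncachable */
    static enum mtrr_type mtrr_lookup(unsigned long start, unsigned long end)
    {
        return start < 0x100000 ? MTRR_UNCACHABLE : MTRR_WRBACK;
    }

    /*
     * Shape of the new pat_x_mtrr_type(): any MTRR type other than
     * write-back downgrades the request to UC-, replacing the old
     * UNCACHABLE -> UC and WRCOMB -> WC special cases.
     */
    static enum cache_type resolve(unsigned long start, unsigned long end)
    {
        if (mtrr_lookup(start, end) != MTRR_WRBACK)
            return CACHE_UC_MINUS;
        return CACHE_WB;
    }

    int main(void)
    {
        printf("legacy VGA hole: %s\n",
               resolve(0xA0000, 0xC0000) == CACHE_WB ? "WB" : "UC-");
        printf("normal RAM:      %s\n",
               resolve(0x200000, 0x300000) == CACHE_WB ? "WB" : "UC-");
        return 0;
    }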
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 16ae70fc57e7..29a0e37114f8 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -216,7 +216,7 @@ int __init get_memcfg_from_srat(void)
216 216
217 if (num_memory_chunks == 0) { 217 if (num_memory_chunks == 0) {
218 printk(KERN_WARNING 218 printk(KERN_WARNING
219 "could not finy any ACPI SRAT memory areas.\n"); 219 "could not find any ACPI SRAT memory areas.\n");
220 goto out_fail; 220 goto out_fail;
221 } 221 }
222 222
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index c7d272b8574c..01765955baaf 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -28,6 +28,7 @@ int acpi_numa __initdata;
28static struct acpi_table_slit *acpi_slit; 28static struct acpi_table_slit *acpi_slit;
29 29
30static nodemask_t nodes_parsed __initdata; 30static nodemask_t nodes_parsed __initdata;
31static nodemask_t cpu_nodes_parsed __initdata;
31static struct bootnode nodes[MAX_NUMNODES] __initdata; 32static struct bootnode nodes[MAX_NUMNODES] __initdata;
32static struct bootnode nodes_add[MAX_NUMNODES]; 33static struct bootnode nodes_add[MAX_NUMNODES];
33static int found_add_area __initdata; 34static int found_add_area __initdata;
@@ -141,6 +142,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
141 142
142 apic_id = pa->apic_id; 143 apic_id = pa->apic_id;
143 apicid_to_node[apic_id] = node; 144 apicid_to_node[apic_id] = node;
145 node_set(node, cpu_nodes_parsed);
144 acpi_numa = 1; 146 acpi_numa = 1;
145 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", 147 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
146 pxm, apic_id, node); 148 pxm, apic_id, node);
@@ -174,6 +176,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
174 else 176 else
175 apic_id = pa->apic_id; 177 apic_id = pa->apic_id;
176 apicid_to_node[apic_id] = node; 178 apicid_to_node[apic_id] = node;
179 node_set(node, cpu_nodes_parsed);
177 acpi_numa = 1; 180 acpi_numa = 1;
178 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", 181 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
179 pxm, apic_id, node); 182 pxm, apic_id, node);
@@ -358,6 +361,7 @@ static void __init unparse_node(int node)
358{ 361{
359 int i; 362 int i;
360 node_clear(node, nodes_parsed); 363 node_clear(node, nodes_parsed);
364 node_clear(node, cpu_nodes_parsed);
361 for (i = 0; i < MAX_LOCAL_APIC; i++) { 365 for (i = 0; i < MAX_LOCAL_APIC; i++) {
362 if (apicid_to_node[i] == node) 366 if (apicid_to_node[i] == node)
363 apicid_to_node[i] = NUMA_NO_NODE; 367 apicid_to_node[i] = NUMA_NO_NODE;
@@ -402,7 +406,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
402 return -1; 406 return -1;
403 } 407 }
404 408
405 node_possible_map = nodes_parsed; 409 /* Account for nodes with cpus and no memory */
410 nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);
406 411
407 /* Finally register nodes */ 412 /* Finally register nodes */
408 for_each_node_mask(i, node_possible_map) 413 for_each_node_mask(i, node_possible_map)
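Editorial sketch: the srat_64.c change above keeps a second nodemask for nodes seen only through CPU affinity entries, then ORs it into node_possible_map so memoryless nodes still get registered; this pairs with the "if (!end) return;" guard added to setup_node_bootmem() earlier in this diff. A toy model of the nodemask bookkeeping, using one bit per node:

    #include <stdio.h>

    typedef unsigned long nodemask_t; /* toy nodemask: one bit per node, max 64 */

    #define node_set(node, mask)  ((mask) |= 1UL << (node))

    int main(void)
    {
        nodemask_t nodes_parsed = 0;      /* nodes with memory (SRAT ranges) */
        nodemask_t cpu_nodes_parsed = 0;  /* nodes seen via CPU affinity */
        nodemask_t node_possible_map;

        node_set(0, nodes_parsed);
        node_set(0, cpu_nodes_parsed);
        node_set(1, cpu_nodes_parsed);    /* node 1 has CPUs but no memory */

        /* analogue of nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed) */
        node_possible_map = nodes_parsed | cpu_nodes_parsed;

        printf("possible nodes: %#lx\n", node_possible_map); /* 0x3, node 1 kept */
        return 0;
    }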
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 9bb09823b362..f893d6a6e803 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -94,12 +94,16 @@ struct pci_root_info {
94static int pci_root_num; 94static int pci_root_num;
95static struct pci_root_info pci_root_info[PCI_ROOT_NR]; 95static struct pci_root_info pci_root_info[PCI_ROOT_NR];
96 96
97void set_pci_bus_resources_arch_default(struct pci_bus *b) 97void x86_pci_root_bus_res_quirks(struct pci_bus *b)
98{ 98{
99 int i; 99 int i;
100 int j; 100 int j;
101 struct pci_root_info *info; 101 struct pci_root_info *info;
102 102
103 /* don't go for it if _CRS is used */
104 if (pci_probe & PCI_USE__CRS)
105 return;
106
103 /* if only one root bus, don't need to do anything */ 107 /* if only one root bus, don't need to do anything */
104 if (pci_root_num < 2) 108 if (pci_root_num < 2)
105 return; 109 return;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 8c362b96b644..2202b6257b82 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -147,10 +147,13 @@ static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
147 * are examined. 147 * are examined.
148 */ 148 */
149 149
150void __devinit pcibios_fixup_bus(struct pci_bus *b) 150void __devinit pcibios_fixup_bus(struct pci_bus *b)
151{ 151{
152 struct pci_dev *dev; 152 struct pci_dev *dev;
153 153
154 /* root bus? */
155 if (!b->parent)
156 x86_pci_root_bus_res_quirks(b);
154 pci_read_bridge_bases(b); 157 pci_read_bridge_bases(b);
155 list_for_each_entry(dev, &b->devices, bus_list) 158 list_for_each_entry(dev, &b->devices, bus_list)
156 pcibios_fixup_device_resources(dev); 159 pcibios_fixup_device_resources(dev);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index f1817f71e009..a85bef20a3b9 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -238,6 +238,10 @@ void __init pcibios_resource_survey(void)
238 */ 238 */
239fs_initcall(pcibios_assign_resources); 239fs_initcall(pcibios_assign_resources);
240 240
241void __weak x86_pci_root_bus_res_quirks(struct pci_bus *b)
242{
243}
244
241/* 245/*
242 * If we set up a device for bus mastering, we need to check the latency 246 * If we set up a device for bus mastering, we need to check the latency
243 * timer as certain crappy BIOSes forget to set it properly. 247 * timer as certain crappy BIOSes forget to set it properly.
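Editorial sketch: the empty x86_pci_root_bus_res_quirks() added to i386.c is declared __weak, so generic code (the pcibios_fixup_bus() hook in common.c above) can call it unconditionally while a platform file such as amd_bus.c supplies the real, strong definition. A minimal model of the weak-symbol pattern; with no strong definition linked in, the stub runs:

    #include <stdio.h>

    /*
     * Weak default, analogous to the stub added in i386.c: the program
     * links even when no platform-specific override exists.
     */
    __attribute__((weak)) void root_bus_quirks(int bus)
    {
        /* default: nothing to fix up */
        (void)bus;
    }

    /* A platform object file could provide a non-weak root_bus_quirks()
     * in another translation unit; the linker would prefer it over this. */

    int main(void)
    {
        root_bus_quirks(0); /* always safe to call */
        printf("root bus quirks hook ran\n");
        return 0;
    }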
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 905bb526b133..5fa10bb9604f 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -375,7 +375,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
375 if (!fixmem32) 375 if (!fixmem32)
376 return AE_OK; 376 return AE_OK;
377 if ((mcfg_res->start >= fixmem32->address) && 377 if ((mcfg_res->start >= fixmem32->address) &&
378 (mcfg_res->end < (fixmem32->address + 378 (mcfg_res->end <= (fixmem32->address +
379 fixmem32->address_length))) { 379 fixmem32->address_length))) {
380 mcfg_res->flags = 1; 380 mcfg_res->flags = 1;
381 return AE_CTRL_TERMINATE; 381 return AE_CTRL_TERMINATE;
@@ -392,7 +392,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
392 return AE_OK; 392 return AE_OK;
393 393
394 if ((mcfg_res->start >= address.minimum) && 394 if ((mcfg_res->start >= address.minimum) &&
395 (mcfg_res->end < (address.minimum + address.address_length))) { 395 (mcfg_res->end <= (address.minimum + address.address_length))) {
396 mcfg_res->flags = 1; 396 mcfg_res->flags = 1;
397 return AE_CTRL_TERMINATE; 397 return AE_CTRL_TERMINATE;
398 } 398 }
@@ -439,7 +439,7 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
439 u64 old_size = size; 439 u64 old_size = size;
440 int valid = 0; 440 int valid = 0;
441 441
442 while (!is_reserved(addr, addr + size - 1, E820_RESERVED)) { 442 while (!is_reserved(addr, addr + size, E820_RESERVED)) {
443 size >>= 1; 443 size >>= 1;
444 if (size < (16UL<<20)) 444 if (size < (16UL<<20))
445 break; 445 break;
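Editorial sketch: the three mmconfig-shared.c hunks above move the reservation checks to one exclusive-end convention. A candidate window [start, end) lies inside a reserved region [base, base + len) exactly when start >= base and end <= base + len, so a window that fills the region exactly now passes. A small model of the containment test, assuming that exclusive-end reading of the commit:

    #include <assert.h>

    /* Exclusive-end containment: is [s, e) inside [base, base + len)? */
    static int contains(unsigned long s, unsigned long e,
                        unsigned long base, unsigned long len)
    {
        return s >= base && e <= base + len;
    }

    int main(void)
    {
        /* a 256 MiB MMCONFIG window exactly filling a reserved region */
        assert(contains(0xe0000000UL, 0xf0000000UL,
                        0xe0000000UL, 0x10000000UL)); /* old "<" rejected this */
        /* one byte past the region must still fail */
        assert(!contains(0xe0000000UL, 0xf0000001UL,
                         0xe0000000UL, 0x10000000UL));
        return 0;
    }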
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index d9d35824c56f..6a40b78b46aa 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -104,11 +104,13 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
104{ 104{
105 long ret; 105 long ret;
106 if (likely(gtod->sysctl_enabled && gtod->clock.vread)) { 106 if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
107 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != 107 if (likely(tv != NULL)) {
108 offsetof(struct timespec, tv_nsec) || 108 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
109 sizeof(*tv) != sizeof(struct timespec)); 109 offsetof(struct timespec, tv_nsec) ||
110 do_realtime((struct timespec *)tv); 110 sizeof(*tv) != sizeof(struct timespec));
111 tv->tv_usec /= 1000; 111 do_realtime((struct timespec *)tv);
112 tv->tv_usec /= 1000;
113 }
112 if (unlikely(tz != NULL)) { 114 if (unlikely(tz != NULL)) {
113 /* Avoid memcpy. Some old compilers fail to inline it */ 115 /* Avoid memcpy. Some old compilers fail to inline it */
114 tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest; 116 tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 9842b1212407..e25a78e1113a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1794,6 +1794,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1794 1794
1795 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); 1795 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
1796 1796
1797 reserve_early(__pa(xen_start_info->pt_base),
1798 __pa(xen_start_info->pt_base +
1799 xen_start_info->nr_pt_frames * PAGE_SIZE),
1800 "XEN PAGETABLES");
1801
1797 return swapper_pg_dir; 1802 return swapper_pg_dir;
1798} 1803}
1799#endif /* CONFIG_X86_64 */ 1804#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 14f240623497..0a5aa44299a5 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void)
213 return ret; 213 return ret;
214} 214}
215 215
216static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
217{
218 return xen_clocksource_read();
219}
220
216static void xen_read_wallclock(struct timespec *ts) 221static void xen_read_wallclock(struct timespec *ts)
217{ 222{
218 struct shared_info *s = HYPERVISOR_shared_info; 223 struct shared_info *s = HYPERVISOR_shared_info;
@@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now)
241static struct clocksource xen_clocksource __read_mostly = { 246static struct clocksource xen_clocksource __read_mostly = {
242 .name = "xen", 247 .name = "xen",
243 .rating = 400, 248 .rating = 400,
244 .read = xen_clocksource_read, 249 .read = xen_clocksource_get_cycles,
245 .mask = ~0, 250 .mask = ~0,
246 .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */ 251 .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */
247 .shift = XEN_SHIFT, 252 .shift = XEN_SHIFT,
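Editorial sketch: the xen/time.c change adapts to a clocksource API in which .read now receives the struct clocksource pointer. Rather than changing xen_clocksource_read() and its other callers, a thin wrapper with the new signature is registered. A self-contained model of that adapter pattern, with toy types standing in for the kernel's:

    #include <stdio.h>

    typedef unsigned long long cycle_t;

    struct clocksource {
        const char *name;
        cycle_t (*read)(struct clocksource *cs); /* callback takes its owner now */
    };

    /* Existing reader keeping its old, argument-less signature */
    static cycle_t toy_clock_read(void)
    {
        return 123456789ULL;
    }

    /* Thin adapter matching the new prototype, like xen_clocksource_get_cycles() */
    static cycle_t toy_get_cycles(struct clocksource *cs)
    {
        (void)cs; /* unused: this reader needs no per-instance state */
        return toy_clock_read();
    }

    static struct clocksource toy_clocksource = {
        .name = "toy",
        .read = toy_get_cycles,
    };

    int main(void)
    {
        printf("%s: %llu cycles\n", toy_clocksource.name,
               toy_clocksource.read(&toy_clocksource));
        return 0;
    }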