Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/Makefile | 5
-rw-r--r--  arch/ia64/include/asm/bitops.h | 2
-rw-r--r--  arch/ia64/include/asm/pgtable.h | 1
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 4
-rw-r--r--  arch/ia64/kernel/iosapic.c | 4
-rw-r--r--  arch/ia64/kernel/pci-dma.c | 5
-rw-r--r--  arch/ia64/kernel/topology.c | 6
-rw-r--r--  arch/ia64/kvm/mmio.c | 6
-rw-r--r--  arch/ia64/kvm/vcpu.c | 6
-rw-r--r--  arch/ia64/kvm/vcpu.h | 13
-rw-r--r--  arch/mn10300/include/asm/pci.h | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 2
-rw-r--r--  arch/powerpc/kernel/dma.c | 6
-rw-r--r--  arch/s390/kvm/interrupt.c | 2
-rw-r--r--  arch/sh/boards/board-ap325rxa.c | 2
-rw-r--r--  arch/sh/boards/mach-migor/setup.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh2/setup-sh7619.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-mxg.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7201.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7705.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh770x.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7710.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7720.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh4-202.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7343.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7770.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-shx3.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh5/setup-sh5.c | 2
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 8
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c | 8
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 7
-rw-r--r--  arch/x86/kernel/cpu/common.c | 48
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 18
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 40
-rw-r--r--  arch/x86/kernel/efi.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 16
-rw-r--r--  arch/x86/kernel/tsc.c | 29
-rw-r--r--  arch/x86/kernel/vmi_32.c | 2
-rw-r--r--  arch/x86/kvm/i8254.c | 3
-rw-r--r--  arch/x86/kvm/mmu.c | 48
-rw-r--r--  arch/x86/kvm/svm.c | 6
-rw-r--r--  arch/x86/kvm/vmx.c | 6
-rw-r--r--  arch/x86/kvm/x86.c | 44
56 files changed, 263 insertions, 139 deletions
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 58a7e46affda..e7cbaa02cd0b 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -41,11 +41,6 @@ $(error Sorry, you need a newer version of the assember, one that is built from
 	ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz)
 endif
 
-ifeq ($(call cc-version),0304)
-	cflags-$(CONFIG_ITANIUM)	+= -mtune=merced
-	cflags-$(CONFIG_MCKINLEY)	+= -mtune=mckinley
-endif
-
 KBUILD_CFLAGS += $(cflags-y)
 head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
 
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index e2ca80037335..57a2787bc9fb 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -286,7 +286,7 @@ __test_and_clear_bit(int nr, volatile void * addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
-	int oldbitset = *p & m;
+	int oldbitset = (*p & m) != 0;
 
 	*p &= ~m;
 	return oldbitset;
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 0a9cc73d35c7..8840a690d1e7 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -155,7 +155,6 @@
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
-#include <asm/processor.h>
 
 /*
  * Next come the mappings that determine how mmap() protection bits
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 2d311864e359..8ebccb589e1c 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -21,6 +21,7 @@ EXPORT_SYMBOL(csum_ipv6_magic);
 
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 #include <linux/bootmem.h>
@@ -60,9 +61,6 @@ EXPORT_SYMBOL(__udivdi3);
 EXPORT_SYMBOL(__moddi3);
 EXPORT_SYMBOL(__umoddi3);
 
-#include <asm/page.h>
-EXPORT_SYMBOL(copy_page);
-
 #if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
 extern void xor_ia64_2(void);
 extern void xor_ia64_3(void);
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c48b03f2b61d..dab4d393908c 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -1072,6 +1072,10 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
 	}
 
 	addr = ioremap(phys_addr, 0);
+	if (addr == NULL) {
+		spin_unlock_irqrestore(&iosapic_lock, flags);
+		return -ENOMEM;
+	}
 	ver = iosapic_version(addr);
 	if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
 		iounmap(addr);
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 05695962fe44..f6b1ff0aea76 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -69,11 +69,6 @@ iommu_dma_init(void)
 
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-
-	if (ops->dma_supported)
-		return ops->dma_supported(dev, mask);
-
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
 	   The caller just has to use GFP_DMA in this case. */
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index bc80dff1df7a..8f060352e129 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -372,6 +372,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
 				      &cache_ktype_percpu_entry, &sys_dev->kobj,
 				      "%s", "cache");
+	if (unlikely(retval < 0)) {
+		cpu_cache_sysfs_exit(cpu);
+		return retval;
+	}
 
 	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
 		this_object = LEAF_KOBJECT_PTR(cpu,i);
@@ -385,7 +389,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 			}
 			kobject_put(&all_cpu_cache_info[cpu].kobj);
 			cpu_cache_sysfs_exit(cpu);
-			break;
+			return retval;
 		}
 		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
 	}
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 21f63fffc379..9bf55afd08d0 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -247,7 +247,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
 		vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
 		/* Write high word. FIXME: this is a kludge! */
 		v.u.bits[1] &= 0x3ffff;
-		mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
+				ma, IOREQ_WRITE);
 		data = v.u.bits[0];
 		size = 3;
 	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
@@ -265,7 +266,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
 
 		/* Write high word.FIXME: this is a kludge! */
 		v.u.bits[1] &= 0x3ffff;
-		mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
+				8, ma, IOREQ_WRITE);
 		data = v.u.bits[0];
 		size = 3;
 	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index 46b02cbcc874..cc406d064a09 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -461,7 +461,7 @@ void setreg(unsigned long regnum, unsigned long val,
 u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-	u64 val;
+	unsigned long val;
 
 	if (!reg)
 		return 0;
@@ -469,7 +469,7 @@ u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
 	return val;
 }
 
-void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
+void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
 {
 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
 	long sof = (regs->cr_ifs) & 0x7f;
@@ -1072,7 +1072,7 @@ void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
 }
 
-int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
+int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
 {
 	struct thash_data *data;
 	union ia64_isr visr, pt_isr;
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 042af92ced83..360724d3ae69 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -686,14 +686,15 @@ static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
 	return highest_bits((int *)&(VMX(vcpu, insvc[0])));
 }
 
-extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
 					struct ia64_fpreg *val);
-extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
 					struct ia64_fpreg *val);
-extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg);
-extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat);
-extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu);
-extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val);
+extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg);
+extern void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg,
+					u64 val, int nat);
+extern unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu);
+extern void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val);
 extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
 extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
 extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h
index 35d2ed6396f6..19aecc90f7a4 100644
--- a/arch/mn10300/include/asm/pci.h
+++ b/arch/mn10300/include/asm/pci.h
@@ -59,7 +59,6 @@ void pcibios_penalize_isa_irq(int irq);
 #include <linux/slab.h>
 #include <asm/scatterlist.h>
 #include <linux/string.h>
-#include <linux/mm.h>
 #include <asm/io.h>
 
 struct pci_dev;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index dfdf13c9fefd..fddc3ed715fa 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,7 +34,7 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
 /* We don't currently support large pages. */
-#define KVM_PAGES_PER_HPAGE (1<<31)
+#define KVM_PAGES_PER_HPAGE (1UL << 31)
 
 struct kvm;
 struct kvm_run;
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 20a60d661ba8..ccf129d47d84 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -7,6 +7,7 @@
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/lmb.h>
 #include <asm/bug.h>
 #include <asm/abs_addr.h>
 
@@ -90,11 +91,10 @@ static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
 static int dma_direct_dma_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_PPC64
-	/* Could be improved to check for memory though it better be
-	 * done via some global so platforms can set the limit in case
+	/* Could be improved so platforms can set the limit in case
 	 * they have limited DMA windows
 	 */
-	return mask >= DMA_BIT_MASK(32);
+	return mask >= (lmb_end_of_DRAM() - 1);
 #else
 	return 1;
 #endif
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f04f5301b1b4..4d613415c435 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -386,7 +386,7 @@ no_timer:
 	}
 	__unset_cpu_idle(vcpu);
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&vcpu->wq, &wait);
+	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
 	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index 7ffd1b4315bd..b9c88cc519e2 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -547,7 +547,7 @@ static int __init ap325rxa_devices_setup(void)
 	return platform_add_devices(ap325rxa_devices,
 				ARRAY_SIZE(ap325rxa_devices));
 }
-device_initcall(ap325rxa_devices_setup);
+arch_initcall(ap325rxa_devices_setup);
 
 /* Return the board specific boot mode pin configuration */
 static int ap325rxa_mode_pins(void)
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index f70f4644deb4..f9b2e4df35b9 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -608,7 +608,7 @@ static int __init migor_devices_setup(void)
 
 	return platform_add_devices(migor_devices, ARRAY_SIZE(migor_devices));
 }
-__initcall(migor_devices_setup);
+arch_initcall(migor_devices_setup);
 
 /* Return the board specific boot mode pin configuration */
 static int migor_mode_pins(void)
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 13798733f2db..8555c05e8667 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -187,7 +187,7 @@ static int __init sh7619_devices_setup(void)
 	return platform_add_devices(sh7619_devices,
 				    ARRAY_SIZE(sh7619_devices));
 }
-__initcall(sh7619_devices_setup);
+arch_initcall(sh7619_devices_setup);
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
index 869c2da4820b..b67376445315 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -238,7 +238,7 @@ static int __init mxg_devices_setup(void)
 	return platform_add_devices(mxg_devices,
 				    ARRAY_SIZE(mxg_devices));
 }
-__initcall(mxg_devices_setup);
+arch_initcall(mxg_devices_setup);
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
index d8febe128066..fbde5b75deb9 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -357,7 +357,7 @@ static int __init sh7201_devices_setup(void)
 	return platform_add_devices(sh7201_devices,
 				    ARRAY_SIZE(sh7201_devices));
 }
-__initcall(sh7201_devices_setup);
+arch_initcall(sh7201_devices_setup);
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index 62e3039d2398..d3fd536c9a84 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -367,7 +367,7 @@ static int __init sh7203_devices_setup(void)
 	return platform_add_devices(sh7203_devices,
 				    ARRAY_SIZE(sh7203_devices));
 }
-__initcall(sh7203_devices_setup);
+arch_initcall(sh7203_devices_setup);
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index 3e6f3d7a58be..a9ccc5e8d9e9 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -338,7 +338,7 @@ static int __init sh7206_devices_setup(void)
 	return platform_add_devices(sh7206_devices,
 				    ARRAY_SIZE(sh7206_devices));
 }
-__initcall(sh7206_devices_setup);
+arch_initcall(sh7206_devices_setup);
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index 88f742fed9ed..c23105983878 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -222,7 +222,7 @@ static int __init sh7705_devices_setup(void)
 	return platform_add_devices(sh7705_devices,
 				    ARRAY_SIZE(sh7705_devices));
 }
-__initcall(sh7705_devices_setup);
+arch_initcall(sh7705_devices_setup);
 
 static struct platform_device *sh7705_early_devices[] __initdata = {
 	&tmu0_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index c56306798584..347ab35d0697 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -250,7 +250,7 @@ static int __init sh770x_devices_setup(void)
 	return platform_add_devices(sh770x_devices,
 				    ARRAY_SIZE(sh770x_devices));
 }
-__initcall(sh770x_devices_setup);
+arch_initcall(sh770x_devices_setup);
 
 static struct platform_device *sh770x_early_devices[] __initdata = {
 	&tmu0_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index efa76c8148f4..717e90ae1097 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -226,7 +226,7 @@ static int __init sh7710_devices_setup(void)
 	return platform_add_devices(sh7710_devices,
 				    ARRAY_SIZE(sh7710_devices));
 }
-__initcall(sh7710_devices_setup);
+arch_initcall(sh7710_devices_setup);
 
 static struct platform_device *sh7710_early_devices[] __initdata = {
 	&tmu0_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index 5b2107798edb..74d8baaf8e96 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -388,7 +388,7 @@ static int __init sh7720_devices_setup(void)
 	return platform_add_devices(sh7720_devices,
 				    ARRAY_SIZE(sh7720_devices));
 }
-__initcall(sh7720_devices_setup);
+arch_initcall(sh7720_devices_setup);
 
 static struct platform_device *sh7720_early_devices[] __initdata = {
 	&cmt0_device,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index 6d088d123591..de4827df19aa 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -138,7 +138,7 @@ static int __init sh4202_devices_setup(void)
 	return platform_add_devices(sh4202_devices,
 				    ARRAY_SIZE(sh4202_devices));
 }
-__initcall(sh4202_devices_setup);
+arch_initcall(sh4202_devices_setup);
 
 static struct platform_device *sh4202_early_devices[] __initdata = {
 	&tmu0_device,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 851672d15cf4..1b8b122e8f3d 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -239,7 +239,7 @@ static int __init sh7750_devices_setup(void)
 	return platform_add_devices(sh7750_devices,
 				    ARRAY_SIZE(sh7750_devices));
 }
-__initcall(sh7750_devices_setup);
+arch_initcall(sh7750_devices_setup);
 
 static struct platform_device *sh7750_early_devices[] __initdata = {
 	&tmu0_device,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 5b822519bd90..7fbb7be9284c 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -265,7 +265,7 @@ static int __init sh7760_devices_setup(void)
 	return platform_add_devices(sh7760_devices,
 				    ARRAY_SIZE(sh7760_devices));
 }
-__initcall(sh7760_devices_setup);
+arch_initcall(sh7760_devices_setup);
 
 static struct platform_device *sh7760_early_devices[] __initdata = {
 	&tmu0_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index 6307e087c864..ac4d5672ec1a 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -325,7 +325,7 @@ static int __init sh7343_devices_setup(void)
 	return platform_add_devices(sh7343_devices,
 				    ARRAY_SIZE(sh7343_devices));
 }
-__initcall(sh7343_devices_setup);
+arch_initcall(sh7343_devices_setup);
 
 static struct platform_device *sh7343_early_devices[] __initdata = {
 	&cmt_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index c18f7d09281b..1a956b1beccc 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -318,7 +318,7 @@ static int __init sh7366_devices_setup(void)
 	return platform_add_devices(sh7366_devices,
 				    ARRAY_SIZE(sh7366_devices));
 }
-__initcall(sh7366_devices_setup);
+arch_initcall(sh7366_devices_setup);
 
 static struct platform_device *sh7366_early_devices[] __initdata = {
 	&cmt_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index ea524a2da3e4..cda76ebf87c3 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -359,7 +359,7 @@ static int __init sh7722_devices_setup(void)
 	return platform_add_devices(sh7722_devices,
 				    ARRAY_SIZE(sh7722_devices));
 }
-__initcall(sh7722_devices_setup);
+arch_initcall(sh7722_devices_setup);
 
 static struct platform_device *sh7722_early_devices[] __initdata = {
 	&cmt_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index e1bb80b2a27b..b45dace9539f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -473,7 +473,7 @@ static int __init sh7723_devices_setup(void)
 	return platform_add_devices(sh7723_devices,
 				    ARRAY_SIZE(sh7723_devices));
 }
-__initcall(sh7723_devices_setup);
+arch_initcall(sh7723_devices_setup);
 
 static struct platform_device *sh7723_early_devices[] __initdata = {
 	&cmt_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index e5ac9eb11c63..a04edaab9a29 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -508,7 +508,7 @@ static int __init sh7724_devices_setup(void)
 	return platform_add_devices(sh7724_devices,
 				    ARRAY_SIZE(sh7724_devices));
 }
-device_initcall(sh7724_devices_setup);
+arch_initcall(sh7724_devices_setup);
 
 static struct platform_device *sh7724_early_devices[] __initdata = {
 	&cmt_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index f1e0c0d36da7..4659fff6b842 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -314,7 +314,7 @@ static int __init sh7763_devices_setup(void)
 	return platform_add_devices(sh7763_devices,
 				    ARRAY_SIZE(sh7763_devices));
 }
-__initcall(sh7763_devices_setup);
+arch_initcall(sh7763_devices_setup);
 
 static struct platform_device *sh7763_early_devices[] __initdata = {
 	&tmu0_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index 1e86209db284..eead08d89d32 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -368,7 +368,7 @@ static int __init sh7770_devices_setup(void)
 	return platform_add_devices(sh7770_devices,
 				    ARRAY_SIZE(sh7770_devices));
 }
-__initcall(sh7770_devices_setup);
+arch_initcall(sh7770_devices_setup);
 
 static struct platform_device *sh7770_early_devices[] __initdata = {
 	&tmu0_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 715e05b431e5..2c901f446959 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -256,7 +256,7 @@ static int __init sh7780_devices_setup(void)
 	return platform_add_devices(sh7780_devices,
 				    ARRAY_SIZE(sh7780_devices));
 }
-__initcall(sh7780_devices_setup);
+arch_initcall(sh7780_devices_setup);
 
 static struct platform_device *sh7780_early_devices[] __initdata = {
 	&tmu0_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index af561402570b..7f6c718b6c36 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -263,7 +263,7 @@ static int __init sh7785_devices_setup(void)
 	return platform_add_devices(sh7785_devices,
 				    ARRAY_SIZE(sh7785_devices));
 }
-__initcall(sh7785_devices_setup);
+arch_initcall(sh7785_devices_setup);
 
 static struct platform_device *sh7785_early_devices[] __initdata = {
 	&tmu0_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index b70049470a0b..0104a8ec5369 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -547,7 +547,7 @@ static int __init sh7786_devices_setup(void)
 	return platform_add_devices(sh7786_devices,
 				    ARRAY_SIZE(sh7786_devices));
 }
-device_initcall(sh7786_devices_setup);
+arch_initcall(sh7786_devices_setup);
 
 void __init plat_early_device_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 53c65fd9ccef..07f078961c71 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -256,7 +256,7 @@ static int __init shx3_devices_setup(void)
 	return platform_add_devices(shx3_devices,
 				    ARRAY_SIZE(shx3_devices));
 }
-__initcall(shx3_devices_setup);
+arch_initcall(shx3_devices_setup);
 
 void __init plat_early_device_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
index f5ff1ac57fc2..6a0f82f70032 100644
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -186,7 +186,7 @@ static int __init sh5_devices_setup(void)
 	return platform_add_devices(sh5_devices,
 				    ARRAY_SIZE(sh5_devices));
 }
-__initcall(sh5_devices_setup);
+arch_initcall(sh5_devices_setup);
 
 void __init plat_early_device_setup(void)
 {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 738bdc6b0f8b..13ffa5df37d7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -24,6 +24,7 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select HAVE_PERF_COUNTERS if (!M386 && !M486)
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -742,7 +743,6 @@ config X86_UP_IOAPIC
 config X86_LOCAL_APIC
 	def_bool y
 	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
-	select HAVE_PERF_COUNTERS if (!M386 && !M486)
 
 config X86_IO_APIC
 	def_bool y
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 2ed4e2bb3b32..a5371ec36776 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -17,11 +17,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return x2apic_enabled();
 }
 
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
+/*
+ * need to use more than cpu 0, because we need more vectors when
+ * MSI-X are used.
+ */
 static const struct cpumask *x2apic_target_cpus(void)
 {
-	return cpumask_of(0);
+	return cpu_online_mask;
 }
 
 /*
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 0b631c6a2e00..a8989aadc99a 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -27,11 +27,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
+/*
+ * need to use more than cpu 0, because we need more vectors when
+ * MSI-X are used.
+ */
 static const struct cpumask *x2apic_target_cpus(void)
 {
-	return cpumask_of(0);
+	return cpu_online_mask;
 }
 
 static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e2485b03f1cf..63fddcd082cd 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -400,6 +400,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		level = cpuid_eax(1);
 		if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+		/*
+		 * Some BIOSes incorrectly force this feature, but only K8
+		 * revision D (model = 0x14) and later actually support it.
+		 */
+		if (c->x86_model < 0x14)
+			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 	}
 	if (c->x86 == 0x10 || c->x86 == 0x11)
 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f1961c07af9a..5ce60a88027b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -59,7 +59,30 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static const struct cpu_dev *this_cpu __cpuinitdata;
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+	display_cacheinfo(c);
+#else
+	/* Not much we can do here... */
+	/* Check if at least it has cpuid */
+	if (c->cpuid_level == -1) {
+		/* No cpuid. It must be an ancient CPU */
+		if (c->x86 == 4)
+			strcpy(c->x86_model_id, "486");
+		else if (c->x86 == 3)
+			strcpy(c->x86_model_id, "386");
+	}
+#endif
+}
+
+static const struct cpu_dev __cpuinitconst default_cpu = {
+	.c_init		= default_init,
+	.c_vendor	= "Unknown",
+	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
+};
+
+static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -332,29 +355,6 @@ void switch_to_new_gdt(int cpu)
 
 static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_64
-	display_cacheinfo(c);
-#else
-	/* Not much we can do here... */
-	/* Check if at least it has cpuid */
-	if (c->cpuid_level == -1) {
-		/* No cpuid. It must be an ancient CPU */
-		if (c->x86 == 4)
-			strcpy(c->x86_model_id, "486");
-		else if (c->x86 == 3)
-			strcpy(c->x86_model_id, "386");
-	}
-#endif
-}
-
-static const struct cpu_dev __cpuinitconst default_cpu = {
-	.c_init		= default_init,
-	.c_vendor	= "Unknown",
-	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
-};
-
 static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index bff8dd191dd5..8bc64cfbe936 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -36,6 +36,7 @@
 
 static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
+static DEFINE_PER_CPU(bool, thermal_throttle_active);
 
 static atomic_t therm_throt_en = ATOMIC_INIT(0);
 
@@ -96,24 +97,27 @@ static int therm_throt_process(int curr)
 {
 	unsigned int cpu = smp_processor_id();
 	__u64 tmp_jiffs = get_jiffies_64();
+	bool was_throttled = __get_cpu_var(thermal_throttle_active);
+	bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
 
-	if (curr)
+	if (is_throttled)
 		__get_cpu_var(thermal_throttle_count)++;
 
-	if (time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+	if (!(was_throttled ^ is_throttled) &&
+	    time_before64(tmp_jiffs, __get_cpu_var(next_check)))
 		return 0;
 
 	__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
 
 	/* if we just entered the thermal event */
-	if (curr) {
+	if (is_throttled) {
 		printk(KERN_CRIT "CPU%d: Temperature above threshold, "
-		       "cpu clock throttled (total events = %lu)\n", cpu,
-		       __get_cpu_var(thermal_throttle_count));
+		       "cpu clock throttled (total events = %lu)\n",
+		       cpu, __get_cpu_var(thermal_throttle_count));
 
 		add_taint(TAINT_MACHINE_CHECK);
-	} else {
-		printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu);
+	} else if (was_throttled) {
+		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
 	}
 
 	return 1;
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index b237c181aa41..396e35db7058 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -97,6 +97,7 @@ struct x86_pmu {
 	int		num_counters_fixed;
 	int		counter_bits;
 	u64		counter_mask;
+	int		apic;
 	u64		max_period;
 	u64		intel_ctrl;
 	void		(*enable_bts)(u64 config);
@@ -116,8 +117,8 @@ static const u64 p6_perfmon_event_map[] =
 {
   [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
   [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
-  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
   [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
@@ -660,6 +661,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 
 static bool reserve_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	int i;
 
 	if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -674,9 +676,11 @@ static bool reserve_pmc_hardware(void)
 		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
 			goto eventsel_fail;
 	}
+#endif
 
 	return true;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 eventsel_fail:
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu.eventsel + i);
@@ -691,10 +695,12 @@ perfctr_fail:
 	enable_lapic_nmi_watchdog();
 
 	return false;
+#endif
 }
 
 static void release_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
@@ -704,6 +710,7 @@ static void release_pmc_hardware(void)
 
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		enable_lapic_nmi_watchdog();
+#endif
 }
 
 static inline bool bts_available(void)
@@ -934,6 +941,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		hwc->sample_period = x86_pmu.max_period;
 		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
+	} else {
+		/*
+		 * If we have a PMU initialized but no APIC
+		 * interrupts, we cannot sample hardware
+		 * counters (user-space has to fall back and
+		 * sample via a hrtimer based software counter):
+		 */
+		if (!x86_pmu.apic)
+			return -EOPNOTSUPP;
 	}
 
 	counter->destroy = hw_perf_counter_destroy;
@@ -1755,18 +1771,22 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
 
 void set_perf_counter_pending(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
+#endif
 }
 
 void perf_counters_lapic_init(void)
 {
-	if (!x86_pmu_initialized())
+#ifdef CONFIG_X86_LOCAL_APIC
+	if (!x86_pmu.apic || !x86_pmu_initialized())
 		return;
 
 	/*
 	 * Always use NMI for PMU
 	 */
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 }
 
 static int __kprobes
@@ -1790,7 +1810,9 @@ perf_counter_nmi_handler(struct notifier_block *self,
 
 	regs = args->regs;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 	/*
 	 * Can't rely on the handled return value to say it was our NMI, two
 	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
@@ -1821,6 +1843,7 @@ static struct x86_pmu p6_pmu = {
 	.event_map		= p6_pmu_event_map,
 	.raw_event		= p6_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.apic			= 1,
 	.max_period		= (1ULL << 31) - 1,
 	.version		= 0,
 	.num_counters		= 2,
@@ -1847,6 +1870,7 @@ static struct x86_pmu intel_pmu = {
 	.event_map		= intel_pmu_event_map,
 	.raw_event		= intel_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
+	.apic			= 1,
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
@@ -1872,6 +1896,7 @@ static struct x86_pmu amd_pmu = {
 	.num_counters		= 4,
 	.counter_bits		= 48,
 	.counter_mask		= (1ULL << 48) - 1,
+	.apic			= 1,
 	/* use highest bit to detect overflow */
 	.max_period		= (1ULL << 47) - 1,
 };
@@ -1897,13 +1922,14 @@ static int p6_pmu_init(void)
 		return -ENODEV;
 	}
 
+	x86_pmu = p6_pmu;
+
 	if (!cpu_has_apic) {
-		pr_info("no Local APIC, try rebooting with lapic");
-		return -ENODEV;
+		pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
+		pr_info("no hardware sampling interrupt available.\n");
+		x86_pmu.apic = 0;
 	}
 
-	x86_pmu = p6_pmu;
-
 	return 0;
 }
 
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 19ccf6d0dccf..fe26ba3e3451 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -354,7 +354,7 @@ void __init efi_init(void)
 	 */
 	c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
 	if (c16) {
-		for (i = 0; i < sizeof(vendor) && *c16; ++i)
+		for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
 			vendor[i] = *c16++;
 		vendor[i] = '\0';
 	} else
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 834c9da8bf9d..a06e8d101844 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -405,7 +405,7 @@ EXPORT_SYMBOL(machine_real_restart);
 #endif /* CONFIG_X86_32 */
 
 /*
- * Apple MacBook5,2 (2009 MacBook) needs reboot=p
+ * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
  */
 static int __init set_pci_reboot(const struct dmi_system_id *d)
 {
@@ -418,12 +418,20 @@ static int __init set_pci_reboot(const struct dmi_system_id *d)
 }
 
 static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
-	{ /* Handle problems with rebooting on Apple MacBook5,2 */
+	{ /* Handle problems with rebooting on Apple MacBook5 */
 		.callback = set_pci_reboot,
-		.ident = "Apple MacBook",
+		.ident = "Apple MacBook5",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
+		},
+	},
+	{ /* Handle problems with rebooting on Apple MacBookPro5 */
+		.callback = set_pci_reboot,
+		.ident = "Apple MacBookPro5",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
 		},
 	},
 	{ }
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6e1a368d21d4..71f4368b357e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -275,15 +275,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
  * use the TSC value at the transitions to calculate a pretty
  * good value for the TSC frequencty.
  */
+static inline int pit_verify_msb(unsigned char val)
+{
+	/* Ignore LSB */
+	inb(0x42);
+	return inb(0x42) == val;
+}
+
 static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
 {
 	int count;
 	u64 tsc = 0;
 
 	for (count = 0; count < 50000; count++) {
-		/* Ignore LSB */
-		inb(0x42);
-		if (inb(0x42) != val)
+		if (!pit_verify_msb(val))
 			break;
 		tsc = get_cycles();
 	}
@@ -336,8 +341,7 @@ static unsigned long quick_pit_calibrate(void)
 	 * to do that is to just read back the 16-bit counter
 	 * once from the PIT.
 	 */
-	inb(0x42);
-	inb(0x42);
+	pit_verify_msb(0);
 
 	if (pit_expect_msb(0xff, &tsc, &d1)) {
 		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
@@ -348,8 +352,19 @@ static unsigned long quick_pit_calibrate(void)
 			 * Iterate until the error is less than 500 ppm
 			 */
 			delta -= tsc;
-			if (d1+d2 < delta >> 11)
-				goto success;
+			if (d1+d2 >= delta >> 11)
+				continue;
+
+			/*
+			 * Check the PIT one more time to verify that
+			 * all TSC reads were stable wrt the PIT.
+			 *
+			 * This also guarantees serialization of the
+			 * last cycle read ('d2') in pit_expect_msb.
+			 */
+			if (!pit_verify_msb(0xfe - i))
+				break;
+			goto success;
 		}
 	}
 	printk("Fast TSC calibration failed\n");
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index b263423fbe2a..95a7289e4b0c 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -441,7 +441,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 	ap.ds = __USER_DS;
 	ap.es = __USER_DS;
 	ap.fs = __KERNEL_PERCPU;
-	ap.gs = 0;
+	ap.gs = __KERNEL_STACK_CANARY;
 
 	ap.eflags = 0;
 
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 4d6f0d293ee2..21f68e00524f 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -104,6 +104,9 @@ static s64 __kpit_elapsed(struct kvm *kvm)
 	ktime_t remaining;
 	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
 
+	if (!ps->pit_timer.period)
+		return 0;
+
 	/*
 	 * The Counter does not stop when it reaches zero. In
 	 * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7030b5f911bf..0ef5bb2b4043 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -489,16 +489,20 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
  *
  * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
  * containing more mappings.
+ *
+ * Returns the number of rmap entries before the spte was added or zero if
+ * the spte was not added.
+ *
  */
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
+static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 {
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_desc *desc;
 	unsigned long *rmapp;
-	int i;
+	int i, count = 0;
 
 	if (!is_rmap_pte(*spte))
-		return;
+		return count;
 	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
 	sp->gfns[spte - sp->spt] = gfn;
@@ -515,8 +519,10 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 	} else {
 		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
 		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
+		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) {
 			desc = desc->more;
+			count += RMAP_EXT;
+		}
 		if (desc->shadow_ptes[RMAP_EXT-1]) {
 			desc->more = mmu_alloc_rmap_desc(vcpu);
 			desc = desc->more;
@@ -525,6 +531,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 			;
 		desc->shadow_ptes[i] = spte;
 	}
+	return count;
 }
 
 static void rmap_desc_remove_entry(unsigned long *rmapp,
@@ -754,6 +761,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
 	return young;
 }
 
+#define RMAP_RECYCLE_THRESHOLD 1000
+
+static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
+{
+	unsigned long *rmapp;
+
+	gfn = unalias_gfn(vcpu->kvm, gfn);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
+
+	kvm_unmap_rmapp(vcpu->kvm, rmapp);
+	kvm_flush_remote_tlbs(vcpu->kvm);
+}
+
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
 	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
@@ -1407,24 +1427,25 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
  */
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 {
+	int used_pages;
+
+	used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
+	used_pages = max(0, used_pages);
+
 	/*
 	 * If we set the number of mmu pages to be smaller be than the
 	 * number of actived pages , we must to free some mmu pages before we
 	 * change the value
 	 */
 
-	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
-	    kvm_nr_mmu_pages) {
-		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
-				       - kvm->arch.n_free_mmu_pages;
-
-		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
+	if (used_pages > kvm_nr_mmu_pages) {
+		while (used_pages > kvm_nr_mmu_pages) {
 			struct kvm_mmu_page *page;
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
 			kvm_mmu_zap_page(kvm, page);
-			n_used_mmu_pages--;
+			used_pages--;
 		}
 		kvm->arch.n_free_mmu_pages = 0;
 	}
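The kvm_mmu_change_mmu_pages rewrite computes the used-page count once, clamps it at zero, and then evicts from the tail of active_mmu_pages until the new limit is met. A hedged sketch of that shrink-to-target pattern over a circular list; the list layout and names are invented stand-ins, not the kernel's list.h:

#include <stddef.h>

struct page_node {
	struct page_node *prev, *next; /* circular list, head is sentinel */
};

/* Unlink the oldest page (the one just before the sentinel). */
static void zap_oldest(struct page_node *head)
{
	struct page_node *victim = head->prev; /* caller ensures nonempty */
	victim->prev->next = head;
	head->prev = victim->prev;
	/* real code would free the victim's shadow page table here */
}

/* Evict until at most target pages remain in use. */
static void shrink_to_target(struct page_node *head, int used, int target)
{
	if (used < 0)		/* mirror the max(0, used_pages) clamp */
		used = 0;
	while (used > target) {
		zap_oldest(head);
		used--;
	}
}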
@@ -1740,6 +1761,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
 	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*shadow_pte);
+	int rmap_count;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
 		 " user_fault %d gfn %lx\n",
@@ -1781,9 +1803,11 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 
 	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
 	if (!was_rmapped) {
-		rmap_add(vcpu, shadow_pte, gfn, largepage);
+		rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
 		if (!is_rmap_pte(*shadow_pte))
 			kvm_release_pfn_clean(pfn);
+		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+			rmap_recycle(vcpu, gfn, largepage);
 	} else {
 		if (was_writeble)
 			kvm_release_pfn_dirty(pfn);
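Taken together, the mmu.c hunks make rmap_add return an estimate of how many reverse mappings the gfn already had (each full descriptor skipped counts as RMAP_EXT), and mmu_set_spte unmaps and flushes once a page accumulates more than RMAP_RECYCLE_THRESHOLD sptes. A standalone sketch of that count-then-recycle idea, simplified so every mapping lives in a descriptor (the kernel has a single-entry fast path the sketch omits); chain_add and the constants are invented:

#include <stdlib.h>

#define EXT 4                   /* slots per descriptor, like RMAP_EXT */
#define RECYCLE_THRESHOLD 1000  /* mirrors RMAP_RECYCLE_THRESHOLD */

struct desc {
	void *slot[EXT];
	struct desc *more;
};

/* Append p to the chain; return roughly how many entries preceded it. */
static int chain_add(struct desc **head, void *p)
{
	struct desc *d = *head;
	int i, count = 0;

	if (!d) {
		d = calloc(1, sizeof(*d)); /* error handling omitted */
		*head = d;
	} else {
		while (d->slot[EXT - 1] && d->more) { /* skip full descs */
			d = d->more;
			count += EXT;  /* each skipped descriptor is full */
		}
		if (d->slot[EXT - 1]) {   /* tail full: grow the chain */
			d->more = calloc(1, sizeof(*d));
			d = d->more;
		}
	}
	for (i = 0; d->slot[i]; i++)
		;
	d->slot[i] = p;
	return count;
}

A caller would then drop and rebuild the chain whenever chain_add returns more than RECYCLE_THRESHOLD, bounding the cost of walking any one chain.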
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 71510e07e69e..b1f658ad2f06 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -711,6 +711,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
 		kvm_migrate_timers(vcpu);
+		svm->asid_generation = 0;
 	}
 
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
@@ -1031,7 +1032,6 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 	}
 
-	svm->vcpu.cpu = svm_data->cpu;
 	svm->asid_generation = svm_data->asid_generation;
 	svm->vmcb->control.asid = svm_data->next_asid++;
 }
@@ -2300,8 +2300,8 @@ static void pre_svm_run(struct vcpu_svm *svm)
 	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
 
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
-	if (svm->vcpu.cpu != cpu ||
-	    svm->asid_generation != svm_data->asid_generation)
+	/* FIXME: handle wraparound of asid_generation */
+	if (svm->asid_generation != svm_data->asid_generation)
 		new_asid(svm, svm_data);
 }
 
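The svm.c hunks replace the cpu-number comparison with a pure generation test: a vcpu that migrates simply zeroes its asid_generation, which matches no live per-CPU generation, so the next pre_svm_run allocates a fresh ASID. A hedged sketch of such a generation scheme; the types, MAX_ASID, and function names here are invented, not SVM's:

#include <stdint.h>

#define MAX_ASID 255  /* pretend the hardware has 8-bit ASIDs */

struct cpu_asids {
	uint32_t generation;  /* starts at 1, so 0 is always "stale" */
	uint32_t next_asid;
};

struct vcpu_ctx {
	uint32_t asid_generation; /* 0 = invalid, forces reallocation */
	uint32_t asid;
};

static void new_asid(struct vcpu_ctx *v, struct cpu_asids *c)
{
	if (c->next_asid > MAX_ASID) { /* space exhausted: new generation */
		c->generation++;       /* invalidates every cached ASID */
		c->next_asid = 1;
		/* real code would flush the TLB for all ASIDs here */
	}
	v->asid_generation = c->generation;
	v->asid = c->next_asid++;
}

/* Called before entering the guest on this CPU. */
static void pre_run(struct vcpu_ctx *v, struct cpu_asids *c)
{
	if (v->asid_generation != c->generation)
		new_asid(v, c);
}

/* On migration, just invalidate: 0 never equals a live generation. */
static void on_migrate(struct vcpu_ctx *v)
{
	v->asid_generation = 0;
}

The FIXME retained in the hunk marks the remaining hole: if the generation counter itself wraps back to a value a long-idle vcpu still holds, a stale ASID could be reused.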
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 356a0ce85c68..29f912927a58 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3157,8 +3157,8 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	enum emulation_result err = EMULATE_DONE;
 
-	preempt_enable();
 	local_irq_enable();
+	preempt_enable();
 
 	while (!guest_state_valid(vcpu)) {
 		err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
@@ -3168,7 +3168,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
 
 	if (err != EMULATE_DONE) {
 		kvm_report_emulation_failure(vcpu, "emulation failure");
-		return;
+		break;
 	}
 
 	if (signal_pending(current))
@@ -3177,8 +3177,8 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
 		schedule();
 	}
 
-	local_irq_disable();
 	preempt_disable();
+	local_irq_disable();
 
 	vmx->invalid_state_emulation_result = err;
 }
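The vmx.c reordering restores symmetric nesting: the caller disabled preemption before interrupts, so the emulation detour re-enables interrupts first and preemption second, and takes them back in the original order, ensuring the preempt-enable point never runs with interrupts off. A sketch of the discipline with stub primitives standing in for the kernel's preempt_*/local_irq_* calls:

#include <stdio.h>

static int preempt_depth, irqs_off;

static void preempt_disable(void) { preempt_depth++; }
static void preempt_enable(void)  { preempt_depth--; }
static void irq_disable(void)     { irqs_off = 1; }
static void irq_enable(void)      { irqs_off = 0; }

/* Entered with preemption disabled first, then interrupts disabled. */
static void emulate_slow_path(void)
{
	/* Release in reverse order of acquisition: interrupts back on
	 * first, so preempt_enable() never runs with interrupts off. */
	irq_enable();
	preempt_enable();

	/* ...emulation loop that may sleep or schedule() goes here... */

	/* Re-take in the original order before resuming the fast path. */
	preempt_disable();
	irq_disable();
}

int main(void)
{
	preempt_disable();
	irq_disable();
	emulate_slow_path();
	irq_enable();
	preempt_enable();
	printf("depth=%d irqs_off=%d\n", preempt_depth, irqs_off); /* 0 0 */
	return 0;
}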
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fe5474aec41a..3d4529011828 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -704,11 +704,48 @@ static bool msr_mtrr_valid(unsigned msr)
 	return false;
 }
 
+static bool valid_pat_type(unsigned t)
+{
+	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
+}
+
+static bool valid_mtrr_type(unsigned t)
+{
+	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
+}
+
+static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+	int i;
+
+	if (!msr_mtrr_valid(msr))
+		return false;
+
+	if (msr == MSR_IA32_CR_PAT) {
+		for (i = 0; i < 8; i++)
+			if (!valid_pat_type((data >> (i * 8)) & 0xff))
+				return false;
+		return true;
+	} else if (msr == MSR_MTRRdefType) {
+		if (data & ~0xcff)
+			return false;
+		return valid_mtrr_type(data & 0xff);
+	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
+		for (i = 0; i < 8 ; i++)
+			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
+				return false;
+		return true;
+	}
+
+	/* variable MTRRs */
+	return valid_mtrr_type(data & 0xff);
+}
+
 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
 
-	if (!msr_mtrr_valid(msr))
+	if (!mtrr_valid(vcpu, msr, data))
 		return 1;
 
 	if (msr == MSR_MTRRdefType) {
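valid_pat_type and valid_mtrr_type test set membership with a single bitmask: bit t of the constant is set iff memory type t is architecturally valid (0xf3 covers types 0, 1, 4, 5, 6, 7; 0x73 covers 0, 1, 4, 5, 6). The same trick in a small standalone harness; type_valid and the assertions are illustrative, not kernel code:

#include <assert.h>
#include <stdbool.h>

/* Bit t of mask is set iff memory type t is valid. */
static bool type_valid(unsigned t, unsigned mask)
{
	return t < 8 && ((1u << t) & mask);
}

int main(void)
{
	assert(type_valid(7, 0xf3));   /* type 7 is valid for the PAT */
	assert(!type_valid(7, 0x73));  /* ...but not for MTRRs */
	assert(!type_valid(2, 0xf3));  /* 2 and 3 are reserved in both */
	assert(!type_valid(9, 0xf3));  /* out of range is rejected too */
	return 0;
}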
@@ -1079,14 +1116,13 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
 		goto out;
 	r = -E2BIG;
-	if (n < num_msrs_to_save)
+	if (n < msr_list.nmsrs)
 		goto out;
 	r = -EFAULT;
 	if (copy_to_user(user_msr_list->indices, &msrs_to_save,
 			 num_msrs_to_save * sizeof(u32)))
 		goto out;
-	if (copy_to_user(user_msr_list->indices
-			 + num_msrs_to_save * sizeof(u32),
+	if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
 			 &emulated_msrs,
 			 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
 		goto out;
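Besides sizing the -E2BIG check against the full msr_list.nmsrs (saved plus emulated MSRs), this hunk fixes a classic pointer-arithmetic scaling bug: indices is a u32 *, so pointer arithmetic already multiplies by sizeof(u32), and writing p + n * sizeof(u32) lands four times too far. A compilable illustration with an invented array:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t indices[16] = {0};
	size_t n = 2;

	uint32_t *right = indices + n;                    /* element 2 */
	uint32_t *wrong = indices + n * sizeof(uint32_t); /* element 8! */

	printf("right skips %td elements, wrong skips %td\n",
	       right - indices, wrong - indices);         /* 2 vs 8 */
	return 0;
}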