author		Ingo Molnar <mingo@elte.hu>	2009-12-28 03:23:13 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-28 03:23:13 -0500
commit		605c1a187f3ce82fbc243e2163c5ca8d1926df8e (patch)
tree		c8065a8c5606a66f81dc494ce22a5baa5e0dfe7e /arch/x86
parent		17a2a9b57a9a7d2fd8f97df951b5e63e0bd56ef5 (diff)
parent		ce9277fb08e6e721482f7011ca28dcd0449b197c (diff)
Merge branch 'iommu/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/Kconfig	11
-rw-r--r--	arch/x86/Kconfig.debug	4
-rw-r--r--	arch/x86/boot/header.S	2
-rw-r--r--	arch/x86/boot/version.c	4
-rw-r--r--	arch/x86/ia32/ia32entry.S	2
-rw-r--r--	arch/x86/ia32/sys_ia32.c	43
-rw-r--r--	arch/x86/include/asm/asm-offsets.h	1
-rw-r--r--	arch/x86/include/asm/dma-mapping.h	2
-rw-r--r--	arch/x86/include/asm/elf.h	1
-rw-r--r--	arch/x86/include/asm/geode.h	219
-rw-r--r--	arch/x86/include/asm/olpc.h	2
-rw-r--r--	arch/x86/include/asm/paravirt.h	14
-rw-r--r--	arch/x86/include/asm/paravirt_types.h	14
-rw-r--r--	arch/x86/include/asm/pci_x86.h	20
-rw-r--r--	arch/x86/include/asm/percpu.h	104
-rw-r--r--	arch/x86/include/asm/ptrace.h	2
-rw-r--r--	arch/x86/include/asm/spinlock.h	62
-rw-r--r--	arch/x86/include/asm/spinlock_types.h	10
-rw-r--r--	arch/x86/include/asm/sys_ia32.h	4
-rw-r--r--	arch/x86/include/asm/syscalls.h	2
-rw-r--r--	arch/x86/include/asm/topology.h	9
-rw-r--r--	arch/x86/include/asm/uv/bios.h	11
-rw-r--r--	arch/x86/include/asm/uv/uv_hub.h	44
-rw-r--r--	arch/x86/include/asm/xen/hypervisor.h	27
-rw-r--r--	arch/x86/kernel/Makefile	1
-rw-r--r--	arch/x86/kernel/acpi/cstate.c	2
-rw-r--r--	arch/x86/kernel/amd_iommu.c	4
-rw-r--r--	arch/x86/kernel/amd_iommu_init.c	13
-rw-r--r--	arch/x86/kernel/apic/io_apic.c	4
-rw-r--r--	arch/x86/kernel/apic/nmi.c	8
-rw-r--r--	arch/x86/kernel/bios_uv.c	8
-rw-r--r--	arch/x86/kernel/cpu/common.c	8
-rw-r--r--	arch/x86/kernel/cpu/cpu_debug.c	30
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c	45
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/powernow-k6.c	2
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/powernow-k7.c	19
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/powernow-k8.c	32
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/speedstep-ich.c	2
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/speedstep-lib.c	6
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/speedstep-lib.h	24
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/speedstep-smi.c	2
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c	54
-rw-r--r--	arch/x86/kernel/cpu/mtrr/if.c	11
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	31
-rw-r--r--	arch/x86/kernel/ds.c	4
-rw-r--r--	arch/x86/kernel/dumpstack.c	8
-rw-r--r--	arch/x86/kernel/dumpstack_64.c	33
-rw-r--r--	arch/x86/kernel/entry_64.S	6
-rw-r--r--	arch/x86/kernel/geode_32.c	196
-rw-r--r--	arch/x86/kernel/hw_breakpoint.c	5
-rw-r--r--	arch/x86/kernel/irq.c	14
-rw-r--r--	arch/x86/kernel/kgdb.c	14
-rw-r--r--	arch/x86/kernel/mfgpt_32.c	410
-rw-r--r--	arch/x86/kernel/olpc.c	4
-rw-r--r--	arch/x86/kernel/paravirt-spinlocks.c	4
-rw-r--r--	arch/x86/kernel/pci-calgary_64.c	6
-rw-r--r--	arch/x86/kernel/pci-gart_64.c	6
-rw-r--r--	arch/x86/kernel/ptrace.c	135
-rw-r--r--	arch/x86/kernel/reboot_fixups_32.c	2
-rw-r--r--	arch/x86/kernel/sys_i386_32.c	27
-rw-r--r--	arch/x86/kernel/sys_x86_64.c	17
-rw-r--r--	arch/x86/kernel/syscall_table_32.S	2
-rw-r--r--	arch/x86/kernel/tsc_sync.c	10
-rw-r--r--	arch/x86/kernel/x8664_ksyms_64.c	4
-rw-r--r--	arch/x86/kvm/svm.c	64
-rw-r--r--	arch/x86/lib/Makefile	4
-rw-r--r--	arch/x86/mm/pat.c	3
-rw-r--r--	arch/x86/pci/Makefile	5
-rw-r--r--	arch/x86/pci/acpi.c	74
-rw-r--r--	arch/x86/pci/amd_bus.c	120
-rw-r--r--	arch/x86/pci/bus_numa.c	101
-rw-r--r--	arch/x86/pci/bus_numa.h	27
-rw-r--r--	arch/x86/pci/common.c	20
-rw-r--r--	arch/x86/pci/early.c	7
-rw-r--r--	arch/x86/pci/i386.c	42
-rw-r--r--	arch/x86/pci/intel_bus.c	90
-rw-r--r--	arch/x86/pci/mmconfig-shared.c	356
-rw-r--r--	arch/x86/pci/mmconfig_32.c	16
-rw-r--r--	arch/x86/pci/mmconfig_64.c	88
-rw-r--r--	arch/x86/tools/test_get_len.c	2
-rw-r--r--	arch/x86/xen/enlighten.c	6
-rw-r--r--	arch/x86/xen/smp.c	41
-rw-r--r--	arch/x86/xen/spinlock.c	16
-rw-r--r--	arch/x86/xen/time.c	24
84 files changed, 1152 insertions, 1781 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 32a1918e1b88..3b2a5aca4edb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2012,18 +2012,9 @@ config SCx200HR_TIMER
 	  processor goes idle (as is done by the scheduler).  The
 	  other workaround is idle=poll boot option.
 
-config GEODE_MFGPT_TIMER
-	def_bool y
-	prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
-	depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
-	---help---
-	  This driver provides a clock event source based on the MFGPT
-	  timer(s) in the CS5535 and CS5536 companion chip for the geode.
-	  MFGPTs have a better resolution and max interval than the
-	  generic PIT, and are suitable for use as high-res timers.
-
 config OLPC
 	bool "One Laptop Per Child support"
+	select GPIOLIB
 	default n
 	---help---
 	  Add support for detecting the unique features of the OLPC
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 731318e5ac1d..bc01e3ebfeb2 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -187,8 +187,8 @@ config HAVE_MMIOTRACE_SUPPORT
 	def_bool y
 
 config X86_DECODER_SELFTEST
 	bool "x86 instruction decoder selftest"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && KPROBES
 	---help---
 	  Perform x86 instruction decoder selftests at build time.
 	  This option is useful for checking the sanity of x86 instruction
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index b31cc54b4641..93e689f4bd86 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -16,7 +16,7 @@
  */
 
 #include <asm/segment.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
 #include <asm/boot.h>
 #include <asm/e820.h>
 #include <asm/page_types.h>
diff --git a/arch/x86/boot/version.c b/arch/x86/boot/version.c
index 2723d9b5ce43..2b15aa488ffb 100644
--- a/arch/x86/boot/version.c
+++ b/arch/x86/boot/version.c
@@ -13,8 +13,8 @@
  */
 
 #include "boot.h"
-#include <linux/utsrelease.h>
-#include <linux/compile.h>
+#include <generated/utsrelease.h>
+#include <generated/compile.h>
 
 const char kernel_version[] =
 	UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") "
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 4eefdca9832b..53147ad85b96 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -696,7 +696,7 @@ ia32_sys_call_table:
 	.quad quiet_ni_syscall		/* streams2 */
 	.quad stub32_vfork		/* 190 */
 	.quad compat_sys_getrlimit
-	.quad sys32_mmap2
+	.quad sys_mmap_pgoff
 	.quad sys32_truncate64
 	.quad sys32_ftruncate64
 	.quad sys32_stat64		/* 195 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index df82c0e48ded..422572c77923 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -155,9 +155,6 @@ struct mmap_arg_struct {
 asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
 {
 	struct mmap_arg_struct a;
-	struct file *file = NULL;
-	unsigned long retval;
-	struct mm_struct *mm ;
 
 	if (copy_from_user(&a, arg, sizeof(a)))
 		return -EFAULT;
@@ -165,22 +162,8 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
 	if (a.offset & ~PAGE_MASK)
 		return -EINVAL;
 
-	if (!(a.flags & MAP_ANONYMOUS)) {
-		file = fget(a.fd);
-		if (!file)
-			return -EBADF;
-	}
-
-	mm = current->mm;
-	down_write(&mm->mmap_sem);
-	retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
-			       a.offset>>PAGE_SHIFT);
-	if (file)
-		fput(file);
-
-	up_write(&mm->mmap_sem);
-
-	return retval;
+	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+			      a.offset>>PAGE_SHIFT);
 }
 
 asmlinkage long sys32_mprotect(unsigned long start, size_t len,
@@ -483,30 +466,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
 	return ret;
 }
 
-asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long error;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			return -EBADF;
-	}
-
-	down_write(&mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&mm->mmap_sem);
-
-	if (file)
-		fput(file);
-	return error;
-}
-
 asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
 {
 	char *arch = "x86_64";
diff --git a/arch/x86/include/asm/asm-offsets.h b/arch/x86/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/x86/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 0f6c02f3b7d4..ac91eed21061 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -67,7 +67,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return 0;
 
-	return addr + size <= *dev->dma_mask;
+	return addr + size - 1 <= *dev->dma_mask;
 }
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
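Editor's note: the dma_capable() change above fixes an off-by-one -- a
buffer whose last byte lands exactly on the DMA mask was rejected by the
old comparison (and addr + size can even wrap around to zero). A
standalone user-space illustration with made-up values, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t mask = 0xffffffffULL;	/* 32-bit DMA mask */
		uint64_t addr = 0xfffff000ULL;	/* hypothetical bus address */
		uint64_t size = 0x1000;		/* last byte == mask exactly */

		printf("old: %d\n", addr + size <= mask);	/* 0: wrongly rejected */
		printf("new: %d\n", addr + size - 1 <= mask);	/* 1: accepted */
		return 0;
	}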
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 8a024babe5e6..b4501ee223ad 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -239,7 +239,6 @@ extern int force_personality32;
 #endif /* !CONFIG_X86_32 */
 
 #define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE	4096
 
 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h
index ad3c2ed75481..7cd73552a4e8 100644
--- a/arch/x86/include/asm/geode.h
+++ b/arch/x86/include/asm/geode.h
@@ -12,160 +12,7 @@
 
 #include <asm/processor.h>
 #include <linux/io.h>
-
-/* Generic southbridge functions */
-
-#define GEODE_DEV_PMS 0
-#define GEODE_DEV_ACPI 1
-#define GEODE_DEV_GPIO 2
-#define GEODE_DEV_MFGPT 3
-
-extern int geode_get_dev_base(unsigned int dev);
-
-/* Useful macros */
-#define geode_pms_base()	geode_get_dev_base(GEODE_DEV_PMS)
-#define geode_acpi_base()	geode_get_dev_base(GEODE_DEV_ACPI)
-#define geode_gpio_base()	geode_get_dev_base(GEODE_DEV_GPIO)
-#define geode_mfgpt_base()	geode_get_dev_base(GEODE_DEV_MFGPT)
-
-/* MSRS */
-
-#define MSR_GLIU_P2D_RO0	0x10000029
-
-#define MSR_LX_GLD_MSR_CONFIG	0x48002001
-#define MSR_LX_MSR_PADSEL	0x48002011	/* NOT 0x48000011; the data
-						 * sheet has the wrong value */
-#define MSR_GLCP_SYS_RSTPLL	0x4C000014
-#define MSR_GLCP_DOTPLL		0x4C000015
-
-#define MSR_LBAR_SMB		0x5140000B
-#define MSR_LBAR_GPIO		0x5140000C
-#define MSR_LBAR_MFGPT		0x5140000D
-#define MSR_LBAR_ACPI		0x5140000E
-#define MSR_LBAR_PMS		0x5140000F
-
-#define MSR_DIVIL_SOFT_RESET	0x51400017
-
-#define MSR_PIC_YSEL_LOW	0x51400020
-#define MSR_PIC_YSEL_HIGH	0x51400021
-#define MSR_PIC_ZSEL_LOW	0x51400022
-#define MSR_PIC_ZSEL_HIGH	0x51400023
-#define MSR_PIC_IRQM_LPC	0x51400025
-
-#define MSR_MFGPT_IRQ		0x51400028
-#define MSR_MFGPT_NR		0x51400029
-#define MSR_MFGPT_SETUP		0x5140002B
-
-#define MSR_LX_SPARE_MSR	0x80000011	/* DC-specific */
-
-#define MSR_GX_GLD_MSR_CONFIG	0xC0002001
-#define MSR_GX_MSR_PADSEL	0xC0002011
-
-/* Resource Sizes */
-
-#define LBAR_GPIO_SIZE		0xFF
-#define LBAR_MFGPT_SIZE		0x40
-#define LBAR_ACPI_SIZE		0x40
-#define LBAR_PMS_SIZE		0x80
-
-/* ACPI registers (PMS block) */
-
-/*
- * PM1_EN is only valid when VSA is enabled for 16 bit reads.
- * When VSA is not enabled, *always* read both PM1_STS and PM1_EN
- * with a 32 bit read at offset 0x0
- */
-
-#define PM1_STS			0x00
-#define PM1_EN			0x02
-#define PM1_CNT			0x08
-#define PM2_CNT			0x0C
-#define PM_TMR			0x10
-#define PM_GPE0_STS		0x18
-#define PM_GPE0_EN		0x1C
-
-/* PMC registers (PMS block) */
-
-#define PM_SSD			0x00
-#define PM_SCXA			0x04
-#define PM_SCYA			0x08
-#define PM_OUT_SLPCTL		0x0C
-#define PM_SCLK			0x10
-#define PM_SED			0x1
-#define PM_SCXD			0x18
-#define PM_SCYD			0x1C
-#define PM_IN_SLPCTL		0x20
-#define PM_WKD			0x30
-#define PM_WKXD			0x34
-#define PM_RD			0x38
-#define PM_WKXA			0x3C
-#define PM_FSD			0x40
-#define PM_TSD			0x44
-#define PM_PSD			0x48
-#define PM_NWKD			0x4C
-#define PM_AWKD			0x50
-#define PM_SSC			0x54
-
-/* VSA2 magic values */
-
-#define VSA_VRC_INDEX		0xAC1C
-#define VSA_VRC_DATA		0xAC1E
-#define VSA_VR_UNLOCK		0xFC53	/* unlock virtual register */
-#define VSA_VR_SIGNATURE	0x0003
-#define VSA_VR_MEM_SIZE		0x0200
-#define AMD_VSA_SIG		0x4132	/* signature is ascii 'VSA2' */
-#define GSW_VSA_SIG		0x534d	/* General Software signature */
-/* GPIO */
-
-#define GPIO_OUTPUT_VAL		0x00
-#define GPIO_OUTPUT_ENABLE	0x04
-#define GPIO_OUTPUT_OPEN_DRAIN	0x08
-#define GPIO_OUTPUT_INVERT	0x0C
-#define GPIO_OUTPUT_AUX1	0x10
-#define GPIO_OUTPUT_AUX2	0x14
-#define GPIO_PULL_UP		0x18
-#define GPIO_PULL_DOWN		0x1C
-#define GPIO_INPUT_ENABLE	0x20
-#define GPIO_INPUT_INVERT	0x24
-#define GPIO_INPUT_FILTER	0x28
-#define GPIO_INPUT_EVENT_COUNT	0x2C
-#define GPIO_READ_BACK		0x30
-#define GPIO_INPUT_AUX1		0x34
-#define GPIO_EVENTS_ENABLE	0x38
-#define GPIO_LOCK_ENABLE	0x3C
-#define GPIO_POSITIVE_EDGE_EN	0x40
-#define GPIO_NEGATIVE_EDGE_EN	0x44
-#define GPIO_POSITIVE_EDGE_STS	0x48
-#define GPIO_NEGATIVE_EDGE_STS	0x4C
-
-#define GPIO_MAP_X		0xE0
-#define GPIO_MAP_Y		0xE4
-#define GPIO_MAP_Z		0xE8
-#define GPIO_MAP_W		0xEC
-
-static inline u32 geode_gpio(unsigned int nr)
-{
-	BUG_ON(nr > 28);
-	return 1 << nr;
-}
-
-extern void geode_gpio_set(u32, unsigned int);
-extern void geode_gpio_clear(u32, unsigned int);
-extern int geode_gpio_isset(u32, unsigned int);
-extern void geode_gpio_setup_event(unsigned int, int, int);
-extern void geode_gpio_set_irq(unsigned int, unsigned int);
-
-static inline void geode_gpio_event_irq(unsigned int gpio, int pair)
-{
-	geode_gpio_setup_event(gpio, pair, 0);
-}
-
-static inline void geode_gpio_event_pme(unsigned int gpio, int pair)
-{
-	geode_gpio_setup_event(gpio, pair, 1);
-}
-
-/* Specific geode tests */
+#include <linux/cs5535.h>
 
 static inline int is_geode_gx(void)
 {
@@ -186,68 +33,4 @@ static inline int is_geode(void)
 	return (is_geode_gx() || is_geode_lx());
 }
 
-#ifdef CONFIG_MGEODE_LX
-extern int geode_has_vsa2(void);
-#else
-static inline int geode_has_vsa2(void)
-{
-	return 0;
-}
-#endif
-
-/* MFGPTs */
-
-#define MFGPT_MAX_TIMERS	8
-#define MFGPT_TIMER_ANY		(-1)
-
-#define MFGPT_DOMAIN_WORKING	1
-#define MFGPT_DOMAIN_STANDBY	2
-#define MFGPT_DOMAIN_ANY	(MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
-
-#define MFGPT_CMP1		0
-#define MFGPT_CMP2		1
-
-#define MFGPT_EVENT_IRQ		0
-#define MFGPT_EVENT_NMI		1
-#define MFGPT_EVENT_RESET	3
-
-#define MFGPT_REG_CMP1		0
-#define MFGPT_REG_CMP2		2
-#define MFGPT_REG_COUNTER	4
-#define MFGPT_REG_SETUP		6
-
-#define MFGPT_SETUP_CNTEN	(1 << 15)
-#define MFGPT_SETUP_CMP2	(1 << 14)
-#define MFGPT_SETUP_CMP1	(1 << 13)
-#define MFGPT_SETUP_SETUP	(1 << 12)
-#define MFGPT_SETUP_STOPEN	(1 << 11)
-#define MFGPT_SETUP_EXTEN	(1 << 10)
-#define MFGPT_SETUP_REVEN	(1 << 5)
-#define MFGPT_SETUP_CLKSEL	(1 << 4)
-
-static inline void geode_mfgpt_write(int timer, u16 reg, u16 value)
-{
-	u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
-	outw(value, base + reg + (timer * 8));
-}
-
-static inline u16 geode_mfgpt_read(int timer, u16 reg)
-{
-	u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
-	return inw(base + reg + (timer * 8));
-}
-
-extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable);
-extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable);
-extern int geode_mfgpt_alloc_timer(int timer, int domain);
-
-#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1)
-#define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0)
-
-#ifdef CONFIG_GEODE_MFGPT_TIMER
-extern int __init mfgpt_timer_setup(void);
-#else
-static inline int mfgpt_timer_setup(void) { return 0; }
-#endif
-
 #endif /* _ASM_X86_GEODE_H */
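Editor's note: the constants and helpers removed above were not dropped;
they moved to <linux/cs5535.h> alongside the new CS5535/CS5536 support in
drivers/misc. A hedged sketch of the replacement accessors -- the
cs5535_gpio_* names are assumed from that header, and example_toggle() is
purely illustrative:

	#include <linux/cs5535.h>

	static void example_toggle(unsigned int offset)
	{
		cs5535_gpio_set(offset, GPIO_OUTPUT_VAL);	/* drive high */
		if (cs5535_gpio_isset(offset, GPIO_READ_BACK))
			cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL);
	}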
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 834a30295fab..3a57385d9fa7 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -120,7 +120,7 @@ extern int olpc_ec_mask_unset(uint8_t bits);
 
 /* GPIO assignments */
 
-#define OLPC_GPIO_MIC_AC	geode_gpio(1)
+#define OLPC_GPIO_MIC_AC	1
 #define OLPC_GPIO_DCON_IRQ	geode_gpio(7)
 #define OLPC_GPIO_THRM_ALRM	geode_gpio(10)
 #define OLPC_GPIO_SMB_CLK	geode_gpio(14)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index efb38994859c..dd59a85a918f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int arch_spin_is_locked(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int arch_spin_is_contended(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
 						  unsigned long flags)
 {
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 9357473c8da0..b1e70d51e40c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -318,14 +318,14 @@ struct pv_mmu_ops {
 			   phys_addr_t phys, pgprot_t flags);
 };
 
-struct raw_spinlock;
+struct arch_spinlock;
 struct pv_lock_ops {
-	int (*spin_is_locked)(struct raw_spinlock *lock);
-	int (*spin_is_contended)(struct raw_spinlock *lock);
-	void (*spin_lock)(struct raw_spinlock *lock);
-	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct raw_spinlock *lock);
-	void (*spin_unlock)(struct raw_spinlock *lock);
+	int (*spin_is_locked)(struct arch_spinlock *lock);
+	int (*spin_is_contended)(struct arch_spinlock *lock);
+	void (*spin_lock)(struct arch_spinlock *lock);
+	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+	int (*spin_trylock)(struct arch_spinlock *lock);
+	void (*spin_unlock)(struct arch_spinlock *lock);
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b399988eee3a..b4bf9a942ed0 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -118,11 +118,27 @@ extern int __init pcibios_init(void);
 
 /* pci-mmconfig.c */
 
+/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
+#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
+
+struct pci_mmcfg_region {
+	struct list_head list;
+	struct resource res;
+	u64 address;
+	char __iomem *virt;
+	u16 segment;
+	u8 start_bus;
+	u8 end_bus;
+	char name[PCI_MMCFG_RESOURCE_NAME_LEN];
+};
+
 extern int __init pci_mmcfg_arch_init(void);
 extern void __init pci_mmcfg_arch_free(void);
+extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
+
+extern struct list_head pci_mmcfg_list;
 
-extern struct acpi_mcfg_allocation *pci_mmcfg_config;
-extern int pci_mmcfg_config_num;
+#define PCI_MMCFG_BUS_OFFSET(bus)	((bus) << 20)
 
 /*
  * AMD Fam10h CPUs are buggy, and cannot access MMIO config space
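Editor's note: PCI_MMCFG_BUS_OFFSET() encodes the standard ECAM layout --
each bus occupies 1 MiB of MMCONFIG space (256 devfn values x 4 KiB of
config space each). A sketch of how a mapping's virt pointer would be
indexed; mmcfg_addr() is an illustrative helper, not an API added by this
patch:

	static char __iomem *mmcfg_addr(struct pci_mmcfg_region *cfg,
					unsigned int bus, unsigned int devfn,
					int reg)
	{
		/* 4 KiB per function, 1 MiB per bus */
		return cfg->virt + PCI_MMCFG_BUS_OFFSET(bus) +
			(devfn << 12) + reg;
	}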
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b65a36defeb7..0c44196b78ac 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void);
 
 #define percpu_to_op(op, var, val)		\
 do {						\
-	typedef typeof(var) T__;		\
+	typedef typeof(var) pto_T__;		\
 	if (0) {				\
-		T__ tmp__;			\
-		tmp__ = (val);			\
+		pto_T__ pto_tmp__;		\
+		pto_tmp__ = (val);		\
 	}					\
 	switch (sizeof(var)) {			\
 	case 1:					\
 		asm(op "b %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "qi" ((T__)(val)));	\
+		    : "qi" ((pto_T__)(val)));	\
 		break;				\
 	case 2:					\
 		asm(op "w %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "ri" ((T__)(val)));	\
+		    : "ri" ((pto_T__)(val)));	\
 		break;				\
 	case 4:					\
 		asm(op "l %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "ri" ((T__)(val)));	\
+		    : "ri" ((pto_T__)(val)));	\
 		break;				\
 	case 8:					\
 		asm(op "q %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "re" ((T__)(val)));	\
+		    : "re" ((pto_T__)(val)));	\
 		break;				\
 	default: __bad_percpu_size();		\
 	}					\
@@ -106,31 +106,31 @@ do { \
 
 #define percpu_from_op(op, var, constraint)	\
 ({						\
-	typeof(var) ret__;			\
+	typeof(var) pfo_ret__;			\
 	switch (sizeof(var)) {			\
 	case 1:					\
 		asm(op "b "__percpu_arg(1)",%0"	\
-		    : "=q" (ret__)		\
+		    : "=q" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	case 2:					\
 		asm(op "w "__percpu_arg(1)",%0"	\
-		    : "=r" (ret__)		\
+		    : "=r" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	case 4:					\
 		asm(op "l "__percpu_arg(1)",%0"	\
-		    : "=r" (ret__)		\
+		    : "=r" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	case 8:					\
 		asm(op "q "__percpu_arg(1)",%0"	\
-		    : "=r" (ret__)		\
+		    : "=r" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	default: __bad_percpu_size();		\
 	}					\
-	ret__;					\
+	pfo_ret__;				\
 })
 
@@ -153,6 +153,84 @@ do { \
 #define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
 #define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
 
+#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+
+#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+/*
+ * Per cpu atomic 64 bit operations are only available under 64 bit.
+ * 32 bit must fall back to generic operations.
+ */
+#ifdef CONFIG_X86_64
+#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#endif
+
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define x86_test_and_clear_bit_percpu(bit, var)			\
 ({									\
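Editor's note: every operation added above compiles to a single
%gs-relative instruction, which is what allows later hunks in this series
(see apic/nmi.c below) to drop local_t. A usage sketch --
hypothetical_counter is an illustrative variable, not part of the patch:

	static DEFINE_PER_CPU(long, hypothetical_counter);

	static void count_event(void)
	{
		/* one "incq %gs:...": safe against IRQs/NMIs on this CPU */
		__this_cpu_inc(per_cpu_var(hypothetical_counter));

		if (__this_cpu_read(per_cpu_var(hypothetical_counter)) >= 100)
			__this_cpu_write(per_cpu_var(hypothetical_counter), 0);
	}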
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 3d11fd0f44c5..9d369f680321 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -292,6 +292,8 @@ extern void user_enable_block_step(struct task_struct *);
 #define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
 #endif
 
+#define ARCH_HAS_USER_SINGLE_STEP_INFO
+
 struct user_desc;
 extern int do_get_thread_area(struct task_struct *p, int idx,
 			      struct user_desc __user *info);
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4e77853321db..3089f70c0c52 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -58,7 +58,7 @@
 #if (NR_CPUS < 256)
 #define TICKET_SHIFT 8
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, new;
 
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 #else
 #define TICKET_SHIFT 16
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
@@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
 	return (int)(lock)->lock > 0;
 }
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock)
  * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
 	return (lock)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
 		     "jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		     ::LOCK_PTR_REG (rw) : "memory");
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
 		     "jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
@@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "addl %1, %0"
 		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 /* The {read|write|spin}_lock() on x86 are full memory barriers. */
 static inline void smp_mb__after_lock(void) { }
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 845f81c87091..dcb48b2edc11 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,16 +5,16 @@
 # error "please don't include this file directly"
 #endif
 
-typedef struct raw_spinlock {
+typedef struct arch_spinlock {
 	unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
 
 #endif /* _ASM_X86_SPINLOCK_TYPES_H */
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 9af9decb38c3..d5f69045c100 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -30,7 +30,6 @@ struct mmap_arg_struct;
 asmlinkage long sys32_mmap(struct mmap_arg_struct __user *);
 asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long);
 
-asmlinkage long sys32_pipe(int __user *);
 struct sigaction32;
 struct old_sigaction32;
 asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *,
@@ -57,9 +56,6 @@ asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32);
 asmlinkage long sys32_personality(unsigned long);
 asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
 
-asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long,
-			    unsigned long, unsigned long, unsigned long);
-
 struct oldold_utsname;
 struct old_utsname;
 asmlinkage long sys32_olduname(struct oldold_utsname __user *);
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index b0ce78061708..8868b9420b0e 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -56,8 +56,6 @@ struct sel_arg_struct;
 struct oldold_utsname;
 struct old_utsname;
 
-asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
-			  unsigned long, unsigned long, unsigned long);
 asmlinkage int old_mmap(struct mmap_arg_struct __user *);
 asmlinkage int old_select(struct sel_arg_struct __user *);
 asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 40e37b10c6c0..c5087d796587 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -35,11 +35,16 @@
 # endif
 #endif
 
-/* Node not present */
-#define NUMA_NO_NODE	(-1)
+/*
+ * to preserve the visibility of NUMA_NO_NODE definition,
+ * moved to there from here.  May be used independent of
+ * CONFIG_NUMA.
+ */
+#include <linux/numa.h>
 
 #ifdef CONFIG_NUMA
 #include <linux/cpumask.h>
+
 #include <asm/mpspec.h>
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 7ed17ff502b9..2751f3075d8b 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -76,15 +76,6 @@ union partition_info_u {
 	};
 };
 
-union uv_watchlist_u {
-	u64 val;
-	struct {
-		u64	blade	: 16,
-			size	: 32,
-			filler	: 16;
-	};
-};
-
 enum uv_memprotect {
 	UV_MEMPROT_RESTRICT_ACCESS,
 	UV_MEMPROT_ALLOW_AMO,
@@ -100,7 +91,7 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
 
 extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
 extern s64 uv_bios_freq_base(u64, u64 *);
-extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
+extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
 					unsigned long *);
 extern int uv_bios_mq_watchlist_free(int, int);
 extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index d1414af98559..811bfabc80b7 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -172,6 +172,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define UV_LOCAL_MMR_SIZE		(64UL * 1024 * 1024)
 #define UV_GLOBAL_MMR32_SIZE		(64UL * 1024 * 1024)
 
+#define UV_GLOBAL_GRU_MMR_BASE		0x4000000
+
 #define UV_GLOBAL_MMR32_PNODE_SHIFT	15
 #define UV_GLOBAL_MMR64_PNODE_SHIFT	26
 
@@ -232,6 +234,26 @@ static inline unsigned long uv_gpa(void *v)
 	return uv_soc_phys_ram_to_gpa(__pa(v));
 }
 
+/* Top two bits indicate the requested address is in MMR space.  */
+static inline int
+uv_gpa_in_mmr_space(unsigned long gpa)
+{
+	return (gpa >> 62) == 0x3UL;
+}
+
+/* UV global physical address --> socket phys RAM */
+static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
+{
+	unsigned long paddr = gpa & uv_hub_info->gpa_mask;
+	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
+	unsigned long remap_top = uv_hub_info->lowmem_remap_top;
+
+	if (paddr >= remap_base && paddr < remap_base + remap_top)
+		paddr -= remap_base;
+	return paddr;
+}
+
+
 /* gnode -> pnode */
 static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
 {
@@ -308,6 +330,15 @@ static inline unsigned long uv_read_global_mmr64(int pnode,
 }
 
 /*
+ * Global MMR space addresses when referenced by the GRU. (GRU does
+ * NOT use socket addressing).
+ */
+static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
+{
+	return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
+}
+
+/*
  * Access hub local MMRs. Faster than using global space but only local MMRs
  * are accessible.
  */
@@ -434,6 +465,14 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
 	}
 }
 
+static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
+{
+	return (1UL << UVH_IPI_INT_SEND_SHFT) |
+			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
+			(mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
+			(vector << UVH_IPI_INT_VECTOR_SHFT);
+}
+
 static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
 {
 	unsigned long val;
@@ -442,10 +481,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
 	if (vector == NMI_VECTOR)
 		dmode = dest_NMI;
 
-	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
-			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
-			(dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
-			(vector << UVH_IPI_INT_VECTOR_SHFT);
+	val = uv_hub_ipi_value(apicid, vector, dmode);
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
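Editor's note: with uv_hub_ipi_value() factored out, a caller can build
the UVH_IPI_INT payload separately from the MMR write, exactly as
uv_hub_send_ipi() above now does. A hedged usage sketch:

	static inline void send_fixed_ipi(int pnode, int apicid, int vector)
	{
		unsigned long val = uv_hub_ipi_value(apicid, vector, dest_Fixed);

		uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
	}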
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index d5b7e90c0edf..396ff4cc8ed4 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -37,31 +37,4 @@
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
-enum xen_domain_type {
-	XEN_NATIVE,		/* running on bare hardware    */
-	XEN_PV_DOMAIN,		/* running in a PV domain      */
-	XEN_HVM_DOMAIN,		/* running in a Xen hvm domain */
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type		XEN_NATIVE
-#endif
-
-#define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain() &&			\
-				 xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()	(xen_domain() &&			\
-				 xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#include <xen/interface/xen.h>
-
-#define xen_initial_domain()	(xen_pv_domain() && \
-				 xen_start_info->flags & SIF_INITDOMAIN)
-#else  /* !CONFIG_XEN_DOM0 */
-#define xen_initial_domain()	(0)
-#endif	/* CONFIG_XEN_DOM0 */
-
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4f2e66e29ecc..d87f09bc5a52 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -89,7 +89,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 obj-$(CONFIG_HPET_TIMER)	+= hpet.o
 
 obj-$(CONFIG_K8_NB)		+= k8.o
-obj-$(CONFIG_MGEODE_LX)		+= geode_32.o mfgpt_32.o
 obj-$(CONFIG_DEBUG_RODATA_TEST)	+= test_rodata.o
 obj-$(CONFIG_DEBUG_NX_TEST)	+= test_nx.o
 
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 59cdfa4686b2..2e837f5080fe 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -48,7 +48,7 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
 	 * P4, Core and beyond CPUs
 	 */
 	if (c->x86_vendor == X86_VENDOR_INTEL &&
-	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 14)))
+	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
 		flags->bm_control = 0;
 }
 EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index b990b5cc9541..23824fef789c 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -19,7 +19,7 @@
 
 #include <linux/pci.h>
 #include <linux/gfp.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
 #include <linux/debugfs.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
@@ -1162,7 +1162,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 
 	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
 
-	iommu_area_free(range->bitmap, address, pages);
+	bitmap_clear(range->bitmap, address, pages);
 
 }
 
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 309a52f96e0b..fb490ce7dd55 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -138,6 +138,11 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 
 /*
+ * Set to true if ACPI table parsing and hardware intialization went properly
+ */
+static bool amd_iommu_initialized;
+
+/*
  * List of protection domains - used during resume
  */
 LIST_HEAD(amd_iommu_pd_list);
@@ -929,6 +934,8 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	}
 	WARN_ON(p != end);
 
+	amd_iommu_initialized = true;
+
 	return 0;
 }
 
@@ -1263,6 +1270,9 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
 		goto free;
 
+	if (!amd_iommu_initialized)
+		goto free;
+
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
@@ -1345,6 +1355,9 @@ void __init amd_iommu_detect(void)
 		iommu_detected = 1;
 		amd_iommu_detected = 1;
 		x86_init.iommu.iommu_init = amd_iommu_init;
+
+		/* Make sure ACS will be enabled */
+		pci_request_acs();
 	}
 }
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 98ced709e829..de00c4619a55 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2432,7 +2432,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 			continue;
 
 		cfg = irq_cfg(irq);
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 
 		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
@@ -2451,7 +2451,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2451 } 2451 }
2452 __get_cpu_var(vector_irq)[vector] = -1; 2452 __get_cpu_var(vector_irq)[vector] = -1;
2453unlock: 2453unlock:
2454 spin_unlock(&desc->lock); 2454 raw_spin_unlock(&desc->lock);
2455 } 2455 }
2456 2456
2457 irq_exit(); 2457 irq_exit();
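desc->lock became a raw_spinlock_t in this release, so every user (here and in irq.c below) moves to the raw_spin_*() API; under PREEMPT_RT a plain spinlock_t turns into a sleeping lock, which is not acceptable in interrupt-vector cleanup paths. A minimal sketch of the API, assuming a kernel-module context:

        #include <linux/spinlock.h>

        static DEFINE_RAW_SPINLOCK(hw_lock);    /* stays a spinning lock even on RT */

        static void poke_hw(void)
        {
                unsigned long flags;

                raw_spin_lock_irqsave(&hw_lock, flags);
                /* ... touch hardware state that must not be preempted ... */
                raw_spin_unlock_irqrestore(&hw_lock, flags);
        }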
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 6389432a9dbf..0159a69396cb 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
361 */ 361 */
362 362
363static DEFINE_PER_CPU(unsigned, last_irq_sum); 363static DEFINE_PER_CPU(unsigned, last_irq_sum);
364static DEFINE_PER_CPU(local_t, alert_counter); 364static DEFINE_PER_CPU(long, alert_counter);
365static DEFINE_PER_CPU(int, nmi_touch); 365static DEFINE_PER_CPU(int, nmi_touch);
366 366
367void touch_nmi_watchdog(void) 367void touch_nmi_watchdog(void)
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
438 * Ayiee, looks like this CPU is stuck ... 438 * Ayiee, looks like this CPU is stuck ...
439 * wait a few IRQs (5 seconds) before doing the oops ... 439 * wait a few IRQs (5 seconds) before doing the oops ...
440 */ 440 */
441 local_inc(&__get_cpu_var(alert_counter)); 441 __this_cpu_inc(per_cpu_var(alert_counter));
442 if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz) 442 if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
443 /* 443 /*
444 * die_nmi will return ONLY if NOTIFY_STOP happens.. 444 * die_nmi will return ONLY if NOTIFY_STOP happens..
445 */ 445 */
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
447 regs, panic_on_timeout); 447 regs, panic_on_timeout);
448 } else { 448 } else {
449 __get_cpu_var(last_irq_sum) = sum; 449 __get_cpu_var(last_irq_sum) = sum;
450 local_set(&__get_cpu_var(alert_counter), 0); 450 __this_cpu_write(per_cpu_var(alert_counter), 0);
451 } 451 }
452 452
453 /* see if the nmi watchdog went off */ 453 /* see if the nmi watchdog went off */
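Dropping local_t here works because the __this_cpu_*() operations compile to a single %gs-prefixed instruction on x86, which is already safe against interrupts on the local CPU. A sketch of the idiom; note the diff still wraps names in per_cpu_var(), an artifact of this kernel's percpu symbol prefixing that later releases removed:

        #include <linux/percpu.h>

        static DEFINE_PER_CPU(long, alert_count);

        static void nmi_tick(int stuck, int threshold)
        {
                if (stuck) {
                        __this_cpu_inc(alert_count);    /* one incq %gs:... instruction */
                        if (__this_cpu_read(alert_count) == threshold)
                                /* ... raise the alarm ... */;
                } else {
                        __this_cpu_write(alert_count, 0);
                }
        }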
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index 63a88e1f987d..b0206a211b09 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -101,21 +101,17 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
101} 101}
102 102
103int 103int
104uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size, 104uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
105 unsigned long *intr_mmr_offset) 105 unsigned long *intr_mmr_offset)
106{ 106{
107 union uv_watchlist_u size_blade;
108 u64 watchlist; 107 u64 watchlist;
109 s64 ret; 108 s64 ret;
110 109
111 size_blade.size = mq_size;
112 size_blade.blade = blade;
113
114 /* 110 /*
 115 * BIOS returns watchlist number or negative error number. 111 * BIOS returns watchlist number or negative error number.
116 */ 112 */
117 ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr, 113 ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
118 size_blade.val, (u64)intr_mmr_offset, 114 mq_size, (u64)intr_mmr_offset,
119 (u64)&watchlist, 0); 115 (u64)&watchlist, 0);
120 if (ret < BIOS_STATUS_SUCCESS) 116 if (ret < BIOS_STATUS_SUCCESS)
121 return ret; 117 return ret;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0ee9a3254eec..4868e4a951ee 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1095,7 +1095,7 @@ static void clear_all_debug_regs(void)
1095 1095
1096void __cpuinit cpu_init(void) 1096void __cpuinit cpu_init(void)
1097{ 1097{
1098 struct orig_ist *orig_ist; 1098 struct orig_ist *oist;
1099 struct task_struct *me; 1099 struct task_struct *me;
1100 struct tss_struct *t; 1100 struct tss_struct *t;
1101 unsigned long v; 1101 unsigned long v;
@@ -1104,7 +1104,7 @@ void __cpuinit cpu_init(void)
1104 1104
1105 cpu = stack_smp_processor_id(); 1105 cpu = stack_smp_processor_id();
1106 t = &per_cpu(init_tss, cpu); 1106 t = &per_cpu(init_tss, cpu);
1107 orig_ist = &per_cpu(orig_ist, cpu); 1107 oist = &per_cpu(orig_ist, cpu);
1108 1108
1109#ifdef CONFIG_NUMA 1109#ifdef CONFIG_NUMA
1110 if (cpu != 0 && percpu_read(node_number) == 0 && 1110 if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1145,12 +1145,12 @@ void __cpuinit cpu_init(void)
1145 /* 1145 /*
1146 * set up and load the per-CPU TSS 1146 * set up and load the per-CPU TSS
1147 */ 1147 */
1148 if (!orig_ist->ist[0]) { 1148 if (!oist->ist[0]) {
1149 char *estacks = per_cpu(exception_stacks, cpu); 1149 char *estacks = per_cpu(exception_stacks, cpu);
1150 1150
1151 for (v = 0; v < N_EXCEPTION_STACKS; v++) { 1151 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1152 estacks += exception_stack_sizes[v]; 1152 estacks += exception_stack_sizes[v];
1153 orig_ist->ist[v] = t->x86_tss.ist[v] = 1153 oist->ist[v] = t->x86_tss.ist[v] =
1154 (unsigned long)estacks; 1154 (unsigned long)estacks;
1155 } 1155 }
1156 } 1156 }
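This rename, and the cpud_, acfreq_, cpu_ds_ and ici_ prefixes in the files below, clears the way for dropping the per_cpu__ symbol prefix: once per_cpu() references the plain symbol name, a local variable with the same name shadows the per-cpu variable and the accessor stops compiling. A minimal sketch of the collision, with hypothetical names:

        #include <linux/percpu.h>

        struct foo { int val; };

        static DEFINE_PER_CPU(struct foo, foo_state);

        static void fixed(int cpu)
        {
                struct foo *fs;                 /* distinct name: naming this local
                                                 * 'foo_state' would shadow the per-cpu
                                                 * symbol once per_cpu() uses it directly */

                fs = &per_cpu(foo_state, cpu);
                fs->val++;
        }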
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c03999..b368cd862997 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
30#include <asm/apic.h> 30#include <asm/apic.h>
31#include <asm/desc.h> 31#include <asm/desc.h>
32 32
33static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr); 33static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
34static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr); 34static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
35static DEFINE_PER_CPU(int, cpu_priv_count); 35static DEFINE_PER_CPU(int, cpud_priv_count);
36 36
37static DEFINE_MUTEX(cpu_debug_lock); 37static DEFINE_MUTEX(cpu_debug_lock);
38 38
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
531 531
 532 /* Already initialized */ 532 /* Already initialized */
533 if (file == CPU_INDEX_BIT) 533 if (file == CPU_INDEX_BIT)
534 if (per_cpu(cpu_arr[type].init, cpu)) 534 if (per_cpu(cpud_arr[type].init, cpu))
535 return 0; 535 return 0;
536 536
537 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 537 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
543 priv->reg = reg; 543 priv->reg = reg;
544 priv->file = file; 544 priv->file = file;
545 mutex_lock(&cpu_debug_lock); 545 mutex_lock(&cpu_debug_lock);
546 per_cpu(priv_arr[type], cpu) = priv; 546 per_cpu(cpud_priv_arr[type], cpu) = priv;
547 per_cpu(cpu_priv_count, cpu)++; 547 per_cpu(cpud_priv_count, cpu)++;
548 mutex_unlock(&cpu_debug_lock); 548 mutex_unlock(&cpu_debug_lock);
549 549
550 if (file) 550 if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
552 dentry, (void *)priv, &cpu_fops); 552 dentry, (void *)priv, &cpu_fops);
553 else { 553 else {
554 debugfs_create_file(cpu_base[type].name, S_IRUGO, 554 debugfs_create_file(cpu_base[type].name, S_IRUGO,
555 per_cpu(cpu_arr[type].dentry, cpu), 555 per_cpu(cpud_arr[type].dentry, cpu),
556 (void *)priv, &cpu_fops); 556 (void *)priv, &cpu_fops);
557 mutex_lock(&cpu_debug_lock); 557 mutex_lock(&cpu_debug_lock);
558 per_cpu(cpu_arr[type].init, cpu) = 1; 558 per_cpu(cpud_arr[type].init, cpu) = 1;
559 mutex_unlock(&cpu_debug_lock); 559 mutex_unlock(&cpu_debug_lock);
560 } 560 }
561 561
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
615 if (!is_typeflag_valid(cpu, cpu_base[type].flag)) 615 if (!is_typeflag_valid(cpu, cpu_base[type].flag))
616 continue; 616 continue;
617 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry); 617 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
618 per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry; 618 per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
619 619
620 if (type < CPU_TSS_BIT) 620 if (type < CPU_TSS_BIT)
621 err = cpu_init_msr(cpu, type, cpu_dentry); 621 err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
647 err = cpu_init_allreg(cpu, cpu_dentry); 647 err = cpu_init_allreg(cpu, cpu_dentry);
648 648
649 pr_info("cpu%d(%d) debug files %d\n", 649 pr_info("cpu%d(%d) debug files %d\n",
650 cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu)); 650 cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
651 if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) { 651 if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
652 pr_err("Register files count %d exceeds limit %d\n", 652 pr_err("Register files count %d exceeds limit %d\n",
653 per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES); 653 per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
654 per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES; 654 per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
655 err = -ENFILE; 655 err = -ENFILE;
656 } 656 }
657 if (err) 657 if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
676 debugfs_remove_recursive(cpu_debugfs_dir); 676 debugfs_remove_recursive(cpu_debugfs_dir);
677 677
678 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 678 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
679 for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++) 679 for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
680 kfree(per_cpu(priv_arr[i], cpu)); 680 kfree(per_cpu(cpud_priv_arr[i], cpu));
681} 681}
682 682
683module_init(cpu_debug_init); 683module_init(cpu_debug_init);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 8b581d3905cb..f28decf8dde3 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
68 unsigned int cpu_feature; 68 unsigned int cpu_feature;
69}; 69};
70 70
71static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); 71static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
72 72
73static DEFINE_PER_CPU(struct aperfmperf, old_perf); 73static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
74 74
75/* acpi_perf_data is a pointer to percpu data. */ 75/* acpi_perf_data is a pointer to percpu data. */
76static struct acpi_processor_performance *acpi_perf_data; 76static struct acpi_processor_performance *acpi_perf_data;
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
214 if (unlikely(cpumask_empty(mask))) 214 if (unlikely(cpumask_empty(mask)))
215 return 0; 215 return 0;
216 216
217 switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) { 217 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
218 case SYSTEM_INTEL_MSR_CAPABLE: 218 case SYSTEM_INTEL_MSR_CAPABLE:
219 cmd.type = SYSTEM_INTEL_MSR_CAPABLE; 219 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
220 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; 220 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
221 break; 221 break;
222 case SYSTEM_IO_CAPABLE: 222 case SYSTEM_IO_CAPABLE:
223 cmd.type = SYSTEM_IO_CAPABLE; 223 cmd.type = SYSTEM_IO_CAPABLE;
224 perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data; 224 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
225 cmd.addr.io.port = perf->control_register.address; 225 cmd.addr.io.port = perf->control_register.address;
226 cmd.addr.io.bit_width = perf->control_register.bit_width; 226 cmd.addr.io.bit_width = perf->control_register.bit_width;
227 break; 227 break;
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
268 if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) 268 if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
269 return 0; 269 return 0;
270 270
271 ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf); 271 ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
272 per_cpu(old_perf, cpu) = perf; 272 per_cpu(acfreq_old_perf, cpu) = perf;
273 273
274 retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; 274 retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
275 275
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
278 278
279static unsigned int get_cur_freq_on_cpu(unsigned int cpu) 279static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
280{ 280{
281 struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); 281 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
282 unsigned int freq; 282 unsigned int freq;
283 unsigned int cached_freq; 283 unsigned int cached_freq;
284 284
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
322static int acpi_cpufreq_target(struct cpufreq_policy *policy, 322static int acpi_cpufreq_target(struct cpufreq_policy *policy,
323 unsigned int target_freq, unsigned int relation) 323 unsigned int target_freq, unsigned int relation)
324{ 324{
325 struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); 325 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
326 struct acpi_processor_performance *perf; 326 struct acpi_processor_performance *perf;
327 struct cpufreq_freqs freqs; 327 struct cpufreq_freqs freqs;
328 struct drv_cmd cmd; 328 struct drv_cmd cmd;
@@ -416,7 +416,7 @@ out:
416 416
417static int acpi_cpufreq_verify(struct cpufreq_policy *policy) 417static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
418{ 418{
419 struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); 419 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
420 420
421 dprintk("acpi_cpufreq_verify\n"); 421 dprintk("acpi_cpufreq_verify\n");
422 422
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
574 return -ENOMEM; 574 return -ENOMEM;
575 575
576 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); 576 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
577 per_cpu(drv_data, cpu) = data; 577 per_cpu(acfreq_data, cpu) = data;
578 578
579 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) 579 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
580 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; 580 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -725,20 +725,20 @@ err_unreg:
725 acpi_processor_unregister_performance(perf, cpu); 725 acpi_processor_unregister_performance(perf, cpu);
726err_free: 726err_free:
727 kfree(data); 727 kfree(data);
728 per_cpu(drv_data, cpu) = NULL; 728 per_cpu(acfreq_data, cpu) = NULL;
729 729
730 return result; 730 return result;
731} 731}
732 732
733static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) 733static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
734{ 734{
735 struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); 735 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
736 736
737 dprintk("acpi_cpufreq_cpu_exit\n"); 737 dprintk("acpi_cpufreq_cpu_exit\n");
738 738
739 if (data) { 739 if (data) {
740 cpufreq_frequency_table_put_attr(policy->cpu); 740 cpufreq_frequency_table_put_attr(policy->cpu);
741 per_cpu(drv_data, policy->cpu) = NULL; 741 per_cpu(acfreq_data, policy->cpu) = NULL;
742 acpi_processor_unregister_performance(data->acpi_data, 742 acpi_processor_unregister_performance(data->acpi_data,
743 policy->cpu); 743 policy->cpu);
744 kfree(data); 744 kfree(data);
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
749 749
750static int acpi_cpufreq_resume(struct cpufreq_policy *policy) 750static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
751{ 751{
752 struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); 752 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
753 753
754 dprintk("acpi_cpufreq_resume\n"); 754 dprintk("acpi_cpufreq_resume\n");
755 755
@@ -764,14 +764,15 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
764}; 764};
765 765
766static struct cpufreq_driver acpi_cpufreq_driver = { 766static struct cpufreq_driver acpi_cpufreq_driver = {
767 .verify = acpi_cpufreq_verify, 767 .verify = acpi_cpufreq_verify,
768 .target = acpi_cpufreq_target, 768 .target = acpi_cpufreq_target,
769 .init = acpi_cpufreq_cpu_init, 769 .bios_limit = acpi_processor_get_bios_limit,
770 .exit = acpi_cpufreq_cpu_exit, 770 .init = acpi_cpufreq_cpu_init,
771 .resume = acpi_cpufreq_resume, 771 .exit = acpi_cpufreq_cpu_exit,
772 .name = "acpi-cpufreq", 772 .resume = acpi_cpufreq_resume,
773 .owner = THIS_MODULE, 773 .name = "acpi-cpufreq",
774 .attr = acpi_cpufreq_attr, 774 .owner = THIS_MODULE,
775 .attr = acpi_cpufreq_attr,
775}; 776};
776 777
777static int __init acpi_cpufreq_init(void) 778static int __init acpi_cpufreq_init(void)
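The reordering above also threads a new .bios_limit callback into the driver, letting cpufreq report a BIOS-imposed frequency cap; acpi_processor_get_bios_limit() is shared with the powernow-k7 and powernow-k8 hunks below. A sketch of wiring such a callback, with a hypothetical stand-in for the ACPI helper:

        #include <linux/cpufreq.h>

        /* hypothetical query: report a firmware frequency cap in kHz for 'cpu' */
        static int demo_get_bios_limit(int cpu, unsigned int *limit)
        {
                *limit = 1600000;       /* pretend firmware capped us at 1.6 GHz */
                return 0;
        }

        static struct cpufreq_driver demo_driver = {
                .name           = "demo",
                .bios_limit     = demo_get_bios_limit,
                /* .verify, .target, .init, .exit as usual */
        };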
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index f10dea409f40..cb01dac267d3 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -164,7 +164,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
164 } 164 }
165 165
166 /* cpuinfo and default policy values */ 166 /* cpuinfo and default policy values */
167 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 167 policy->cpuinfo.transition_latency = 200000;
168 policy->cur = busfreq * max_multiplier; 168 policy->cur = busfreq * max_multiplier;
169 169
170 result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); 170 result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index d47c775eb0ab..9a97116f89e5 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -714,14 +714,17 @@ static struct freq_attr *powernow_table_attr[] = {
714}; 714};
715 715
716static struct cpufreq_driver powernow_driver = { 716static struct cpufreq_driver powernow_driver = {
717 .verify = powernow_verify, 717 .verify = powernow_verify,
718 .target = powernow_target, 718 .target = powernow_target,
719 .get = powernow_get, 719 .get = powernow_get,
720 .init = powernow_cpu_init, 720#ifdef CONFIG_X86_POWERNOW_K7_ACPI
721 .exit = powernow_cpu_exit, 721 .bios_limit = acpi_processor_get_bios_limit,
722 .name = "powernow-k7", 722#endif
723 .owner = THIS_MODULE, 723 .init = powernow_cpu_init,
724 .attr = powernow_table_attr, 724 .exit = powernow_cpu_exit,
725 .name = "powernow-k7",
726 .owner = THIS_MODULE,
727 .attr = powernow_table_attr,
725}; 728};
726 729
727static int __init powernow_init(void) 730static int __init powernow_init(void)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 3f12dabeab52..f125e5c551c0 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1118,7 +1118,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
1118static int powernowk8_target(struct cpufreq_policy *pol, 1118static int powernowk8_target(struct cpufreq_policy *pol,
1119 unsigned targfreq, unsigned relation) 1119 unsigned targfreq, unsigned relation)
1120{ 1120{
1121 cpumask_t oldmask; 1121 cpumask_var_t oldmask;
1122 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1122 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1123 u32 checkfid; 1123 u32 checkfid;
1124 u32 checkvid; 1124 u32 checkvid;
@@ -1131,9 +1131,13 @@ static int powernowk8_target(struct cpufreq_policy *pol,
1131 checkfid = data->currfid; 1131 checkfid = data->currfid;
1132 checkvid = data->currvid; 1132 checkvid = data->currvid;
1133 1133
1134 /* only run on specific CPU from here on */ 1134 /* only run on specific CPU from here on. */
1135 oldmask = current->cpus_allowed; 1135 /* This is poor form: use a workqueue or smp_call_function_single */
1136 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); 1136 if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
1137 return -ENOMEM;
1138
1139 cpumask_copy(oldmask, tsk_cpus_allowed(current));
1140 set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
1137 1141
1138 if (smp_processor_id() != pol->cpu) { 1142 if (smp_processor_id() != pol->cpu) {
1139 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); 1143 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1193,7 +1197,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
1193 ret = 0; 1197 ret = 0;
1194 1198
1195err_out: 1199err_out:
1196 set_cpus_allowed_ptr(current, &oldmask); 1200 set_cpus_allowed_ptr(current, oldmask);
1201 free_cpumask_var(oldmask);
1197 return ret; 1202 return ret;
1198} 1203}
1199 1204
@@ -1393,14 +1398,15 @@ static struct freq_attr *powernow_k8_attr[] = {
1393}; 1398};
1394 1399
1395static struct cpufreq_driver cpufreq_amd64_driver = { 1400static struct cpufreq_driver cpufreq_amd64_driver = {
1396 .verify = powernowk8_verify, 1401 .verify = powernowk8_verify,
1397 .target = powernowk8_target, 1402 .target = powernowk8_target,
1398 .init = powernowk8_cpu_init, 1403 .bios_limit = acpi_processor_get_bios_limit,
1399 .exit = __devexit_p(powernowk8_cpu_exit), 1404 .init = powernowk8_cpu_init,
1400 .get = powernowk8_get, 1405 .exit = __devexit_p(powernowk8_cpu_exit),
1401 .name = "powernow-k8", 1406 .get = powernowk8_get,
1402 .owner = THIS_MODULE, 1407 .name = "powernow-k8",
1403 .attr = powernow_k8_attr, 1408 .owner = THIS_MODULE,
1409 .attr = powernow_k8_attr,
1404}; 1410};
1405 1411
1406/* driver entry point for init */ 1412/* driver entry point for init */
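The oldmask change is a stack-size fix: with CONFIG_CPUMASK_OFFSTACK a cpumask_t can be far too large for an on-stack copy, so it becomes a cpumask_var_t with explicit allocation and free. A minimal sketch of the pattern (the pin-to-CPU approach itself is what the new comment calls poor form):

        #include <linux/cpumask.h>
        #include <linux/gfp.h>
        #include <linux/sched.h>

        static int run_on_cpu(int cpu)
        {
                cpumask_var_t saved;

                if (!alloc_cpumask_var(&saved, GFP_KERNEL))
                        return -ENOMEM;

                cpumask_copy(saved, tsk_cpus_allowed(current)); /* remember old affinity */
                set_cpus_allowed_ptr(current, cpumask_of(cpu)); /* pin to target CPU */

                /* ... work that must run on 'cpu' ... */

                set_cpus_allowed_ptr(current, saved);           /* restore affinity */
                free_cpumask_var(saved);
                return 0;
        }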
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 3ae5a7a3a500..2ce8e0b5cc54 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -39,7 +39,7 @@ static struct pci_dev *speedstep_chipset_dev;
39 39
40/* speedstep_processor 40/* speedstep_processor
41 */ 41 */
42static unsigned int speedstep_processor; 42static enum speedstep_processor speedstep_processor;
43 43
44static u32 pmbase; 44static u32 pmbase;
45 45
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index f4c290b8482f..ad0083abfa23 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -34,7 +34,7 @@ static int relaxed_check;
34 * GET PROCESSOR CORE SPEED IN KHZ * 34 * GET PROCESSOR CORE SPEED IN KHZ *
35 *********************************************************************/ 35 *********************************************************************/
36 36
37static unsigned int pentium3_get_frequency(unsigned int processor) 37static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
38{ 38{
39 /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ 39 /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
40 struct { 40 struct {
@@ -227,7 +227,7 @@ static unsigned int pentium4_get_frequency(void)
227 227
228 228
229/* Warning: may get called from smp_call_function_single. */ 229/* Warning: may get called from smp_call_function_single. */
230unsigned int speedstep_get_frequency(unsigned int processor) 230unsigned int speedstep_get_frequency(enum speedstep_processor processor)
231{ 231{
232 switch (processor) { 232 switch (processor) {
233 case SPEEDSTEP_CPU_PCORE: 233 case SPEEDSTEP_CPU_PCORE:
@@ -380,7 +380,7 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
380 * DETECT SPEEDSTEP SPEEDS * 380 * DETECT SPEEDSTEP SPEEDS *
381 *********************************************************************/ 381 *********************************************************************/
382 382
383unsigned int speedstep_get_freqs(unsigned int processor, 383unsigned int speedstep_get_freqs(enum speedstep_processor processor,
384 unsigned int *low_speed, 384 unsigned int *low_speed,
385 unsigned int *high_speed, 385 unsigned int *high_speed,
386 unsigned int *transition_latency, 386 unsigned int *transition_latency,
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
index 2b6c04e5a304..70d9cea1219d 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
@@ -11,18 +11,18 @@
11 11
12 12
13/* processors */ 13/* processors */
14 14enum speedstep_processor {
15#define SPEEDSTEP_CPU_PIII_C_EARLY 0x00000001 /* Coppermine core */ 15 SPEEDSTEP_CPU_PIII_C_EARLY = 0x00000001, /* Coppermine core */
16#define SPEEDSTEP_CPU_PIII_C 0x00000002 /* Coppermine core */ 16 SPEEDSTEP_CPU_PIII_C = 0x00000002, /* Coppermine core */
17#define SPEEDSTEP_CPU_PIII_T 0x00000003 /* Tualatin core */ 17 SPEEDSTEP_CPU_PIII_T = 0x00000003, /* Tualatin core */
18#define SPEEDSTEP_CPU_P4M 0x00000004 /* P4-M */ 18 SPEEDSTEP_CPU_P4M = 0x00000004, /* P4-M */
19
20/* the following processors are not speedstep-capable and are not auto-detected 19/* the following processors are not speedstep-capable and are not auto-detected
21 * in speedstep_detect_processor(). However, their speed can be detected using 20 * in speedstep_detect_processor(). However, their speed can be detected using
22 * the speedstep_get_frequency() call. */ 21 * the speedstep_get_frequency() call. */
23#define SPEEDSTEP_CPU_PM 0xFFFFFF03 /* Pentium M */ 22 SPEEDSTEP_CPU_PM = 0xFFFFFF03, /* Pentium M */
24#define SPEEDSTEP_CPU_P4D 0xFFFFFF04 /* desktop P4 */ 23 SPEEDSTEP_CPU_P4D = 0xFFFFFF04, /* desktop P4 */
25#define SPEEDSTEP_CPU_PCORE 0xFFFFFF05 /* Core */ 24 SPEEDSTEP_CPU_PCORE = 0xFFFFFF05, /* Core */
25};
26 26
27/* speedstep states -- only two of them */ 27/* speedstep states -- only two of them */
28 28
@@ -31,10 +31,10 @@
31 31
32 32
33/* detect a speedstep-capable processor */ 33/* detect a speedstep-capable processor */
34extern unsigned int speedstep_detect_processor (void); 34extern enum speedstep_processor speedstep_detect_processor(void);
35 35
36/* detect the current speed (in khz) of the processor */ 36/* detect the current speed (in khz) of the processor */
37extern unsigned int speedstep_get_frequency(unsigned int processor); 37extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
38 38
39 39
40/* detect the low and high speeds of the processor. The callback 40/* detect the low and high speeds of the processor. The callback
@@ -42,7 +42,7 @@ extern unsigned int speedstep_get_frequency(unsigned int processor);
42 * SPEEDSTEP_LOW; the second argument is zero so that no 42 * SPEEDSTEP_LOW; the second argument is zero so that no
43 * cpufreq_notify_transition calls are initiated. 43 * cpufreq_notify_transition calls are initiated.
44 */ 44 */
45extern unsigned int speedstep_get_freqs(unsigned int processor, 45extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
46 unsigned int *low_speed, 46 unsigned int *low_speed,
47 unsigned int *high_speed, 47 unsigned int *high_speed,
48 unsigned int *transition_latency, 48 unsigned int *transition_latency,
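Converting the processor IDs from #defines to an enum keeps the numeric values, so callers are unchanged, while giving the compiler something to check: passing an arbitrary integer now warns, and -Wswitch flags switch statements that miss an enumerator. A minimal sketch of the idiom:

        enum demo_cpu {
                DEMO_CPU_A = 0x00000001,        /* values preserved from the old #defines */
                DEMO_CPU_B = 0x00000002,
        };

        static unsigned int demo_get_frequency(enum demo_cpu cpu)
        {
                switch (cpu) {
                case DEMO_CPU_A:
                        return 1000000;
                case DEMO_CPU_B:
                        return 2000000;
                }                               /* -Wswitch warns if a case is missing */
                return 0;
        }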
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
index befea088e4f5..04d73c114e49 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
@@ -35,7 +35,7 @@ static int smi_cmd;
35static unsigned int smi_sig; 35static unsigned int smi_sig;
36 36
37/* info about the processor */ 37/* info about the processor */
38static unsigned int speedstep_processor; 38static enum speedstep_processor speedstep_processor;
39 39
40/* 40/*
41 * There are only two frequency states for each processor. Values 41 * There are only two frequency states for each processor. Values
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 63ada177b40c..fc6c8ef92dcc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,8 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
499#ifdef CONFIG_SYSFS 499#ifdef CONFIG_SYSFS
500 500
501/* pointer to _cpuid4_info array (for each cache leaf) */ 501/* pointer to _cpuid4_info array (for each cache leaf) */
502static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); 502static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
503#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) 503#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
504 504
505#ifdef CONFIG_SMP 505#ifdef CONFIG_SMP
506static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 506static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -512,7 +512,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
512 512
513 if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { 513 if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
514 for_each_cpu(i, c->llc_shared_map) { 514 for_each_cpu(i, c->llc_shared_map) {
515 if (!per_cpu(cpuid4_info, i)) 515 if (!per_cpu(ici_cpuid4_info, i))
516 continue; 516 continue;
517 this_leaf = CPUID4_INFO_IDX(i, index); 517 this_leaf = CPUID4_INFO_IDX(i, index);
518 for_each_cpu(sibling, c->llc_shared_map) { 518 for_each_cpu(sibling, c->llc_shared_map) {
@@ -536,7 +536,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
536 c->apicid >> index_msb) { 536 c->apicid >> index_msb) {
537 cpumask_set_cpu(i, 537 cpumask_set_cpu(i,
538 to_cpumask(this_leaf->shared_cpu_map)); 538 to_cpumask(this_leaf->shared_cpu_map));
539 if (i != cpu && per_cpu(cpuid4_info, i)) { 539 if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
540 sibling_leaf = 540 sibling_leaf =
541 CPUID4_INFO_IDX(i, index); 541 CPUID4_INFO_IDX(i, index);
542 cpumask_set_cpu(cpu, to_cpumask( 542 cpumask_set_cpu(cpu, to_cpumask(
@@ -575,8 +575,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
575 for (i = 0; i < num_cache_leaves; i++) 575 for (i = 0; i < num_cache_leaves; i++)
576 cache_remove_shared_cpu_map(cpu, i); 576 cache_remove_shared_cpu_map(cpu, i);
577 577
578 kfree(per_cpu(cpuid4_info, cpu)); 578 kfree(per_cpu(ici_cpuid4_info, cpu));
579 per_cpu(cpuid4_info, cpu) = NULL; 579 per_cpu(ici_cpuid4_info, cpu) = NULL;
580} 580}
581 581
582static int 582static int
@@ -615,15 +615,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
615 if (num_cache_leaves == 0) 615 if (num_cache_leaves == 0)
616 return -ENOENT; 616 return -ENOENT;
617 617
618 per_cpu(cpuid4_info, cpu) = kzalloc( 618 per_cpu(ici_cpuid4_info, cpu) = kzalloc(
619 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); 619 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
620 if (per_cpu(cpuid4_info, cpu) == NULL) 620 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
621 return -ENOMEM; 621 return -ENOMEM;
622 622
623 smp_call_function_single(cpu, get_cpu_leaves, &retval, true); 623 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
624 if (retval) { 624 if (retval) {
625 kfree(per_cpu(cpuid4_info, cpu)); 625 kfree(per_cpu(ici_cpuid4_info, cpu));
626 per_cpu(cpuid4_info, cpu) = NULL; 626 per_cpu(ici_cpuid4_info, cpu) = NULL;
627 } 627 }
628 628
629 return retval; 629 return retval;
@@ -635,7 +635,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
635extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */ 635extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
636 636
637/* pointer to kobject for cpuX/cache */ 637/* pointer to kobject for cpuX/cache */
638static DEFINE_PER_CPU(struct kobject *, cache_kobject); 638static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
639 639
640struct _index_kobject { 640struct _index_kobject {
641 struct kobject kobj; 641 struct kobject kobj;
@@ -644,8 +644,8 @@ struct _index_kobject {
644}; 644};
645 645
646/* pointer to array of kobjects for cpuX/cache/indexY */ 646/* pointer to array of kobjects for cpuX/cache/indexY */
647static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); 647static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
648#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) 648#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
649 649
650#define show_one_plus(file_name, object, val) \ 650#define show_one_plus(file_name, object, val) \
651static ssize_t show_##file_name \ 651static ssize_t show_##file_name \
@@ -864,10 +864,10 @@ static struct kobj_type ktype_percpu_entry = {
864 864
865static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) 865static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
866{ 866{
867 kfree(per_cpu(cache_kobject, cpu)); 867 kfree(per_cpu(ici_cache_kobject, cpu));
868 kfree(per_cpu(index_kobject, cpu)); 868 kfree(per_cpu(ici_index_kobject, cpu));
869 per_cpu(cache_kobject, cpu) = NULL; 869 per_cpu(ici_cache_kobject, cpu) = NULL;
870 per_cpu(index_kobject, cpu) = NULL; 870 per_cpu(ici_index_kobject, cpu) = NULL;
871 free_cache_attributes(cpu); 871 free_cache_attributes(cpu);
872} 872}
873 873
@@ -883,14 +883,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
883 return err; 883 return err;
884 884
885 /* Allocate all required memory */ 885 /* Allocate all required memory */
886 per_cpu(cache_kobject, cpu) = 886 per_cpu(ici_cache_kobject, cpu) =
887 kzalloc(sizeof(struct kobject), GFP_KERNEL); 887 kzalloc(sizeof(struct kobject), GFP_KERNEL);
888 if (unlikely(per_cpu(cache_kobject, cpu) == NULL)) 888 if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
889 goto err_out; 889 goto err_out;
890 890
891 per_cpu(index_kobject, cpu) = kzalloc( 891 per_cpu(ici_index_kobject, cpu) = kzalloc(
892 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); 892 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
893 if (unlikely(per_cpu(index_kobject, cpu) == NULL)) 893 if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
894 goto err_out; 894 goto err_out;
895 895
896 return 0; 896 return 0;
@@ -914,7 +914,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
914 if (unlikely(retval < 0)) 914 if (unlikely(retval < 0))
915 return retval; 915 return retval;
916 916
917 retval = kobject_init_and_add(per_cpu(cache_kobject, cpu), 917 retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
918 &ktype_percpu_entry, 918 &ktype_percpu_entry,
919 &sys_dev->kobj, "%s", "cache"); 919 &sys_dev->kobj, "%s", "cache");
920 if (retval < 0) { 920 if (retval < 0) {
@@ -928,12 +928,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
928 this_object->index = i; 928 this_object->index = i;
929 retval = kobject_init_and_add(&(this_object->kobj), 929 retval = kobject_init_and_add(&(this_object->kobj),
930 &ktype_cache, 930 &ktype_cache,
931 per_cpu(cache_kobject, cpu), 931 per_cpu(ici_cache_kobject, cpu),
932 "index%1lu", i); 932 "index%1lu", i);
933 if (unlikely(retval)) { 933 if (unlikely(retval)) {
934 for (j = 0; j < i; j++) 934 for (j = 0; j < i; j++)
935 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); 935 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
936 kobject_put(per_cpu(cache_kobject, cpu)); 936 kobject_put(per_cpu(ici_cache_kobject, cpu));
937 cpuid4_cache_sysfs_exit(cpu); 937 cpuid4_cache_sysfs_exit(cpu);
938 return retval; 938 return retval;
939 } 939 }
@@ -941,7 +941,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
941 } 941 }
942 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); 942 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
943 943
944 kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); 944 kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
945 return 0; 945 return 0;
946} 946}
947 947
@@ -950,7 +950,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
950 unsigned int cpu = sys_dev->id; 950 unsigned int cpu = sys_dev->id;
951 unsigned long i; 951 unsigned long i;
952 952
953 if (per_cpu(cpuid4_info, cpu) == NULL) 953 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
954 return; 954 return;
955 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) 955 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
956 return; 956 return;
@@ -958,7 +958,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
958 958
959 for (i = 0; i < num_cache_leaves; i++) 959 for (i = 0; i < num_cache_leaves; i++)
960 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); 960 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
961 kobject_put(per_cpu(cache_kobject, cpu)); 961 kobject_put(per_cpu(ici_cache_kobject, cpu));
962 cpuid4_cache_sysfs_exit(cpu); 962 cpuid4_cache_sysfs_exit(cpu);
963} 963}
964 964
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 3c1b12d461d1..e006e56f699c 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -4,6 +4,7 @@
4#include <linux/proc_fs.h> 4#include <linux/proc_fs.h>
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/ctype.h> 6#include <linux/ctype.h>
7#include <linux/string.h>
7#include <linux/init.h> 8#include <linux/init.h>
8 9
9#define LINE_SIZE 80 10#define LINE_SIZE 80
@@ -133,8 +134,7 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
133 return -EINVAL; 134 return -EINVAL;
134 135
135 base = simple_strtoull(line + 5, &ptr, 0); 136 base = simple_strtoull(line + 5, &ptr, 0);
136 while (isspace(*ptr)) 137 ptr = skip_spaces(ptr);
137 ptr++;
138 138
139 if (strncmp(ptr, "size=", 5)) 139 if (strncmp(ptr, "size=", 5))
140 return -EINVAL; 140 return -EINVAL;
@@ -142,14 +142,11 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
142 size = simple_strtoull(ptr + 5, &ptr, 0); 142 size = simple_strtoull(ptr + 5, &ptr, 0);
143 if ((base & 0xfff) || (size & 0xfff)) 143 if ((base & 0xfff) || (size & 0xfff))
144 return -EINVAL; 144 return -EINVAL;
145 while (isspace(*ptr)) 145 ptr = skip_spaces(ptr);
146 ptr++;
147 146
148 if (strncmp(ptr, "type=", 5)) 147 if (strncmp(ptr, "type=", 5))
149 return -EINVAL; 148 return -EINVAL;
150 ptr += 5; 149 ptr = skip_spaces(ptr + 5);
151 while (isspace(*ptr))
152 ptr++;
153 150
154 for (i = 0; i < MTRR_NUM_TYPES; ++i) { 151 for (i = 0; i < MTRR_NUM_TYPES; ++i) {
155 if (strcmp(ptr, mtrr_strings[i])) 152 if (strcmp(ptr, mtrr_strings[i]))
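skip_spaces() landed in <linux/string.h> in this same release; it returns a pointer to the first non-whitespace character, collapsing each open-coded isspace() loop to one call — the last hunk even folds the `ptr += 5` into it. A minimal usage sketch:

        #include <linux/errno.h>
        #include <linux/kernel.h>
        #include <linux/string.h>

        static int parse_size(const char *line, unsigned long long *out)
        {
                char *ptr = skip_spaces(line);                  /* skip leading blanks */

                if (strncmp(ptr, "size=", 5))
                        return -EINVAL;
                *out = simple_strtoull(skip_spaces(ptr + 5), &ptr, 0);
                return 0;
        }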
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index ab1a8a89b984..45506d5dd8df 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1632,6 +1632,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1632 1632
1633 data.period = event->hw.last_period; 1633 data.period = event->hw.last_period;
1634 data.addr = 0; 1634 data.addr = 0;
1635 data.raw = NULL;
1635 regs.ip = 0; 1636 regs.ip = 0;
1636 1637
1637 /* 1638 /*
@@ -1749,6 +1750,7 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
1749 u64 val; 1750 u64 val;
1750 1751
1751 data.addr = 0; 1752 data.addr = 0;
1753 data.raw = NULL;
1752 1754
1753 cpuc = &__get_cpu_var(cpu_hw_events); 1755 cpuc = &__get_cpu_var(cpu_hw_events);
1754 1756
@@ -1794,6 +1796,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1794 u64 ack, status; 1796 u64 ack, status;
1795 1797
1796 data.addr = 0; 1798 data.addr = 0;
1799 data.raw = NULL;
1797 1800
1798 cpuc = &__get_cpu_var(cpu_hw_events); 1801 cpuc = &__get_cpu_var(cpu_hw_events);
1799 1802
@@ -1857,6 +1860,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
1857 u64 val; 1860 u64 val;
1858 1861
1859 data.addr = 0; 1862 data.addr = 0;
1863 data.raw = NULL;
1860 1864
1861 cpuc = &__get_cpu_var(cpu_hw_events); 1865 cpuc = &__get_cpu_var(cpu_hw_events);
1862 1866
@@ -2062,12 +2066,6 @@ static __init int p6_pmu_init(void)
2062 2066
2063 x86_pmu = p6_pmu; 2067 x86_pmu = p6_pmu;
2064 2068
2065 if (!cpu_has_apic) {
2066 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2067 pr_info("no hardware sampling interrupt available.\n");
2068 x86_pmu.apic = 0;
2069 }
2070
2071 return 0; 2069 return 0;
2072} 2070}
2073 2071
@@ -2159,6 +2157,16 @@ static __init int amd_pmu_init(void)
2159 return 0; 2157 return 0;
2160} 2158}
2161 2159
2160static void __init pmu_check_apic(void)
2161{
2162 if (cpu_has_apic)
2163 return;
2164
2165 x86_pmu.apic = 0;
2166 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2167 pr_info("no hardware sampling interrupt available.\n");
2168}
2169
2162void __init init_hw_perf_events(void) 2170void __init init_hw_perf_events(void)
2163{ 2171{
2164 int err; 2172 int err;
@@ -2180,6 +2188,8 @@ void __init init_hw_perf_events(void)
2180 return; 2188 return;
2181 } 2189 }
2182 2190
2191 pmu_check_apic();
2192
2183 pr_cont("%s PMU driver.\n", x86_pmu.name); 2193 pr_cont("%s PMU driver.\n", x86_pmu.name);
2184 2194
2185 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) { 2195 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
@@ -2287,7 +2297,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2287 2297
2288static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); 2298static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2289static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); 2299static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2290static DEFINE_PER_CPU(int, in_nmi_frame); 2300static DEFINE_PER_CPU(int, in_ignored_frame);
2291 2301
2292 2302
2293static void 2303static void
@@ -2303,8 +2313,9 @@ static void backtrace_warning(void *data, char *msg)
2303 2313
2304static int backtrace_stack(void *data, char *name) 2314static int backtrace_stack(void *data, char *name)
2305{ 2315{
2306 per_cpu(in_nmi_frame, smp_processor_id()) = 2316 per_cpu(in_ignored_frame, smp_processor_id()) =
2307 x86_is_stack_id(NMI_STACK, name); 2317 x86_is_stack_id(NMI_STACK, name) ||
2318 x86_is_stack_id(DEBUG_STACK, name);
2308 2319
2309 return 0; 2320 return 0;
2310} 2321}
@@ -2313,7 +2324,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
2313{ 2324{
2314 struct perf_callchain_entry *entry = data; 2325 struct perf_callchain_entry *entry = data;
2315 2326
2316 if (per_cpu(in_nmi_frame, smp_processor_id())) 2327 if (per_cpu(in_ignored_frame, smp_processor_id()))
2317 return; 2328 return;
2318 2329
2319 if (reliable) 2330 if (reliable)
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index ef42a038f1a6..1c47390dd0e5 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -265,13 +265,13 @@ struct ds_context {
265 int cpu; 265 int cpu;
266}; 266};
267 267
268static DEFINE_PER_CPU(struct ds_context *, cpu_context); 268static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
269 269
270 270
271static struct ds_context *ds_get_context(struct task_struct *task, int cpu) 271static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
272{ 272{
273 struct ds_context **p_context = 273 struct ds_context **p_context =
274 (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu)); 274 (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
275 struct ds_context *context = NULL; 275 struct ds_context *context = NULL;
276 struct ds_context *new_context = NULL; 276 struct ds_context *new_context = NULL;
277 277
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b8ce165dde5d..0a0aa1cec8f1 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -188,7 +188,7 @@ void dump_stack(void)
188} 188}
189EXPORT_SYMBOL(dump_stack); 189EXPORT_SYMBOL(dump_stack);
190 190
191static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; 191static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
192static int die_owner = -1; 192static int die_owner = -1;
193static unsigned int die_nest_count; 193static unsigned int die_nest_count;
194 194
@@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void)
207 /* racy, but better than risking deadlock. */ 207 /* racy, but better than risking deadlock. */
208 raw_local_irq_save(flags); 208 raw_local_irq_save(flags);
209 cpu = smp_processor_id(); 209 cpu = smp_processor_id();
210 if (!__raw_spin_trylock(&die_lock)) { 210 if (!arch_spin_trylock(&die_lock)) {
211 if (cpu == die_owner) 211 if (cpu == die_owner)
212 /* nested oops. should stop eventually */; 212 /* nested oops. should stop eventually */;
213 else 213 else
214 __raw_spin_lock(&die_lock); 214 arch_spin_lock(&die_lock);
215 } 215 }
216 die_nest_count++; 216 die_nest_count++;
217 die_owner = cpu; 217 die_owner = cpu;
@@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
231 die_nest_count--; 231 die_nest_count--;
232 if (!die_nest_count) 232 if (!die_nest_count)
233 /* Nest count reaches zero, release the lock. */ 233 /* Nest count reaches zero, release the lock. */
234 __raw_spin_unlock(&die_lock); 234 arch_spin_unlock(&die_lock);
235 raw_local_irq_restore(flags); 235 raw_local_irq_restore(flags);
236 oops_exit(); 236 oops_exit();
237 237
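This is the other half of the lock rename: the old lowest-level raw_spinlock_t/__raw_spin_*() names become arch_spinlock_t/arch_spin_*(), freeing the raw_ prefix for the never-sleeping kernel lock seen in io_apic.c and irq.c. die_lock stays at the arch level because the oops path must keep working even when lockdep and the ordinary lock machinery may themselves be broken. A sketch of the renamed API:

        #include <linux/spinlock.h>

        /* lowest level: no lockdep, no debug checks - last-resort paths only */
        static arch_spinlock_t crash_lock = __ARCH_SPIN_LOCK_UNLOCKED;

        static void crash_log(void)
        {
                if (!arch_spin_trylock(&crash_lock))
                        arch_spin_lock(&crash_lock);    /* accept the deadlock risk */
                /* ... dump state ... */
                arch_spin_unlock(&crash_lock);
        }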
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 8e740934bd1f..b13af53883aa 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -103,6 +103,35 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
103 return NULL; 103 return NULL;
104} 104}
105 105
106static inline int
107in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
108 unsigned long *irq_stack_end)
109{
110 return (stack >= irq_stack && stack < irq_stack_end);
111}
112
113/*
 114 * We are returning from the irq stack and going back to the previous one.
115 * If the previous stack is also in the irq stack, then bp in the first
116 * frame of the irq stack points to the previous, interrupted one.
117 * Otherwise we have another level of indirection: We first save
118 * the bp of the previous stack, then we switch the stack to the irq one
119 * and save a new bp that links to the previous one.
120 * (See save_args())
121 */
122static inline unsigned long
123fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
124 unsigned long *irq_stack, unsigned long *irq_stack_end)
125{
126#ifdef CONFIG_FRAME_POINTER
127 struct stack_frame *frame = (struct stack_frame *)bp;
128
129 if (!in_irq_stack(stack, irq_stack, irq_stack_end))
130 return (unsigned long)frame->next_frame;
131#endif
132 return bp;
133}
134
106/* 135/*
107 * x86-64 can have up to three kernel stacks: 136 * x86-64 can have up to three kernel stacks:
108 * process stack 137 * process stack
@@ -175,7 +204,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
175 irq_stack = irq_stack_end - 204 irq_stack = irq_stack_end -
176 (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack); 205 (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
177 206
178 if (stack >= irq_stack && stack < irq_stack_end) { 207 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
179 if (ops->stack(data, "IRQ") < 0) 208 if (ops->stack(data, "IRQ") < 0)
180 break; 209 break;
181 bp = print_context_stack(tinfo, stack, bp, 210 bp = print_context_stack(tinfo, stack, bp,
@@ -186,6 +215,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
186 * pointer (index -1 to end) in the IRQ stack: 215 * pointer (index -1 to end) in the IRQ stack:
187 */ 216 */
188 stack = (unsigned long *) (irq_stack_end[-1]); 217 stack = (unsigned long *) (irq_stack_end[-1]);
218 bp = fixup_bp_irq_link(bp, stack, irq_stack,
219 irq_stack_end);
189 irq_stack_end = NULL; 220 irq_stack_end = NULL;
190 ops->stack(data, "EOI"); 221 ops->stack(data, "EOI");
191 continue; 222 continue;
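fixup_bp_irq_link() implements the comment above it: when the interrupted stack is not itself inside the IRQ stack, the first frame saved on the IRQ stack holds a bp whose next_frame field points back into the interrupted stack. A minimal model of that one-link dereference, mirroring the kernel's x86-64 frame layout:

        /* layout of a saved frame when CONFIG_FRAME_POINTER is on */
        struct stack_frame {
                struct stack_frame *next_frame;         /* caller's saved rbp */
                unsigned long return_address;
        };

        /* follow one bp link - the extra indirection fixup_bp_irq_link() resolves */
        static unsigned long unwind_one(unsigned long bp)
        {
                struct stack_frame *frame = (struct stack_frame *)bp;

                return (unsigned long)frame->next_frame;
        }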
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 73d9b2c0e217..0697ff139837 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1076,10 +1076,10 @@ ENTRY(\sym)
1076 TRACE_IRQS_OFF 1076 TRACE_IRQS_OFF
1077 movq %rsp,%rdi /* pt_regs pointer */ 1077 movq %rsp,%rdi /* pt_regs pointer */
1078 xorl %esi,%esi /* no error code */ 1078 xorl %esi,%esi /* no error code */
1079 PER_CPU(init_tss, %rbp) 1079 PER_CPU(init_tss, %r12)
1080 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp) 1080 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
1081 call \do_sym 1081 call \do_sym
1082 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp) 1082 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
1083 jmp paranoid_exit /* %ebx: no swapgs flag */ 1083 jmp paranoid_exit /* %ebx: no swapgs flag */
1084 CFI_ENDPROC 1084 CFI_ENDPROC
1085END(\sym) 1085END(\sym)
diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c
deleted file mode 100644
index 9b08e852fd1a..000000000000
--- a/arch/x86/kernel/geode_32.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/*
2 * AMD Geode southbridge support code
3 * Copyright (C) 2006, Advanced Micro Devices, Inc.
4 * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/ioport.h>
14#include <linux/io.h>
15#include <asm/msr.h>
16#include <asm/geode.h>
17
18static struct {
19 char *name;
20 u32 msr;
21 int size;
22 u32 base;
23} lbars[] = {
24 { "geode-pms", MSR_LBAR_PMS, LBAR_PMS_SIZE, 0 },
25 { "geode-acpi", MSR_LBAR_ACPI, LBAR_ACPI_SIZE, 0 },
26 { "geode-gpio", MSR_LBAR_GPIO, LBAR_GPIO_SIZE, 0 },
27 { "geode-mfgpt", MSR_LBAR_MFGPT, LBAR_MFGPT_SIZE, 0 }
28};
29
30static void __init init_lbars(void)
31{
32 u32 lo, hi;
33 int i;
34
35 for (i = 0; i < ARRAY_SIZE(lbars); i++) {
36 rdmsr(lbars[i].msr, lo, hi);
37 if (hi & 0x01)
38 lbars[i].base = lo & 0x0000ffff;
39
40 if (lbars[i].base == 0)
41 printk(KERN_ERR "geode: Couldn't initialize '%s'\n",
42 lbars[i].name);
43 }
44}
45
46int geode_get_dev_base(unsigned int dev)
47{
48 BUG_ON(dev >= ARRAY_SIZE(lbars));
49 return lbars[dev].base;
50}
51EXPORT_SYMBOL_GPL(geode_get_dev_base);
52
53/* === GPIO API === */
54
55void geode_gpio_set(u32 gpio, unsigned int reg)
56{
57 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
58
59 if (!base)
60 return;
61
62 /* low bank register */
63 if (gpio & 0xFFFF)
64 outl(gpio & 0xFFFF, base + reg);
65 /* high bank register */
66 gpio >>= 16;
67 if (gpio)
68 outl(gpio, base + 0x80 + reg);
69}
70EXPORT_SYMBOL_GPL(geode_gpio_set);
71
72void geode_gpio_clear(u32 gpio, unsigned int reg)
73{
74 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
75
76 if (!base)
77 return;
78
79 /* low bank register */
80 if (gpio & 0xFFFF)
81 outl((gpio & 0xFFFF) << 16, base + reg);
82 /* high bank register */
83 gpio &= (0xFFFF << 16);
84 if (gpio)
85 outl(gpio, base + 0x80 + reg);
86}
87EXPORT_SYMBOL_GPL(geode_gpio_clear);
88
89int geode_gpio_isset(u32 gpio, unsigned int reg)
90{
91 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
92 u32 val;
93
94 if (!base)
95 return 0;
96
97 /* low bank register */
98 if (gpio & 0xFFFF) {
99 val = inl(base + reg) & (gpio & 0xFFFF);
100 if ((gpio & 0xFFFF) == val)
101 return 1;
102 }
103 /* high bank register */
104 gpio >>= 16;
105 if (gpio) {
106 val = inl(base + 0x80 + reg) & gpio;
107 if (gpio == val)
108 return 1;
109 }
110 return 0;
111}
112EXPORT_SYMBOL_GPL(geode_gpio_isset);
113
114void geode_gpio_set_irq(unsigned int group, unsigned int irq)
115{
116 u32 lo, hi;
117
118 if (group > 7 || irq > 15)
119 return;
120
121 rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
122
123 lo &= ~(0xF << (group * 4));
124 lo |= (irq & 0xF) << (group * 4);
125
126 wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
127}
128EXPORT_SYMBOL_GPL(geode_gpio_set_irq);
129
130void geode_gpio_setup_event(unsigned int gpio, int pair, int pme)
131{
132 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
133 u32 offset, shift, val;
134
135 if (gpio >= 24)
136 offset = GPIO_MAP_W;
137 else if (gpio >= 16)
138 offset = GPIO_MAP_Z;
139 else if (gpio >= 8)
140 offset = GPIO_MAP_Y;
141 else
142 offset = GPIO_MAP_X;
143
144 shift = (gpio % 8) * 4;
145
146 val = inl(base + offset);
147
148 /* Clear whatever was there before */
149 val &= ~(0xF << shift);
150
151 /* And set the new value */
152
153 val |= ((pair & 7) << shift);
154
155 /* Set the PME bit if this is a PME event */
156
157 if (pme)
158 val |= (1 << (shift + 3));
159
160 outl(val, base + offset);
161}
162EXPORT_SYMBOL_GPL(geode_gpio_setup_event);
163
164int geode_has_vsa2(void)
165{
166 static int has_vsa2 = -1;
167
168 if (has_vsa2 == -1) {
169 u16 val;
170
171 /*
172 * The VSA has virtual registers that we can query for a
173 * signature.
174 */
175 outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
176 outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
177
178 val = inw(VSA_VRC_DATA);
179 has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
180 }
181
182 return has_vsa2;
183}
184EXPORT_SYMBOL_GPL(geode_has_vsa2);
185
186static int __init geode_southbridge_init(void)
187{
188 if (!is_geode())
189 return -ENODEV;
190
191 init_lbars();
192 (void) mfgpt_timer_setup();
193 return 0;
194}
195
196postcore_initcall(geode_southbridge_init);
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index d42f65ac4927..05d5fec64a94 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -362,8 +362,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
362 return ret; 362 return ret;
363 } 363 }
364 364
365 if (bp->callback) 365 ret = arch_store_info(bp);
366 ret = arch_store_info(bp);
367 366
368 if (ret < 0) 367 if (ret < 0)
369 return ret; 368 return ret;
@@ -519,7 +518,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
519 break; 518 break;
520 } 519 }
521 520
522 (bp->callback)(bp, args->regs); 521 perf_bp_event(bp, args->regs);
523 522
524 rcu_read_unlock(); 523 rcu_read_unlock();
525 } 524 }
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 664bcb7384ac..91fd0c70a18a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v)
149 if (!desc) 149 if (!desc)
150 return 0; 150 return 0;
151 151
152 spin_lock_irqsave(&desc->lock, flags); 152 raw_spin_lock_irqsave(&desc->lock, flags);
153 for_each_online_cpu(j) 153 for_each_online_cpu(j)
154 any_count |= kstat_irqs_cpu(i, j); 154 any_count |= kstat_irqs_cpu(i, j);
155 action = desc->action; 155 action = desc->action;
@@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v)
170 170
171 seq_putc(p, '\n'); 171 seq_putc(p, '\n');
172out: 172out:
173 spin_unlock_irqrestore(&desc->lock, flags); 173 raw_spin_unlock_irqrestore(&desc->lock, flags);
174 return 0; 174 return 0;
175} 175}
176 176
@@ -294,12 +294,12 @@ void fixup_irqs(void)
294 continue; 294 continue;
295 295
 296 /* interrupts are disabled at this point */ 296 /* interrupts are disabled at this point */
297 spin_lock(&desc->lock); 297 raw_spin_lock(&desc->lock);
298 298
299 affinity = desc->affinity; 299 affinity = desc->affinity;
300 if (!irq_has_action(irq) || 300 if (!irq_has_action(irq) ||
301 cpumask_equal(affinity, cpu_online_mask)) { 301 cpumask_equal(affinity, cpu_online_mask)) {
302 spin_unlock(&desc->lock); 302 raw_spin_unlock(&desc->lock);
303 continue; 303 continue;
304 } 304 }
305 305
@@ -326,7 +326,7 @@ void fixup_irqs(void)
326 if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) 326 if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
327 desc->chip->unmask(irq); 327 desc->chip->unmask(irq);
328 328
329 spin_unlock(&desc->lock); 329 raw_spin_unlock(&desc->lock);
330 330
331 if (break_affinity && set_affinity) 331 if (break_affinity && set_affinity)
332 printk("Broke affinity for irq %i\n", irq); 332 printk("Broke affinity for irq %i\n", irq);
@@ -356,10 +356,10 @@ void fixup_irqs(void)
356 irq = __get_cpu_var(vector_irq)[vector]; 356 irq = __get_cpu_var(vector_irq)[vector];
357 357
358 desc = irq_to_desc(irq); 358 desc = irq_to_desc(irq);
359 spin_lock(&desc->lock); 359 raw_spin_lock(&desc->lock);
360 if (desc->chip->retrigger) 360 if (desc->chip->retrigger)
361 desc->chip->retrigger(irq); 361 desc->chip->retrigger(irq);
362 spin_unlock(&desc->lock); 362 raw_spin_unlock(&desc->lock);
363 } 363 }
364 } 364 }
365} 365}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 20a5b3689463..dd74fe7273b1 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -86,9 +86,15 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
86 gdb_regs[GDB_DS] = regs->ds; 86 gdb_regs[GDB_DS] = regs->ds;
87 gdb_regs[GDB_ES] = regs->es; 87 gdb_regs[GDB_ES] = regs->es;
88 gdb_regs[GDB_CS] = regs->cs; 88 gdb_regs[GDB_CS] = regs->cs;
89 gdb_regs[GDB_SS] = __KERNEL_DS;
90 gdb_regs[GDB_FS] = 0xFFFF; 89 gdb_regs[GDB_FS] = 0xFFFF;
91 gdb_regs[GDB_GS] = 0xFFFF; 90 gdb_regs[GDB_GS] = 0xFFFF;
91 if (user_mode_vm(regs)) {
92 gdb_regs[GDB_SS] = regs->ss;
93 gdb_regs[GDB_SP] = regs->sp;
94 } else {
95 gdb_regs[GDB_SS] = __KERNEL_DS;
96 gdb_regs[GDB_SP] = kernel_stack_pointer(regs);
97 }
92#else 98#else
93 gdb_regs[GDB_R8] = regs->r8; 99 gdb_regs[GDB_R8] = regs->r8;
94 gdb_regs[GDB_R9] = regs->r9; 100 gdb_regs[GDB_R9] = regs->r9;
@@ -101,8 +107,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
101 gdb_regs32[GDB_PS] = regs->flags; 107 gdb_regs32[GDB_PS] = regs->flags;
102 gdb_regs32[GDB_CS] = regs->cs; 108 gdb_regs32[GDB_CS] = regs->cs;
103 gdb_regs32[GDB_SS] = regs->ss; 109 gdb_regs32[GDB_SS] = regs->ss;
104#endif
105 gdb_regs[GDB_SP] = kernel_stack_pointer(regs); 110 gdb_regs[GDB_SP] = kernel_stack_pointer(regs);
111#endif
106} 112}
107 113
108/** 114/**
@@ -220,8 +226,7 @@ static void kgdb_correct_hw_break(void)
220 dr7 |= ((breakinfo[breakno].len << 2) | 226 dr7 |= ((breakinfo[breakno].len << 2) |
221 breakinfo[breakno].type) << 227 breakinfo[breakno].type) <<
222 ((breakno << 2) + 16); 228 ((breakno << 2) + 16);
223 if (breakno >= 0 && breakno <= 3) 229 set_debugreg(breakinfo[breakno].addr, breakno);
224 set_debugreg(breakinfo[breakno].addr, breakno);
225 230
226 } else { 231 } else {
227 if ((dr7 & breakbit) && !breakinfo[breakno].enabled) { 232 if ((dr7 & breakbit) && !breakinfo[breakno].enabled) {
@@ -395,7 +400,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
395 /* set the trace bit if we're stepping */ 400 /* set the trace bit if we're stepping */
396 if (remcomInBuffer[0] == 's') { 401 if (remcomInBuffer[0] == 's') {
397 linux_regs->flags |= X86_EFLAGS_TF; 402 linux_regs->flags |= X86_EFLAGS_TF;
398 kgdb_single_step = 1;
399 atomic_set(&kgdb_cpu_doing_single_step, 403 atomic_set(&kgdb_cpu_doing_single_step,
400 raw_smp_processor_id()); 404 raw_smp_processor_id());
401 } 405 }
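
A note on why the kgdb hunk above distinguishes user and kernel mode for SS/SP (my explanation, assuming the x86-32 pt_regs layout of this era): for a trap taken in kernel mode there is no stack switch, so the CPU does not push SS:ESP, and the saved pt_regs simply ends where the interrupted stack begins; kernel_stack_pointer() exploits exactly that. A sketch of the idea, not the kernel's exact implementation:

    static unsigned long example_stack_pointer(struct pt_regs *regs)
    {
            if (user_mode_vm(regs))
                    return regs->sp;               /* pushed by hardware */
            /* no SS:ESP on the frame; the old stack starts right here */
            return (unsigned long)&regs->sp;
    }
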
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
deleted file mode 100644
index 2a62d843f015..000000000000
--- a/arch/x86/kernel/mfgpt_32.c
+++ /dev/null
@@ -1,410 +0,0 @@
1/*
2 * Driver/API for AMD Geode Multi-Function General Purpose Timers (MFGPT)
3 *
4 * Copyright (C) 2006, Advanced Micro Devices, Inc.
5 * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 *
11 * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
12 */
13
14/*
15 * We are using the 32.768kHz input clock - it's the only one that has the
16 * ranges we find desirable. The following table lists the suitable
17 * divisors and the associated Hz, minimum interval and the maximum interval:
18 *
19 * Divisor Hz Min Delta (s) Max Delta (s)
20 * 1 32768 .00048828125 2.000
21 * 2 16384 .0009765625 4.000
22 * 4 8192 .001953125 8.000
23 * 8 4096 .00390625 16.000
24 * 16 2048 .0078125 32.000
25 * 32 1024 .015625 64.000
26 * 64 512 .03125 128.000
27 * 128 256 .0625 256.000
28 * 256 128 .125 512.000
29 */
30
31#include <linux/kernel.h>
32#include <linux/interrupt.h>
33#include <linux/module.h>
34#include <asm/geode.h>
35
36#define MFGPT_DEFAULT_IRQ 7
37
38static struct mfgpt_timer_t {
39 unsigned int avail:1;
40} mfgpt_timers[MFGPT_MAX_TIMERS];
41
42/* Selected from the table above */
43
44#define MFGPT_DIVISOR 16
45#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
46#define MFGPT_HZ (32768 / MFGPT_DIVISOR)
47#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
48
49/* Allow for disabling of MFGPTs */
50static int disable;
51static int __init mfgpt_disable(char *s)
52{
53 disable = 1;
54 return 1;
55}
56__setup("nomfgpt", mfgpt_disable);
57
58/* Reset the MFGPT timers. This is required by some broken BIOSes which already
59 * do the same and leave the system in an unstable state. TinyBIOS 0.98 is
60 * affected at least (0.99 is OK with MFGPT workaround left to off).
61 */
62static int __init mfgpt_fix(char *s)
63{
64 u32 val, dummy;
65
66 /* The following undocumented bit resets the MFGPT timers */
67 val = 0xFF; dummy = 0;
68 wrmsr(MSR_MFGPT_SETUP, val, dummy);
69 return 1;
70}
71__setup("mfgptfix", mfgpt_fix);
72
73/*
74 * Check whether any MFGPTs are available for the kernel to use. In most
75 * cases, firmware that uses AMD's VSA code will claim all timers during
76 * bootup; we certainly don't want to take them if they're already in use.
77 * In other cases (such as with VSAless OpenFirmware), the system firmware
78 * leaves timers available for us to use.
79 */
80
81
82static int timers = -1;
83
84static void geode_mfgpt_detect(void)
85{
86 int i;
87 u16 val;
88
89 timers = 0;
90
91 if (disable) {
92 printk(KERN_INFO "geode-mfgpt: MFGPT support is disabled\n");
93 goto done;
94 }
95
96 if (!geode_get_dev_base(GEODE_DEV_MFGPT)) {
97 printk(KERN_INFO "geode-mfgpt: MFGPT LBAR is not set up\n");
98 goto done;
99 }
100
101 for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
102 val = geode_mfgpt_read(i, MFGPT_REG_SETUP);
103 if (!(val & MFGPT_SETUP_SETUP)) {
104 mfgpt_timers[i].avail = 1;
105 timers++;
106 }
107 }
108
109done:
110 printk(KERN_INFO "geode-mfgpt: %d MFGPT timers available.\n", timers);
111}
112
113int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
114{
115 u32 msr, mask, value, dummy;
116 int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
117
118 if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
119 return -EIO;
120
121 /*
122 * The register maps for these are described in sections 6.17.1.x of
123 * the AMD Geode CS5536 Companion Device Data Book.
124 */
125 switch (event) {
126 case MFGPT_EVENT_RESET:
127 /*
128 * XXX: According to the docs, we cannot reset timers above
129 * 6; that is, resets for 7 and 8 will be ignored. Is this
130 * a problem? -dilinger
131 */
132 msr = MSR_MFGPT_NR;
133 mask = 1 << (timer + 24);
134 break;
135
136 case MFGPT_EVENT_NMI:
137 msr = MSR_MFGPT_NR;
138 mask = 1 << (timer + shift);
139 break;
140
141 case MFGPT_EVENT_IRQ:
142 msr = MSR_MFGPT_IRQ;
143 mask = 1 << (timer + shift);
144 break;
145
146 default:
147 return -EIO;
148 }
149
150 rdmsr(msr, value, dummy);
151
152 if (enable)
153 value |= mask;
154 else
155 value &= ~mask;
156
157 wrmsr(msr, value, dummy);
158 return 0;
159}
160EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event);
161
162int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable)
163{
164 u32 zsel, lpc, dummy;
165 int shift;
166
167 if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
168 return -EIO;
169
170 /*
171 * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
172 * is using the same CMP of the timer's Siamese twin, the IRQ is set to
173 * 2, and we mustn't use nor change it.
174 * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the
175 * IRQ of the 1st. This can only happen if forcing an IRQ, calling this
176 * with *irq==0 is safe. Currently there _are_ no 2 drivers.
177 */
178 rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
179 shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4;
180 if (((zsel >> shift) & 0xF) == 2)
181 return -EIO;
182
183 /* Choose IRQ: if none supplied, keep IRQ already set or use default */
184 if (!*irq)
185 *irq = (zsel >> shift) & 0xF;
186 if (!*irq)
187 *irq = MFGPT_DEFAULT_IRQ;
188
189 /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
190 if (*irq < 1 || *irq == 2 || *irq > 15)
191 return -EIO;
192 rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
193 if (lpc & (1 << *irq))
194 return -EIO;
195
196 /* All chosen and checked - go for it */
197 if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
198 return -EIO;
199 if (enable) {
200 zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
201 wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
202 }
203
204 return 0;
205}
206
207static int mfgpt_get(int timer)
208{
209 mfgpt_timers[timer].avail = 0;
210 printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer);
211 return timer;
212}
213
214int geode_mfgpt_alloc_timer(int timer, int domain)
215{
216 int i;
217
218 if (timers == -1) {
219 /* timers haven't been detected yet */
220 geode_mfgpt_detect();
221 }
222
223 if (!timers)
224 return -1;
225
226 if (timer >= MFGPT_MAX_TIMERS)
227 return -1;
228
229 if (timer < 0) {
230 /* Try to find an available timer */
231 for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
232 if (mfgpt_timers[i].avail)
233 return mfgpt_get(i);
234
235 if (i == 5 && domain == MFGPT_DOMAIN_WORKING)
236 break;
237 }
238 } else {
239 /* If they requested a specific timer, try to honor that */
240 if (mfgpt_timers[timer].avail)
241 return mfgpt_get(timer);
242 }
243
244 /* No timers available - too bad */
245 return -1;
246}
247EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);
248
249
250#ifdef CONFIG_GEODE_MFGPT_TIMER
251
252/*
253 * The MFGPT timers on the CS5536 provide us with suitable timers to use
254 * as clock event sources - not as good as a HPET or APIC, but certainly
255 * better than the PIT. This isn't a general purpose MFGPT driver, but
256 * a simplified one designed specifically to act as a clock event source.
257 * For full details about the MFGPT, please consult the CS5536 data sheet.
258 */
259
260#include <linux/clocksource.h>
261#include <linux/clockchips.h>
262
263static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
264static u16 mfgpt_event_clock;
265
266static int irq;
267static int __init mfgpt_setup(char *str)
268{
269 get_option(&str, &irq);
270 return 1;
271}
272__setup("mfgpt_irq=", mfgpt_setup);
273
274static void mfgpt_disable_timer(u16 clock)
275{
276 /* avoid races by clearing CMP1 and CMP2 unconditionally */
277 geode_mfgpt_write(clock, MFGPT_REG_SETUP, (u16) ~MFGPT_SETUP_CNTEN |
278 MFGPT_SETUP_CMP1 | MFGPT_SETUP_CMP2);
279}
280
281static int mfgpt_next_event(unsigned long, struct clock_event_device *);
282static void mfgpt_set_mode(enum clock_event_mode, struct clock_event_device *);
283
284static struct clock_event_device mfgpt_clockevent = {
285 .name = "mfgpt-timer",
286 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
287 .set_mode = mfgpt_set_mode,
288 .set_next_event = mfgpt_next_event,
289 .rating = 250,
290 .cpumask = cpu_all_mask,
291 .shift = 32
292};
293
294static void mfgpt_start_timer(u16 delta)
295{
296 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta);
297 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
298
299 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
300 MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
301}
302
303static void mfgpt_set_mode(enum clock_event_mode mode,
304 struct clock_event_device *evt)
305{
306 mfgpt_disable_timer(mfgpt_event_clock);
307
308 if (mode == CLOCK_EVT_MODE_PERIODIC)
309 mfgpt_start_timer(MFGPT_PERIODIC);
310
311 mfgpt_tick_mode = mode;
312}
313
314static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
315{
316 mfgpt_start_timer(delta);
317 return 0;
318}
319
320static irqreturn_t mfgpt_tick(int irq, void *dev_id)
321{
322 u16 val = geode_mfgpt_read(mfgpt_event_clock, MFGPT_REG_SETUP);
323
324 /* See if the interrupt was for us */
325 if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
326 return IRQ_NONE;
327
328 /* Turn off the clock (and clear the event) */
329 mfgpt_disable_timer(mfgpt_event_clock);
330
331 if (mfgpt_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
332 return IRQ_HANDLED;
333
334 /* Clear the counter */
335 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
336
337 /* Restart the clock in periodic mode */
338
339 if (mfgpt_tick_mode == CLOCK_EVT_MODE_PERIODIC) {
340 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
341 MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
342 }
343
344 mfgpt_clockevent.event_handler(&mfgpt_clockevent);
345 return IRQ_HANDLED;
346}
347
348static struct irqaction mfgptirq = {
349 .handler = mfgpt_tick,
350 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
351 .name = "mfgpt-timer"
352};
353
354int __init mfgpt_timer_setup(void)
355{
356 int timer, ret;
357 u16 val;
358
359 timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
360 if (timer < 0) {
361 printk(KERN_ERR
362 "mfgpt-timer: Could not allocate a MFPGT timer\n");
363 return -ENODEV;
364 }
365
366 mfgpt_event_clock = timer;
367
368 /* Set up the IRQ on the MFGPT side */
369 if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) {
370 printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq);
371 return -EIO;
372 }
373
374 /* And register it with the kernel */
375 ret = setup_irq(irq, &mfgptirq);
376
377 if (ret) {
378 printk(KERN_ERR
379 "mfgpt-timer: Unable to set up the interrupt.\n");
380 goto err;
381 }
382
383 /* Set the clock scale and enable the event mode for CMP2 */
384 val = MFGPT_SCALE | (3 << 8);
385
386 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
387
388 /* Set up the clock event */
389 mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
390 mfgpt_clockevent.shift);
391 mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
392 &mfgpt_clockevent);
393 mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
394 &mfgpt_clockevent);
395
396 printk(KERN_INFO
397 "mfgpt-timer: Registering MFGPT timer %d as a clock event, using IRQ %d\n",
398 timer, irq);
399 clockevents_register_device(&mfgpt_clockevent);
400
401 return 0;
402
403err:
404 geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq);
405 printk(KERN_ERR
406 "mfgpt-timer: Unable to set up the MFGPT clock source\n");
407 return -EIO;
408}
409
410#endif
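
The timing constants in the file removed above follow directly from the divisor table in its header comment; here is the arithmetic spelled out, assuming HZ=250 purely for the example:

    /* worked example, not code from the patch */
    #define MFGPT_DIVISOR  16
    #define MFGPT_HZ       (32768 / MFGPT_DIVISOR)  /* = 2048 ticks/s */
    #define MFGPT_PERIODIC (MFGPT_HZ / 250)         /* = 8 ticks per jiffy */
    /*
     * 16-bit counter at 2048 Hz:
     *   max interval = 65536 / 2048 = 32 s
     *   min interval = 16 / 2048  ~= 7.8 ms
     * which matches the "divisor 16" row of the table (.0078125 s / 32.000 s).
     */

(The functionality itself is not dropped; as the olpc.c and reboot_fixups_32.c hunks below suggest, the Geode/CS5536 support moved out of arch/x86 behind the generic linux/cs5535.h interface.)
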
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 4006c522adc7..9d1d263f786f 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -212,7 +212,7 @@ static int __init olpc_init(void)
212 unsigned char *romsig; 212 unsigned char *romsig;
213 213
214 /* The ioremap check is dangerous; limit what we run it on */ 214 /* The ioremap check is dangerous; limit what we run it on */
215 if (!is_geode() || geode_has_vsa2()) 215 if (!is_geode() || cs5535_has_vsa2())
216 return 0; 216 return 0;
217 217
218 spin_lock_init(&ec_lock); 218 spin_lock_init(&ec_lock);
@@ -244,7 +244,7 @@ static int __init olpc_init(void)
244 (unsigned char *) &olpc_platform_info.ecver, 1); 244 (unsigned char *) &olpc_platform_info.ecver, 1);
245 245
246 /* check to see if the VSA exists */ 246 /* check to see if the VSA exists */
247 if (geode_has_vsa2()) 247 if (cs5535_has_vsa2())
248 olpc_platform_info.flags |= OLPC_F_VSA; 248 olpc_platform_info.flags |= OLPC_F_VSA;
249 249
250 printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", 250 printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 3a7c5a44082e..676b8c77a976 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,9 +8,9 @@
8#include <asm/paravirt.h> 8#include <asm/paravirt.h>
9 9
10static inline void 10static inline void
11default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 11default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
12{ 12{
13 __raw_spin_lock(lock); 13 arch_spin_lock(lock);
14} 14}
15 15
16struct pv_lock_ops pv_lock_ops = { 16struct pv_lock_ops pv_lock_ops = {
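
The rename above is part of a tree-wide change in this cycle: the old raw_spinlock_t becomes arch_spinlock_t, freeing the raw_ prefix for a new always-spinning lock layer (the same API shows up in the tsc_sync.c hunks further down). A sketch of the resulting layering, as I understand it:

    /*
     * spinlock_t      - normal kernel spinlock (may sleep under PREEMPT_RT)
     * raw_spinlock_t  - never sleeps; wraps the arch-level lock
     * arch_spinlock_t - the architecture implementation (old raw_spinlock_t)
     */
    static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

    static void lowest_level(void)
    {
            arch_spin_lock(&lock);    /* was __raw_spin_lock(&lock) */
            /* ... */
            arch_spin_unlock(&lock);  /* was __raw_spin_unlock(&lock) */
    }
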
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index c563e4c8ff39..2bbde6078143 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -31,7 +31,7 @@
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/crash_dump.h> 32#include <linux/crash_dump.h>
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34#include <linux/bitops.h> 34#include <linux/bitmap.h>
35#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
@@ -212,7 +212,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
212 212
213 spin_lock_irqsave(&tbl->it_lock, flags); 213 spin_lock_irqsave(&tbl->it_lock, flags);
214 214
215 iommu_area_reserve(tbl->it_map, index, npages); 215 bitmap_set(tbl->it_map, index, npages);
216 216
217 spin_unlock_irqrestore(&tbl->it_lock, flags); 217 spin_unlock_irqrestore(&tbl->it_lock, flags);
218} 218}
@@ -303,7 +303,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
303 303
304 spin_lock_irqsave(&tbl->it_lock, flags); 304 spin_lock_irqsave(&tbl->it_lock, flags);
305 305
306 iommu_area_free(tbl->it_map, entry, npages); 306 bitmap_clear(tbl->it_map, entry, npages);
307 307
308 spin_unlock_irqrestore(&tbl->it_lock, flags); 308 spin_unlock_irqrestore(&tbl->it_lock, flags);
309} 309}
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 56c0e730d3fe..34de53b46f87 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -23,7 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/topology.h> 24#include <linux/topology.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/bitops.h> 26#include <linux/bitmap.h>
27#include <linux/kdebug.h> 27#include <linux/kdebug.h>
28#include <linux/scatterlist.h> 28#include <linux/scatterlist.h>
29#include <linux/iommu-helper.h> 29#include <linux/iommu-helper.h>
@@ -126,7 +126,7 @@ static void free_iommu(unsigned long offset, int size)
126 unsigned long flags; 126 unsigned long flags;
127 127
128 spin_lock_irqsave(&iommu_bitmap_lock, flags); 128 spin_lock_irqsave(&iommu_bitmap_lock, flags);
129 iommu_area_free(iommu_gart_bitmap, offset, size); 129 bitmap_clear(iommu_gart_bitmap, offset, size);
130 if (offset >= next_bit) 130 if (offset >= next_bit)
131 next_bit = offset + size; 131 next_bit = offset + size;
132 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 132 spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -792,7 +792,7 @@ int __init gart_iommu_init(void)
792 * Out of IOMMU space handling. 792 * Out of IOMMU space handling.
793 * Reserve some invalid pages at the beginning of the GART. 793 * Reserve some invalid pages at the beginning of the GART.
794 */ 794 */
795 iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES); 795 bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
796 796
797 pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", 797 pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
798 iommu_size >> 20); 798 iommu_size >> 20);
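
Both IOMMU hunks above replace the iommu-helper wrappers with generic bitmap operations; bitmap_set() and bitmap_clear() from <linux/bitmap.h> do the same region-wise set/clear the old helpers did. A self-contained sketch of the pattern (the table size and function names here are made up):

    #include <linux/bitmap.h>
    #include <linux/types.h>

    static DECLARE_BITMAP(it_map, 1024);   /* one bit per IOMMU page */

    static void reserve_range(unsigned long start, unsigned long npages)
    {
            bitmap_set(it_map, start, npages);    /* was iommu_area_reserve() */
    }

    static void release_range(unsigned long start, unsigned long npages)
    {
            bitmap_clear(it_map, start, npages);  /* was iommu_area_free() */
    }
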
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 04d182a7cfdb..017d937639fe 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -509,14 +509,14 @@ static int genregs_get(struct task_struct *target,
509{ 509{
510 if (kbuf) { 510 if (kbuf) {
511 unsigned long *k = kbuf; 511 unsigned long *k = kbuf;
512 while (count > 0) { 512 while (count >= sizeof(*k)) {
513 *k++ = getreg(target, pos); 513 *k++ = getreg(target, pos);
514 count -= sizeof(*k); 514 count -= sizeof(*k);
515 pos += sizeof(*k); 515 pos += sizeof(*k);
516 } 516 }
517 } else { 517 } else {
518 unsigned long __user *u = ubuf; 518 unsigned long __user *u = ubuf;
519 while (count > 0) { 519 while (count >= sizeof(*u)) {
520 if (__put_user(getreg(target, pos), u++)) 520 if (__put_user(getreg(target, pos), u++))
521 return -EFAULT; 521 return -EFAULT;
522 count -= sizeof(*u); 522 count -= sizeof(*u);
@@ -535,14 +535,14 @@ static int genregs_set(struct task_struct *target,
535 int ret = 0; 535 int ret = 0;
536 if (kbuf) { 536 if (kbuf) {
537 const unsigned long *k = kbuf; 537 const unsigned long *k = kbuf;
538 while (count > 0 && !ret) { 538 while (count >= sizeof(*k) && !ret) {
539 ret = putreg(target, pos, *k++); 539 ret = putreg(target, pos, *k++);
540 count -= sizeof(*k); 540 count -= sizeof(*k);
541 pos += sizeof(*k); 541 pos += sizeof(*k);
542 } 542 }
543 } else { 543 } else {
544 const unsigned long __user *u = ubuf; 544 const unsigned long __user *u = ubuf;
545 while (count > 0 && !ret) { 545 while (count >= sizeof(*u) && !ret) {
546 unsigned long word; 546 unsigned long word;
547 ret = __get_user(word, u++); 547 ret = __get_user(word, u++);
548 if (ret) 548 if (ret)
@@ -555,7 +555,9 @@ static int genregs_set(struct task_struct *target,
555 return ret; 555 return ret;
556} 556}
557 557
558static void ptrace_triggered(struct perf_event *bp, void *data) 558static void ptrace_triggered(struct perf_event *bp, int nmi,
559 struct perf_sample_data *data,
560 struct pt_regs *regs)
559{ 561{
560 int i; 562 int i;
561 struct thread_struct *thread = &(current->thread); 563 struct thread_struct *thread = &(current->thread);
@@ -593,13 +595,13 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[])
593 return dr7; 595 return dr7;
594} 596}
595 597
596static struct perf_event * 598static int
597ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, 599ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
598 struct task_struct *tsk, int disabled) 600 struct task_struct *tsk, int disabled)
599{ 601{
600 int err; 602 int err;
601 int gen_len, gen_type; 603 int gen_len, gen_type;
602 DEFINE_BREAKPOINT_ATTR(attr); 604 struct perf_event_attr attr;
603 605
604 /* 606 /*
605 * We should have at least an inactive breakpoint at this 607
@@ -607,18 +609,18 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
607 * written the address register first 609 * written the address register first
608 */ 610 */
609 if (!bp) 611 if (!bp)
610 return ERR_PTR(-EINVAL); 612 return -EINVAL;
611 613
612 err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); 614 err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
613 if (err) 615 if (err)
614 return ERR_PTR(err); 616 return err;
615 617
616 attr = bp->attr; 618 attr = bp->attr;
617 attr.bp_len = gen_len; 619 attr.bp_len = gen_len;
618 attr.bp_type = gen_type; 620 attr.bp_type = gen_type;
619 attr.disabled = disabled; 621 attr.disabled = disabled;
620 622
621 return modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); 623 return modify_user_hw_breakpoint(bp, &attr);
622} 624}
623 625
624/* 626/*
@@ -656,28 +658,17 @@ restore:
656 if (!second_pass) 658 if (!second_pass)
657 continue; 659 continue;
658 660
659 thread->ptrace_bps[i] = NULL; 661 rc = ptrace_modify_breakpoint(bp, len, type,
660 bp = ptrace_modify_breakpoint(bp, len, type,
661 tsk, 1); 662 tsk, 1);
662 if (IS_ERR(bp)) { 663 if (rc)
663 rc = PTR_ERR(bp);
664 thread->ptrace_bps[i] = NULL;
665 break; 664 break;
666 }
667 thread->ptrace_bps[i] = bp;
668 } 665 }
669 continue; 666 continue;
670 } 667 }
671 668
672 bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0); 669 rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
673 670 if (rc)
674 /* Incorrect bp, or we have a bug in bp API */
675 if (IS_ERR(bp)) {
676 rc = PTR_ERR(bp);
677 thread->ptrace_bps[i] = NULL;
678 break; 671 break;
679 }
680 thread->ptrace_bps[i] = bp;
681 } 672 }
682 /* 673 /*
683 * Make a second pass to free the remaining unused breakpoints 674 * Make a second pass to free the remaining unused breakpoints
@@ -721,9 +712,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
721{ 712{
722 struct perf_event *bp; 713 struct perf_event *bp;
723 struct thread_struct *t = &tsk->thread; 714 struct thread_struct *t = &tsk->thread;
724 DEFINE_BREAKPOINT_ATTR(attr); 715 struct perf_event_attr attr;
725 716
726 if (!t->ptrace_bps[nr]) { 717 if (!t->ptrace_bps[nr]) {
718 hw_breakpoint_init(&attr);
727 /* 719 /*
728 * Put stub len and type to register (reserve) an inactive but 720 * Put stub len and type to register (reserve) an inactive but
729 * correct bp 721 * correct bp
@@ -734,26 +726,32 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
734 attr.disabled = 1; 726 attr.disabled = 1;
735 727
736 bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); 728 bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
729
730 /*
731 * CHECKME: the previous code returned -EIO if the addr wasn't
732 * a valid task virtual addr. The new one will return -EINVAL in
733 * this case.
734 * -EINVAL may be what we want for in-kernel breakpoints users,
735 * but -EIO looks better for ptrace, since we refuse a register
736 * writing for the user. And anyway this is the previous
737 * behaviour.
738 */
739 if (IS_ERR(bp))
740 return PTR_ERR(bp);
741
742 t->ptrace_bps[nr] = bp;
737 } else { 743 } else {
744 int err;
745
738 bp = t->ptrace_bps[nr]; 746 bp = t->ptrace_bps[nr];
739 t->ptrace_bps[nr] = NULL;
740 747
741 attr = bp->attr; 748 attr = bp->attr;
742 attr.bp_addr = addr; 749 attr.bp_addr = addr;
743 bp = modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); 750 err = modify_user_hw_breakpoint(bp, &attr);
751 if (err)
752 return err;
744 } 753 }
745 /*
746 * CHECKME: the previous code returned -EIO if the addr wasn't a
747 * valid task virtual addr. The new one will return -EINVAL in this
748 * case.
749 * -EINVAL may be what we want for in-kernel breakpoints users, but
750 * -EIO looks better for ptrace, since we refuse a register writing
751 * for the user. And anyway this is the previous behaviour.
752 */
753 if (IS_ERR(bp))
754 return PTR_ERR(bp);
755 754
756 t->ptrace_bps[nr] = bp;
757 755
758 return 0; 756 return 0;
759} 757}
@@ -1460,14 +1458,14 @@ static int genregs32_get(struct task_struct *target,
1460{ 1458{
1461 if (kbuf) { 1459 if (kbuf) {
1462 compat_ulong_t *k = kbuf; 1460 compat_ulong_t *k = kbuf;
1463 while (count > 0) { 1461 while (count >= sizeof(*k)) {
1464 getreg32(target, pos, k++); 1462 getreg32(target, pos, k++);
1465 count -= sizeof(*k); 1463 count -= sizeof(*k);
1466 pos += sizeof(*k); 1464 pos += sizeof(*k);
1467 } 1465 }
1468 } else { 1466 } else {
1469 compat_ulong_t __user *u = ubuf; 1467 compat_ulong_t __user *u = ubuf;
1470 while (count > 0) { 1468 while (count >= sizeof(*u)) {
1471 compat_ulong_t word; 1469 compat_ulong_t word;
1472 getreg32(target, pos, &word); 1470 getreg32(target, pos, &word);
1473 if (__put_user(word, u++)) 1471 if (__put_user(word, u++))
@@ -1488,14 +1486,14 @@ static int genregs32_set(struct task_struct *target,
1488 int ret = 0; 1486 int ret = 0;
1489 if (kbuf) { 1487 if (kbuf) {
1490 const compat_ulong_t *k = kbuf; 1488 const compat_ulong_t *k = kbuf;
1491 while (count > 0 && !ret) { 1489 while (count >= sizeof(*k) && !ret) {
1492 ret = putreg32(target, pos, *k++); 1490 ret = putreg32(target, pos, *k++);
1493 count -= sizeof(*k); 1491 count -= sizeof(*k);
1494 pos += sizeof(*k); 1492 pos += sizeof(*k);
1495 } 1493 }
1496 } else { 1494 } else {
1497 const compat_ulong_t __user *u = ubuf; 1495 const compat_ulong_t __user *u = ubuf;
1498 while (count > 0 && !ret) { 1496 while (count >= sizeof(*u) && !ret) {
1499 compat_ulong_t word; 1497 compat_ulong_t word;
1500 ret = __get_user(word, u++); 1498 ret = __get_user(word, u++);
1501 if (ret) 1499 if (ret)
@@ -1678,21 +1676,33 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1678#endif 1676#endif
1679} 1677}
1680 1678
1681void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, 1679static void fill_sigtrap_info(struct task_struct *tsk,
1682 int error_code, int si_code) 1680 struct pt_regs *regs,
1681 int error_code, int si_code,
1682 struct siginfo *info)
1683{ 1683{
1684 struct siginfo info;
1685
1686 tsk->thread.trap_no = 1; 1684 tsk->thread.trap_no = 1;
1687 tsk->thread.error_code = error_code; 1685 tsk->thread.error_code = error_code;
1688 1686
1689 memset(&info, 0, sizeof(info)); 1687 memset(info, 0, sizeof(*info));
1690 info.si_signo = SIGTRAP; 1688 info->si_signo = SIGTRAP;
1691 info.si_code = si_code; 1689 info->si_code = si_code;
1690 info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
1691}
1692
1693void user_single_step_siginfo(struct task_struct *tsk,
1694 struct pt_regs *regs,
1695 struct siginfo *info)
1696{
1697 fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
1698}
1692 1699
1693 /* User-mode ip? */ 1700void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1694 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL; 1701 int error_code, int si_code)
1702{
1703 struct siginfo info;
1695 1704
1705 fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
1696 /* Send us the fake SIGTRAP */ 1706 /* Send us the fake SIGTRAP */
1697 force_sig_info(SIGTRAP, &info, tsk); 1707 force_sig_info(SIGTRAP, &info, tsk);
1698} 1708}
@@ -1757,29 +1767,22 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
1757 1767
1758asmregparm void syscall_trace_leave(struct pt_regs *regs) 1768asmregparm void syscall_trace_leave(struct pt_regs *regs)
1759{ 1769{
1770 bool step;
1771
1760 if (unlikely(current->audit_context)) 1772 if (unlikely(current->audit_context))
1761 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); 1773 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1762 1774
1763 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 1775 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1764 trace_sys_exit(regs, regs->ax); 1776 trace_sys_exit(regs, regs->ax);
1765 1777
1766 if (test_thread_flag(TIF_SYSCALL_TRACE))
1767 tracehook_report_syscall_exit(regs, 0);
1768
1769 /* 1778 /*
1770 * If TIF_SYSCALL_EMU is set, we only get here because of 1779 * If TIF_SYSCALL_EMU is set, we only get here because of
1771 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP). 1780 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
1772 * We already reported this syscall instruction in 1781 * We already reported this syscall instruction in
1773 * syscall_trace_enter(), so don't do any more now. 1782 * syscall_trace_enter().
1774 */
1775 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
1776 return;
1777
1778 /*
1779 * If we are single-stepping, synthesize a trap to follow the
1780 * system call instruction.
1781 */ 1783 */
1782 if (test_thread_flag(TIF_SINGLESTEP) && 1784 step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
1783 tracehook_consider_fatal_signal(current, SIGTRAP)) 1785 !test_thread_flag(TIF_SYSCALL_EMU);
1784 send_sigtrap(current, regs, 0, TRAP_BRKPT); 1786 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1787 tracehook_report_syscall_exit(regs, step);
1785} 1788}
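
On the regset loops above: changing "count > 0" to "count >= sizeof(*k)" means a caller passing a byte count that is not a multiple of the word size no longer triggers a final iteration that would step past the end of the buffer; any trailing partial word is simply ignored. A condensed sketch of the fixed pattern (getreg() is the file-local helper the hunks use; treat this as illustrative):

    static void copy_out_regs(struct task_struct *target,
                              unsigned long *dst,
                              unsigned int pos, unsigned int count)
    {
            /* stop while a whole word still fits; leftover bytes untouched */
            while (count >= sizeof(*dst)) {
                    *dst++ = getreg(target, pos);
                    count -= sizeof(*dst);
                    pos += sizeof(*dst);
            }
    }
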
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index 201eab63b05f..fda313ebbb03 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -12,7 +12,7 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <asm/reboot_fixups.h> 13#include <asm/reboot_fixups.h>
14#include <asm/msr.h> 14#include <asm/msr.h>
15#include <asm/geode.h> 15#include <linux/cs5535.h>
16 16
17static void cs5530a_warm_reset(struct pci_dev *dev) 17static void cs5530a_warm_reset(struct pci_dev *dev)
18{ 18{
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 1884a8d12bfa..dee1ff7cba58 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -24,31 +24,6 @@
24 24
25#include <asm/syscalls.h> 25#include <asm/syscalls.h>
26 26
27asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
28 unsigned long prot, unsigned long flags,
29 unsigned long fd, unsigned long pgoff)
30{
31 int error = -EBADF;
32 struct file *file = NULL;
33 struct mm_struct *mm = current->mm;
34
35 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
36 if (!(flags & MAP_ANONYMOUS)) {
37 file = fget(fd);
38 if (!file)
39 goto out;
40 }
41
42 down_write(&mm->mmap_sem);
43 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
44 up_write(&mm->mmap_sem);
45
46 if (file)
47 fput(file);
48out:
49 return error;
50}
51
52/* 27/*
53 * Perform the select(nd, in, out, ex, tv) and mmap() system 28 * Perform the select(nd, in, out, ex, tv) and mmap() system
54 * calls. Linux/i386 didn't use to be able to handle more than 29 * calls. Linux/i386 didn't use to be able to handle more than
@@ -77,7 +52,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
77 if (a.offset & ~PAGE_MASK) 52 if (a.offset & ~PAGE_MASK)
78 goto out; 53 goto out;
79 54
80 err = sys_mmap2(a.addr, a.len, a.prot, a.flags, 55 err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags,
81 a.fd, a.offset >> PAGE_SHIFT); 56 a.fd, a.offset >> PAGE_SHIFT);
82out: 57out:
83 return err; 58 return err;
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 45e00eb09c3a..8aa2057efd12 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -23,26 +23,11 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
23 unsigned long, fd, unsigned long, off) 23 unsigned long, fd, unsigned long, off)
24{ 24{
25 long error; 25 long error;
26 struct file *file;
27
28 error = -EINVAL; 26 error = -EINVAL;
29 if (off & ~PAGE_MASK) 27 if (off & ~PAGE_MASK)
30 goto out; 28 goto out;
31 29
32 error = -EBADF; 30 error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
33 file = NULL;
34 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
35 if (!(flags & MAP_ANONYMOUS)) {
36 file = fget(fd);
37 if (!file)
38 goto out;
39 }
40 down_write(&current->mm->mmap_sem);
41 error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
42 up_write(&current->mm->mmap_sem);
43
44 if (file)
45 fput(file);
46out: 31out:
47 return error; 32 return error;
48} 33}
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 70c2125d55b9..15228b5d3eb7 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -191,7 +191,7 @@ ENTRY(sys_call_table)
191 .long sys_ni_syscall /* reserved for streams2 */ 191 .long sys_ni_syscall /* reserved for streams2 */
192 .long ptregs_vfork /* 190 */ 192 .long ptregs_vfork /* 190 */
193 .long sys_getrlimit 193 .long sys_getrlimit
194 .long sys_mmap2 194 .long sys_mmap_pgoff
195 .long sys_truncate64 195 .long sys_truncate64
196 .long sys_ftruncate64 196 .long sys_ftruncate64
197 .long sys_stat64 /* 195 */ 197 .long sys_stat64 /* 195 */
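
The three hunks above (sys_i386_32.c, sys_x86_64.c, syscall_table_32.S) all collapse per-arch mmap boilerplate into the generic sys_mmap_pgoff(). A simplified reconstruction of what that helper factors out, based on the removed copies (the real mm/ implementation adds more, e.g. MAP_HUGETLB handling):

    SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                    unsigned long, prot, unsigned long, flags,
                    unsigned long, fd, unsigned long, pgoff)
    {
            struct file *file = NULL;
            unsigned long retval = -EBADF;

            flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
            if (!(flags & MAP_ANONYMOUS)) {
                    file = fget(fd);
                    if (!file)
                            goto out;
            }

            down_write(&current->mm->mmap_sem);
            retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
            up_write(&current->mm->mmap_sem);

            if (file)
                    fput(file);
    out:
            return retval;
    }
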
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index eed156851f5d..0aa5fed8b9e6 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
33 * we want to have the fastest, inlined, non-debug version 33 * we want to have the fastest, inlined, non-debug version
34 * of a critical section, to be able to prove TSC time-warps: 34 * of a critical section, to be able to prove TSC time-warps:
35 */ 35 */
36static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; 36static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
37 37
38static __cpuinitdata cycles_t last_tsc; 38static __cpuinitdata cycles_t last_tsc;
39static __cpuinitdata cycles_t max_warp; 39static __cpuinitdata cycles_t max_warp;
@@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void)
62 * previous TSC that was measured (possibly on 62 * previous TSC that was measured (possibly on
63 * another CPU) and update the previous TSC timestamp. 63 * another CPU) and update the previous TSC timestamp.
64 */ 64 */
65 __raw_spin_lock(&sync_lock); 65 arch_spin_lock(&sync_lock);
66 prev = last_tsc; 66 prev = last_tsc;
67 rdtsc_barrier(); 67 rdtsc_barrier();
68 now = get_cycles(); 68 now = get_cycles();
69 rdtsc_barrier(); 69 rdtsc_barrier();
70 last_tsc = now; 70 last_tsc = now;
71 __raw_spin_unlock(&sync_lock); 71 arch_spin_unlock(&sync_lock);
72 72
73 /* 73 /*
74 * Be nice every now and then (and also check whether 74 * Be nice every now and then (and also check whether
@@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void)
87 * we saw a time-warp of the TSC going backwards: 87 * we saw a time-warp of the TSC going backwards:
88 */ 88 */
89 if (unlikely(prev > now)) { 89 if (unlikely(prev > now)) {
90 __raw_spin_lock(&sync_lock); 90 arch_spin_lock(&sync_lock);
91 max_warp = max(max_warp, prev - now); 91 max_warp = max(max_warp, prev - now);
92 nr_warps++; 92 nr_warps++;
93 __raw_spin_unlock(&sync_lock); 93 arch_spin_unlock(&sync_lock);
94 } 94 }
95 } 95 }
96 WARN(!(now-start), 96 WARN(!(now-start),
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 9fafaf83b3b8..619f7f88b8cc 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -54,4 +54,6 @@ EXPORT_SYMBOL(__memcpy);
54 54
55EXPORT_SYMBOL(empty_zero_page); 55EXPORT_SYMBOL(empty_zero_page);
56EXPORT_SYMBOL(init_level4_pgt); 56EXPORT_SYMBOL(init_level4_pgt);
57EXPORT_SYMBOL(load_gs_index); 57#ifndef CONFIG_PARAVIRT
58EXPORT_SYMBOL(native_load_gs_index);
59#endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3de0b37ec038..1d9b33843c80 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
316static int svm_hardware_enable(void *garbage) 316static int svm_hardware_enable(void *garbage)
317{ 317{
318 318
319 struct svm_cpu_data *svm_data; 319 struct svm_cpu_data *sd;
320 uint64_t efer; 320 uint64_t efer;
321 struct descriptor_table gdt_descr; 321 struct descriptor_table gdt_descr;
322 struct desc_struct *gdt; 322 struct desc_struct *gdt;
@@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage)
331 me); 331 me);
332 return -EINVAL; 332 return -EINVAL;
333 } 333 }
334 svm_data = per_cpu(svm_data, me); 334 sd = per_cpu(svm_data, me);
335 335
336 if (!svm_data) { 336 if (!sd) {
337 printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n", 337 printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
338 me); 338 me);
339 return -EINVAL; 339 return -EINVAL;
340 } 340 }
341 341
342 svm_data->asid_generation = 1; 342 sd->asid_generation = 1;
343 svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; 343 sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
344 svm_data->next_asid = svm_data->max_asid + 1; 344 sd->next_asid = sd->max_asid + 1;
345 345
346 kvm_get_gdt(&gdt_descr); 346 kvm_get_gdt(&gdt_descr);
347 gdt = (struct desc_struct *)gdt_descr.base; 347 gdt = (struct desc_struct *)gdt_descr.base;
348 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); 348 sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
349 349
350 wrmsrl(MSR_EFER, efer | EFER_SVME); 350 wrmsrl(MSR_EFER, efer | EFER_SVME);
351 351
352 wrmsrl(MSR_VM_HSAVE_PA, 352 wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
353 page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
354 353
355 return 0; 354 return 0;
356} 355}
357 356
358static void svm_cpu_uninit(int cpu) 357static void svm_cpu_uninit(int cpu)
359{ 358{
360 struct svm_cpu_data *svm_data 359 struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
361 = per_cpu(svm_data, raw_smp_processor_id());
362 360
363 if (!svm_data) 361 if (!sd)
364 return; 362 return;
365 363
366 per_cpu(svm_data, raw_smp_processor_id()) = NULL; 364 per_cpu(svm_data, raw_smp_processor_id()) = NULL;
367 __free_page(svm_data->save_area); 365 __free_page(sd->save_area);
368 kfree(svm_data); 366 kfree(sd);
369} 367}
370 368
371static int svm_cpu_init(int cpu) 369static int svm_cpu_init(int cpu)
372{ 370{
373 struct svm_cpu_data *svm_data; 371 struct svm_cpu_data *sd;
374 int r; 372 int r;
375 373
376 svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); 374 sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
377 if (!svm_data) 375 if (!sd)
378 return -ENOMEM; 376 return -ENOMEM;
379 svm_data->cpu = cpu; 377 sd->cpu = cpu;
380 svm_data->save_area = alloc_page(GFP_KERNEL); 378 sd->save_area = alloc_page(GFP_KERNEL);
381 r = -ENOMEM; 379 r = -ENOMEM;
382 if (!svm_data->save_area) 380 if (!sd->save_area)
383 goto err_1; 381 goto err_1;
384 382
385 per_cpu(svm_data, cpu) = svm_data; 383 per_cpu(svm_data, cpu) = sd;
386 384
387 return 0; 385 return 0;
388 386
389err_1: 387err_1:
390 kfree(svm_data); 388 kfree(sd);
391 return r; 389 return r;
392 390
393} 391}
@@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
1092#endif 1090#endif
1093} 1091}
1094 1092
1095static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data) 1093static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1096{ 1094{
1097 if (svm_data->next_asid > svm_data->max_asid) { 1095 if (sd->next_asid > sd->max_asid) {
1098 ++svm_data->asid_generation; 1096 ++sd->asid_generation;
1099 svm_data->next_asid = 1; 1097 sd->next_asid = 1;
1100 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; 1098 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1101 } 1099 }
1102 1100
1103 svm->asid_generation = svm_data->asid_generation; 1101 svm->asid_generation = sd->asid_generation;
1104 svm->vmcb->control.asid = svm_data->next_asid++; 1102 svm->vmcb->control.asid = sd->next_asid++;
1105} 1103}
1106 1104
1107static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) 1105static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
@@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu)
2429{ 2427{
2430 int cpu = raw_smp_processor_id(); 2428 int cpu = raw_smp_processor_id();
2431 2429
2432 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); 2430 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2433 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */ 2431 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
2434 load_TR_desc(); 2432 load_TR_desc();
2435} 2433}
2436 2434
@@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
2438{ 2436{
2439 int cpu = raw_smp_processor_id(); 2437 int cpu = raw_smp_processor_id();
2440 2438
2441 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); 2439 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2442 2440
2443 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; 2441 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
2444 /* FIXME: handle wraparound of asid_generation */ 2442 /* FIXME: handle wraparound of asid_generation */
2445 if (svm->asid_generation != svm_data->asid_generation) 2443 if (svm->asid_generation != sd->asid_generation)
2446 new_asid(svm, svm_data); 2444 new_asid(svm, sd);
2447} 2445}
2448 2446
2449static void svm_inject_nmi(struct kvm_vcpu *vcpu) 2447static void svm_inject_nmi(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 706be8bf967b..cffd754f3039 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -5,7 +5,7 @@
5inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk 5inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
6inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt 6inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
7quiet_cmd_inat_tables = GEN $@ 7quiet_cmd_inat_tables = GEN $@
8 cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ 8 cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
9 9
10$(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) 10$(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
11 $(call cmd,inat_tables) 11 $(call cmd,inat_tables)
@@ -20,7 +20,7 @@ lib-y := delay.o
20lib-y += thunk_$(BITS).o 20lib-y += thunk_$(BITS).o
21lib-y += usercopy_$(BITS).o getuser.o putuser.o 21lib-y += usercopy_$(BITS).o getuser.o putuser.o
22lib-y += memcpy_$(BITS).o 22lib-y += memcpy_$(BITS).o
23lib-y += insn.o inat.o 23lib-$(CONFIG_KPROBES) += insn.o inat.o
24 24
25obj-y += msr.o msr-reg.o msr-reg-export.o 25obj-y += msr.o msr-reg.o msr-reg-export.o
26 26
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 66b55d6e69ed..ae9648eb1c7f 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -704,9 +704,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
704 if (!range_is_allowed(pfn, size)) 704 if (!range_is_allowed(pfn, size))
705 return 0; 705 return 0;
706 706
707 if (file->f_flags & O_SYNC) { 707 if (file->f_flags & O_DSYNC)
708 flags = _PAGE_CACHE_UC_MINUS; 708 flags = _PAGE_CACHE_UC_MINUS;
709 }
710 709
711#ifdef CONFIG_X86_32 710#ifdef CONFIG_X86_32
712 /* 711 /*
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index d49202e740ea..564b008a51c7 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -15,3 +15,8 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
15 15
16obj-y += common.o early.o 16obj-y += common.o early.o
17obj-y += amd_bus.o 17obj-y += amd_bus.o
18obj-$(CONFIG_X86_64) += bus_numa.o intel_bus.o
19
20ifeq ($(CONFIG_PCI_DEBUG),y)
21EXTRA_CFLAGS += -DDEBUG
22endif
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 1014eb4bfc37..959e548a7039 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -7,6 +7,7 @@
7#include <asm/pci_x86.h> 7#include <asm/pci_x86.h>
8 8
9struct pci_root_info { 9struct pci_root_info {
10 struct acpi_device *bridge;
10 char *name; 11 char *name;
11 unsigned int res_num; 12 unsigned int res_num;
12 struct resource *res; 13 struct resource *res;
@@ -58,6 +59,30 @@ bus_has_transparent_bridge(struct pci_bus *bus)
58 return false; 59 return false;
59} 60}
60 61
62static void
63align_resource(struct acpi_device *bridge, struct resource *res)
64{
65 int align = (res->flags & IORESOURCE_MEM) ? 16 : 4;
66
67 /*
68 * Host bridge windows are not BARs, but the decoders on the PCI side
69 * that claim this address space have starting alignment and length
70 * constraints, so fix any obvious BIOS goofs.
71 */
72 if (!IS_ALIGNED(res->start, align)) {
73 dev_printk(KERN_DEBUG, &bridge->dev,
74 "host bridge window %pR invalid; "
75 "aligning start to %d-byte boundary\n", res, align);
76 res->start &= ~(align - 1);
77 }
78 if (!IS_ALIGNED(res->end + 1, align)) {
79 dev_printk(KERN_DEBUG, &bridge->dev,
80 "host bridge window %pR invalid; "
81 "aligning end to %d-byte boundary\n", res, align);
82 res->end = ALIGN(res->end, align) - 1;
83 }
84}
85
61static acpi_status 86static acpi_status
62setup_resource(struct acpi_resource *acpi_res, void *data) 87setup_resource(struct acpi_resource *acpi_res, void *data)
63{ 88{
@@ -91,11 +116,12 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
91 start = addr.minimum + addr.translation_offset; 116 start = addr.minimum + addr.translation_offset;
92 end = start + addr.address_length - 1; 117 end = start + addr.address_length - 1;
93 if (info->res_num >= max_root_bus_resources) { 118 if (info->res_num >= max_root_bus_resources) {
94 printk(KERN_WARNING "PCI: Failed to allocate 0x%lx-0x%lx " 119 if (pci_probe & PCI_USE__CRS)
95 "from %s for %s due to _CRS returning more than " 120 printk(KERN_WARNING "PCI: Failed to allocate "
96 "%d resource descriptors\n", (unsigned long) start, 121 "0x%lx-0x%lx from %s for %s due to _CRS "
97 (unsigned long) end, root->name, info->name, 122 "returning more than %d resource descriptors\n",
98 max_root_bus_resources); 123 (unsigned long) start, (unsigned long) end,
124 root->name, info->name, max_root_bus_resources);
99 return AE_OK; 125 return AE_OK;
100 } 126 }
101 127
@@ -105,14 +131,28 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
105 res->start = start; 131 res->start = start;
106 res->end = end; 132 res->end = end;
107 res->child = NULL; 133 res->child = NULL;
134 align_resource(info->bridge, res);
135
136 if (!(pci_probe & PCI_USE__CRS)) {
137 dev_printk(KERN_DEBUG, &info->bridge->dev,
138 "host bridge window %pR (ignored)\n", res);
139 return AE_OK;
140 }
108 141
109 if (insert_resource(root, res)) { 142 if (insert_resource(root, res)) {
110 printk(KERN_ERR "PCI: Failed to allocate 0x%lx-0x%lx " 143 dev_err(&info->bridge->dev,
111 "from %s for %s\n", (unsigned long) res->start, 144 "can't allocate host bridge window %pR\n", res);
112 (unsigned long) res->end, root->name, info->name);
113 } else { 145 } else {
114 info->bus->resource[info->res_num] = res; 146 info->bus->resource[info->res_num] = res;
115 info->res_num++; 147 info->res_num++;
148 if (addr.translation_offset)
149 dev_info(&info->bridge->dev, "host bridge window %pR "
150 "(PCI address [%#llx-%#llx])\n",
151 res, res->start - addr.translation_offset,
152 res->end - addr.translation_offset);
153 else
154 dev_info(&info->bridge->dev,
155 "host bridge window %pR\n", res);
116 } 156 }
117 return AE_OK; 157 return AE_OK;
118} 158}
@@ -124,6 +164,12 @@ get_current_resources(struct acpi_device *device, int busnum,
124 struct pci_root_info info; 164 struct pci_root_info info;
125 size_t size; 165 size_t size;
126 166
167 if (!(pci_probe & PCI_USE__CRS))
168 dev_info(&device->dev,
169 "ignoring host bridge windows from ACPI; "
170 "boot with \"pci=use_crs\" to use them\n");
171
172 info.bridge = device;
127 info.bus = bus; 173 info.bus = bus;
128 info.res_num = 0; 174 info.res_num = 0;
129 acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource, 175 acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
@@ -163,8 +209,9 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
163#endif 209#endif
164 210
165 if (domain && !pci_domains_supported) { 211 if (domain && !pci_domains_supported) {
166 printk(KERN_WARNING "PCI: Multiple domains not supported " 212 printk(KERN_WARNING "pci_bus %04x:%02x: "
167 "(dom %d, bus %d)\n", domain, busnum); 213 "ignored (multiple domains not supported)\n",
214 domain, busnum);
168 return NULL; 215 return NULL;
169 } 216 }
170 217
@@ -188,7 +235,8 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
188 */ 235 */
189 sd = kzalloc(sizeof(*sd), GFP_KERNEL); 236 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
190 if (!sd) { 237 if (!sd) {
191 printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum); 238 printk(KERN_WARNING "pci_bus %04x:%02x: "
239 "ignored (out of memory)\n", domain, busnum);
192 return NULL; 240 return NULL;
193 } 241 }
194 242
@@ -209,9 +257,7 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
209 } else { 257 } else {
210 bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd); 258 bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
211 if (bus) { 259 if (bus) {
212 if (pci_probe & PCI_USE__CRS) 260 get_current_resources(device, busnum, domain, bus);
213 get_current_resources(device, busnum, domain,
214 bus);
215 bus->subordinate = pci_scan_child_bus(bus); 261 bus->subordinate = pci_scan_child_bus(bus);
216 } 262 }
217 } 263 }
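
To make the align_resource() arithmetic above concrete, a worked example with a hypothetical BIOS-misreported memory window (align = 16 for IORESOURCE_MEM):

    /*
     *   start = 0xA0000008:
     *     0xA0000008 & ~(16 - 1)     -> 0xA0000000   (start rounded down)
     *
     *   end = 0xA00FFFFD (end + 1 = 0xA00FFFFE, not 16-byte aligned):
     *     ALIGN(0xA00FFFFD, 16) - 1  -> 0xA00FFFFF   (end rounded up)
     *
     * The window only ever grows, so no address the BIOS reported
     * becomes unreachable; the fixups just restore decoder-legal bounds.
     */
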
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 572ee9782f2a..95ecbd495955 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -6,10 +6,10 @@
6 6
7#ifdef CONFIG_X86_64 7#ifdef CONFIG_X86_64
8#include <asm/pci-direct.h> 8#include <asm/pci-direct.h>
9#include <asm/mpspec.h>
10#include <linux/cpumask.h>
11#endif 9#endif
12 10
11#include "bus_numa.h"
12
13/* 13/*
14 * This discovers the pcibus <-> node mapping on AMD K8. 14 * This discovers the pcibus <-> node mapping on AMD K8.
15 * also get peer root bus resource for io,mmio 15 * also get peer root bus resource for io,mmio
@@ -17,67 +17,6 @@
17 17
18#ifdef CONFIG_X86_64 18#ifdef CONFIG_X86_64
19 19
20/*
21 * sub bus (transparent) will use entres from 3 to store extra from root,
22 * so need to make sure have enought slot there, increase PCI_BUS_NUM_RESOURCES?
23 */
24#define RES_NUM 16
25struct pci_root_info {
26 char name[12];
27 unsigned int res_num;
28 struct resource res[RES_NUM];
29 int bus_min;
30 int bus_max;
31 int node;
32 int link;
33};
34
35/* 4 at this time, it may become to 32 */
36#define PCI_ROOT_NR 4
37static int pci_root_num;
38static struct pci_root_info pci_root_info[PCI_ROOT_NR];
39
40void x86_pci_root_bus_res_quirks(struct pci_bus *b)
41{
42 int i;
43 int j;
44 struct pci_root_info *info;
45
46 /* don't go for it if _CRS is used already */
47 if (b->resource[0] != &ioport_resource ||
48 b->resource[1] != &iomem_resource)
49 return;
50
51 /* if only one root bus, don't need to anything */
52 if (pci_root_num < 2)
53 return;
54
55 for (i = 0; i < pci_root_num; i++) {
56 if (pci_root_info[i].bus_min == b->number)
57 break;
58 }
59
60 if (i == pci_root_num)
61 return;
62
63 printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
64 b->number);
65
66 info = &pci_root_info[i];
67 for (j = 0; j < info->res_num; j++) {
68 struct resource *res;
69 struct resource *root;
70
71 res = &info->res[j];
72 b->resource[j] = res;
73 if (res->flags & IORESOURCE_IO)
74 root = &ioport_resource;
75 else
76 root = &iomem_resource;
77 insert_resource(root, res);
78 }
79}
80
81#define RANGE_NUM 16 20#define RANGE_NUM 16
82 21
83struct res_range { 22struct res_range {
@@ -130,52 +69,6 @@ static void __init update_range(struct res_range *range, size_t start,
130 } 69 }
131} 70}
132 71
133static void __init update_res(struct pci_root_info *info, size_t start,
134 size_t end, unsigned long flags, int merge)
135{
136 int i;
137 struct resource *res;
138
139 if (!merge)
140 goto addit;
141
142 /* try to merge it with old one */
143 for (i = 0; i < info->res_num; i++) {
144 size_t final_start, final_end;
145 size_t common_start, common_end;
146
147 res = &info->res[i];
148 if (res->flags != flags)
149 continue;
150
151 common_start = max((size_t)res->start, start);
152 common_end = min((size_t)res->end, end);
153 if (common_start > common_end + 1)
154 continue;
155
156 final_start = min((size_t)res->start, start);
157 final_end = max((size_t)res->end, end);
158
159 res->start = final_start;
160 res->end = final_end;
161 return;
162 }
163
164addit:
165
166 /* need to add that */
167 if (info->res_num >= RES_NUM)
168 return;
169
170 res = &info->res[info->res_num];
171 res->name = info->name;
172 res->flags = flags;
173 res->start = start;
174 res->end = end;
175 res->child = NULL;
176 info->res_num++;
177}
178
179struct pci_hostbridge_probe { 72struct pci_hostbridge_probe {
180 u32 bus; 73 u32 bus;
181 u32 slot; 74 u32 slot;
@@ -230,7 +123,6 @@ static int __init early_fill_mp_bus_info(void)
230 int j; 123 int j;
231 unsigned bus; 124 unsigned bus;
232 unsigned slot; 125 unsigned slot;
233 int found;
234 int node; 126 int node;
235 int link; 127 int link;
236 int def_node; 128 int def_node;
@@ -247,7 +139,7 @@ static int __init early_fill_mp_bus_info(void)
247 if (!early_pci_allowed()) 139 if (!early_pci_allowed())
248 return -1; 140 return -1;
249 141
250 found = 0; 142 found_all_numa_early = 0;
251 for (i = 0; i < ARRAY_SIZE(pci_probes); i++) { 143 for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
252 u32 id; 144 u32 id;
253 u16 device; 145 u16 device;
@@ -261,12 +153,12 @@ static int __init early_fill_mp_bus_info(void)
261 device = (id>>16) & 0xffff; 153 device = (id>>16) & 0xffff;
262 if (pci_probes[i].vendor == vendor && 154 if (pci_probes[i].vendor == vendor &&
263 pci_probes[i].device == device) { 155 pci_probes[i].device == device) {
264 found = 1; 156 found_all_numa_early = 1;
265 break; 157 break;
266 } 158 }
267 } 159 }
268 160
269 if (!found) 161 if (!found_all_numa_early)
270 return 0; 162 return 0;
271 163
272 pci_root_num = 0; 164 pci_root_num = 0;
@@ -488,7 +380,7 @@ static int __init early_fill_mp_bus_info(void)
488 info = &pci_root_info[i]; 380 info = &pci_root_info[i];
489 res_num = info->res_num; 381 res_num = info->res_num;
490 busnum = info->bus_min; 382 busnum = info->bus_min;
491 printk(KERN_DEBUG "bus: [%02x,%02x] on node %x link %x\n", 383 printk(KERN_DEBUG "bus: [%02x, %02x] on node %x link %x\n",
492 info->bus_min, info->bus_max, info->node, info->link); 384 info->bus_min, info->bus_max, info->node, info->link);
493 for (j = 0; j < res_num; j++) { 385 for (j = 0; j < res_num; j++) {
494 res = &info->res[j]; 386 res = &info->res[j];
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
new file mode 100644
index 000000000000..145df00e0387
--- /dev/null
+++ b/arch/x86/pci/bus_numa.c
@@ -0,0 +1,101 @@
1#include <linux/init.h>
2#include <linux/pci.h>
3
4#include "bus_numa.h"
5
6int pci_root_num;
7struct pci_root_info pci_root_info[PCI_ROOT_NR];
8int found_all_numa_early;
9
10void x86_pci_root_bus_res_quirks(struct pci_bus *b)
11{
12 int i;
13 int j;
14 struct pci_root_info *info;
15
16 /* don't go for it if _CRS is used already */
17 if (b->resource[0] != &ioport_resource ||
18 b->resource[1] != &iomem_resource)
19 return;
20
21 if (!pci_root_num)
22 return;
23
24 /* for amd, if only one root bus, don't need to do anything */
25 if (pci_root_num < 2 && found_all_numa_early)
26 return;
27
28 for (i = 0; i < pci_root_num; i++) {
29 if (pci_root_info[i].bus_min == b->number)
30 break;
31 }
32
33 if (i == pci_root_num)
34 return;
35
36 printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
37 b->number);
38
39 info = &pci_root_info[i];
40 for (j = 0; j < info->res_num; j++) {
41 struct resource *res;
42 struct resource *root;
43
44 res = &info->res[j];
45 b->resource[j] = res;
46 if (res->flags & IORESOURCE_IO)
47 root = &ioport_resource;
48 else
49 root = &iomem_resource;
50 insert_resource(root, res);
51 }
52}
53
54void __init update_res(struct pci_root_info *info, size_t start,
55 size_t end, unsigned long flags, int merge)
56{
57 int i;
58 struct resource *res;
59
60 if (start > end)
61 return;
62
63 if (!merge)
64 goto addit;
65
66 /* try to merge it with old one */
67 for (i = 0; i < info->res_num; i++) {
68 size_t final_start, final_end;
69 size_t common_start, common_end;
70
71 res = &info->res[i];
72 if (res->flags != flags)
73 continue;
74
75 common_start = max((size_t)res->start, start);
76 common_end = min((size_t)res->end, end);
77 if (common_start > common_end + 1)
78 continue;
79
80 final_start = min((size_t)res->start, start);
81 final_end = max((size_t)res->end, end);
82
83 res->start = final_start;
84 res->end = final_end;
85 return;
86 }
87
88addit:
89
90	/* no merge happened; add a new entry */
91 if (info->res_num >= RES_NUM)
92 return;
93
94 res = &info->res[info->res_num];
95 res->name = info->name;
96 res->flags = flags;
97 res->start = start;
98 res->end = end;
99 res->child = NULL;
100 info->res_num++;
101}
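
The merge branch of update_res() above is plain interval coalescing: the new [start, end] range is folded into an existing resource with matching flags whenever the two ranges overlap or are directly adjacent. A minimal standalone sketch of that test, using a made-up struct range instead of the kernel's struct resource:

#include <stdio.h>

/* hypothetical stand-in for the kernel's struct resource */
struct range { unsigned long start, end; };

/* Fold 'b' into 'a' if the two ranges overlap or are adjacent;
 * returns 1 on merge, 0 if they stay separate.  This mirrors the
 * "common_start > common_end + 1" rejection test in update_res(). */
static int try_merge(struct range *a, const struct range *b)
{
	unsigned long common_start = a->start > b->start ? a->start : b->start;
	unsigned long common_end   = a->end   < b->end   ? a->end   : b->end;

	if (common_start > common_end + 1)	/* disjoint, not even adjacent */
		return 0;

	a->start = a->start < b->start ? a->start : b->start;
	a->end   = a->end   > b->end   ? a->end   : b->end;
	return 1;
}

int main(void)
{
	struct range a = { 0x1000, 0x1fff }, b = { 0x2000, 0x2fff };

	if (try_merge(&a, &b))			/* adjacent, so they coalesce */
		printf("[%lx, %lx]\n", a.start, a.end);	/* [1000, 2fff] */
	return 0;
}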
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h
new file mode 100644
index 000000000000..adbc23fe82ac
--- /dev/null
+++ b/arch/x86/pci/bus_numa.h
@@ -0,0 +1,27 @@
1#ifdef CONFIG_X86_64
2
3/*
4 * a subordinate (transparent) bus will use entries from index 3 on to
5 * store extra ranges from the root, so make sure we have enough slots
6 * there. Should we increase PCI_BUS_NUM_RESOURCES?
7 */
8#define RES_NUM 16
9struct pci_root_info {
10 char name[12];
11 unsigned int res_num;
12 struct resource res[RES_NUM];
13 int bus_min;
14 int bus_max;
15 int node;
16 int link;
17};
18
19/* 4 at this time; it may grow to 32 */
20#define PCI_ROOT_NR 4
21extern int pci_root_num;
22extern struct pci_root_info pci_root_info[PCI_ROOT_NR];
23extern int found_all_numa_early;
24
25extern void update_res(struct pci_root_info *info, size_t start,
26 size_t end, unsigned long flags, int merge);
27#endif
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 1331fcf26143..d2552c68e94d 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -410,8 +410,6 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
410 return bus; 410 return bus;
411} 411}
412 412
413extern u8 pci_cache_line_size;
414
415int __init pcibios_init(void) 413int __init pcibios_init(void)
416{ 414{
417 struct cpuinfo_x86 *c = &boot_cpu_data; 415 struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -422,15 +420,19 @@ int __init pcibios_init(void)
422 } 420 }
423 421
424 /* 422 /*
425 * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8 423 * Set PCI cacheline size to that of the CPU if the CPU has reported it.
 426 * and P4. It's also good for 386/486s (which actually have 16) 424 * (For older CPUs that don't support cpuid, we set it to 32 bytes,
 425 * which is also good for 386/486s, which actually have 16,
 427 * as quite a few PCI devices do not support smaller values. 426 * as quite a few PCI devices do not support smaller values.)
428 */ 427 */
429 pci_cache_line_size = 32 >> 2; 428 if (c->x86_clflush_size > 0) {
430 if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD) 429 pci_dfl_cache_line_size = c->x86_clflush_size >> 2;
431 pci_cache_line_size = 64 >> 2; /* K7 & K8 */ 430 printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
432 else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL) 431 pci_dfl_cache_line_size << 2);
433 pci_cache_line_size = 128 >> 2; /* P4 */ 432 } else {
433 pci_dfl_cache_line_size = 32 >> 2;
434 printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
435 }
434 436
435 pcibios_resource_survey(); 437 pcibios_resource_survey();
436 438
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index aaf26ae58cd5..d1067d539bee 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -12,8 +12,6 @@ u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
12 u32 v; 12 u32 v;
13 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 13 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
14 v = inl(0xcfc); 14 v = inl(0xcfc);
15 if (v != 0xffffffff)
16 pr_debug("%x reading 4 from %x: %x\n", slot, offset, v);
17 return v; 15 return v;
18} 16}
19 17
@@ -22,7 +20,6 @@ u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
22 u8 v; 20 u8 v;
23 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 21 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
24 v = inb(0xcfc + (offset&3)); 22 v = inb(0xcfc + (offset&3));
25 pr_debug("%x reading 1 from %x: %x\n", slot, offset, v);
26 return v; 23 return v;
27} 24}
28 25
@@ -31,28 +28,24 @@ u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
31 u16 v; 28 u16 v;
32 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 29 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
33 v = inw(0xcfc + (offset&2)); 30 v = inw(0xcfc + (offset&2));
34 pr_debug("%x reading 2 from %x: %x\n", slot, offset, v);
35 return v; 31 return v;
36} 32}
37 33
38void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, 34void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
39 u32 val) 35 u32 val)
40{ 36{
41 pr_debug("%x writing to %x: %x\n", slot, offset, val);
42 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 37 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
43 outl(val, 0xcfc); 38 outl(val, 0xcfc);
44} 39}
45 40
46void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val) 41void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
47{ 42{
48 pr_debug("%x writing to %x: %x\n", slot, offset, val);
49 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 43 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
50 outb(val, 0xcfc + (offset&3)); 44 outb(val, 0xcfc + (offset&3));
51} 45}
52 46
53void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val) 47void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val)
54{ 48{
55 pr_debug("%x writing to %x: %x\n", slot, offset, val);
56 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 49 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
57 outw(val, 0xcfc + (offset&2)); 50 outw(val, 0xcfc + (offset&2));
58} 51}
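
All of the accessors in early.c share the same CONFIG_ADDRESS word written to port 0xCF8: enable bit 31, then bus, slot, function and register offset in fixed fields. A sketch of just the encoding, with no port I/O so it runs anywhere:

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int u32;

/* Mechanism #1 CONFIG_ADDRESS layout: bit 31 enable, bits 23:16 bus,
 * 15:11 slot, 10:8 function, 7:2 dword-aligned register offset. */
static u32 conf1_address(u8 bus, u8 slot, u8 func, u8 offset)
{
	return 0x80000000u | (bus << 16) | (slot << 11) | (func << 8) | offset;
}

int main(void)
{
	/* bus 0, device 0x1f, function 3, register 0x40 */
	printf("0x%08x\n", conf1_address(0, 0x1f, 3, 0x40)); /* 0x8000fb40 */
	return 0;
}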
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index b22d13b0c71d..5dc9e8c63fcd 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -129,7 +129,9 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
129 continue; 129 continue;
130 if (!r->start || 130 if (!r->start ||
131 pci_claim_resource(dev, idx) < 0) { 131 pci_claim_resource(dev, idx) < 0) {
132 dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); 132 dev_info(&dev->dev,
133 "can't reserve window %pR\n",
134 r);
133 /* 135 /*
134 * Something is wrong with the region. 136 * Something is wrong with the region.
135 * Invalidate the resource to prevent 137 * Invalidate the resource to prevent
@@ -144,16 +146,29 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
144 } 146 }
145} 147}
146 148
149struct pci_check_idx_range {
150 int start;
151 int end;
152};
153
147static void __init pcibios_allocate_resources(int pass) 154static void __init pcibios_allocate_resources(int pass)
148{ 155{
149 struct pci_dev *dev = NULL; 156 struct pci_dev *dev = NULL;
150 int idx, disabled; 157 int idx, disabled, i;
151 u16 command; 158 u16 command;
152 struct resource *r; 159 struct resource *r;
153 160
161 struct pci_check_idx_range idx_range[] = {
162 { PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
163#ifdef CONFIG_PCI_IOV
164 { PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
165#endif
166 };
167
154 for_each_pci_dev(dev) { 168 for_each_pci_dev(dev) {
155 pci_read_config_word(dev, PCI_COMMAND, &command); 169 pci_read_config_word(dev, PCI_COMMAND, &command);
156 for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) { 170 for (i = 0; i < ARRAY_SIZE(idx_range); i++)
171 for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
157 r = &dev->resource[idx]; 172 r = &dev->resource[idx];
158 if (r->parent) /* Already allocated */ 173 if (r->parent) /* Already allocated */
159 continue; 174 continue;
@@ -164,12 +179,12 @@ static void __init pcibios_allocate_resources(int pass)
164 else 179 else
165 disabled = !(command & PCI_COMMAND_MEMORY); 180 disabled = !(command & PCI_COMMAND_MEMORY);
166 if (pass == disabled) { 181 if (pass == disabled) {
167 dev_dbg(&dev->dev, "resource %#08llx-%#08llx (f=%lx, d=%d, p=%d)\n", 182 dev_dbg(&dev->dev,
168 (unsigned long long) r->start, 183 "BAR %d: reserving %pr (d=%d, p=%d)\n",
169 (unsigned long long) r->end, 184 idx, r, disabled, pass);
170 r->flags, disabled, pass);
171 if (pci_claim_resource(dev, idx) < 0) { 185 if (pci_claim_resource(dev, idx) < 0) {
172 dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); 186 dev_info(&dev->dev,
187 "can't reserve %pR\n", r);
173 /* We'll assign a new address later */ 188 /* We'll assign a new address later */
174 r->end -= r->start; 189 r->end -= r->start;
175 r->start = 0; 190 r->start = 0;
@@ -182,7 +197,7 @@ static void __init pcibios_allocate_resources(int pass)
182 /* Turn the ROM off, leave the resource region, 197 /* Turn the ROM off, leave the resource region,
183 * but keep it unregistered. */ 198 * but keep it unregistered. */
184 u32 reg; 199 u32 reg;
185 dev_dbg(&dev->dev, "disabling ROM\n"); 200 dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
186 r->flags &= ~IORESOURCE_ROM_ENABLE; 201 r->flags &= ~IORESOURCE_ROM_ENABLE;
187 pci_read_config_dword(dev, 202 pci_read_config_dword(dev,
188 dev->rom_base_reg, &reg); 203 dev->rom_base_reg, &reg);
@@ -282,6 +297,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
282 return -EINVAL; 297 return -EINVAL;
283 298
284 prot = pgprot_val(vma->vm_page_prot); 299 prot = pgprot_val(vma->vm_page_prot);
300
301 /*
 302 * Return an error if PAT is not enabled and write_combine is requested.
 303 * Caller can follow up with a UC MINUS request and add a WC MTRR if
 304 * there is a free MTRR slot.
305 */
306 if (!pat_enabled && write_combine)
307 return -EINVAL;
308
285 if (pat_enabled && write_combine) 309 if (pat_enabled && write_combine)
286 prot |= _PAGE_CACHE_WC; 310 prot |= _PAGE_CACHE_WC;
287 else if (pat_enabled || boot_cpu_data.x86 > 3) 311 else if (pat_enabled || boot_cpu_data.x86 > 3)
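
The idx_range[] table above simply swaps one flat loop over 0..PCI_ROM_RESOURCE for a loop over explicit index ranges, so the SR-IOV BARs can be walked only when CONFIG_PCI_IOV is set. The pattern in isolation, with purely illustrative index values in place of the real PCI_STD_RESOURCES constants:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct idx_range { int start, end; };

int main(void)
{
	/* hypothetical index values, for illustration only */
	const struct idx_range ranges[] = {
		{ 0, 5 },	/* standard BARs */
		{ 7, 12 },	/* IOV BARs, included conditionally */
	};
	unsigned int i;
	int idx;

	for (i = 0; i < ARRAY_SIZE(ranges); i++)
		for (idx = ranges[i].start; idx <= ranges[i].end; idx++)
			printf("checking resource %d\n", idx);
	return 0;
}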
diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c
new file mode 100644
index 000000000000..b7a55dc55d13
--- /dev/null
+++ b/arch/x86/pci/intel_bus.c
@@ -0,0 +1,90 @@
1/*
2 * Read the IO ranges from the IOH PCI config space; this has to happen
 * after mmconfig is set up.
3 */
4
5#include <linux/delay.h>
6#include <linux/dmi.h>
7#include <linux/pci.h>
8#include <linux/init.h>
9#include <asm/pci_x86.h>
10
11#include "bus_numa.h"
12
13static inline void print_ioh_resources(struct pci_root_info *info)
14{
15 int res_num;
16 int busnum;
17 int i;
18
19 printk(KERN_DEBUG "IOH bus: [%02x, %02x]\n",
20 info->bus_min, info->bus_max);
21 res_num = info->res_num;
22 busnum = info->bus_min;
23 for (i = 0; i < res_num; i++) {
24 struct resource *res;
25
26 res = &info->res[i];
27 printk(KERN_DEBUG "IOH bus: %02x index %x %s: [%llx, %llx]\n",
28 busnum, i,
29 (res->flags & IORESOURCE_IO) ? "io port" :
30 "mmio",
31 res->start, res->end);
32 }
33}
34
35#define IOH_LIO 0x108
36#define IOH_LMMIOL 0x10c
37#define IOH_LMMIOH 0x110
38#define IOH_LMMIOH_BASEU 0x114
39#define IOH_LMMIOH_LIMITU 0x118
40#define IOH_LCFGBUS 0x11c
41
42static void __devinit pci_root_bus_res(struct pci_dev *dev)
43{
44 u16 word;
45 u32 dword;
46 struct pci_root_info *info;
47 u16 io_base, io_end;
48 u32 mmiol_base, mmiol_end;
49 u64 mmioh_base, mmioh_end;
50 int bus_base, bus_end;
51
52 if (pci_root_num >= PCI_ROOT_NR) {
53 printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n");
54 return;
55 }
56
57 info = &pci_root_info[pci_root_num];
58 pci_root_num++;
59
60 pci_read_config_word(dev, IOH_LCFGBUS, &word);
61 bus_base = (word & 0xff);
62 bus_end = (word & 0xff00) >> 8;
63 sprintf(info->name, "PCI Bus #%02x", bus_base);
64 info->bus_min = bus_base;
65 info->bus_max = bus_end;
66
67 pci_read_config_word(dev, IOH_LIO, &word);
68 io_base = (word & 0xf0) << (12 - 4);
69 io_end = (word & 0xf000) | 0xfff;
70 update_res(info, io_base, io_end, IORESOURCE_IO, 0);
71
72 pci_read_config_dword(dev, IOH_LMMIOL, &dword);
73 mmiol_base = (dword & 0xff00) << (24 - 8);
74 mmiol_end = (dword & 0xff000000) | 0xffffff;
75 update_res(info, mmiol_base, mmiol_end, IORESOURCE_MEM, 0);
76
77 pci_read_config_dword(dev, IOH_LMMIOH, &dword);
78 mmioh_base = ((u64)(dword & 0xfc00)) << (26 - 10);
79 mmioh_end = ((u64)(dword & 0xfc000000) | 0x3ffffff);
80 pci_read_config_dword(dev, IOH_LMMIOH_BASEU, &dword);
81 mmioh_base |= ((u64)(dword & 0x7ffff)) << 32;
82 pci_read_config_dword(dev, IOH_LMMIOH_LIMITU, &dword);
83 mmioh_end |= ((u64)(dword & 0x7ffff)) << 32;
84 update_res(info, mmioh_base, mmioh_end, IORESOURCE_MEM, 0);
85
86 print_ioh_resources(info);
87}
88
89/* intel IOH */
90DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, pci_root_bus_res);
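
pci_root_bus_res() above reassembles full ranges from packed IOH base/limit registers. IOH_LIO, for example, holds only the top four bits of a 16-bit I/O base and limit, with the limit's low bits implicitly all-ones. A sketch of just that decode (register layout as used by the code above; the sample value is made up):

#include <stdio.h>

typedef unsigned short u16;

/* IOH_LIO packs a 4 KB-granular I/O window into two 4-bit fields:
 * bits 7:4 are base[15:12], bits 15:12 are limit[15:12], and the
 * limit's low 12 bits are implicitly all-ones. */
static void decode_ioh_lio(u16 word, u16 *base, u16 *end)
{
	*base = (word & 0x00f0) << (12 - 4);
	*end  = (word & 0xf000) | 0x0fff;
}

int main(void)
{
	u16 base, end;

	decode_ioh_lio(0x3020, &base, &end);	/* base field 2, limit field 3 */
	printf("io [%04x, %04x]\n", base, end);	/* io [2000, 3fff] */
	return 0;
}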
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 602c172d3bd5..b19d1e54201e 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -15,48 +15,98 @@
15#include <linux/acpi.h> 15#include <linux/acpi.h>
16#include <linux/sfi_acpi.h> 16#include <linux/sfi_acpi.h>
17#include <linux/bitmap.h> 17#include <linux/bitmap.h>
18#include <linux/sort.h> 18#include <linux/dmi.h>
19#include <asm/e820.h> 19#include <asm/e820.h>
20#include <asm/pci_x86.h> 20#include <asm/pci_x86.h>
21#include <asm/acpi.h> 21#include <asm/acpi.h>
22 22
23#define PREFIX "PCI: " 23#define PREFIX "PCI: "
24 24
25/* aperture is up to 256MB but BIOS may reserve less */
26#define MMCONFIG_APER_MIN (2 * 1024*1024)
27#define MMCONFIG_APER_MAX (256 * 1024*1024)
28
29/* Indicate if the mmcfg resources have been placed into the resource table. */ 25/* Indicate if the mmcfg resources have been placed into the resource table. */
30static int __initdata pci_mmcfg_resources_inserted; 26static int __initdata pci_mmcfg_resources_inserted;
31 27
32static __init int extend_mmcfg(int num) 28LIST_HEAD(pci_mmcfg_list);
29
30static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
33{ 31{
34 struct acpi_mcfg_allocation *new; 32 if (cfg->res.parent)
35 int new_num = pci_mmcfg_config_num + num; 33 release_resource(&cfg->res);
34 list_del(&cfg->list);
35 kfree(cfg);
36}
36 37
37 new = kzalloc(sizeof(pci_mmcfg_config[0]) * new_num, GFP_KERNEL); 38static __init void free_all_mmcfg(void)
38 if (!new) 39{
39 return -1; 40 struct pci_mmcfg_region *cfg, *tmp;
40 41
41 if (pci_mmcfg_config) { 42 pci_mmcfg_arch_free();
42 memcpy(new, pci_mmcfg_config, 43 list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
43 sizeof(pci_mmcfg_config[0]) * new_num); 44 pci_mmconfig_remove(cfg);
44 kfree(pci_mmcfg_config); 45}
46
47static __init void list_add_sorted(struct pci_mmcfg_region *new)
48{
49 struct pci_mmcfg_region *cfg;
50
51 /* keep list sorted by segment and starting bus number */
52 list_for_each_entry(cfg, &pci_mmcfg_list, list) {
53 if (cfg->segment > new->segment ||
54 (cfg->segment == new->segment &&
55 cfg->start_bus >= new->start_bus)) {
56 list_add_tail(&new->list, &cfg->list);
57 return;
58 }
45 } 59 }
46 pci_mmcfg_config = new; 60 list_add_tail(&new->list, &pci_mmcfg_list);
61}
47 62
48 return 0; 63static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
64 int end, u64 addr)
65{
66 struct pci_mmcfg_region *new;
67 int num_buses;
68 struct resource *res;
69
70 if (addr == 0)
71 return NULL;
72
73 new = kzalloc(sizeof(*new), GFP_KERNEL);
74 if (!new)
75 return NULL;
76
77 new->address = addr;
78 new->segment = segment;
79 new->start_bus = start;
80 new->end_bus = end;
81
82 list_add_sorted(new);
83
84 num_buses = end - start + 1;
85 res = &new->res;
86 res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
87 res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
88 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
89 snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
90 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
91 res->name = new->name;
92
93 printk(KERN_INFO PREFIX "MMCONFIG for domain %04x [bus %02x-%02x] at "
94 "%pR (base %#lx)\n", segment, start, end, &new->res,
95 (unsigned long) addr);
96
97 return new;
49} 98}
50 99
51static __init void fill_one_mmcfg(u64 addr, int segment, int start, int end) 100struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
52{ 101{
53 int i = pci_mmcfg_config_num; 102 struct pci_mmcfg_region *cfg;
54 103
55 pci_mmcfg_config_num++; 104 list_for_each_entry(cfg, &pci_mmcfg_list, list)
56 pci_mmcfg_config[i].address = addr; 105 if (cfg->segment == segment &&
57 pci_mmcfg_config[i].pci_segment = segment; 106 cfg->start_bus <= bus && bus <= cfg->end_bus)
58 pci_mmcfg_config[i].start_bus_number = start; 107 return cfg;
59 pci_mmcfg_config[i].end_bus_number = end; 108
109 return NULL;
60} 110}
61 111
62static const char __init *pci_mmcfg_e7520(void) 112static const char __init *pci_mmcfg_e7520(void)
@@ -68,11 +118,9 @@ static const char __init *pci_mmcfg_e7520(void)
68 if (win == 0x0000 || win == 0xf000) 118 if (win == 0x0000 || win == 0xf000)
69 return NULL; 119 return NULL;
70 120
71 if (extend_mmcfg(1) == -1) 121 if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
72 return NULL; 122 return NULL;
73 123
74 fill_one_mmcfg(win << 16, 0, 0, 255);
75
76 return "Intel Corporation E7520 Memory Controller Hub"; 124 return "Intel Corporation E7520 Memory Controller Hub";
77} 125}
78 126
@@ -114,11 +162,9 @@ static const char __init *pci_mmcfg_intel_945(void)
114 if ((pciexbar & mask) >= 0xf0000000U) 162 if ((pciexbar & mask) >= 0xf0000000U)
115 return NULL; 163 return NULL;
116 164
117 if (extend_mmcfg(1) == -1) 165 if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
118 return NULL; 166 return NULL;
119 167
120 fill_one_mmcfg(pciexbar & mask, 0, 0, (len >> 20) - 1);
121
122 return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; 168 return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
123} 169}
124 170
@@ -127,7 +173,7 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
127 u32 low, high, address; 173 u32 low, high, address;
128 u64 base, msr; 174 u64 base, msr;
129 int i; 175 int i;
130 unsigned segnbits = 0, busnbits; 176 unsigned segnbits = 0, busnbits, end_bus;
131 177
132 if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF)) 178 if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
133 return NULL; 179 return NULL;
@@ -161,11 +207,13 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
161 busnbits = 8; 207 busnbits = 8;
162 } 208 }
163 209
164 if (extend_mmcfg(1 << segnbits) == -1) 210 end_bus = (1 << busnbits) - 1;
165 return NULL;
166
167 for (i = 0; i < (1 << segnbits); i++) 211 for (i = 0; i < (1 << segnbits); i++)
168 fill_one_mmcfg(base + (1<<28) * i, i, 0, (1 << busnbits) - 1); 212 if (pci_mmconfig_add(i, 0, end_bus,
213 base + (1<<28) * i) == NULL) {
214 free_all_mmcfg();
215 return NULL;
216 }
169 217
170 return "AMD Family 10h NB"; 218 return "AMD Family 10h NB";
171} 219}
@@ -190,7 +238,7 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void)
190 /* 238 /*
191 * do check if amd fam10h already took over 239 * do check if amd fam10h already took over
192 */ 240 */
193 if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked) 241 if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
194 return NULL; 242 return NULL;
195 243
196 mcp55_checked = true; 244 mcp55_checked = true;
@@ -213,16 +261,14 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void)
213 if (!(extcfg & extcfg_enable_mask)) 261 if (!(extcfg & extcfg_enable_mask))
214 continue; 262 continue;
215 263
216 if (extend_mmcfg(1) == -1)
217 continue;
218
219 size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift; 264 size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
220 base = extcfg & extcfg_base_mask[size_index]; 265 base = extcfg & extcfg_base_mask[size_index];
 221 /* base could be > 4G */ 266 /* base could be > 4G */
222 base <<= extcfg_base_lshift; 267 base <<= extcfg_base_lshift;
223 start = (extcfg & extcfg_start_mask) >> extcfg_start_shift; 268 start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
224 end = start + extcfg_sizebus[size_index] - 1; 269 end = start + extcfg_sizebus[size_index] - 1;
225 fill_one_mmcfg(base, 0, start, end); 270 if (pci_mmconfig_add(0, start, end, base) == NULL)
271 continue;
226 mcp55_mmconf_found++; 272 mcp55_mmconf_found++;
227 } 273 }
228 274
@@ -253,45 +299,27 @@ static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
253 0x0369, pci_mmcfg_nvidia_mcp55 }, 299 0x0369, pci_mmcfg_nvidia_mcp55 },
254}; 300};
255 301
256static int __init cmp_mmcfg(const void *x1, const void *x2)
257{
258 const typeof(pci_mmcfg_config[0]) *m1 = x1;
259 const typeof(pci_mmcfg_config[0]) *m2 = x2;
260 int start1, start2;
261
262 start1 = m1->start_bus_number;
263 start2 = m2->start_bus_number;
264
265 return start1 - start2;
266}
267
268static void __init pci_mmcfg_check_end_bus_number(void) 302static void __init pci_mmcfg_check_end_bus_number(void)
269{ 303{
270 int i; 304 struct pci_mmcfg_region *cfg, *cfgx;
271 typeof(pci_mmcfg_config[0]) *cfg, *cfgx;
272
273 /* sort them at first */
274 sort(pci_mmcfg_config, pci_mmcfg_config_num,
275 sizeof(pci_mmcfg_config[0]), cmp_mmcfg, NULL);
276 305
 277 /* last one */ 306 /* last one */
278 if (pci_mmcfg_config_num > 0) { 307 cfg = list_entry(pci_mmcfg_list.prev, typeof(*cfg), list);
279 i = pci_mmcfg_config_num - 1; 308 if (cfg)
280 cfg = &pci_mmcfg_config[i]; 309 if (cfg->end_bus < cfg->start_bus)
281 if (cfg->end_bus_number < cfg->start_bus_number) 310 cfg->end_bus = 255;
282 cfg->end_bus_number = 255;
283 }
284 311
285 /* don't overlap please */ 312 if (list_is_singular(&pci_mmcfg_list))
286 for (i = 0; i < pci_mmcfg_config_num - 1; i++) { 313 return;
287 cfg = &pci_mmcfg_config[i];
288 cfgx = &pci_mmcfg_config[i+1];
289 314
290 if (cfg->end_bus_number < cfg->start_bus_number) 315 /* don't overlap please */
291 cfg->end_bus_number = 255; 316 list_for_each_entry(cfg, &pci_mmcfg_list, list) {
317 if (cfg->end_bus < cfg->start_bus)
318 cfg->end_bus = 255;
292 319
293 if (cfg->end_bus_number >= cfgx->start_bus_number) 320 cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
294 cfg->end_bus_number = cfgx->start_bus_number - 1; 321 if (cfg != cfgx && cfg->end_bus >= cfgx->start_bus)
322 cfg->end_bus = cfgx->start_bus - 1;
295 } 323 }
296} 324}
297 325
@@ -306,8 +334,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
306 if (!raw_pci_ops) 334 if (!raw_pci_ops)
307 return 0; 335 return 0;
308 336
309 pci_mmcfg_config_num = 0; 337 free_all_mmcfg();
310 pci_mmcfg_config = NULL;
311 338
312 for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) { 339 for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
313 bus = pci_mmcfg_probes[i].bus; 340 bus = pci_mmcfg_probes[i].bus;
@@ -322,45 +349,22 @@ static int __init pci_mmcfg_check_hostbridge(void)
322 name = pci_mmcfg_probes[i].probe(); 349 name = pci_mmcfg_probes[i].probe();
323 350
324 if (name) 351 if (name)
325 printk(KERN_INFO "PCI: Found %s with MMCONFIG support.\n", 352 printk(KERN_INFO PREFIX "%s with MMCONFIG support\n",
326 name); 353 name);
327 } 354 }
328 355
 329 /* some BIOSes report a bogus end_bus_number, fix it up */ 356 /* some BIOSes report a bogus end_bus_number, fix it up */
330 pci_mmcfg_check_end_bus_number(); 357 pci_mmcfg_check_end_bus_number();
331 358
332 return pci_mmcfg_config_num != 0; 359 return !list_empty(&pci_mmcfg_list);
333} 360}
334 361
335static void __init pci_mmcfg_insert_resources(void) 362static void __init pci_mmcfg_insert_resources(void)
336{ 363{
337#define PCI_MMCFG_RESOURCE_NAME_LEN 24 364 struct pci_mmcfg_region *cfg;
338 int i;
339 struct resource *res;
340 char *names;
341 unsigned num_buses;
342
343 res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res),
344 pci_mmcfg_config_num, GFP_KERNEL);
345 if (!res) {
346 printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n");
347 return;
348 }
349 365
350 names = (void *)&res[pci_mmcfg_config_num]; 366 list_for_each_entry(cfg, &pci_mmcfg_list, list)
351 for (i = 0; i < pci_mmcfg_config_num; i++, res++) { 367 insert_resource(&iomem_resource, &cfg->res);
352 struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i];
353 num_buses = cfg->end_bus_number - cfg->start_bus_number + 1;
354 res->name = names;
355 snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN,
356 "PCI MMCONFIG %u [%02x-%02x]", cfg->pci_segment,
357 cfg->start_bus_number, cfg->end_bus_number);
358 res->start = cfg->address + (cfg->start_bus_number << 20);
359 res->end = res->start + (num_buses << 20) - 1;
360 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
361 insert_resource(&iomem_resource, res);
362 names += PCI_MMCFG_RESOURCE_NAME_LEN;
363 }
364 368
365 /* Mark that the resources have been inserted. */ 369 /* Mark that the resources have been inserted. */
366 pci_mmcfg_resources_inserted = 1; 370 pci_mmcfg_resources_inserted = 1;
@@ -437,11 +441,12 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
437typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type); 441typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type);
438 442
439static int __init is_mmconf_reserved(check_reserved_t is_reserved, 443static int __init is_mmconf_reserved(check_reserved_t is_reserved,
440 u64 addr, u64 size, int i, 444 struct pci_mmcfg_region *cfg, int with_e820)
441 typeof(pci_mmcfg_config[0]) *cfg, int with_e820)
442{ 445{
446 u64 addr = cfg->res.start;
447 u64 size = resource_size(&cfg->res);
443 u64 old_size = size; 448 u64 old_size = size;
444 int valid = 0; 449 int valid = 0, num_buses;
445 450
446 while (!is_reserved(addr, addr + size, E820_RESERVED)) { 451 while (!is_reserved(addr, addr + size, E820_RESERVED)) {
447 size >>= 1; 452 size >>= 1;
@@ -450,19 +455,25 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
450 } 455 }
451 456
452 if (size >= (16UL<<20) || size == old_size) { 457 if (size >= (16UL<<20) || size == old_size) {
453 printk(KERN_NOTICE 458 printk(KERN_INFO PREFIX "MMCONFIG at %pR reserved in %s\n",
454 "PCI: MCFG area at %Lx reserved in %s\n", 459 &cfg->res,
455 addr, with_e820?"E820":"ACPI motherboard resources"); 460 with_e820 ? "E820" : "ACPI motherboard resources");
456 valid = 1; 461 valid = 1;
457 462
458 if (old_size != size) { 463 if (old_size != size) {
459 /* update end_bus_number */ 464 /* update end_bus */
460 cfg->end_bus_number = cfg->start_bus_number + ((size>>20) - 1); 465 cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
461 printk(KERN_NOTICE "PCI: updated MCFG configuration %d: base %lx " 466 num_buses = cfg->end_bus - cfg->start_bus + 1;
462 "segment %hu buses %u - %u\n", 467 cfg->res.end = cfg->res.start +
463 i, (unsigned long)cfg->address, cfg->pci_segment, 468 PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
464 (unsigned int)cfg->start_bus_number, 469 snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
465 (unsigned int)cfg->end_bus_number); 470 "PCI MMCONFIG %04x [bus %02x-%02x]",
471 cfg->segment, cfg->start_bus, cfg->end_bus);
472 printk(KERN_INFO PREFIX
473 "MMCONFIG for %04x [bus%02x-%02x] "
474 "at %pR (base %#lx) (size reduced!)\n",
475 cfg->segment, cfg->start_bus, cfg->end_bus,
476 &cfg->res, (unsigned long) cfg->address);
466 } 477 }
467 } 478 }
468 479
@@ -471,45 +482,26 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
471 482
472static void __init pci_mmcfg_reject_broken(int early) 483static void __init pci_mmcfg_reject_broken(int early)
473{ 484{
474 typeof(pci_mmcfg_config[0]) *cfg; 485 struct pci_mmcfg_region *cfg;
475 int i;
476 486
477 if ((pci_mmcfg_config_num == 0) || 487 list_for_each_entry(cfg, &pci_mmcfg_list, list) {
478 (pci_mmcfg_config == NULL) ||
479 (pci_mmcfg_config[0].address == 0))
480 return;
481
482 for (i = 0; i < pci_mmcfg_config_num; i++) {
483 int valid = 0; 488 int valid = 0;
484 u64 addr, size;
485
486 cfg = &pci_mmcfg_config[i];
487 addr = cfg->start_bus_number;
488 addr <<= 20;
489 addr += cfg->address;
490 size = cfg->end_bus_number + 1 - cfg->start_bus_number;
491 size <<= 20;
492 printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx "
493 "segment %hu buses %u - %u\n",
494 i, (unsigned long)cfg->address, cfg->pci_segment,
495 (unsigned int)cfg->start_bus_number,
496 (unsigned int)cfg->end_bus_number);
497 489
498 if (!early && !acpi_disabled) 490 if (!early && !acpi_disabled)
499 valid = is_mmconf_reserved(is_acpi_reserved, addr, size, i, cfg, 0); 491 valid = is_mmconf_reserved(is_acpi_reserved, cfg, 0);
500 492
501 if (valid) 493 if (valid)
502 continue; 494 continue;
503 495
504 if (!early) 496 if (!early)
505 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not" 497 printk(KERN_ERR FW_BUG PREFIX
506 " reserved in ACPI motherboard resources\n", 498 "MMCONFIG at %pR not reserved in "
507 cfg->address); 499 "ACPI motherboard resources\n", &cfg->res);
508 500
509 /* Don't try to do this check unless configuration 501 /* Don't try to do this check unless configuration
 510 type 1 is available. How about type 2? */ 502 type 1 is available. How about type 2? */
511 if (raw_pci_ops) 503 if (raw_pci_ops)
512 valid = is_mmconf_reserved(e820_all_mapped, addr, size, i, cfg, 1); 504 valid = is_mmconf_reserved(e820_all_mapped, cfg, 1);
513 505
514 if (!valid) 506 if (!valid)
515 goto reject; 507 goto reject;
@@ -518,34 +510,41 @@ static void __init pci_mmcfg_reject_broken(int early)
518 return; 510 return;
519 511
520reject: 512reject:
521 printk(KERN_INFO "PCI: Not using MMCONFIG.\n"); 513 printk(KERN_INFO PREFIX "not using MMCONFIG\n");
522 pci_mmcfg_arch_free(); 514 free_all_mmcfg();
523 kfree(pci_mmcfg_config);
524 pci_mmcfg_config = NULL;
525 pci_mmcfg_config_num = 0;
526} 515}
527 516
528static int __initdata known_bridge; 517static int __initdata known_bridge;
529 518
530static int acpi_mcfg_64bit_base_addr __initdata = FALSE; 519static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
520 struct acpi_mcfg_allocation *cfg)
521{
522 int year;
531 523
532/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */ 524 if (cfg->address < 0xFFFFFFFF)
533struct acpi_mcfg_allocation *pci_mmcfg_config; 525 return 0;
534int pci_mmcfg_config_num;
535 526
536static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
537{
538 if (!strcmp(mcfg->header.oem_id, "SGI")) 527 if (!strcmp(mcfg->header.oem_id, "SGI"))
539 acpi_mcfg_64bit_base_addr = TRUE; 528 return 0;
540 529
541 return 0; 530 if (mcfg->header.revision >= 1) {
531 if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
532 year >= 2010)
533 return 0;
534 }
535
536 printk(KERN_ERR PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
537 "is above 4GB, ignored\n", cfg->pci_segment,
538 cfg->start_bus_number, cfg->end_bus_number, cfg->address);
539 return -EINVAL;
542} 540}
543 541
544static int __init pci_parse_mcfg(struct acpi_table_header *header) 542static int __init pci_parse_mcfg(struct acpi_table_header *header)
545{ 543{
546 struct acpi_table_mcfg *mcfg; 544 struct acpi_table_mcfg *mcfg;
545 struct acpi_mcfg_allocation *cfg_table, *cfg;
547 unsigned long i; 546 unsigned long i;
548 int config_size; 547 int entries;
549 548
550 if (!header) 549 if (!header)
551 return -EINVAL; 550 return -EINVAL;
@@ -553,38 +552,33 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
553 mcfg = (struct acpi_table_mcfg *)header; 552 mcfg = (struct acpi_table_mcfg *)header;
554 553
555 /* how many config structures do we have */ 554 /* how many config structures do we have */
556 pci_mmcfg_config_num = 0; 555 free_all_mmcfg();
556 entries = 0;
557 i = header->length - sizeof(struct acpi_table_mcfg); 557 i = header->length - sizeof(struct acpi_table_mcfg);
558 while (i >= sizeof(struct acpi_mcfg_allocation)) { 558 while (i >= sizeof(struct acpi_mcfg_allocation)) {
559 ++pci_mmcfg_config_num; 559 entries++;
560 i -= sizeof(struct acpi_mcfg_allocation); 560 i -= sizeof(struct acpi_mcfg_allocation);
561 }; 561 };
562 if (pci_mmcfg_config_num == 0) { 562 if (entries == 0) {
563 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); 563 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
564 return -ENODEV; 564 return -ENODEV;
565 } 565 }
566 566
567 config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config); 567 cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
568 pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL); 568 for (i = 0; i < entries; i++) {
569 if (!pci_mmcfg_config) { 569 cfg = &cfg_table[i];
570 printk(KERN_WARNING PREFIX 570 if (acpi_mcfg_check_entry(mcfg, cfg)) {
571 "No memory for MCFG config tables\n"); 571 free_all_mmcfg();
572 return -ENOMEM;
573 }
574
575 memcpy(pci_mmcfg_config, &mcfg[1], config_size);
576
577 acpi_mcfg_oem_check(mcfg);
578
579 for (i = 0; i < pci_mmcfg_config_num; ++i) {
580 if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
581 !acpi_mcfg_64bit_base_addr) {
582 printk(KERN_ERR PREFIX
583 "MMCONFIG not in low 4GB of memory\n");
584 kfree(pci_mmcfg_config);
585 pci_mmcfg_config_num = 0;
586 return -ENODEV; 572 return -ENODEV;
587 } 573 }
574
575 if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
576 cfg->end_bus_number, cfg->address) == NULL) {
577 printk(KERN_WARNING PREFIX
578 "no memory for MCFG entries\n");
579 free_all_mmcfg();
580 return -ENOMEM;
581 }
588 } 582 }
589 583
590 return 0; 584 return 0;
@@ -614,9 +608,7 @@ static void __init __pci_mmcfg_init(int early)
614 608
615 pci_mmcfg_reject_broken(early); 609 pci_mmcfg_reject_broken(early);
616 610
617 if ((pci_mmcfg_config_num == 0) || 611 if (list_empty(&pci_mmcfg_list))
618 (pci_mmcfg_config == NULL) ||
619 (pci_mmcfg_config[0].address == 0))
620 return; 612 return;
621 613
622 if (pci_mmcfg_arch_init()) 614 if (pci_mmcfg_arch_init())
@@ -648,9 +640,7 @@ static int __init pci_mmcfg_late_insert_resources(void)
648 */ 640 */
649 if ((pci_mmcfg_resources_inserted == 1) || 641 if ((pci_mmcfg_resources_inserted == 1) ||
650 (pci_probe & PCI_PROBE_MMCONF) == 0 || 642 (pci_probe & PCI_PROBE_MMCONF) == 0 ||
651 (pci_mmcfg_config_num == 0) || 643 list_empty(&pci_mmcfg_list))
652 (pci_mmcfg_config == NULL) ||
653 (pci_mmcfg_config[0].address == 0))
654 return 1; 644 return 1;
655 645
656 /* 646 /*
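
is_mmconf_reserved() above keeps halving the MMCONFIG aperture until a prefix of it lies inside a firmware-reserved range, and only trusts the result if at least 16 MB survives. A simplified standalone variant of that search (is_reserved() is a made-up predicate here; the real code also bails out once the size drops below 16 MB):

#include <stdio.h>

typedef unsigned long long u64;

/* stand-in for e820_all_mapped()/is_acpi_reserved(): pretend only the
 * first 32 MB at 0xe0000000 is marked reserved by the firmware */
static int is_reserved(u64 start, u64 end)
{
	return start >= 0xe0000000ULL && end <= 0xe0000000ULL + (32ULL << 20);
}

/* shrink [addr, addr+size) until it fits inside a reserved range;
 * returns the surviving size, 0 if nothing fits */
static u64 reserved_prefix(u64 addr, u64 size)
{
	while (size && !is_reserved(addr, addr + size))
		size >>= 1;
	return size;
}

int main(void)
{
	u64 size = reserved_prefix(0xe0000000ULL, 256ULL << 20);

	/* 256 MB claimed, 32 MB reserved: accepted, since 32 MB >= 16 MB */
	printf("%llu MB usable\n", size >> 20);		/* 32 MB */
	return 0;
}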
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index f10a7e94a84c..90d5fd476ed4 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -27,18 +27,10 @@ static int mmcfg_last_accessed_cpu;
27 */ 27 */
28static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) 28static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
29{ 29{
30 struct acpi_mcfg_allocation *cfg; 30 struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
31 int cfg_num;
32
33 for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
34 cfg = &pci_mmcfg_config[cfg_num];
35 if (cfg->pci_segment == seg &&
36 (cfg->start_bus_number <= bus) &&
37 (cfg->end_bus_number >= bus))
38 return cfg->address;
39 }
40 31
41 /* Fall back to type 0 */ 32 if (cfg)
33 return cfg->address;
42 return 0; 34 return 0;
43} 35}
44 36
@@ -47,7 +39,7 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
47 */ 39 */
48static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn) 40static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
49{ 41{
50 u32 dev_base = base | (bus << 20) | (devfn << 12); 42 u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12);
51 int cpu = smp_processor_id(); 43 int cpu = smp_processor_id();
52 if (dev_base != mmcfg_last_accessed_device || 44 if (dev_base != mmcfg_last_accessed_device ||
53 cpu != mmcfg_last_accessed_cpu) { 45 cpu != mmcfg_last_accessed_cpu) {
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
index 94349f8b2f96..e783841bd1d7 100644
--- a/arch/x86/pci/mmconfig_64.c
+++ b/arch/x86/pci/mmconfig_64.c
@@ -12,38 +12,15 @@
12#include <asm/e820.h> 12#include <asm/e820.h>
13#include <asm/pci_x86.h> 13#include <asm/pci_x86.h>
14 14
15/* Static virtual mapping of the MMCONFIG aperture */ 15#define PREFIX "PCI: "
16struct mmcfg_virt {
17 struct acpi_mcfg_allocation *cfg;
18 char __iomem *virt;
19};
20static struct mmcfg_virt *pci_mmcfg_virt;
21
22static char __iomem *get_virt(unsigned int seg, unsigned bus)
23{
24 struct acpi_mcfg_allocation *cfg;
25 int cfg_num;
26
27 for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
28 cfg = pci_mmcfg_virt[cfg_num].cfg;
29 if (cfg->pci_segment == seg &&
30 (cfg->start_bus_number <= bus) &&
31 (cfg->end_bus_number >= bus))
32 return pci_mmcfg_virt[cfg_num].virt;
33 }
34
35 /* Fall back to type 0 */
36 return NULL;
37}
38 16
39static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) 17static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
40{ 18{
41 char __iomem *addr; 19 struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
42 20
43 addr = get_virt(seg, bus); 21 if (cfg && cfg->virt)
44 if (!addr) 22 return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
45 return NULL; 23 return NULL;
46 return addr + ((bus << 20) | (devfn << 12));
47} 24}
48 25
49static int pci_mmcfg_read(unsigned int seg, unsigned int bus, 26static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
@@ -109,42 +86,30 @@ static struct pci_raw_ops pci_mmcfg = {
109 .write = pci_mmcfg_write, 86 .write = pci_mmcfg_write,
110}; 87};
111 88
112static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg) 89static void __iomem * __init mcfg_ioremap(struct pci_mmcfg_region *cfg)
113{ 90{
114 void __iomem *addr; 91 void __iomem *addr;
115 u64 start, size; 92 u64 start, size;
93 int num_buses;
116 94
117 start = cfg->start_bus_number; 95 start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
118 start <<= 20; 96 num_buses = cfg->end_bus - cfg->start_bus + 1;
119 start += cfg->address; 97 size = PCI_MMCFG_BUS_OFFSET(num_buses);
120 size = cfg->end_bus_number + 1 - cfg->start_bus_number;
121 size <<= 20;
122 addr = ioremap_nocache(start, size); 98 addr = ioremap_nocache(start, size);
123 if (addr) { 99 if (addr)
124 printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n", 100 addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
125 start, start + size - 1);
126 addr -= cfg->start_bus_number << 20;
127 }
128 return addr; 101 return addr;
129} 102}
130 103
131int __init pci_mmcfg_arch_init(void) 104int __init pci_mmcfg_arch_init(void)
132{ 105{
133 int i; 106 struct pci_mmcfg_region *cfg;
134 pci_mmcfg_virt = kzalloc(sizeof(*pci_mmcfg_virt) *
135 pci_mmcfg_config_num, GFP_KERNEL);
136 if (pci_mmcfg_virt == NULL) {
137 printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n");
138 return 0;
139 }
140 107
141 for (i = 0; i < pci_mmcfg_config_num; ++i) { 108 list_for_each_entry(cfg, &pci_mmcfg_list, list) {
142 pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i]; 109 cfg->virt = mcfg_ioremap(cfg);
143 pci_mmcfg_virt[i].virt = mcfg_ioremap(&pci_mmcfg_config[i]); 110 if (!cfg->virt) {
144 if (!pci_mmcfg_virt[i].virt) { 111 printk(KERN_ERR PREFIX "can't map MMCONFIG at %pR\n",
145 printk(KERN_ERR "PCI: Cannot map mmconfig aperture for " 112 &cfg->res);
146 "segment %d\n",
147 pci_mmcfg_config[i].pci_segment);
148 pci_mmcfg_arch_free(); 113 pci_mmcfg_arch_free();
149 return 0; 114 return 0;
150 } 115 }
@@ -155,19 +120,12 @@ int __init pci_mmcfg_arch_init(void)
155 120
156void __init pci_mmcfg_arch_free(void) 121void __init pci_mmcfg_arch_free(void)
157{ 122{
158 int i; 123 struct pci_mmcfg_region *cfg;
159
160 if (pci_mmcfg_virt == NULL)
161 return;
162 124
163 for (i = 0; i < pci_mmcfg_config_num; ++i) { 125 list_for_each_entry(cfg, &pci_mmcfg_list, list) {
164 if (pci_mmcfg_virt[i].virt) { 126 if (cfg->virt) {
165 iounmap(pci_mmcfg_virt[i].virt + (pci_mmcfg_virt[i].cfg->start_bus_number << 20)); 127 iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
166 pci_mmcfg_virt[i].virt = NULL; 128 cfg->virt = NULL;
167 pci_mmcfg_virt[i].cfg = NULL;
168 } 129 }
169 } 130 }
170
171 kfree(pci_mmcfg_virt);
172 pci_mmcfg_virt = NULL;
173} 131}
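
Both mmconfig_32.c and mmconfig_64.c now share the same address arithmetic: PCI_MMCFG_BUS_OFFSET(bus) is bus << 20, i.e. each bus owns 1 MB of the aperture and each device/function a 4 KB block within it. A sketch of the offset computation:

#include <stdio.h>

typedef unsigned long long u64;

#define PCI_MMCFG_BUS_OFFSET(bus)	((u64)(bus) << 20)

/* byte offset of a function's 4 KB config block inside the aperture:
 * bus << 20 | devfn << 12 | register offset */
static u64 mmcfg_offset(unsigned bus, unsigned devfn, unsigned reg)
{
	return PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12) | reg;
}

int main(void)
{
	/* bus 3, device 2 function 1 (devfn 0x11), register 0x40 */
	printf("%#llx\n", mmcfg_offset(3, 0x11, 0x40));	/* 0x311040 */
	return 0;
}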
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c
index d8214dc03fa7..bee8d6ac2691 100644
--- a/arch/x86/tools/test_get_len.c
+++ b/arch/x86/tools/test_get_len.c
@@ -113,7 +113,7 @@ int main(int argc, char **argv)
113 char line[BUFSIZE], sym[BUFSIZE] = "<unknown>"; 113 char line[BUFSIZE], sym[BUFSIZE] = "<unknown>";
114 unsigned char insn_buf[16]; 114 unsigned char insn_buf[16];
115 struct insn insn; 115 struct insn insn;
116 int insns = 0, c; 116 int insns = 0;
117 int warnings = 0; 117 int warnings = 0;
118 118
119 parse_args(argc, argv); 119 parse_args(argc, argv);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b8e45f164e2a..2b26dd5930c6 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -27,7 +27,9 @@
27#include <linux/page-flags.h> 27#include <linux/page-flags.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29#include <linux/console.h> 29#include <linux/console.h>
30#include <linux/pci.h>
30 31
32#include <xen/xen.h>
31#include <xen/interface/xen.h> 33#include <xen/interface/xen.h>
32#include <xen/interface/version.h> 34#include <xen/interface/version.h>
33#include <xen/interface/physdev.h> 35#include <xen/interface/physdev.h>
@@ -1175,7 +1177,11 @@ asmlinkage void __init xen_start_kernel(void)
1175 add_preferred_console("xenboot", 0, NULL); 1177 add_preferred_console("xenboot", 0, NULL);
1176 add_preferred_console("tty", 0, NULL); 1178 add_preferred_console("tty", 0, NULL);
1177 add_preferred_console("hvc", 0, NULL); 1179 add_preferred_console("hvc", 0, NULL);
1180 } else {
1181 /* Make sure ACS will be enabled */
1182 pci_request_acs();
1178 } 1183 }
1184
1179 1185
1180 xen_raw_console_write("about to get started...\n"); 1186 xen_raw_console_write("about to get started...\n");
1181 1187
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 64757c0ba5fc..563d20504988 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -35,10 +35,10 @@
35 35
36cpumask_var_t xen_cpu_initialized_map; 36cpumask_var_t xen_cpu_initialized_map;
37 37
38static DEFINE_PER_CPU(int, resched_irq); 38static DEFINE_PER_CPU(int, xen_resched_irq);
39static DEFINE_PER_CPU(int, callfunc_irq); 39static DEFINE_PER_CPU(int, xen_callfunc_irq);
40static DEFINE_PER_CPU(int, callfuncsingle_irq); 40static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
41static DEFINE_PER_CPU(int, debug_irq) = -1; 41static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
42 42
43static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); 43static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
44static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); 44static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu)
103 NULL); 103 NULL);
104 if (rc < 0) 104 if (rc < 0)
105 goto fail; 105 goto fail;
106 per_cpu(resched_irq, cpu) = rc; 106 per_cpu(xen_resched_irq, cpu) = rc;
107 107
108 callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); 108 callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
109 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, 109 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu)
114 NULL); 114 NULL);
115 if (rc < 0) 115 if (rc < 0)
116 goto fail; 116 goto fail;
117 per_cpu(callfunc_irq, cpu) = rc; 117 per_cpu(xen_callfunc_irq, cpu) = rc;
118 118
119 debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); 119 debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
120 rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, 120 rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu)
122 debug_name, NULL); 122 debug_name, NULL);
123 if (rc < 0) 123 if (rc < 0)
124 goto fail; 124 goto fail;
125 per_cpu(debug_irq, cpu) = rc; 125 per_cpu(xen_debug_irq, cpu) = rc;
126 126
127 callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); 127 callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
128 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, 128 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu)
133 NULL); 133 NULL);
134 if (rc < 0) 134 if (rc < 0)
135 goto fail; 135 goto fail;
136 per_cpu(callfuncsingle_irq, cpu) = rc; 136 per_cpu(xen_callfuncsingle_irq, cpu) = rc;
137 137
138 return 0; 138 return 0;
139 139
140 fail: 140 fail:
141 if (per_cpu(resched_irq, cpu) >= 0) 141 if (per_cpu(xen_resched_irq, cpu) >= 0)
142 unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); 142 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
143 if (per_cpu(callfunc_irq, cpu) >= 0) 143 if (per_cpu(xen_callfunc_irq, cpu) >= 0)
144 unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); 144 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
145 if (per_cpu(debug_irq, cpu) >= 0) 145 if (per_cpu(xen_debug_irq, cpu) >= 0)
146 unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); 146 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
147 if (per_cpu(callfuncsingle_irq, cpu) >= 0) 147 if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
148 unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); 148 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
149 NULL);
149 150
150 return rc; 151 return rc;
151} 152}
@@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu)
349 current->state = TASK_UNINTERRUPTIBLE; 350 current->state = TASK_UNINTERRUPTIBLE;
350 schedule_timeout(HZ/10); 351 schedule_timeout(HZ/10);
351 } 352 }
352 unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); 353 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
353 unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); 354 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
354 unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); 355 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
355 unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); 356 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
356 xen_uninit_lock_cpu(cpu); 357 xen_uninit_lock_cpu(cpu);
357 xen_teardown_timer(cpu); 358 xen_teardown_timer(cpu);
358 359
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 36a5141108df..24ded31b5aec 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -120,14 +120,14 @@ struct xen_spinlock {
120 unsigned short spinners; /* count of waiting cpus */ 120 unsigned short spinners; /* count of waiting cpus */
121}; 121};
122 122
123static int xen_spin_is_locked(struct raw_spinlock *lock) 123static int xen_spin_is_locked(struct arch_spinlock *lock)
124{ 124{
125 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 125 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
126 126
127 return xl->lock != 0; 127 return xl->lock != 0;
128} 128}
129 129
130static int xen_spin_is_contended(struct raw_spinlock *lock) 130static int xen_spin_is_contended(struct arch_spinlock *lock)
131{ 131{
132 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 132 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
133 133
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
136 return xl->spinners != 0; 136 return xl->spinners != 0;
137} 137}
138 138
139static int xen_spin_trylock(struct raw_spinlock *lock) 139static int xen_spin_trylock(struct arch_spinlock *lock)
140{ 140{
141 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 141 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
142 u8 old = 1; 142 u8 old = 1;
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
181 __get_cpu_var(lock_spinners) = prev; 181 __get_cpu_var(lock_spinners) = prev;
182} 182}
183 183
184static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable) 184static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
185{ 185{
186 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 186 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
187 struct xen_spinlock *prev; 187 struct xen_spinlock *prev;
@@ -254,7 +254,7 @@ out:
254 return ret; 254 return ret;
255} 255}
256 256
257static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) 257static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
258{ 258{
259 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 259 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
260 unsigned timeout; 260 unsigned timeout;
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
291 spin_time_accum_total(start_spin); 291 spin_time_accum_total(start_spin);
292} 292}
293 293
294static void xen_spin_lock(struct raw_spinlock *lock) 294static void xen_spin_lock(struct arch_spinlock *lock)
295{ 295{
296 __xen_spin_lock(lock, false); 296 __xen_spin_lock(lock, false);
297} 297}
298 298
299static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags) 299static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
300{ 300{
301 __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); 301 __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
302} 302}
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
317 } 317 }
318} 318}
319 319
320static void xen_spin_unlock(struct raw_spinlock *lock) 320static void xen_spin_unlock(struct arch_spinlock *lock)
321{ 321{
322 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 322 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
323 323
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 9d1f853120d8..0d3f07cd1b5f 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -31,14 +31,14 @@
31#define NS_PER_TICK (1000000000LL / HZ) 31#define NS_PER_TICK (1000000000LL / HZ)
32 32
33/* runstate info updated by Xen */ 33/* runstate info updated by Xen */
34static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); 34static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
35 35
36/* snapshots of runstate info */ 36/* snapshots of runstate info */
37static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot); 37static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
38 38
39/* unused ns of stolen and blocked time */ 39/* unused ns of stolen and blocked time */
40static DEFINE_PER_CPU(u64, residual_stolen); 40static DEFINE_PER_CPU(u64, xen_residual_stolen);
41static DEFINE_PER_CPU(u64, residual_blocked); 41static DEFINE_PER_CPU(u64, xen_residual_blocked);
42 42
43/* return a consistent snapshot of 64-bit time/counter value */ 43/* return a consistent snapshot of 64-bit time/counter value */
44static u64 get64(const u64 *p) 44static u64 get64(const u64 *p)
@@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
79 79
80 BUG_ON(preemptible()); 80 BUG_ON(preemptible());
81 81
82 state = &__get_cpu_var(runstate); 82 state = &__get_cpu_var(xen_runstate);
83 83
84 /* 84 /*
85 * The runstate info is always updated by the hypervisor on 85 * The runstate info is always updated by the hypervisor on
@@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
97/* return true when a vcpu could run but has no real cpu to run on */ 97/* return true when a vcpu could run but has no real cpu to run on */
98bool xen_vcpu_stolen(int vcpu) 98bool xen_vcpu_stolen(int vcpu)
99{ 99{
100 return per_cpu(runstate, vcpu).state == RUNSTATE_runnable; 100 return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
101} 101}
102 102
103void xen_setup_runstate_info(int cpu) 103void xen_setup_runstate_info(int cpu)
104{ 104{
105 struct vcpu_register_runstate_memory_area area; 105 struct vcpu_register_runstate_memory_area area;
106 106
107 area.addr.v = &per_cpu(runstate, cpu); 107 area.addr.v = &per_cpu(xen_runstate, cpu);
108 108
109 if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, 109 if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
110 cpu, &area)) 110 cpu, &area))
@@ -122,7 +122,7 @@ static void do_stolen_accounting(void)
122 122
123 WARN_ON(state.state != RUNSTATE_running); 123 WARN_ON(state.state != RUNSTATE_running);
124 124
125 snap = &__get_cpu_var(runstate_snapshot); 125 snap = &__get_cpu_var(xen_runstate_snapshot);
126 126
127 /* work out how much time the VCPU has not been runn*ing* */ 127 /* work out how much time the VCPU has not been runn*ing* */
128 blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked]; 128 blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
@@ -133,24 +133,24 @@ static void do_stolen_accounting(void)
133 133
134 /* Add the appropriate number of ticks of stolen time, 134 /* Add the appropriate number of ticks of stolen time,
135 including any left-overs from last time. */ 135 including any left-overs from last time. */
136 stolen = runnable + offline + __get_cpu_var(residual_stolen); 136 stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
137 137
138 if (stolen < 0) 138 if (stolen < 0)
139 stolen = 0; 139 stolen = 0;
140 140
141 ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); 141 ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
142 __get_cpu_var(residual_stolen) = stolen; 142 __get_cpu_var(xen_residual_stolen) = stolen;
143 account_steal_ticks(ticks); 143 account_steal_ticks(ticks);
144 144
145 /* Add the appropriate number of ticks of blocked time, 145 /* Add the appropriate number of ticks of blocked time,
146 including any left-overs from last time. */ 146 including any left-overs from last time. */
147 blocked += __get_cpu_var(residual_blocked); 147 blocked += __get_cpu_var(xen_residual_blocked);
148 148
149 if (blocked < 0) 149 if (blocked < 0)
150 blocked = 0; 150 blocked = 0;
151 151
152 ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); 152 ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
153 __get_cpu_var(residual_blocked) = blocked; 153 __get_cpu_var(xen_residual_blocked) = blocked;
154 account_idle_ticks(ticks); 154 account_idle_ticks(ticks);
155} 155}
156 156
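
do_stolen_accounting() above converts nanoseconds of stolen or blocked time into whole scheduler ticks and carries the sub-tick remainder in a per-cpu residual, so rounding never loses time across samples. A standalone sketch of that carry, with iter_div_u64_rem() replaced by plain division and an assumed HZ of 250:

#include <stdio.h>

typedef unsigned long long u64;

#define NS_PER_TICK	(1000000000ULL / 250)	/* HZ = 250 assumed here */

static u64 residual;	/* stand-in for the per-cpu xen_residual_stolen */

/* account 'ns' of stolen time; returns whole ticks, keeps the remainder */
static unsigned account_stolen(u64 ns)
{
	u64 total = ns + residual;
	unsigned ticks = (unsigned)(total / NS_PER_TICK);

	residual = total % NS_PER_TICK;
	return ticks;
}

int main(void)
{
	/* two 6 ms samples at 4 ms/tick: remainders accumulate */
	printf("%u ticks\n", account_stolen(6000000));	/* 1 tick, 2 ms kept */
	printf("%u ticks\n", account_stolen(6000000));	/* 2 ticks, 0 ms kept */
	return 0;
}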