Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/Makefile                      |   2
-rw-r--r--  arch/x86/kernel/acpi/boot.c                   |   1
-rw-r--r--  arch/x86/kernel/amd_iommu.c                   |  51
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c              |   6
-rw-r--r--  arch/x86/kernel/apic.c                        |   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c     |  18
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h     |  17
-rw-r--r--  arch/x86/kernel/ds.c                          |  81
-rw-r--r--  arch/x86/kernel/early-quirks.c                |  18
-rw-r--r--  arch/x86/kernel/es7000_32.c                   |   9
-rw-r--r--  arch/x86/kernel/hpet.c                        |   4
-rw-r--r--  arch/x86/kernel/i387.c                        |   2
-rw-r--r--  arch/x86/kernel/io_apic.c                     |  62
-rw-r--r--  arch/x86/kernel/kvmclock.c                    |   2
-rw-r--r--  arch/x86/kernel/mpparse.c                     |   3
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c          |   3
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c              |   2
-rw-r--r--  arch/x86/kernel/pci-gart_64.c                 |   2
-rw-r--r--  arch/x86/kernel/reboot.c                      |   9
-rw-r--r--  arch/x86/kernel/setup.c                       |  14
-rw-r--r--  arch/x86/kernel/smpboot.c                     |   2
-rw-r--r--  arch/x86/kernel/tsc_sync.c                    |   4
-rw-r--r--  arch/x86/kernel/vmi_32.c                      |  16
-rw-r--r--  arch/x86/kernel/xsave.c                       |   2
24 files changed, 194 insertions(+), 138 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e489ff9cb3e2..b62a7667828e 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= process.o
 obj-y				+= i387.o xsave.o
 obj-y				+= ptrace.o
-obj-y				+= ds.o
+obj-$(CONFIG_X86_DS)		+= ds.o
 obj-$(CONFIG_X86_32)		+= tls.o
 obj-$(CONFIG_IA32_EMULATION)	+= tls.o
 obj-y				+= step.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8c1f76abae9e..4c51a2f8fd31 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1343,7 +1343,6 @@ static void __init acpi_process_madt(void)
		error = acpi_parse_madt_ioapic_entries();
		if (!error) {
			acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
-			acpi_irq_balance_set(NULL);
			acpi_ioapic = 1;

			smp_found_config = 1;
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 331b318304eb..a7b6dec6fc3f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

-	iommu->need_sync = 0;
-
	spin_lock_irqsave(&iommu->lock, flags);

+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
	ret = __iommu_queue_command(iommu, &cmd);

	if (ret)
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)

	ret = iommu_queue_command(iommu, &cmd);

-	iommu->need_sync = 1;
-
	return ret;
 }

@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,

	ret = iommu_queue_command(iommu, &cmd);

-	iommu->need_sync = 1;
-
	return ret;
 }

@@ -343,7 +344,7 @@ static int iommu_map(struct protection_domain *dom,
	u64 __pte, *pte, *page;

	bus_addr  = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);

	/* only support 512GB address spaces for now */
	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -537,7 +538,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
	address >>= PAGE_SHIFT;
	iommu_area_free(dom->bitmap, address, pages);

-	if (address + pages >= dom->next_bit)
+	if (address >= dom->next_bit)
		dom->need_flush = true;
 }

@@ -599,7 +600,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++i) {
+		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }

 /*****************************************************************************
@@ -858,6 +857,9 @@ static int get_device_resources(struct device *dev,
			print_devid(_bdf, 1);
	}

+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
+
	return 1;
 }

@@ -908,7 +910,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
	if (address >= dom->aperture_size)
		return;

-	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);
@@ -920,8 +922,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,

 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
@@ -981,7 +983,8 @@ static void __unmap_single(struct amd_iommu *iommu,
	dma_addr_t i, start;
	unsigned int pages;

-	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+	if ((dma_addr == bad_dma_address) ||
+	    (dma_addr + size > dma_dom->aperture_size))
		return;

	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -1031,8 +1034,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
	if (addr == bad_dma_address)
		goto out;

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

 out:
	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1060,8 +1062,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,

	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1127,8 +1128,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
			goto unmap;
	}

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

 out:
	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1173,8 +1173,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		s->dma_address = s->dma_length = 0;
	}

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1225,8 +1224,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
		goto out;
	}

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

 out:
	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1257,8 +1255,7 @@ static void free_coherent(struct device *dev, size_t size,

	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 0cdcda35a05f..30ae2701b3df 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -121,7 +121,7 @@ u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
 unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
-int amd_iommu_isolate;			/* if 1, device isolation is enabled */
+int amd_iommu_isolate = 1;		/* if 1, device isolation is enabled */
 bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
@@ -1213,7 +1213,9 @@ static int __init parse_amd_iommu_options(char *str)
	for (; *str; ++str) {
		if (strncmp(str, "isolate", 7) == 0)
			amd_iommu_isolate = 1;
-		if (strncmp(str, "fullflush", 11) == 0)
+		if (strncmp(str, "share", 5) == 0)
+			amd_iommu_isolate = 0;
+		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
	}

diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 1d410ee4b064..fc26ff390f14 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1309,7 +1309,7 @@ void enable_x2apic(void)
	}
 }

-void enable_IR_x2apic(void)
+void __init enable_IR_x2apic(void)
 {
 #ifdef CONFIG_INTR_REMAP
	int ret;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index d3dcd58b87cd..7f05f44b97e9 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -115,9 +115,20 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
	u32 i = 0;

	if (cpu_family == CPU_HW_PSTATE) {
-		rdmsr(MSR_PSTATE_STATUS, lo, hi);
-		i = lo & HW_PSTATE_MASK;
-		data->currpstate = i;
+		if (data->currpstate == HW_PSTATE_INVALID) {
+			/* read (initial) hw pstate if not yet set */
+			rdmsr(MSR_PSTATE_STATUS, lo, hi);
+			i = lo & HW_PSTATE_MASK;
+
+			/*
+			 * a workaround for family 11h erratum 311 might cause
+			 * an "out-of-range Pstate if the core is in Pstate-0
+			 */
+			if (i >= data->numps)
+				data->currpstate = HW_PSTATE_0;
+			else
+				data->currpstate = i;
+		}
		return 0;
	}
	do {
@@ -1121,6 +1132,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
	}

	data->cpu = pol->cpu;
+	data->currpstate = HW_PSTATE_INVALID;

	if (powernow_k8_cpu_init_acpi(data)) {
		/*
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index ab48cfed4d96..65cfb5d7f77f 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -5,6 +5,19 @@
  *  http://www.gnu.org/licenses/gpl.html
  */

+
+enum pstate {
+	HW_PSTATE_INVALID = 0xff,
+	HW_PSTATE_0 = 0,
+	HW_PSTATE_1 = 1,
+	HW_PSTATE_2 = 2,
+	HW_PSTATE_3 = 3,
+	HW_PSTATE_4 = 4,
+	HW_PSTATE_5 = 5,
+	HW_PSTATE_6 = 6,
+	HW_PSTATE_7 = 7,
+};
+
 struct powernow_k8_data {
	unsigned int cpu;

@@ -23,7 +36,9 @@ struct powernow_k8_data {
	u32 exttype; /* extended interface = 1 */

	/* keep track of the current fid / vid or pstate */
-	u32 currvid, currfid, currpstate;
+	u32 currvid;
+	u32 currfid;
+	enum pstate currpstate;

	/* the powernow_table includes all frequency and vid/fid pairings:
	 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 2b69994fd3a8..a2d1176c38ee 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -21,8 +21,6 @@
  */


-#ifdef CONFIG_X86_DS
-
 #include <asm/ds.h>

 #include <linux/errno.h>
@@ -211,14 +209,15 @@ static DEFINE_PER_CPU(struct ds_context *, system_context);
 static inline struct ds_context *ds_get_context(struct task_struct *task)
 {
	struct ds_context *context;
+	unsigned long irq;

-	spin_lock(&ds_lock);
+	spin_lock_irqsave(&ds_lock, irq);

	context = (task ? task->thread.ds_ctx : this_system_context);
	if (context)
		context->count++;

-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);

	return context;
 }
@@ -226,18 +225,16 @@ static inline struct ds_context *ds_get_context(struct task_struct *task)
 /*
  * Same as ds_get_context, but allocates the context and it's DS
  * structure, if necessary; returns NULL; if out of memory.
- *
- * pre: requires ds_lock to be held
  */
 static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 {
	struct ds_context **p_context =
		(task ? &task->thread.ds_ctx : &this_system_context);
	struct ds_context *context = *p_context;
+	unsigned long irq;

	if (!context) {
		context = kzalloc(sizeof(*context), GFP_KERNEL);
-
		if (!context)
			return NULL;

@@ -247,18 +244,27 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
			return NULL;
		}

-		*p_context = context;
+		spin_lock_irqsave(&ds_lock, irq);

-		context->this = p_context;
-		context->task = task;
+		if (*p_context) {
+			kfree(context->ds);
+			kfree(context);
+
+			context = *p_context;
+		} else {
+			*p_context = context;

-		if (task)
-			set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
+			context->this = p_context;
+			context->task = task;

-		if (!task || (task == current))
-			wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
+			if (task)
+				set_tsk_thread_flag(task, TIF_DS_AREA_MSR);

-		get_tracer(task);
+			if (!task || (task == current))
+				wrmsrl(MSR_IA32_DS_AREA,
+				       (unsigned long)context->ds);
+		}
+		spin_unlock_irqrestore(&ds_lock, irq);
	}

	context->count++;
@@ -272,10 +278,12 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
  */
 static inline void ds_put_context(struct ds_context *context)
 {
+	unsigned long irq;
+
	if (!context)
		return;

-	spin_lock(&ds_lock);
+	spin_lock_irqsave(&ds_lock, irq);

	if (--context->count)
		goto out;
@@ -297,7 +305,7 @@ static inline void ds_put_context(struct ds_context *context)
	kfree(context->ds);
	kfree(context);
  out:
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 }


@@ -368,6 +376,7 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
	struct ds_context *context;
	unsigned long buffer, adj;
	const unsigned long alignment = (1 << 3);
+	unsigned long irq;
	int error = 0;

	if (!ds_cfg.sizeof_ds)
@@ -382,25 +391,27 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
		return -EOPNOTSUPP;


-	spin_lock(&ds_lock);
-
-	if (!check_tracer(task))
-		return -EPERM;
-
-	error = -ENOMEM;
	context = ds_alloc_context(task);
	if (!context)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ds_lock, irq);
+
+	error = -EPERM;
+	if (!check_tracer(task))
		goto out_unlock;

+	get_tracer(task);
+
	error = -EALREADY;
	if (context->owner[qual] == current)
-		goto out_unlock;
+		goto out_put_tracer;
	error = -EPERM;
	if (context->owner[qual] != NULL)
-		goto out_unlock;
+		goto out_put_tracer;
	context->owner[qual] = current;

-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);


	error = -ENOMEM;
@@ -448,10 +459,17 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
  out_release:
	context->owner[qual] = NULL;
	ds_put_context(context);
+	put_tracer(task);
+	return error;
+
+ out_put_tracer:
+	spin_unlock_irqrestore(&ds_lock, irq);
+	ds_put_context(context);
+	put_tracer(task);
	return error;

  out_unlock:
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(context);
	return error;
 }
@@ -801,13 +819,21 @@ static const struct ds_configuration ds_cfg_var = {
	.sizeof_ds    = sizeof(long) * 12,
	.sizeof_field = sizeof(long),
	.sizeof_rec[ds_bts]   = sizeof(long) * 3,
+#ifdef __i386__
	.sizeof_rec[ds_pebs]  = sizeof(long) * 10
+#else
+	.sizeof_rec[ds_pebs]  = sizeof(long) * 18
+#endif
 };
 static const struct ds_configuration ds_cfg_64 = {
	.sizeof_ds    = 8 * 12,
	.sizeof_field = 8,
	.sizeof_rec[ds_bts]   = 8 * 3,
+#ifdef __i386__
	.sizeof_rec[ds_pebs]  = 8 * 10
+#else
+	.sizeof_rec[ds_pebs]  = 8 * 18
+#endif
 };

 static inline void
@@ -861,4 +887,3 @@ void ds_free(struct ds_context *context)
	while (leftovers--)
		ds_put_context(context);
 }
-#endif /* CONFIG_X86_DS */
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 3ce029ffaa55..1b894b72c0f5 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -188,20 +188,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif

-#ifdef CONFIG_DMAR
-static void __init intel_g33_dmar(int num, int slot, int func)
-{
-	struct acpi_table_header *dmar_tbl;
-	acpi_status status;
-
-	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
-	if (ACPI_SUCCESS(status)) {
-		printk(KERN_INFO "BIOS BUG: DMAR advertised on Intel G31/G33 chipset -- ignoring\n");
-		dmar_disabled = 1;
-	}
-}
-#endif
-
 #define QFLAG_APPLY_ONCE	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -225,10 +211,6 @@ static struct chipset early_qrk[] __initdata = {
	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
-#ifdef CONFIG_DMAR
-	{ PCI_VENDOR_ID_INTEL, 0x29c0,
-	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, intel_g33_dmar },
-#endif
	{}
 };

diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
index f454c78fcef6..0aa2c443d600 100644
--- a/arch/x86/kernel/es7000_32.c
+++ b/arch/x86/kernel/es7000_32.c
@@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
	struct acpi_table_header *header = NULL;
	int i = 0;
-	acpi_size tbl_size;

-	while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
+	while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
		if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
			struct oem_table *t = (struct oem_table *)header;

			oem_addrX = t->OEMTableAddr;
			oem_size = t->OEMTableSize;
-			early_acpi_os_unmap_memory(header, tbl_size);

			*oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
								    oem_size);
			return 0;
		}
-		early_acpi_os_unmap_memory(header, tbl_size);
	}
	return -1;
 }

 void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
-	if (!oem_addr)
-		return;
-
-	__acpi_unmap_table((char *)oem_addr, oem_size);
 }
 #endif

diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 77017e834cf7..067d8de913f6 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -322,7 +322,7 @@ static int hpet_next_event(unsigned long delta,
	 * what we wrote hit the chip before we compare it to the
	 * counter.
	 */
-	WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
+	WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);

	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
@@ -445,7 +445,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 {

	if (request_irq(dev->irq, hpet_interrupt_handler,
-			IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
+			IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
		return -1;

	disable_irq(dev->irq);
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 1f20608d4ca8..b0f61f0dcd0a 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -58,7 +58,7 @@ void __cpuinit mxcsr_feature_mask_init(void)
	stts();
 }

-void __init init_thread_xstate(void)
+void __cpuinit init_thread_xstate(void)
 {
	if (!HAVE_HWFP) {
		xstate_size = sizeof(struct i387_soft_struct);
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 7a3f2028e2eb..9043251210fb 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -1140,6 +1140,20 @@ static void __clear_irq_vector(int irq)

	cfg->vector = 0;
	cpus_clear(cfg->domain);
+
+	if (likely(!cfg->move_in_progress))
+		return;
+	cpus_and(mask, cfg->old_domain, cpu_online_map);
+	for_each_cpu_mask_nr(cpu, mask) {
+		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+								vector++) {
+			if (per_cpu(vector_irq, cpu)[vector] != irq)
+				continue;
+			per_cpu(vector_irq, cpu)[vector] = -1;
+			break;
+		}
+	}
+	cfg->move_in_progress = 0;
 }

 void __setup_vector_irq(int cpu)
@@ -3594,27 +3608,7 @@ int __init io_apic_get_redir_entries (int ioapic)

 int __init probe_nr_irqs(void)
 {
-	int idx;
-	int nr = 0;
-#ifndef CONFIG_XEN
-	int nr_min = 32;
-#else
-	int nr_min = NR_IRQS;
-#endif
-
-	for (idx = 0; idx < nr_ioapics; idx++)
-		nr += io_apic_get_redir_entries(idx) + 1;
-
-	/* double it for hotplug and msi and nmi */
-	nr <<= 1;
-
-	/* something wrong ? */
-	if (nr < nr_min)
-		nr = nr_min;
-	if (WARN_ON(nr > NR_IRQS))
-		nr = NR_IRQS;
-
-	return nr;
+	return NR_IRQS;
 }

 /* --------------------------------------------------------------------------
@@ -3761,7 +3755,9 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 void __init setup_ioapic_dest(void)
 {
	int pin, ioapic, irq, irq_entry;
+	struct irq_desc *desc;
	struct irq_cfg *cfg;
+	cpumask_t mask;

	if (skip_ioapic_setup == 1)
		return;
@@ -3778,16 +3774,30 @@ void __init setup_ioapic_dest(void)
		 * cpu is online.
		 */
		cfg = irq_cfg(irq);
-		if (!cfg->vector)
+		if (!cfg->vector) {
			setup_IO_APIC_irq(ioapic, pin, irq,
					  irq_trigger(irq_entry),
					  irq_polarity(irq_entry));
+			continue;
+
+		}
+
+		/*
+		 * Honour affinities which have been set in early boot
+		 */
+		desc = irq_to_desc(irq);
+		if (desc->status &
+		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
+			mask = desc->affinity;
+		else
+			mask = TARGET_CPUS;
+
 #ifdef CONFIG_INTR_REMAP
-		else if (intr_remapping_enabled)
-			set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
-#endif
+		if (intr_remapping_enabled)
+			set_ir_ioapic_affinity_irq(irq, mask);
		else
-			set_ioapic_affinity_irq(irq, TARGET_CPUS);
+#endif
+			set_ioapic_affinity_irq(irq, mask);
	}

 }
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 774ac4991568..e169ae9b6a62 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt)
 }

 #ifdef CONFIG_X86_LOCAL_APIC
-static void kvm_setup_secondary_clock(void)
+static void __cpuinit kvm_setup_secondary_clock(void)
 {
	/*
	 * Now that the first cpu already had this clocksource initialized,
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index f98f4e1dba09..0f4c1fd5a1f4 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
		printk(KERN_INFO "Using ACPI for processor (LAPIC) "
		       "configuration information\n");

+	if (!mpf)
+		return;
+
	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
	       mpf->mpf_specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 0e9f1982b1dd..95777b0faa73 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -7,7 +7,8 @@

 #include <asm/paravirt.h>

-static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static inline void
+default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
	__raw_spin_lock(lock);
 }
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index e1e731d78f38..d28bbdc35e4e 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1567,7 +1567,7 @@ static int __init calgary_parse_options(char *p)
			++p;
			if (*p == '\0')
				break;
-			bridge = simple_strtol(p, &endp, 0);
+			bridge = simple_strtoul(p, &endp, 0);
			if (p == endp)
				break;

diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index a42b02b4df68..ba7ad83e20a8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
+	if (offset >= next_bit)
+		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }

diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 724adfc63cb9..cc5a2545dd41 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -169,6 +169,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
			DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
		},
	},
+	{	/* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
+		.callback = set_bios_reboot,
+		.ident = "Dell OptiPlex 330",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
+			DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
+		},
+	},
	{	/* Handle problems with rebooting on Dell 2400's */
		.callback = set_bios_reboot,
		.ident = "Dell PowerEdge 2400",
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0fa6790c1dd3..bdec76e55594 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -764,7 +764,7 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
		.callback = dmi_low_memory_corruption,
		.ident = "Phoenix BIOS",
		.matches = {
-			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
		},
	},
 #endif
@@ -794,6 +794,9 @@ void __init setup_arch(char **cmdline_p)
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif

+	/* VMI may relocate the fixmap; do this before touching ioremap area */
+	vmi_init();
+
	early_cpu_init();
	early_ioremap_init();

@@ -880,13 +883,8 @@ void __init setup_arch(char **cmdline_p)
	check_efer();
 #endif

-#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
-	/*
-	 * Must be before kernel pagetables are setup
-	 * or fixmap area is touched.
-	 */
-	vmi_init();
-#endif
+	/* Must be before kernel pagetables are setup */
+	vmi_activate();

	/* after early param, so could get panic from serial */
	reserve_early_setup_data();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7b1093397319..f71f96fc9e62 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -294,9 +294,7 @@ static void __cpuinit start_secondary(void *unused)
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
-#ifdef CONFIG_VMI
	vmi_bringup();
-#endif
	cpu_init();
	preempt_disable();
	smp_callin();
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9ffb01c31c40..1c0dfbca87c1 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
	cycles_t start, now, prev, end;
	int i;

+	rdtsc_barrier();
	start = get_cycles();
+	rdtsc_barrier();
	/*
	 * The measurement runs for 20 msecs:
	 */
@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
		 */
		__raw_spin_lock(&sync_lock);
		prev = last_tsc;
+		rdtsc_barrier();
		now = get_cycles();
+		rdtsc_barrier();
		last_tsc = now;
		__raw_spin_unlock(&sync_lock);

67 71
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 8b6c393ab9fd..22fd6577156a 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -960,8 +960,6 @@ static inline int __init activate_vmi(void)

 void __init vmi_init(void)
 {
-	unsigned long flags;
-
	if (!vmi_rom)
		probe_vmi_rom();
	else
@@ -973,13 +971,21 @@ void __init vmi_init(void)

	reserve_top_address(-vmi_rom->virtual_top);

-	local_irq_save(flags);
-	activate_vmi();
-
 #ifdef CONFIG_X86_IO_APIC
	/* This is virtual hardware; timer routing is wired correctly */
	no_timer_check = 1;
 #endif
+}
+
+void vmi_activate(void)
+{
+	unsigned long flags;
+
+	if (!vmi_rom)
+		return;
+
+	local_irq_save(flags);
+	activate_vmi();
	local_irq_restore(flags & X86_EFLAGS_IF);
 }

985 991
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index b13acb75e822..15c3e6999182 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -310,7 +310,7 @@ static void __init setup_xstate_init(void)
 /*
  * Enable and initialize the xsave feature.
  */
-void __init xsave_cntxt_init(void)
+void __ref xsave_cntxt_init(void)
 {
	unsigned int eax, ebx, ecx, edx;
