author		Thomas Gleixner <tglx@linutronix.de>	2016-01-12 05:01:12 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2016-01-12 05:01:12 -0500
commit		1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch)
tree		44db563f64cf5f8d62af8f99a61e2b248c44ea3a /arch/x86
parent		03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff)
parent		f9eccf24615672896dc13251410c3f2f33a14f95 (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:

 - Fix the vt8500 timer, which could lead to a system lockup when
   dealing with too small a delta (Roman Volkov)

 - Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with
   COMPILE_TEST (Daniel Lezcano)

 - Prevent timers that use the 'iomem' API from being compiled when the
   architecture does not have HAS_IOMEM set (Richard Weinberger)
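For context on the vt8500 fix: a one-shot clockevent that programs a match value only `delta` ticks ahead can wedge the machine if `delta` is smaller than the time needed to program the comparator, because the counter has already passed the match by the time it takes effect. A minimal sketch of the usual defensive pattern, with hypothetical register helpers (illustrative only, not the actual vt8500 driver code):

    #include <linux/clockchips.h>
    #include <linux/errno.h>

    #define MIN_DELTA 16	/* assumed smallest safely programmable delta, in ticks */

    /* hypothetical MMIO accessors for a count-up-to-match comparator */
    extern u32 example_read_counter(void);
    extern void example_write_match(u32 value);

    static int example_set_next_event(unsigned long delta,
                                      struct clock_event_device *evt)
    {
            u32 next;

            if (delta < MIN_DELTA)
                    return -ETIME;	/* let the clockevents core retry with a larger delta */

            next = example_read_counter() + delta;
            example_write_match(next);

            /*
             * If the counter already raced past the match, report failure
             * instead of waiting a full counter wraparound (the lockup
             * the fix above addresses).
             */
            return (s32)(next - example_read_counter()) > 0 ? 0 : -ETIME;
    }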
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/boot/boot.h			|  1
-rw-r--r--	arch/x86/boot/video-mode.c		|  2
-rw-r--r--	arch/x86/boot/video.c			|  2
-rw-r--r--	arch/x86/entry/entry_64.S		| 19
-rw-r--r--	arch/x86/include/asm/page_types.h	| 16
-rw-r--r--	arch/x86/include/asm/pgtable_types.h	| 14
-rw-r--r--	arch/x86/include/asm/x86_init.h		|  1
-rw-r--r--	arch/x86/kernel/cpu/microcode/core.c	|  1
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	|  2
-rw-r--r--	arch/x86/kernel/cpu/perf_event.h	|  5
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	|  2
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_cqm.c	|  2
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_lbr.c	|  4
-rw-r--r--	arch/x86/kernel/irq_work.c		|  2
-rw-r--r--	arch/x86/kernel/pmem.c			| 12
-rw-r--r--	arch/x86/kernel/setup.c			|  2
-rw-r--r--	arch/x86/kernel/signal.c		| 17
-rw-r--r--	arch/x86/kernel/smpboot.c		|  9
-rw-r--r--	arch/x86/kvm/cpuid.h			|  8
-rw-r--r--	arch/x86/kvm/mtrr.c			| 25
-rw-r--r--	arch/x86/kvm/svm.c			|  4
-rw-r--r--	arch/x86/kvm/vmx.c			| 12
-rw-r--r--	arch/x86/kvm/x86.c			| 73
-rw-r--r--	arch/x86/mm/dump_pagetables.c		|  2
-rw-r--r--	arch/x86/mm/mpx.c			|  6
-rw-r--r--	arch/x86/pci/bus_numa.c			| 13
-rw-r--r--	arch/x86/um/signal.c			| 18
-rw-r--r--	arch/x86/xen/mmu.c			|  9
-rw-r--r--	arch/x86/xen/suspend.c			| 20
29 files changed, 174 insertions, 129 deletions
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 0033e96c3f09..9011a88353de 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -23,7 +23,6 @@
 #include <stdarg.h>
 #include <linux/types.h>
 #include <linux/edd.h>
-#include <asm/boot.h>
 #include <asm/setup.h>
 #include "bitops.h"
 #include "ctype.h"
diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c
index aa8a96b052e3..95c7a818c0ed 100644
--- a/arch/x86/boot/video-mode.c
+++ b/arch/x86/boot/video-mode.c
@@ -19,6 +19,8 @@
19#include "video.h" 19#include "video.h"
20#include "vesa.h" 20#include "vesa.h"
21 21
22#include <uapi/asm/boot.h>
23
22/* 24/*
23 * Common variables 25 * Common variables
24 */ 26 */
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index 05111bb8d018..77780e386e9b 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -13,6 +13,8 @@
  * Select video mode
  */
 
+#include <uapi/asm/boot.h>
+
 #include "boot.h"
 #include "video.h"
 #include "vesa.h"
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 53616ca03244..a55697d19824 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -509,6 +509,17 @@ END(irq_entries_start)
 	 * tracking that we're in kernel mode.
 	 */
 	SWAPGS
+
+	/*
+	 * We need to tell lockdep that IRQs are off. We can't do this until
+	 * we fix gsbase, and we should do it before enter_from_user_mode
+	 * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
+	 * the simplest way to handle it is to just call it twice if
+	 * we enter from user mode. There's no reason to optimize this since
+	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
+	 */
+	TRACE_IRQS_OFF
+
 #ifdef CONFIG_CONTEXT_TRACKING
 	call enter_from_user_mode
 #endif
@@ -1049,12 +1060,18 @@ ENTRY(error_entry)
 	SWAPGS
 
 .Lerror_entry_from_usermode_after_swapgs:
+	/*
+	 * We need to tell lockdep that IRQs are off. We can't do this until
+	 * we fix gsbase, and we should do it before enter_from_user_mode
+	 * (which can take locks).
+	 */
+	TRACE_IRQS_OFF
 #ifdef CONFIG_CONTEXT_TRACKING
 	call enter_from_user_mode
 #endif
+	ret
 
 .Lerror_entry_done:
-
 	TRACE_IRQS_OFF
 	ret
 
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index c5b7fb2774d0..cc071c6f7d4d 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -9,19 +9,21 @@
 #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
+#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
+
+#define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
+
 #define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
 #define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
+/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
    virtual addresses are 32-bits but physical addresses are larger
    (ie, 32-bit PAE). */
 #define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-
-#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
-
-#define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
-#define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
+#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
 
 #define HPAGE_SHIFT		PMD_SHIFT
 #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
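The two new PHYSICAL_*_PAGE_MASK definitions rely on the sign-extension trick the comment describes: on 32-bit PAE, `unsigned long` is 32 bits while `phys_addr_t` is 64, so the mask must be sign-extended rather than zero-extended to keep the high physical-address bits. A standalone illustration in userspace C mimicking that case (the widths and constants are examples, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;	/* 64-bit physical addresses, as under PAE */
    #define PHYSICAL_MASK ((phys_addr_t)((1ULL << 36) - 1))	/* assume 36 phys bits */

    int main(void)
    {
            uint32_t pmd_page_mask = ~((1u << 21) - 1);	/* 0xffe00000: 2 MiB page mask */

            /* Zero-extension (no signed cast) silently drops physical bits 32-35. */
            phys_addr_t wrong = (phys_addr_t)pmd_page_mask & PHYSICAL_MASK;

            /* Sign-extension (the (signed long) cast above) keeps them set. */
            phys_addr_t right = (phys_addr_t)(int32_t)pmd_page_mask & PHYSICAL_MASK;

            printf("wrong: %#llx\n", (unsigned long long)wrong);	/* 0xffe00000  */
            printf("right: %#llx\n", (unsigned long long)right);	/* 0xfffe00000 */
            return 0;
    }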
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index dd5b0aa9dd2f..a471cadb9630 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 static inline pudval_t pud_pfn_mask(pud_t pud)
 {
 	if (native_pud_val(pud) & _PAGE_PSE)
-		return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+		return PHYSICAL_PUD_PAGE_MASK;
 	else
 		return PTE_PFN_MASK;
 }
 
 static inline pudval_t pud_flags_mask(pud_t pud)
 {
-	if (native_pud_val(pud) & _PAGE_PSE)
-		return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
-	else
-		return ~PTE_PFN_MASK;
+	return ~pud_pfn_mask(pud);
 }
 
 static inline pudval_t pud_flags(pud_t pud)
@@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud)
 static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
 {
 	if (native_pmd_val(pmd) & _PAGE_PSE)
-		return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+		return PHYSICAL_PMD_PAGE_MASK;
 	else
 		return PTE_PFN_MASK;
 }
 
 static inline pmdval_t pmd_flags_mask(pmd_t pmd)
 {
-	if (native_pmd_val(pmd) & _PAGE_PSE)
-		return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
-	else
-		return ~PTE_PFN_MASK;
+	return ~pmd_pfn_mask(pmd);
 }
 
 static inline pmdval_t pmd_flags(pmd_t pmd)
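The rewritten pud_flags_mask()/pmd_flags_mask() encode an invariant the old open-coded versions obscured: the flags mask is exactly the complement of the PFN mask, so every bit of an entry belongs to exactly one of the two views. A tiny self-contained check of that property (illustrative masks, not the real page-table layout):

    #include <assert.h>
    #include <stdint.h>

    #define FAKE_PSE      (1ULL << 7)		/* stand-in "large page" bit  */
    #define FAKE_PFN_MASK 0x000ffffffffff000ULL	/* stand-in PFN bit positions */

    static uint64_t pfn_mask(uint64_t entry)
    {
            /* Large pages use a coarser mask: the low 21 bits are not PFN bits. */
            return (entry & FAKE_PSE) ? (FAKE_PFN_MASK & ~0x1fffffULL) : FAKE_PFN_MASK;
    }

    static uint64_t flags_mask(uint64_t entry)
    {
            return ~pfn_mask(entry);	/* the whole cleanup: flags are "not PFN" */
    }

    int main(void)
    {
            uint64_t entry = 0x80000000ffe00f67ULL | FAKE_PSE;

            /* Every bit lands in exactly one half, none in both. */
            assert(((entry & pfn_mask(entry)) | (entry & flags_mask(entry))) == entry);
            assert((pfn_mask(entry) & flags_mask(entry)) == 0);
            return 0;
    }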
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 48d34d28f5a6..cd0fc0cc78bc 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_PLATFORM_H
 #define _ASM_X86_PLATFORM_H
 
-#include <asm/pgtable_types.h>
 #include <asm/bootparam.h>
 
 struct mpc_bus;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 7fc27f1cca58..b3e94ef461fd 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -698,3 +698,4 @@ int __init microcode_init(void)
 	return error;
 
 }
+late_initcall(microcode_init);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4562cf070c27..2bf79d7c97df 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -5,7 +5,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 499f533dd3cc..d0e35ebb2adb 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -5,7 +5,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
@@ -387,7 +387,7 @@ struct cpu_hw_events {
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
 	__EVENT_CONSTRAINT(code, n, \
-			  INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
 
 
@@ -627,6 +627,7 @@ struct x86_perf_task_context {
 	u64 lbr_from[MAX_LBR_ENTRIES];
 	u64 lbr_to[MAX_LBR_ENTRIES];
 	u64 lbr_info[MAX_LBR_ENTRIES];
+	int tos;
 	int lbr_callstack_users;
 	int lbr_stack_state;
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f63360be2238..e2a430021e46 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8ed391..a316ca96f1b6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
 	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.target);
+		return perf_cgroup_from_task(event->hw.target, event->ctx);
 
 	return event->cgrp;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index bfd0b717e944..659f01e165d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 	}
 
 	mask = x86_pmu.lbr_nr - 1;
-	tos = intel_pmu_lbr_tos();
+	tos = task_ctx->tos;
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
 		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	wrmsrl(x86_pmu.lbr_tos, tos);
 	task_ctx->lbr_stack_state = LBR_NONE;
 }
 
252 253
@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
270 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) 271 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
271 rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); 272 rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
272 } 273 }
274 task_ctx->tos = tos;
273 task_ctx->lbr_stack_state = LBR_VALID; 275 task_ctx->lbr_stack_state = LBR_VALID;
274} 276}
275 277
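The tos field added in this series fixes a subtle restore bug: the old code restored saved LBR entries relative to the *current* hardware top-of-stack, which may have moved since the save, scattering the entries into the wrong ring-buffer slots. A simplified model of the save/restore pairing (illustrative arrays standing in for the LBR FROM MSRs):

    #include <stdint.h>

    #define NR_ENTRIES 16	/* power of two, like x86_pmu.lbr_nr */

    struct lbr_ctx {
            uint64_t from[NR_ENTRIES];
            int tos;		/* saved together with the entries */
    };

    static uint64_t hw_from[NR_ENTRIES];	/* stand-in for the FROM MSRs */

    static void lbr_save(struct lbr_ctx *ctx, int hw_tos)
    {
            unsigned mask = NR_ENTRIES - 1;
            for (int i = 0; i < NR_ENTRIES; i++)
                    ctx->from[i] = hw_from[(hw_tos - i) & mask];
            ctx->tos = hw_tos;	/* remember where the stack top was */
    }

    static void lbr_restore(const struct lbr_ctx *ctx)
    {
            unsigned mask = NR_ENTRIES - 1;
            /* Index from the saved TOS, not the current hardware TOS. */
            for (int i = 0; i < NR_ENTRIES; i++)
                    hw_from[(ctx->tos - i) & mask] = ctx->from[i];
            /* the real code then does wrmsrl(x86_pmu.lbr_tos, ctx->tos) */
    }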
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index dc5fa6a1e8d6..3512ba607361 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -1,7 +1,7 @@
 /*
  * x86 specific code for irq_work
  *
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/kernel.h>
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 4f00b63d7ff3..14415aff1813 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -4,10 +4,22 @@
  */
 #include <linux/platform_device.h>
 #include <linux/module.h>
+#include <linux/ioport.h>
+
+static int found(u64 start, u64 end, void *data)
+{
+	return 1;
+}
 
 static __init int register_e820_pmem(void)
 {
+	char *pmem = "Persistent Memory (legacy)";
 	struct platform_device *pdev;
+	int rc;
+
+	rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
+	if (rc <= 0)
+		return 0;
 
 	/*
 	 * See drivers/nvdimm/e820.c for the implementation, this is
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 29db25f9a745..d2bbe343fda7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p)
 	if (efi_enabled(EFI_BOOT))
 		efi_apply_memmap_quirks();
 #endif
-
-	microcode_init();
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index b7ffb7c00075..cb6282c3638f 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 	signal_setup_done(failed, ksig, stepping);
 }
 
-#ifdef CONFIG_X86_32
-#define NR_restart_syscall	__NR_restart_syscall
-#else /* !CONFIG_X86_32 */
-#define NR_restart_syscall \
-	test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
-#endif /* CONFIG_X86_32 */
+static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
+{
+#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+	return __NR_restart_syscall;
+#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
+	return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
+		__NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+}
 
 /*
  * Note that 'init' is a special process: it doesn't get signals it doesn't
@@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs)
 		break;
 
 	case -ERESTART_RESTARTBLOCK:
-		regs->ax = NR_restart_syscall;
+		regs->ax = get_nr_restart_syscall(regs);
 		regs->ip -= 2;
 		break;
 	}
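The interesting case in the new helper is x32: an x32 task's syscall numbers carry __X32_SYSCALL_BIT, and the restart number must keep that bit (taken from the interrupted syscall's orig_ax) so the restart goes back through the x32 entry path. A small userspace demonstration of just the number-selection logic (constants copied from the x86-64 ABI headers):

    #include <stdio.h>

    #define __X32_SYSCALL_BIT    0x40000000UL	/* marks x32 syscalls */
    #define __NR_restart_syscall 219		/* x86-64 native number */

    /* Mirror of the 64-bit branch in get_nr_restart_syscall(). */
    static unsigned long nr_restart(unsigned long orig_ax)
    {
            return __NR_restart_syscall | (orig_ax & __X32_SYSCALL_BIT);
    }

    int main(void)
    {
            printf("native: %#lx\n", nr_restart(1));			/* 0xdb */
            printf("x32:    %#lx\n", nr_restart(1 | __X32_SYSCALL_BIT));	/* 0x400000db */
            return 0;
    }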
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 892ee2e5ecbc..fbabe4fcc7fb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
  */
 #define UDELAY_10MS_DEFAULT 10000
 
-static unsigned int init_udelay = INT_MAX;
+static unsigned int init_udelay = UINT_MAX;
 
 static int __init cpu_init_udelay(char *str)
 {
@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay);
 static void __init smp_quirk_init_udelay(void)
 {
 	/* if cmdline changed it from default, leave it alone */
-	if (init_udelay != INT_MAX)
+	if (init_udelay != UINT_MAX)
 		return;
 
 	/* if modern processor, use no delay */
 	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
-	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
+	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 		init_udelay = 0;
-
+		return;
+	}
 	/* else, use legacy delay */
 	init_udelay = UDELAY_10MS_DEFAULT;
 }
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 06332cb7e7d1..3f5c48ddba45 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
 	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
 }
 
+static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	return best && (best->edx & bit(X86_FEATURE_MTRR));
+}
+
 static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 9e8bf13572e6..3f8c732117ec 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
 	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
 }
 
-static u8 mtrr_disabled_type(void)
+static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * Intel SDM 11.11.2.2: all MTRRs are disabled when
 	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
 	 * memory type is applied to all of physical memory.
+	 *
+	 * However, virtual machines can be run with CPUID such that
+	 * there are no MTRRs.  In that case, the firmware will never
+	 * enable MTRRs and it is obviously undesirable to run the
+	 * guest entirely with UC memory and we use WB.
 	 */
-	return MTRR_TYPE_UNCACHABLE;
+	if (guest_cpuid_has_mtrr(vcpu))
+		return MTRR_TYPE_UNCACHABLE;
+	else
+		return MTRR_TYPE_WRBACK;
 }
 
 /*
@@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)
 
 	for (seg = 0; seg < seg_num; seg++) {
 		mtrr_seg = &fixed_seg_table[seg];
-		if (mtrr_seg->start >= addr && addr < mtrr_seg->end)
+		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
 			return seg;
 	}
 
@@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
 	*start = range->base & PAGE_MASK;
 
 	mask = range->mask & PAGE_MASK;
-	mask |= ~0ULL << boot_cpu_data.x86_phys_bits;
 
 	/* This cannot overflow because writing to the reserved bits of
 	 * variable MTRRs causes a #GP.
@@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 	if (var_mtrr_range_is_valid(cur))
 		list_del(&mtrr_state->var_ranges[index].node);
 
+	/* Extend the mask with all 1 bits to the left, since those
+	 * bits must implicitly be 0.  The bits are then cleared
+	 * when reading them.
+	 */
 	if (!is_mtrr_mask)
 		cur->base = data;
 	else
-		cur->mask = data;
+		cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
 
 	/* add it to the list if it's enabled. */
 	if (var_mtrr_range_is_valid(cur)) {
@@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
 		else
 			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
+
+		*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
 	}
 
 	return 0;
@@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 	}
 
 	if (iter.mtrr_disabled)
-		return mtrr_disabled_type();
+		return mtrr_disabled_type(vcpu);
 
 	/* not contained in any MTRRs. */
 	if (type == -1)
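The mask handling in this mtrr.c patch is a matched write/read pair: set_var_mtrr_msr() forces the bits at and above the guest's MAXPHYADDR to 1 on write (so var_mtrr_range() no longer needs the host's boot_cpu_data.x86_phys_bits), and kvm_mtrr_get_msr() strips them again so the guest reads back exactly what it wrote. A compact model of that round trip (example MAXPHYADDR value; not KVM code):

    #include <assert.h>
    #include <stdint.h>

    static const int maxphyaddr = 36;	/* example guest MAXPHYADDR */

    static uint64_t mask_on_write(uint64_t data)
    {
            return data | (~0ULL << maxphyaddr);	/* implicit-1 bits extended left */
    }

    static uint64_t mask_on_read(uint64_t stored)
    {
            return stored & ((1ULL << maxphyaddr) - 1);	/* cleared on read-back */
    }

    int main(void)
    {
            uint64_t guest_value = 0xfffe00800ULL;	/* fits in 36 bits */
            uint64_t stored = mask_on_write(guest_value);

            assert(stored >> maxphyaddr == ~0ULL >> maxphyaddr);	/* high bits all 1 */
            assert(mask_on_read(stored) == guest_value);		/* round-trips */
            return 0;
    }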
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 83a1c643f9a5..899c40f826dd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
+	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
+
 	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	if (npt_enabled)
@@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-	trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
-
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_handle_nmi(&svm->vcpu);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 87acc5221740..44976a596fa6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2803,7 +2803,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vcpu->arch.ia32_xss;
 		break;
 	case MSR_TSC_AUX:
-		if (!guest_cpuid_has_rdtscp(vcpu))
+		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
 			return 1;
 		/* Otherwise falls through */
 	default:
@@ -2909,7 +2909,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 		break;
 	case MSR_TSC_AUX:
-		if (!guest_cpuid_has_rdtscp(vcpu))
+		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
 			return 1;
 		/* Check reserved bit, higher 32 bits should be zero */
 		if ((data >> 32) != 0)
@@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 
 	switch (type) {
 	case VMX_VPID_EXTENT_ALL_CONTEXT:
-		if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
-			nested_vmx_failValid(vcpu,
-				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-			return 1;
-		}
 		__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
 		nested_vmx_succeed(vcpu);
 		break;
@@ -8047,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
+	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
+
 	/*
 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
 	 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -8673,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx->loaded_vmcs->launched = 1;
 
 	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
-	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
 
 	/*
 	 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 00462bd63129..7ffc224bbe41 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
+{
+	return (!lapic_in_kernel(vcpu) ||
+		kvm_apic_accept_pic_intr(vcpu));
+}
+
+/*
+ * if userspace requested an interrupt window, check that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_interrupt_allowed(vcpu) &&
+		!kvm_cpu_has_interrupt(vcpu) &&
+		!kvm_event_needs_reinjection(vcpu) &&
+		kvm_cpu_accept_dm_intr(vcpu);
+}
+
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 				    struct kvm_interrupt *irq)
 {
@@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 		return -EEXIST;
 
 	vcpu->arch.pending_external_vector = irq->irq;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 0;
 }
 
@@ -3551,9 +3572,11 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 
 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 {
+	int i;
 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
 	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
-	kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
+	for (i = 0; i < 3; i++)
+		kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
 	return 0;
 }
@@ -3572,6 +3595,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 {
 	int start = 0;
+	int i;
 	u32 prev_legacy, cur_legacy;
 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
 	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@@ -3581,7 +3605,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
 	       sizeof(kvm->arch.vpit->pit_state.channels));
 	kvm->arch.vpit->pit_state.flags = ps->flags;
-	kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
+	for (i = 0; i < 3; i++)
+		kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
 	return 0;
 }
@@ -5910,23 +5935,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
 }
 
-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm))
-		return false;
-
-	if (kvm_cpu_has_interrupt(vcpu))
-		return false;
-
-	return (irqchip_split(vcpu->kvm)
-		? kvm_apic_accept_pic_intr(vcpu)
-		: kvm_arch_interrupt_allowed(vcpu));
+	return vcpu->run->request_interrupt_window &&
+		likely(!pic_in_kernel(vcpu->kvm));
 }
 
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
@@ -5937,17 +5949,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
-	if (!irqchip_in_kernel(vcpu->kvm))
-		kvm_run->ready_for_interrupt_injection =
-			kvm_arch_interrupt_allowed(vcpu) &&
-			!kvm_cpu_has_interrupt(vcpu) &&
-			!kvm_event_needs_reinjection(vcpu);
-	else if (!pic_in_kernel(vcpu->kvm))
-		kvm_run->ready_for_interrupt_injection =
-			kvm_apic_accept_pic_intr(vcpu) &&
-			!kvm_cpu_has_interrupt(vcpu);
-	else
-		kvm_run->ready_for_interrupt_injection = 1;
+	kvm_run->ready_for_interrupt_injection =
+		pic_in_kernel(vcpu->kvm) ||
+		kvm_vcpu_ready_for_interrupt_injection(vcpu);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -6360,8 +6364,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
-	bool req_int_win = !lapic_in_kernel(vcpu) &&
-		vcpu->run->request_interrupt_window;
+	bool req_int_win =
+		dm_request_for_irq_injection(vcpu) &&
+		kvm_cpu_accept_dm_intr(vcpu);
+
 	bool req_immediate_exit = false;
 
 	if (vcpu->requests) {
@@ -6513,6 +6519,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (req_immediate_exit)
 		smp_send_reschedule(vcpu->cpu);
 
+	trace_kvm_entry(vcpu->vcpu_id);
+	wait_lapic_expire(vcpu);
 	__kvm_guest_enter();
 
 	if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -6525,8 +6533,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
 	}
 
-	trace_kvm_entry(vcpu->vcpu_id);
-	wait_lapic_expire(vcpu);
 	kvm_x86_ops->run(vcpu);
 
 	/*
@@ -6663,7 +6669,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 	if (kvm_cpu_has_pending_timer(vcpu))
 		kvm_inject_pending_timer_irqs(vcpu);
 
-	if (dm_request_for_irq_injection(vcpu)) {
+	if (dm_request_for_irq_injection(vcpu) &&
+		kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
 		r = 0;
 		vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 		++vcpu->stat.request_irq_exits;
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a035c2aa7801..0f1c6fc3ddd8 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = {
 	{ 0/* VMALLOC_START */, "vmalloc() Area" },
 	{ 0/*VMALLOC_END*/,     "vmalloc() End" },
 # ifdef CONFIG_HIGHMEM
-	{ 0/*PKMAP_BASE*/,      "Persisent kmap() Area" },
+	{ 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
 # endif
 	{ 0/*FIXADDR_START*/,   "Fixmap Area" },
 #endif
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 1202d5ca2fb5..b2fd67da1701 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
 	switch (type) {
 	case REG_TYPE_RM:
 		regno = X86_MODRM_RM(insn->modrm.value);
-		if (X86_REX_B(insn->rex_prefix.value) == 1)
+		if (X86_REX_B(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
 	case REG_TYPE_INDEX:
 		regno = X86_SIB_INDEX(insn->sib.value);
-		if (X86_REX_X(insn->rex_prefix.value) == 1)
+		if (X86_REX_X(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
 	case REG_TYPE_BASE:
 		regno = X86_SIB_BASE(insn->sib.value);
-		if (X86_REX_B(insn->rex_prefix.value) == 1)
+		if (X86_REX_B(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 7bcf06a7cd12..6eb3c8af96e2 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
 	if (!found)
 		pci_add_resource(resources, &info->busn);
 
-	list_for_each_entry(root_res, &info->resources, list) {
-		struct resource *res;
-		struct resource *root;
+	list_for_each_entry(root_res, &info->resources, list)
+		pci_add_resource(resources, &root_res->res);
 
-		res = &root_res->res;
-		pci_add_resource(resources, res);
-		if (res->flags & IORESOURCE_IO)
-			root = &ioport_resource;
-		else
-			root = &iomem_resource;
-		insert_resource(root, res);
-	}
 	return;
 
 default_resources:
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 06934a8a4872..14fcd01ed992 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
 	if (err)
 		return 1;
 
-	err = convert_fxsr_from_user(&fpx, sc.fpstate);
+	err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
 	if (err)
 		return 1;
 
@@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
 	{
 		struct user_i387_struct fp;
 
-		err = copy_from_user(&fp, sc.fpstate,
+		err = copy_from_user(&fp, (void *)sc.fpstate,
 				     sizeof(struct user_i387_struct));
 		if (err)
 			return 1;
@@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
 #endif
 #undef PUTREG
 	sc.oldmask = mask;
-	sc.fpstate = to_fp;
+	sc.fpstate = (unsigned long)to_fp;
 
 	err = copy_to_user(to, &sc, sizeof(struct sigcontext));
 	if (err)
@@ -468,12 +468,10 @@ long sys_sigreturn(void)
 	struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
 	sigset_t set;
 	struct sigcontext __user *sc = &frame->sc;
-	unsigned long __user *oldmask = &sc->oldmask;
-	unsigned long __user *extramask = frame->extramask;
 	int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
 
-	if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
-	    copy_from_user(&set.sig[1], extramask, sig_size))
+	if (copy_from_user(&set.sig[0], &sc->oldmask, sizeof(set.sig[0])) ||
+	    copy_from_user(&set.sig[1], frame->extramask, sig_size))
 		goto segfault;
 
 	set_current_blocked(&set);
@@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
 {
 	struct rt_sigframe __user *frame;
 	int err = 0, sig = ksig->sig;
+	unsigned long fp_to;
 
 	frame = (struct rt_sigframe __user *)
 		round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
 	err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
 	err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
 			       set->sig[0]);
-	err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
+
+	fp_to = (unsigned long)&frame->fpstate;
+
+	err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
 	if (sizeof(*set) == 16) {
 		err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
 		err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ac161db63388..cb5e266a8bf7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
 {
 	x86_init.paging.pagetable_init = xen_pagetable_init;
 
-	/* Optimization - we can use the HVM one but it has no idea which
-	 * VCPUs are descheduled - which means that it will needlessly IPI
-	 * them. Xen knows so let it do the job.
-	 */
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
+	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return;
-	}
+
 	pv_mmu_ops = xen_mmu_ops;
 
 	memset(dummy_mapping, 0xff, PAGE_SIZE);
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index feddabdab448..3705eabd7e22 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)
 
 void xen_arch_pre_suspend(void)
 {
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		xen_pmu_finish(cpu);
-
 	if (xen_pv_domain())
 		xen_pv_pre_suspend();
 }
 
 void xen_arch_post_suspend(int cancelled)
 {
-	int cpu;
-
 	if (xen_pv_domain())
 		xen_pv_post_suspend(cancelled);
 	else
 		xen_hvm_post_suspend(cancelled);
-
-	for_each_online_cpu(cpu)
-		xen_pmu_init(cpu);
 }
 
 static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)
 
 void xen_arch_resume(void)
 {
+	int cpu;
+
 	on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
+
+	for_each_online_cpu(cpu)
+		xen_pmu_init(cpu);
 }
 
 void xen_arch_suspend(void)
 {
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		xen_pmu_finish(cpu);
+
 	on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
 }