author		Thomas Gleixner <tglx@linutronix.de>	2013-04-24 14:33:46 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2013-04-24 14:33:54 -0400
commit		6402c7dc2a19c19bd8cdc7d80878b850da418942 (patch)
tree		cda2ea2df40442e2aa016119f3548cc504127ea8 /arch/x86/kernel/cpu
parent		77c675ba18836802f6b73d2d773481d06ebc0f04 (diff)
parent		60d509fa6a9c4653a86ad830e4c4b30360b23f0e (diff)
Merge branch 'linus' into timers/core
Reason: Get upstream fixes before adding conflicting code.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c             | 18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c     | 24
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c  | 13
3 files changed, 37 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index a7d26d83fb70..8f4be53ea04b 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void)
 	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return false;
 
-	/*
-	 * Xen emulates Hyper-V to support enlightened Windows.
-	 * Check to see first if we are on a Xen Hypervisor.
-	 */
-	if (xen_cpuid_base())
-		return false;
-
 	cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
 	      &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
 
@@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void)
 
 	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
 		clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
-#if IS_ENABLED(CONFIG_HYPERV)
-	/*
-	 * Setup the IDT for hypervisor callback.
-	 */
-	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
-#endif
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr;
 
 void hv_register_vmbus_handler(int irq, irq_handler_t handler)
 {
+	/*
+	 * Setup the IDT for hypervisor callback.
+	 */
+	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+
 	vmbus_irq = irq;
 	vmbus_isr = handler;
 }
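The mshyperv.c hunks above (pulled in through the 'linus' branch) drop the explicit Xen check from ms_hyperv_platform() and move the hypervisor-callback IDT gate allocation out of ms_hyperv_init_platform() into hv_register_vmbus_handler(), so the gate is only installed once the VMBus driver actually registers its interrupt handler. A minimal sketch of a caller as it would look after this change; the handler, init function and EXAMPLE_VMBUS_IRQ below are hypothetical, only hv_register_vmbus_handler() comes from the diff:

#include <linux/interrupt.h>

#define EXAMPLE_VMBUS_IRQ	5	/* made-up IRQ number, for illustration only */

/* Any irq_handler_t-compatible function will do; this one is hypothetical. */
static irqreturn_t example_vmbus_isr(int irq, void *dev_id)
{
	/* dispatch pending VMBus channel work here */
	return IRQ_HANDLED;
}

static int __init example_vmbus_init(void)
{
	/*
	 * After this merge the HYPERVISOR_CALLBACK_VECTOR gate is set up
	 * here, at registration time, rather than unconditionally during
	 * platform init.
	 */
	hv_register_vmbus_handler(EXAMPLE_VMBUS_IRQ, example_vmbus_isr);
	return 0;
}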
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 529c8931fc02..cc45deb791b0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -101,6 +101,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
+	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
@@ -149,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
 };
 
 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
-	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
-	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
+	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
+	EVENT_EXTRA_END
+};
+
+static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
+	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
+	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 	EVENT_EXTRA_END
 };
 
@@ -2093,7 +2103,10 @@ __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-		x86_pmu.extra_regs = intel_snb_extra_regs;
+		if (boot_cpu_data.x86_model == 45)
+			x86_pmu.extra_regs = intel_snbep_extra_regs;
+		else
+			x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
 		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
@@ -2119,7 +2132,10 @@ __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_ivb_event_constraints;
 		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-		x86_pmu.extra_regs = intel_snb_extra_regs;
+		if (boot_cpu_data.x86_model == 62)
+			x86_pmu.extra_regs = intel_snbep_extra_regs;
+		else
+			x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
 		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
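The perf_event_intel.c hunks split the OFFCORE_RSP extra-register tables: client Sandy Bridge/Ivy Bridge parts get the restricted valid-bit mask 0x3f807f8fff, while the server parts keep the wider 0x3fffff8fff mask, selected by CPU model (45 is Sandy Bridge-EP, 62 is Ivy Bridge-EP). A condensed sketch of the effective selection, folded into one helper for illustration rather than written as two separate switch cases as the code above does:

/* Sketch only: mirrors the two model checks above in a single helper. */
static struct extra_reg *example_pick_offcore_extra_regs(unsigned int model)
{
	switch (model) {
	case 45:	/* Sandy Bridge-EP */
	case 62:	/* Ivy Bridge-EP */
		return intel_snbep_extra_regs;
	default:
		return intel_snb_extra_regs;
	}
}

Used at the points above, this would read x86_pmu.extra_regs = example_pick_offcore_extra_regs(boot_cpu_data.x86_model); the upstream code keeps the check inline in each case instead.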
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 826054a4f2ee..26830f3af0df 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void)
 	if (top <= at)
 		return 0;
 
+	memset(&regs, 0, sizeof(regs));
+
 	ds->bts_index = ds->bts_buffer_base;
 
 	perf_sample_data_init(&data, 0, event->hw.last_period);
-	regs.ip = 0;
 
 	/*
 	 * Prepare a generic sample, i.e. fill in the invariant fields.
@@ -729,3 +730,13 @@ void intel_ds_init(void)
 		}
 	}
 }
+
+void perf_restore_debug_store(void)
+{
+	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
+
+	if (!x86_pmu.bts && !x86_pmu.pebs)
+		return;
+
+	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
+}
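The perf_event_intel_ds.c hunks zero the whole local regs structure used for BTS samples instead of clearing only regs.ip, and add perf_restore_debug_store(), which rewrites MSR_IA32_DS_AREA from the per-cpu debug_store pointer. The caller is not part of this diff; the assumption in the sketch below is that the CPU resume path invokes it after the DS area MSR contents have been lost across suspend:

/*
 * Illustrative only: a hypothetical resume-side caller.  The helper is a
 * no-op when neither BTS nor PEBS is enabled, so calling it unconditionally
 * on resume would be safe.
 */
static void example_restore_cpu_msrs_on_resume(void)
{
	/* ... other MSRs saved at suspend time restored here ... */
	perf_restore_debug_store();
}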