author    Thomas Gleixner <tglx@linutronix.de>  2015-12-19 05:49:13 -0500
committer Thomas Gleixner <tglx@linutronix.de>  2015-12-19 05:49:13 -0500
commit    0fa85119cd480c1ded7a81ed64f723fe16a15355 (patch)
tree      8648434601c5112a1d9ab091ab11507fe88e0962 /arch/x86/kernel/cpu
parent    d6ccc3ec95251d8d3276f2900b59cbc468dd74f4 (diff)
parent    1eab0e42450c6038e2bb17da438370fe639973f3 (diff)
Merge branch 'linus' into x86/cleanups
Pull in upstream changes so we can apply dependent patches.
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   | 13
-rw-r--r--  arch/x86/kernel/cpu/common.c                |  3
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 |  1
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c        |  1
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c            |  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h            |  5
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c      |  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_cqm.c  |  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c  |  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c |  6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_msr.c        |  7
11 files changed, 28 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 4a70fc6d400a..a8816b325162 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -352,6 +352,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
 	unsigned bits;
 	int cpu = smp_processor_id();
+	unsigned int socket_id, core_complex_id;
 
 	bits = c->x86_coreid_bits;
 	/* Low order bits define the core id (index of core in socket) */
@@ -361,6 +362,18 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 	/* use socket ID also for last level cache */
 	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
 	amd_get_topology(c);
+
+	/*
+	 * Fix percpu cpu_llc_id here as LLC topology is different
+	 * for Fam17h systems.
+	 */
+	if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
+		return;
+
+	socket_id       = (c->apicid >> bits) - 1;
+	core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
+
+	per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
 #endif
 }
 
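The Fam17h branch above packs a socket number and a core-complex (CCX)
number into one LLC identifier: the APIC-ID bits above x86_coreid_bits
select the socket, and bits [bits-1:3] of the core id select the CCX,
each CCX being a group of up to eight thread slots sharing one L3. A
minimal userspace sketch of the same bit arithmetic, with a made-up
4-bit core-id width and made-up APIC IDs:

#include <stdio.h>

/* Mirrors the Fam17h cpu_llc_id derivation in the hunk above. */
static unsigned int fam17h_llc_id(unsigned int apicid, unsigned int bits)
{
	unsigned int socket_id       = (apicid >> bits) - 1;
	unsigned int core_complex_id = (apicid & ((1u << bits) - 1)) >> 3;

	return (socket_id << 3) | core_complex_id;
}

int main(void)
{
	/* hypothetical part: 4 core-id bits, APIC IDs 0x10..0x1f */
	for (unsigned int apicid = 0x10; apicid < 0x20; apicid += 4)
		printf("apicid %#x -> llc_id %u\n",
		       apicid, fam17h_llc_id(apicid, 4));
	return 0;
}

Threads 0x10..0x17 land on LLC 0 and 0x18..0x1f on LLC 1, i.e. one LLC
per core complex rather than one per socket.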
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4ddd780aeac9..c2b7522cbf35 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap);
 
 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 {
-	unsigned long eflags;
+	unsigned long eflags = native_save_fl();
 
 	/* This should have been cleared long ago */
-	raw_local_save_flags(eflags);
 	BUG_ON(eflags & X86_EFLAGS_AC);
 
 	if (cpu_has(c, X86_FEATURE_SMAP)) {
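The point of this hunk: raw_local_save_flags() goes through the pvops
indirection, and a hypervisor's save_fl hook need only report the
interrupt flag, so the AC test could pass vacuously in a PV guest;
native_save_fl() reads the real hardware EFLAGS via pushf/pop. A
standalone sketch of the same EFLAGS.AC probe (x86 userspace only):

#include <stdio.h>

#define X86_EFLAGS_AC	(1UL << 18)	/* alignment check / SMAP access flag */

/* Same idea as native_save_fl(): read EFLAGS straight from hardware. */
static unsigned long save_fl(void)
{
	unsigned long flags;

	asm volatile("pushf ; pop %0" : "=rm" (flags) : : "memory");
	return flags;
}

int main(void)
{
	unsigned long eflags = save_fl();

	printf("EFLAGS = %#lx, AC %s\n", eflags,
	       (eflags & X86_EFLAGS_AC) ? "set" : "clear");
	return 0;
}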
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 98a13db5f4be..209ac1e7d1f0 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -97,6 +97,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	switch (c->x86_model) {
 	case 0x27:	/* Penwell */
 	case 0x35:	/* Cloverview */
+	case 0x4a:	/* Merrifield */
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
 		break;
 	default:
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 7fc27f1cca58..b3e94ef461fd 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -698,3 +698,4 @@ int __init microcode_init(void)
 	return error;
 
 }
+late_initcall(microcode_init);
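microcode_init() is now registered as a plain late initcall, presumably
part of the loader rework in this merge window that made the microcode
driver built-in only. For reference, the registration pattern, with a
hypothetical init function:

#include <linux/init.h>
#include <linux/printk.h>

/* Hypothetical example: runs in the late initcall phase, after device
 * and filesystem initcalls, when the rest of the kernel is already up. */
static int __init example_init(void)
{
	pr_info("example: up\n");
	return 0;	/* non-zero marks the initcall as failed */
}
late_initcall(example_init);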
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4562cf070c27..2bf79d7c97df 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -5,7 +5,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 499f533dd3cc..d0e35ebb2adb 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -5,7 +5,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
@@ -387,7 +387,7 @@ struct cpu_hw_events {
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
 	__EVENT_CONSTRAINT(code, n, \
-			  INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
 
 
@@ -627,6 +627,7 @@ struct x86_perf_task_context {
 	u64 lbr_from[MAX_LBR_ENTRIES];
 	u64 lbr_to[MAX_LBR_ENTRIES];
 	u64 lbr_info[MAX_LBR_ENTRIES];
+	int tos;
 	int lbr_callstack_users;
 	int lbr_stack_state;
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f63360be2238..e2a430021e46 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
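The Haswell fix narrows the L1D_PEND_MISS constraint from all of event
0x48 to the one event/umask pair that actually needs pinning. In these
macros the first argument encodes (umask << 8) | event and the second
is a bitmask of allowed generic counters, so 0x148 with 0x4 means event
0x48, umask 0x01, counter 2 only. A quick decode, assuming that layout:

#include <stdio.h>

int main(void)
{
	unsigned int code = 0x148, cntmask = 0x4;

	printf("event %#x, umask %#x, counters:",
	       code & 0xff, (code >> 8) & 0xff);
	for (int c = 0; c < 8; c++)
		if (cntmask & (1u << c))
			printf(" %d", c);
	printf("\n");	/* -> event 0x48, umask 0x1, counters: 2 */
	return 0;
}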
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8ed391..a316ca96f1b6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
 	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.target);
+		return perf_cgroup_from_task(event->hw.target, event->ctx);
 
 	return event->cgrp;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index bfd0b717e944..659f01e165d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 	}
 
 	mask = x86_pmu.lbr_nr - 1;
-	tos = intel_pmu_lbr_tos();
+	tos = task_ctx->tos;
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
 		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	wrmsrl(x86_pmu.lbr_tos, tos);
 	task_ctx->lbr_stack_state = LBR_NONE;
 }
 
252 253
@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	task_ctx->tos = tos;
 	task_ctx->lbr_stack_state = LBR_VALID;
 }
 
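The three LBR hunks pair up with the new task_ctx->tos field:
__intel_pmu_lbr_save() snapshots the top-of-stack index together with
the entries, and __intel_pmu_lbr_restore() replays from that saved
index and writes it back to the TOS MSR, instead of trusting whatever
the live MSR holds after a context switch. The walk itself is plain
ring-buffer index arithmetic; a self-contained sketch of the round
trip (names are illustrative, not the kernel's):

#include <stdio.h>

#define LBR_NR	8	/* illustrative stack depth, a power of two */

struct lbr_ctx {
	unsigned long from[LBR_NR], to[LBR_NR];
	int tos;	/* the saved top-of-stack index */
};

/* Save: walk back from the hardware TOS, newest entry first. */
static void lbr_save(struct lbr_ctx *ctx, const unsigned long *hw_from,
		     const unsigned long *hw_to, int hw_tos)
{
	unsigned int mask = LBR_NR - 1;

	for (int i = 0; i < hw_tos; i++) {
		unsigned int idx = (hw_tos - i) & mask;

		ctx->from[i] = hw_from[idx];
		ctx->to[i]   = hw_to[idx];
	}
	ctx->tos = hw_tos;	/* the added task_ctx->tos = tos */
}

/* Restore: put entries back in the same slots, then restore the TOS. */
static void lbr_restore(const struct lbr_ctx *ctx, unsigned long *hw_from,
			unsigned long *hw_to, int *hw_tos)
{
	unsigned int mask = LBR_NR - 1;

	for (int i = 0; i < ctx->tos; i++) {
		unsigned int idx = (ctx->tos - i) & mask;

		hw_from[idx] = ctx->from[i];
		hw_to[idx]   = ctx->to[i];
	}
	*hw_tos = ctx->tos;	/* the added wrmsrl(x86_pmu.lbr_tos, tos) */
}

int main(void)
{
	unsigned long from[LBR_NR] = { 0, 11, 22 }, to[LBR_NR] = { 0, 33, 44 };
	unsigned long from2[LBR_NR] = { 0 }, to2[LBR_NR] = { 0 };
	struct lbr_ctx ctx;
	int tos2 = 0;

	lbr_save(&ctx, from, to, 2);
	lbr_restore(&ctx, from2, to2, &tos2);
	printf("tos=%d slot1=%lu slot2=%lu\n", tos2, from2[1], from2[2]);
	return 0;
}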
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 81431c0f0614..ed446bdcbf31 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -107,12 +107,6 @@ static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
 static struct kobj_attribute format_attr_##_var =	\
 	__ATTR(_name, 0444, __rapl_##_var##_show, NULL)
 
-#define RAPL_EVENT_DESC(_name, _config)				\
-{								\
-	.attr	= __ATTR(_name, 0444, rapl_event_show, NULL),	\
-	.config	= _config,					\
-}
-
 #define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
 
 #define RAPL_EVENT_ATTR_STR(_name, v, str) \
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
index f32ac13934f2..ec863b9a9f78 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -163,10 +163,9 @@ again:
 		goto again;
 
 	delta = now - prev;
-	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
-		delta <<= 32;
-		delta >>= 32; /* sign extend */
-	}
+	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
+		delta = sign_extend64(delta, 31);
+
 	local64_add(now - prev, &event->count);
 }
 
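sign_extend64(delta, 31) treats bit 31 as the sign bit of the 32-bit
MSR value, which is exactly what the removed shift pair did on an s64.
A quick equivalence check, with sign_extend64 reimplemented here the
way include/linux/bitops.h defines it:

#include <stdio.h>
#include <stdint.h>

/* As in include/linux/bitops.h: sign-extend `value` from bit `index`. */
static inline int64_t sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	uint64_t delta = 0xffffffff;	/* a wrapped 32-bit SMI count */
	int64_t a = (int64_t)(delta << 32) >> 32;	/* old open-coded form */
	int64_t b = sign_extend64(delta, 31);		/* new helper */

	printf("%lld %lld\n", (long long)a, (long long)b);	/* -1 -1 */
	return 0;
}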