author    Takashi Iwai <tiwai@suse.de>    2015-12-23 02:33:34 -0500
committer Takashi Iwai <tiwai@suse.de>    2015-12-23 02:33:34 -0500
commit    59c8231089be96165735585694a801ae58ec6c95 (patch)
tree      41bd60a9aec5df20e07a81fbb526c8bc05e997fc /arch/x86
parent    de5126cc3c0b0f291d08fa591dcdf237bc595a56 (diff)
parent    0fb0b822d157325b66c503d23332f64899bfb828 (diff)
Merge branch 'for-linus' into for-next
Conflicts:
drivers/gpu/drm/i915/intel_pm.c
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c            2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h            5
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c      2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_cqm.c  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c  4
-rw-r--r--  arch/x86/kernel/irq_work.c                  2
-rw-r--r--  arch/x86/mm/dump_pagetables.c               2
-rw-r--r--  arch/x86/um/signal.c                       18
-rw-r--r--  arch/x86/xen/mmu.c                          9
-rw-r--r--  arch/x86/xen/suspend.c                     20
10 files changed, 33 insertions, 33 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4562cf070c27..2bf79d7c97df 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -5,7 +5,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  * Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 499f533dd3cc..d0e35ebb2adb 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -5,7 +5,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  * Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
@@ -387,7 +387,7 @@ struct cpu_hw_events {
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
 	__EVENT_CONSTRAINT(code, n, \
-			  INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


@@ -627,6 +627,7 @@ struct x86_perf_task_context {
 	u64 lbr_from[MAX_LBR_ENTRIES];
 	u64 lbr_to[MAX_LBR_ENTRIES];
 	u64 lbr_info[MAX_LBR_ENTRIES];
+	int tos;
 	int lbr_callstack_users;
 	int lbr_stack_state;
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f63360be2238..e2a430021e46 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8ed391..a316ca96f1b6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
 	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.target);
+		return perf_cgroup_from_task(event->hw.target, event->ctx);

 	return event->cgrp;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index bfd0b717e944..659f01e165d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 	}

 	mask = x86_pmu.lbr_nr - 1;
-	tos = intel_pmu_lbr_tos();
+	tos = task_ctx->tos;
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
 		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	wrmsrl(x86_pmu.lbr_tos, tos);
 	task_ctx->lbr_stack_state = LBR_NONE;
 }

@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	task_ctx->tos = tos;
 	task_ctx->lbr_stack_state = LBR_VALID;
 }

diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index dc5fa6a1e8d6..3512ba607361 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -1,7 +1,7 @@
 /*
  * x86 specific code for irq_work
  *
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  */

 #include <linux/kernel.h>
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a035c2aa7801..0f1c6fc3ddd8 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = {
 	{ 0/* VMALLOC_START */, "vmalloc() Area" },
 	{ 0/*VMALLOC_END*/, "vmalloc() End" },
 # ifdef CONFIG_HIGHMEM
-	{ 0/*PKMAP_BASE*/, "Persisent kmap() Area" },
+	{ 0/*PKMAP_BASE*/, "Persistent kmap() Area" },
 # endif
 	{ 0/*FIXADDR_START*/, "Fixmap Area" },
 #endif
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 06934a8a4872..e5f854ce2d72 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
 	if (err)
 		return 1;

-	err = convert_fxsr_from_user(&fpx, sc.fpstate);
+	err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
 	if (err)
 		return 1;

@@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
 	{
 		struct user_i387_struct fp;

-		err = copy_from_user(&fp, sc.fpstate,
+		err = copy_from_user(&fp, (void *)sc.fpstate,
 				     sizeof(struct user_i387_struct));
 		if (err)
 			return 1;
@@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
 #endif
 #undef PUTREG
 	sc.oldmask = mask;
-	sc.fpstate = to_fp;
+	sc.fpstate = (unsigned long)to_fp;

 	err = copy_to_user(to, &sc, sizeof(struct sigcontext));
 	if (err)
@@ -468,12 +468,10 @@ long sys_sigreturn(void)
 	struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
 	sigset_t set;
 	struct sigcontext __user *sc = &frame->sc;
-	unsigned long __user *oldmask = &sc->oldmask;
-	unsigned long __user *extramask = frame->extramask;
 	int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);

-	if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
-	    copy_from_user(&set.sig[1], extramask, sig_size))
+	if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) ||
+	    copy_from_user(&set.sig[1], frame->extramask, sig_size))
 		goto segfault;

 	set_current_blocked(&set);
@@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
 {
 	struct rt_sigframe __user *frame;
 	int err = 0, sig = ksig->sig;
+	unsigned long fp_to;

 	frame = (struct rt_sigframe __user *)
 		round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
 	err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
 	err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
 			       set->sig[0]);
-	err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
+
+	fp_to = (unsigned long)&frame->fpstate;
+
+	err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
 	if (sizeof(*set) == 16) {
 		err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
 		err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ac161db63388..cb5e266a8bf7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
 {
 	x86_init.paging.pagetable_init = xen_pagetable_init;

-	/* Optimization - we can use the HVM one but it has no idea which
-	 * VCPUs are descheduled - which means that it will needlessly IPI
-	 * them. Xen knows so let it do the job.
-	 */
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
+	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return;
-	}
+
 	pv_mmu_ops = xen_mmu_ops;

 	memset(dummy_mapping, 0xff, PAGE_SIZE);
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index feddabdab448..3705eabd7e22 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)

 void xen_arch_pre_suspend(void)
 {
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		xen_pmu_finish(cpu);
-
 	if (xen_pv_domain())
 		xen_pv_pre_suspend();
 }

 void xen_arch_post_suspend(int cancelled)
 {
-	int cpu;
-
 	if (xen_pv_domain())
 		xen_pv_post_suspend(cancelled);
 	else
 		xen_hvm_post_suspend(cancelled);
-
-	for_each_online_cpu(cpu)
-		xen_pmu_init(cpu);
 }

 static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)

 void xen_arch_resume(void)
 {
+	int cpu;
+
 	on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
+
+	for_each_online_cpu(cpu)
+		xen_pmu_init(cpu);
 }

 void xen_arch_suspend(void)
 {
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		xen_pmu_finish(cpu);
+
 	on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
 }