Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/aperture_64.c           |  1
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c      |  4
-rw-r--r--  arch/x86/kernel/cpu/intel.c             |  3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c        | 15
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c  |  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c        | 39
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  | 37
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c     |  8
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c  |  2
-rw-r--r--  arch/x86/kernel/dumpstack_64.c          | 10
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c         | 12
-rw-r--r--  arch/x86/kernel/k8.c                    | 14
-rw-r--r--  arch/x86/kernel/pci-gart_64.c           |  2
-rw-r--r--  arch/x86/kernel/process.c               |  2
14 files changed, 93 insertions(+), 60 deletions(-)
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index f147a95fd84a..3704997e8b25 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -31,7 +31,6 @@
 #include <asm/x86_init.h>
 
 int gart_iommu_aperture;
-EXPORT_SYMBOL_GPL(gart_iommu_aperture);
 int gart_iommu_aperture_disabled __initdata;
 int gart_iommu_aperture_allowed __initdata;
 
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 3740c8a4eae7..49dbeaef2a27 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -120,11 +120,9 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
 static const struct cpumask *uv_target_cpus(void)
 {
-	return cpumask_of(0);
+	return cpu_online_mask;
 }
 
 static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 879666f4d871..7e1cca13af35 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -70,7 +70,8 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		sched_clock_stable = 1;
+		if (!check_tsc_unstable())
+			sched_clock_stable = 1;
 	}
 
 	/*
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 28cba46bf32c..3ab9c886b613 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -46,6 +46,13 @@
 
 #include "mce-internal.h"
 
+static DEFINE_MUTEX(mce_read_mutex);
+
+#define rcu_dereference_check_mce(p) \
+	rcu_dereference_check((p), \
+			      rcu_read_lock_sched_held() || \
+			      lockdep_is_held(&mce_read_mutex))
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 
@@ -158,7 +165,7 @@ void mce_log(struct mce *mce)
 	mce->finished = 0;
 	wmb();
 	for (;;) {
-		entry = rcu_dereference(mcelog.next);
+		entry = rcu_dereference_check_mce(mcelog.next);
 		for (;;) {
 			/*
 			 * When the buffer fills up discard new entries.
@@ -1485,8 +1492,6 @@ static void collect_tscs(void *data)
 	rdtscll(cpu_tsc[smp_processor_id()]);
 }
 
-static DEFINE_MUTEX(mce_read_mutex);
-
 static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 			loff_t *off)
 {
@@ -1500,7 +1505,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 		return -ENOMEM;
 
 	mutex_lock(&mce_read_mutex);
-	next = rcu_dereference(mcelog.next);
+	next = rcu_dereference_check_mce(mcelog.next);
 
 	/* Only supports full reads right now */
 	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
@@ -1565,7 +1570,7 @@ timeout:
 static unsigned int mce_poll(struct file *file, poll_table *wait)
 {
 	poll_wait(file, &mce_wait, wait);
-	if (rcu_dereference(mcelog.next))
+	if (rcu_dereference_check_mce(mcelog.next))
 		return POLLIN | POLLRDNORM;
 	return 0;
 }
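
Note: the rcu_dereference_check_mce() helper added above encodes the locking rules for mcelog.next: a reader must either be inside a sched-RCU read-side critical section or hold mce_read_mutex, and lockdep flags any access that satisfies neither. A minimal usage sketch, not part of the patch (entry/next are illustrative locals):

	/* lockless reader, e.g. the logging/poll path: */
	rcu_read_lock_sched();
	entry = rcu_dereference_check_mce(mcelog.next);
	rcu_read_unlock_sched();

	/* sleepable reader, as in mce_read() above: */
	mutex_lock(&mce_read_mutex);
	next = rcu_dereference_check_mce(mcelog.next);
	mutex_unlock(&mce_read_mutex);
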
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 7c785634af2b..d15df6e49bf0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -95,7 +95,7 @@ static void cmci_discover(int banks, int boot)
 
 		/* Already owned by someone else? */
 		if (val & CMCI_EN) {
-			if (test_and_clear_bit(i, owned) || boot)
+			if (test_and_clear_bit(i, owned) && !boot)
 				print_update("SHD", &hdr, i);
 			__clear_bit(i, __get_cpu_var(mce_poll_banks));
 			continue;
@@ -107,7 +107,7 @@ static void cmci_discover(int banks, int boot)
 
 		/* Did the enable bit stick? -- the bank supports CMCI */
 		if (val & CMCI_EN) {
-			if (!test_and_set_bit(i, owned) || boot)
+			if (!test_and_set_bit(i, owned) && !boot)
 				print_update("CMCI", &hdr, i);
 			__clear_bit(i, __get_cpu_var(mce_poll_banks));
 		} else {
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b1fbdeecf6c9..42aafd11e170 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -73,10 +73,10 @@ struct debug_store {
 struct event_constraint {
 	union {
 		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-		u64		idxmsk64[1];
+		u64		idxmsk64;
 	};
-	int	code;
-	int	cmask;
+	u64	code;
+	u64	cmask;
 	int	weight;
 };
 
@@ -103,7 +103,7 @@ struct cpu_hw_events {
 };
 
 #define __EVENT_CONSTRAINT(c, n, m, w) {\
-	{ .idxmsk64[0] = (n) },		\
+	{ .idxmsk64 = (n) },		\
 	.code = (c),			\
 	.cmask = (m),			\
 	.weight = (w),			\
@@ -116,7 +116,7 @@ struct cpu_hw_events {
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
 
 #define FIXED_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
 
 #define EVENT_CONSTRAINT_END \
 	EVENT_CONSTRAINT(0, 0, 0)
@@ -503,6 +503,9 @@ static int __hw_perf_event_init(struct perf_event *event)
 	 */
 	if (attr->type == PERF_TYPE_RAW) {
 		hwc->config |= x86_pmu.raw_event(attr->config);
+		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
+		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		return 0;
 	}
 
@@ -553,9 +556,9 @@ static void x86_pmu_disable_all(void)
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		rdmsrl(x86_pmu.eventsel + idx, val);
-		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
 			continue;
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -590,7 +593,7 @@ static void x86_pmu_enable_all(void)
 			continue;
 
 		val = event->hw.config;
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -612,8 +615,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
 
 	for (i = 0; i < n; i++) {
-		constraints[i] =
-		  x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+		constraints[i] = c;
 	}
 
 	/*
@@ -853,7 +856,7 @@ void hw_perf_enable(void)
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	(void)checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
+			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -1094,8 +1097,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	data.addr = 0;
-	data.raw = NULL;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1347,6 +1349,7 @@ static void __init pmu_check_apic(void)
 
 void __init init_hw_perf_events(void)
 {
+	struct event_constraint *c;
 	int err;
 
 	pr_info("Performance Events: ");
@@ -1395,6 +1398,16 @@ void __init init_hw_perf_events(void)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
 				   0, x86_pmu.num_events);
 
+	if (x86_pmu.event_constraints) {
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if (c->cmask != INTEL_ARCH_FIXED_MASK)
+				continue;
+
+			c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
+			c->weight += x86_pmu.num_events;
+		}
+	}
+
 	pr_info("... version:                %d\n",     x86_pmu.version);
 	pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
 	pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
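
Note: taken together, the constraint changes above make a fixed-counter constraint start life as a single bit at index 32+n (X86_PMC_IDX_FIXED is 32), which init_hw_perf_events() then widens with the generic-counter mask. A stand-alone sketch of that arithmetic, with an assumed count of four generic counters:

#include <stdio.h>
#include <stdint.h>

#define X86_PMC_IDX_FIXED 32	/* index of the first fixed-purpose counter */

int main(void)
{
	int num_events = 4;	/* assumed number of generic counters */
	/* FIXED_EVENT_CONSTRAINT(0x00c0, 0): fixed counter 0 only, weight 1 */
	uint64_t idxmsk64 = 1ULL << (X86_PMC_IDX_FIXED + 0);
	int weight = 1;

	/* widening step from init_hw_perf_events() above */
	idxmsk64 |= (1ULL << num_events) - 1;
	weight += num_events;

	/* prints: idxmsk64 = 0x10000000f, weight = 5 */
	printf("idxmsk64 = %#llx, weight = %d\n",
	       (unsigned long long)idxmsk64, weight);
	return 0;
}
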
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 977e7544738c..44b60c852107 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,7 +1,7 @@
 #ifdef CONFIG_CPU_SUP_INTEL
 
 /*
- * Intel PerfMon v3. Used on Core2 and later.
+ * Intel PerfMon, used on Core and later.
  */
 static const u64 intel_perfmon_event_map[] =
 {
@@ -27,8 +27,14 @@ static struct event_constraint intel_core_event_constraints[] =
 
 static struct event_constraint intel_core2_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/*
+	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
+	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
+	 * ratio between these counters.
+	 */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
 	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
 	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
@@ -37,14 +43,16 @@ static struct event_constraint intel_core2_event_constraints[] =
 	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
 	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
 	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
+	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
 	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
 	EVENT_CONSTRAINT_END
 };
 
 static struct event_constraint intel_nehalem_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
 	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
 	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
@@ -58,8 +66,9 @@ static struct event_constraint intel_nehalem_event_constraints[] =
 
 static struct event_constraint intel_westmere_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
 	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
 	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
@@ -68,8 +77,9 @@ static struct event_constraint intel_westmere_event_constraints[] =
 
 static struct event_constraint intel_gen_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	EVENT_CONSTRAINT_END
 };
 
@@ -580,10 +590,9 @@ static void intel_pmu_drain_bts_buffer(void)
 
 	ds->bts_index = ds->bts_buffer_base;
 
+	perf_sample_data_init(&data, 0);
 
 	data.period = event->hw.last_period;
-	data.addr = 0;
-	data.raw = NULL;
 	regs.ip = 0;
 
 	/*
@@ -732,8 +741,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	int bit, loops;
 	u64 ack, status;
 
-	data.addr = 0;
-	data.raw = NULL;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -935,7 +943,7 @@ static __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
 		pr_cont("Nehalem/Corei7 events, ");
 		break;
-	case 28:
+	case 28: /* Atom */
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -951,6 +959,7 @@ static __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_westmere_event_constraints;
 		pr_cont("Westmere events, ");
 		break;
+
 	default:
 		/*
 		 * default constraints for v2 and up
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 1ca5ba078afd..a4e67b99d91c 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -62,7 +62,7 @@ static void p6_pmu_disable_all(void)
 
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
-	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
@@ -72,7 +72,7 @@ static void p6_pmu_enable_all(void)
 
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
-	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
@@ -83,7 +83,7 @@ p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	u64 val = P6_NOP_EVENT;
 
 	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
@@ -95,7 +95,7 @@ static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 
 	val = hwc->config;
 	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 74f4e85a5727..fb329e9f8494 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -680,7 +680,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
 	cpu_nmi_set_wd_enabled();
 
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsr(evntsel_msr, evntsel, 0);
 	intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index dce99abb4496..d5e2a2ebb627 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -120,9 +120,15 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
 {
 #ifdef CONFIG_FRAME_POINTER
 	struct stack_frame *frame = (struct stack_frame *)bp;
+	unsigned long next;
 
-	if (!in_irq_stack(stack, irq_stack, irq_stack_end))
-		return (unsigned long)frame->next_frame;
+	if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
+		if (!probe_kernel_address(&frame->next_frame, next))
+			return next;
+		else
+			WARN_ONCE(1, "Perf: bad frame pointer = %p in "
+				  "callchain\n", &frame->next_frame);
+	}
 #endif
 	return bp;
 }
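
Note: probe_kernel_address(addr, retval) copies the value out with page faults handled instead of dereferencing the pointer directly, returning 0 on success and -EFAULT on a bad address, so a corrupt frame pointer coming out of a perf callchain now triggers a one-time warning instead of an oops. To a first approximation the call above expands to:

	long err = probe_kernel_read(&next, (void *)&frame->next_frame,
				     sizeof(next));
	/* err == 0: 'next' holds the frame link; err == -EFAULT: unreadable */
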
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index dca2802c666f..d6cc065f519f 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -344,13 +344,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
 	}
 
 	/*
-	 * For kernel-addresses, either the address or symbol name can be
-	 * specified.
-	 */
-	if (info->name)
-		info->address = (unsigned long)
-			kallsyms_lookup_name(info->name);
-	/*
 	 * Check that the low-order bits of the address are appropriate
 	 * for the alignment implied by len.
 	 */
@@ -535,8 +528,3 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
 {
 	/* TODO */
 }
-
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
-{
-	/* TODO */
-}
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index cbc4332a77b2..9b895464dd03 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -121,3 +121,17 @@ void k8_flush_garts(void)
 }
 EXPORT_SYMBOL_GPL(k8_flush_garts);
 
+static __init int init_k8_nbs(void)
+{
+	int err = 0;
+
+	err = cache_k8_northbridges();
+
+	if (err < 0)
+		printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+
+	return err;
+}
+
+/* This has to go after the PCI subsystem */
+fs_initcall(init_k8_nbs);
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 34de53b46f87..f3af115a573a 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -735,7 +735,7 @@ int __init gart_iommu_init(void)
 	unsigned long scratch;
 	long i;
 
-	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
+	if (num_k8_northbridges == 0)
 		return 0;
 
 #ifndef CONFIG_AGP_AMD64
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 02d678065d7d..ad9540676fcc 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -607,7 +607,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	if (pm_idle == poll_idle && smp_num_siblings > 1) {
-		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
+		printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
 			" performance may degrade.\n");
 	}
 #endif